Column schema (name, dtype, value range / string-length range):

  complexity       int64          1 – 139
  fun_name         stringlengths  1 – 80
  code             stringlengths  101 – 62.2k
  commit_id        stringlengths  40 – 40
  ast_errors       stringlengths  0 – 3.11k
  ast_levels       int64          6 – 36
  file_name        stringlengths  5 – 79
  n_ast_nodes      int64          17 – 19.2k
  commit_message   stringlengths  3 – 15.3k
  d_id             int64          12 – 121k
  n_ast_errors     int64          0 – 9
  n_whitespaces    int64          4 – 10.8k
  token_counts     int64          5 – 3.06k
  vocab_size       int64          4 – 1.11k
  id               int64          20 – 338k
  n_words          int64          4 – 4.82k
  repo             stringlengths  3 – 22
  n_identifiers    int64          2 – 176
  path             stringlengths  7 – 134
  language         stringclasses  1 value
  nloc             int64          1 – 413
  documentation    dict
  url              stringlengths  31 – 59

Each data row below lists these fields in the same order, one value per line; an empty ast_errors value produces no line.
11
filter_queryset
def filter_queryset(self, request, queryset, view):
    fields = set(view.get_available_fields(queryset.model, db_fields_only=True))

    # Locale is a database field, but we provide a separate filter for it
    if "locale" in fields:
        fields.remove("locale")

    for field_name, value in request.GET.items():
        if field_name in fields:
            try:
                field = queryset.model._meta.get_field(field_name)
            except LookupError:
                field = None

            # Convert value into python
            try:
                if isinstance(
                    field, (models.BooleanField, models.NullBooleanField)
                ):
                    value = parse_boolean(value)
                elif isinstance(field, (models.IntegerField, models.AutoField)):
                    value = int(value)
                elif isinstance(field, models.ForeignKey):
                    value = field.target_field.get_prep_value(value)
            except ValueError as e:
                raise BadRequestError(
                    "field filter error. '%s' is not a valid value for %s (%s)"
                    % (value, field_name, str(e))
                )

            if isinstance(field, TaggableManager):
                for tag in value.split(","):
                    queryset = queryset.filter(**{field_name + "__name": tag})

                    # Stick a message on the queryset to indicate that tag filtering has been performed
                    # This will let the do_search method know that it must raise an error as searching
                    # and tag filtering at the same time is not supported
                    queryset._filtered_by_tag = True
            else:
                queryset = queryset.filter(**{field_name: value})

    return queryset
d10f15e55806c6944827d801cd9c2d53f5da4186
21
filters.py
359
Reformat with black
15,909
0
758
220
108
72,652
162
wagtail
39
wagtail/api/v2/filters.py
Python
31
{ "docstring": "\n This performs field level filtering on the result set\n Eg: ?title=James Joyce\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
https://github.com/wagtail/wagtail.git
2
generate_operation_regex
def generate_operation_regex():
    operations = []
    for item in OP_TO_SNUBA_FUNCTION.values():
        operations += list(item.keys())
    return rf"({'|'.join(map(str, operations))})"


OP_REGEX = generate_operation_regex()

AVAILABLE_OPERATIONS = {
    type_: sorted(mapping.keys()) for type_, mapping in OP_TO_SNUBA_FUNCTION.items()
}
OPERATIONS_TO_ENTITY = {
    op: entity for entity, operations in AVAILABLE_OPERATIONS.items() for op in operations
}

# ToDo add guages/summaries
METRIC_TYPE_TO_ENTITY: Mapping[MetricType, EntityKey] = {
    "counter": EntityKey.MetricsCounters,
    "set": EntityKey.MetricsSets,
    "distribution": EntityKey.MetricsDistributions,
}

FIELD_ALIAS_MAPPINGS = {"project": "project_id"}
3535fa83a743f6967a92e0afdbba2b633236bf67
12
utils.py
216
fix(metrics): Restrict public name regex [TET-111] (#35305) * fix(metrics): Restrict public name regex Restricts the public name regex for metrics by separating out the public name regex and the mri name regex from the shared field regex and removing characters that are not expected to be in public facing names * Update mri regex and public regex + add tests * Add better tests * PR comments
18,716
0
89
31
46
91,126
64
sentry
27
src/sentry/snuba/metrics/utils.py
Python
5
{ "docstring": "\n Generates a regex of all supported operations defined in OP_TO_SNUBA_FUNCTION\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
https://github.com/getsentry/sentry.git
1
getbbox
def getbbox(self, text, *args, **kwargs):
    width, height = self.font.getsize(text)
    return 0, 0, width, height
c854bf8d1c05022bec4309fbf6b547e494db9373
9
ImageFont.py
52
add getbbox and getlength to basic ImageFont and update related tests
69,963
0
35
34
11
243,053
14
Pillow
9
src/PIL/ImageFont.py
Python
3
{ "docstring": "\n Returns bounding box (in pixels) of given text.\n\n .. versionadded:: 9.2.0\n\n :param text: Text to render.\n :param mode: Used by some graphics drivers to indicate what mode the\n driver prefers; if empty, the renderer may return either\n mode. Note that the mode is always a string, to simplify\n C-level implementations.\n\n :return: ``(left, top, right, bottom)`` bounding box\n ", "language": "en", "n_whitespaces": 160, "n_words": 57, "vocab_size": 49 }
https://github.com/python-pillow/Pillow.git
5
_validate
def _validate(self) -> None:
    if self._job == "missing-frames" and self._output == "move":
        logger.warning("Missing_frames was selected with move output, but there will "
                       "be nothing to move. Defaulting to output: console")
        self._output = "console"
    if self._type == "faces" and self._job != "multi-faces":
        logger.error("The selected folder is not valid. Faces folder (-fc) is only "
                     "supported for 'multi-faces'")
        sys.exit(1)
e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1
11
jobs.py
116
Alignments Tool - Typing, Documentation + Re-org
21,124
0
172
59
46
101,720
57
faceswap
10
tools/alignments/jobs.py
Python
10
{ "docstring": " Check that the selected type is valid for selected task and job ", "language": "en", "n_whitespaces": 13, "n_words": 12, "vocab_size": 11 }
https://github.com/deepfakes/faceswap.git
2
pagerank_numpy
def pagerank_numpy(G, alpha=0.85, personalization=None, weight="weight", dangling=None):
    msg = "networkx.pagerank_numpy is deprecated and will be removed in NetworkX 3.0, use networkx.pagerank instead."
    warn(msg, DeprecationWarning, stacklevel=2)
    import numpy as np

    if len(G) == 0:
        return {}
    M = google_matrix(
        G, alpha, personalization=personalization, weight=weight, dangling=dangling
    )
    # use numpy LAPACK solver
    eigenvalues, eigenvectors = np.linalg.eig(M.T)
    ind = np.argmax(eigenvalues)
    # eigenvector of largest eigenvalue is at ind, normalized
    largest = np.array(eigenvectors[:, ind]).flatten().real
    norm = largest.sum()
    return dict(zip(G, map(float, largest / norm)))
2a05ccdb07cff88e56661dee8a9271859354027f
13
pagerank_alg.py
212
Remove redundant py2 numeric conversions (#5661) * Remove redundant float conversion * Remove redundant int conversion * Use integer division Co-authored-by: Miroslav Šedivý <6774676+eumiro@users.noreply.github.com>
42,053
0
133
137
65
176,718
77
networkx
32
networkx/algorithms/link_analysis/pagerank_alg.py
Python
14
{ "docstring": "Returns the PageRank of the nodes in the graph.\n\n PageRank computes a ranking of the nodes in the graph G based on\n the structure of the incoming links. It was originally designed as\n an algorithm to rank web pages.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph. Undirected graphs will be converted to a directed\n graph with two directed edges for each undirected edge.\n\n alpha : float, optional\n Damping parameter for PageRank, default=0.85.\n\n personalization: dict, optional\n The \"personalization vector\" consisting of a dictionary with a\n key some subset of graph nodes and personalization value each of those.\n At least one personalization value must be non-zero.\n If not specfiied, a nodes personalization value will be zero.\n By default, a uniform distribution is used.\n\n weight : key, optional\n Edge data key to use as weight. If None weights are set to 1.\n\n dangling: dict, optional\n The outedges to be assigned to any \"dangling\" nodes, i.e., nodes without\n any outedges. The dict key is the node the outedge points to and the dict\n value is the weight of that outedge. By default, dangling nodes are given\n outedges according to the personalization vector (uniform if not\n specified) This must be selected to result in an irreducible transition\n matrix (see notes under google_matrix). It may be common to have the\n dangling dict to be the same as the personalization dict.\n\n Returns\n -------\n pagerank : dictionary\n Dictionary of nodes with PageRank as value.\n\n Examples\n --------\n >>> G = nx.DiGraph(nx.path_graph(4))\n >>> pr = nx.pagerank_numpy(G, alpha=0.9)\n\n Notes\n -----\n The eigenvector calculation uses NumPy's interface to the LAPACK\n eigenvalue solvers. This will be the fastest and most accurate\n for small graphs.\n\n This implementation works with Multi(Di)Graphs. For multigraphs the\n weight between two nodes is set to be the sum of all edge weights\n between those nodes.\n\n See Also\n --------\n pagerank, pagerank_scipy, google_matrix\n\n References\n ----------\n .. [1] A. Langville and C. Meyer,\n \"A survey of eigenvector methods of web information retrieval.\"\n http://citeseer.ist.psu.edu/713792.html\n .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,\n The PageRank citation ranking: Bringing order to the Web. 1999\n http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf\n ", "language": "en", "n_whitespaces": 556, "n_words": 344, "vocab_size": 202 }
https://github.com/networkx/networkx.git
1
decode_predictions
def decode_predictions(preds, top=5):
    return imagenet_utils.decode_predictions(preds, top=top)


preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TORCH,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__

DOC =

setattr(DenseNet121, "__doc__", DenseNet121.__doc__ + DOC)
setattr(DenseNet169, "__doc__", DenseNet169.__doc__ + DOC)
setattr(DenseNet201, "__doc__", DenseNet201.__doc__ + DOC)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
8
densenet.py
149
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,037
0
38
20
25
269,365
33
keras
18
keras/applications/densenet.py
Python
2
{ "docstring": "\n\n Reference:\n - [Densely Connected Convolutional Networks](\n https://arxiv.org/abs/1608.06993) (CVPR 2017)\n\n Optionally loads weights pre-trained on ImageNet.\n Note that the data format convention used by the model is\n the one specified in your Keras config at `~/.keras/keras.json`.\n\n Note: each Keras Application expects a specific kind of input preprocessing.\n For DenseNet, call `tf.keras.applications.densenet.preprocess_input` on your\n inputs before passing them to the model.\n\n Args:\n include_top: whether to include the fully-connected\n layer at the top of the network.\n weights: one of `None` (random initialization),\n 'imagenet' (pre-training on ImageNet),\n or the path to the weights file to be loaded.\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)\n to use as image input for the model.\n input_shape: optional shape tuple, only to be specified\n if `include_top` is False (otherwise the input shape\n has to be `(224, 224, 3)` (with `'channels_last'` data format)\n or `(3, 224, 224)` (with `'channels_first'` data format).\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 32.\n E.g. `(200, 200, 3)` would be one valid value.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model will be\n the 4D tensor output of the\n last convolutional block.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional block, and thus\n the output of the model will be a 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n classifier_activation: A `str` or callable. The activation function to use\n on the \"top\" layer. Ignored unless `include_top=True`. Set\n `classifier_activation=None` to return the logits of the \"top\" layer.\n When loading pretrained weights, `classifier_activation` can only\n be `None` or `\"softmax\"`.\n\n Returns:\n A Keras model instance.\n", "language": "en", "n_whitespaces": 500, "n_words": 307, "vocab_size": 181 }
https://github.com/keras-team/keras.git
1
test_tika_parse_unreachable
def test_tika_parse_unreachable(self):
    html = '<html><head><meta http-equiv="content-type" content="text/html; charset=UTF-8"></head><body><p>Some Text</p></body></html>'

    # Check if exception is raised when Tika cannot be reached.
    self.parser.tika_server = ""
    self.assertRaises(ParseError, self.parser.tika_parse, html)
4aa318598fd0dc6c5d4e08dd2a13e7bf614511ec
9
test_parsers.py
54
add test comments
117,110
0
61
30
25
320,280
26
paperless-ngx
8
src/paperless_mail/tests/test_parsers.py
Python
4
{ "docstring": "\n GIVEN:\n - Fresh start\n WHEN:\n - tika parsing is called but tika is not available\n THEN:\n - a ParseError Exception is thrown\n ", "language": "en", "n_whitespaces": 84, "n_words": 22, "vocab_size": 17 }
https://github.com/paperless-ngx/paperless-ngx.git
7
test_train_model_from_files
def test_train_model_from_files(self):
    df = pd.DataFrame({
        'x1': [x for x in range(100, 210)] + [x for x in range(100, 210)],
        'x2': [x * 2 for x in range(100, 210)] + [x * 3 for x in range(100, 210)],
        'y': [x * 3 for x in range(100, 210)] + [x * 2 for x in range(100, 210)]
    })
    file_predictor_name = "predictor_from_file"
    self.upload_ds(df, self.file_datasource_name)
    self.verify_file_ds(self.file_datasource_name)

    _query = f
    self.query(_query)
    self.check_predictor_readiness(file_predictor_name)
7c02e15aa403a4ca1fa34489dd2df9136d6c961c
15
test_mysql_api.py
228
Projects structure (#3532) Projects structure
25,923
0
155
142
29
117,210
67
mindsdb
14
tests/integration_tests/flows/test_mysql_api.py
Python
16
{ "docstring": "\n CREATE MODEL {file_predictor_name}\n from files (select * from {self.file_datasource_name})\n predict y;\n ", "language": "en", "n_whitespaces": 52, "n_words": 11, "vocab_size": 10 }
https://github.com/mindsdb/mindsdb.git
10
_search_cb
def _search_cb(self, found, *, tab, old_match, options, text, prev):
    # :search/:search-next without reverse -> down
    # :search/:search-next with reverse -> up
    # :search-prev without reverse -> up
    # :search-prev with reverse -> down
    going_up = options['reverse'] ^ prev

    if found:
        if not config.val.search.wrap_messages:
            return

        # Check if the match count change is opposite to the search direction
        if old_match.current > 0:
            if not going_up:
                if old_match.current > tab.search.match.current:
                    message.info("Search hit BOTTOM, continuing at TOP",
                                 replace="search-hit-msg")
                elif old_match.current == tab.search.match.current:
                    message.info("Search hit BOTTOM", replace="search-hit-msg")
            elif going_up:
                if old_match.current < tab.search.match.current:
                    message.info("Search hit TOP, continuing at BOTTOM",
                                 replace="search-hit-msg")
                elif old_match.current == tab.search.match.current:
                    message.info("Search hit TOP", replace="search-hit-msg")
    else:
        message.warning(f"Text '{text}' not found on page!",
                        replace='find-in-page')
265b018c172f8c1f6d9e7f8850256363f0629f82
19
commands.py
276
Add a SearchMatch helper class
117,399
0
546
161
62
320,858
112
qutebrowser
19
qutebrowser/browser/commands.py
Python
21
{ "docstring": "Callback called from search/search_next/search_prev.\n\n Args:\n found: Whether the text was found.\n tab: The AbstractTab in which the search was made.\n old_match: The previously active search match before the search was\n performed.\n options: The options (dict) the search was made with.\n text: The text searched for.\n prev: Whether we're searching backwards (i.e. :search-prev)\n ", "language": "en", "n_whitespaces": 154, "n_words": 52, "vocab_size": 38 }
https://github.com/qutebrowser/qutebrowser.git
3
from_decimal
def from_decimal(cls, dec):
    from decimal import Decimal
    if isinstance(dec, numbers.Integral):
        dec = Decimal(int(dec))
    elif not isinstance(dec, Decimal):
        raise TypeError(
            "%s.from_decimal() only takes Decimals, not %r (%s)" %
            (cls.__name__, dec, type(dec).__name__))
    return cls(*dec.as_integer_ratio())
8198943edd73a363c266633e1aa5b2a9e9c9f526
15
fractions.py
114
add python 3.10.4 for windows
54,739
0
119
70
30
217,383
32
XX-Net
13
python3.10.4/Lib/fractions.py
Python
9
{ "docstring": "Converts a finite Decimal instance to a rational number, exactly.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
https://github.com/XX-net/XX-Net.git
2
forward
def forward(self, feats, img_metas):
    y = self.last_feat_conv(feats[-1])
    for i in range(self.num_inputs - 2, -1, -1):
        x = feats[i]
        cur_feat = self.lateral_convs[i](x)
        y = cur_feat + \
            F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest')
        y = self.output_convs[i](y)

    mask_feature = self.mask_feature(y)
    memory = feats[-1]
    return mask_feature, memory


@PLUGIN_LAYERS.register_module()
0932ab787d58eead15b5f823fbcca5351ceb90f7
@PLUGIN_LAYERS.register_module()
15
pixel_decoder.py
186
add msdeformattn pixel decoder (#7466) fix typo rm img_metas rename in pixel_decoder update comments rename fix typo generae points with MlvlPointGenerator
70,240
1
142
113
32
244,103
42
mmdetection
22
mmdet/models/plugins/pixel_decoder.py
Python
11
{ "docstring": "\n Args:\n feats (list[Tensor]): Feature maps of each level. Each has\n shape of (batch_size, c, h, w).\n img_metas (list[dict]): List of image information. Pass in\n for creating more accurate padding mask. Not used here.\n\n Returns:\n tuple: a tuple containing the following:\n - mask_feature (Tensor): Shape (batch_size, c, h, w).\n - memory (Tensor): Output of last stage of backbone.\\\n Shape (batch_size, c, h, w).\n ", "language": "en", "n_whitespaces": 200, "n_words": 62, "vocab_size": 47 }
https://github.com/open-mmlab/mmdetection.git
1
primary_calendar_error
async def primary_calendar_error() -> ClientError | None:
    return None


@pytest.fixture(autouse=True)
3f8c896cb20526ef59e285b1f0eeb9b4e734efee
@pytest.fixture(autouse=True)
7
test_config_flow.py
37
Set user friendly name for Google Calendar config entry (#72243) * Set user friendly name for Google Calendar config entry * Add a new auth implementation for use during the config flow
99,984
1
15
11
10
301,136
10
core
5
tests/components/google/test_config_flow.py
Python
3
{ "docstring": "Fixture for tests to inject an error during calendar lookup.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/home-assistant/core.git
6
test_get_release_wheel_url
def test_get_release_wheel_url():
    # This should be a commit for which wheels have already been built for
    # all platforms and python versions at
    # `s3://ray-wheels/releases/2.2.0/<commit>/`.
    test_commits = {"2.2.0": "b6af0887ee5f2e460202133791ad941a41f15beb"}
    for sys_platform in ["darwin", "linux", "win32"]:
        for py_version in ray_constants.RUNTIME_ENV_CONDA_PY_VERSIONS:
            for version, commit in test_commits.items():
                if sys_platform == "win32" and py_version == (3, 6):
                    # Windows wheels are not built for py3.6 anymore
                    continue
                url = get_release_wheel_url(commit, sys_platform, version, py_version)
                assert requests.head(url).status_code == 200, url
98fef7732852cdb3e9377cd87c1ee1085b894928
15
test_runtime_env.py
136
[runtime env] Support python 3.10 for runtime_env conda (#30970) Signed-off-by: Archit Kulkarni <architkulkarni@users.noreply.github.com> conda environments are isolated, so when runtime_env sets up a conda environment it must download the Ray wheel into the conda environment. It must download the wheel that matches the current Python and Ray version running, otherwise there will be incompatibility issues between the workers that use this runtime_env and the other workers and Ray processes. This PR updates the wheel name format logic to support Python 3.10.
31,197
0
193
80
53
137,593
74
ray
14
python/ray/tests/test_runtime_env.py
Python
9
{ "docstring": "Test the code that generates the filenames of the `release` branch wheels.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 10 }
https://github.com/ray-project/ray.git
4
_validate_extra
def _validate_extra(extra, conn_id) -> None:
    if extra is None:
        return None
    try:
        extra_parsed = json.loads(extra)
        if not isinstance(extra_parsed, dict):
            warnings.warn(
                "Encountered JSON value in `extra` which does not parse as a dictionary in "
                f"connection {conn_id!r}. From Airflow 3.0, the `extra` field must contain a JSON "
                "representation of a Python dict.",
                DeprecationWarning,
                stacklevel=3,
            )
    except json.JSONDecodeError:
        warnings.warn(
            f"Encountered non-JSON in `extra` field for connection {conn_id!r}. Support for "
            "non-JSON `extra` will be removed in Airflow 3.0",
            DeprecationWarning,
            stacklevel=2,
        )
    return None
3aebb21c523c0eea0d4a1518d502ff95fd98011b
14
connection.py
132
Deprecate non-JSON conn.extra (#21816) Connection extra field is generally assumed to be JSON but we don't actually require it. Here we deprecate non-JSON extra so that in 3.0 we can require it. Further, we require that it not just be any json but must also parse as dict, because a string value such as '"hi"' or '[1,2,3]' is json, but a very bad practice.
8,648
0
357
74
59
45,563
82
airflow
13
airflow/models/connection.py
Python
25
{ "docstring": "\n Here we verify that ``extra`` is a JSON-encoded Python dict. From Airflow 3.0, we should no\n longer suppress these errors but raise instead.\n ", "language": "en", "n_whitespaces": 46, "n_words": 23, "vocab_size": 22 }
https://github.com/apache/airflow.git
1
algdiv
def algdiv(a, b):
    c0 = 0.833333333333333e-01
    c1 = -0.277777777760991e-02
    c2 = 0.793650666825390e-03
    c3 = -0.595202931351870e-03
    c4 = 0.837308034031215e-03
    c5 = -0.165322962780713e-02
    h = a / b
    c = h / (1 + h)
    x = h / (1 + h)
    d = b + (a - 0.5)
    # Set sN = (1 - x**n)/(1 - x)
    x2 = x * x
    s3 = 1.0 + (x + x2)
    s5 = 1.0 + (x + x2 * s3)
    s7 = 1.0 + (x + x2 * s5)
    s9 = 1.0 + (x + x2 * s7)
    s11 = 1.0 + (x + x2 * s9)
    # Set w = del(b) - del(a + b)
    # where del(x) is defined by ln(gamma(x)) = (x - 0.5)*ln(x) - x + 0.5*ln(2*pi) + del(x)
    t = (1.0 / b) ** 2
    w = ((((c5 * s11 * t + c4 * s9) * t + c3 * s7) * t + c2 * s5) * t + c1 * s3) * t + c0
    w = w * (c / b)
    # Combine the results
    u = d * lax.log1p(a / b)
    v = a * (lax.log(b) - 1.0)
    return jnp.where(u <= v, (w - v) - u, (w - u) - v)
9f811ba54d16d1acc4c0950dd3608d0b59d1af82
21
betaln.py
362
Address drastic slowdown in mypy runtime
27,264
0
290
269
82
122,898
209
jax
28
jax/_src/third_party/scipy/betaln.py
Python
23
{ "docstring": "\n Compute ``log(gamma(a))/log(gamma(a + b))`` when ``b >= 8``.\n\n Derived from scipy's implmentation of `algdiv`_.\n\n This differs from the scipy implementation in that it assumes a <= b\n because recomputing ``a, b = jnp.minimum(a, b), jnp.maximum(a, b)`` might\n be expensive and this is only called by ``betaln``.\n\n .. _algdiv:\n https://github.com/scipy/scipy/blob/c89dfc2b90d993f2a8174e57e0cbc8fbe6f3ee19/scipy/special/cdflib/algdiv.f\n ", "language": "en", "n_whitespaces": 78, "n_words": 49, "vocab_size": 47 }
https://github.com/google/jax.git
1
_disabled
def _disabled(self, *args, **kwargs) -> NoReturn:
    raise TypeError(f"'{type(self).__name__}' does not support mutable operations.")
6ba2a67556526db2e5b0b60a566b5f2039cf4a46
12
frozen.py
46
TYP: annotate functions that always error with NoReturn (#48002)
40,216
0
27
20
13
168,150
13
pandas
8
pandas/core/indexes/frozen.py
Python
5
{ "docstring": "\n This method will not function because object is immutable.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
https://github.com/pandas-dev/pandas.git
2
_set_preview_feed
def _set_preview_feed(self):
    retval = {}
    for idx, side in enumerate(("a", "b")):
        logger.debug("Setting preview feed: (side: '%s')", side)
        preview_images = self._config.get("preview_images", 14)
        preview_images = min(max(preview_images, 2), 16)
        batchsize = min(len(self._images[side]), preview_images)
        retval[side] = self._load_generator(idx).minibatch_ab(self._images[side],
                                                              batchsize,
                                                              side,
                                                              do_shuffle=True,
                                                              is_preview=True)
        logger.debug("Set preview feed. Batchsize: %s", batchsize)
    return retval
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
14
_base.py
184
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
19,874
0
395
116
38
100,389
45
faceswap
20
plugins/train/trainer/_base.py
Python
14
{ "docstring": " Set the preview feed for this feeder.\n\n Creates a generator from :class:`lib.training_data.TrainingDataGenerator` specifically\n for previews for the feeder.\n\n Returns\n -------\n dict\n The side (\"a\" or \"b\") as key, :class:`~lib.training_data.TrainingDataGenerator` as\n value.\n ", "language": "en", "n_whitespaces": 96, "n_words": 31, "vocab_size": 26 }
https://github.com/deepfakes/faceswap.git
1
test_multiple_containers
def test_multiple_containers(self):
    rq = ForwardMsgQueue()
    self.assertTrue(rq.is_empty())

    rq.enqueue(NEW_SESSION_MSG)
704eab3478cf69847825b23dabf15813a8ac9fa2
9
forward_msg_queue_test.py
49
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
26,316
0
35
114
7
118,611
7
streamlit
8
lib/tests/streamlit/forward_msg_queue_test.py
Python
13
{ "docstring": "Deltas should only be coalesced if they're in the same container", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/streamlit/streamlit.git
1
test_simple_class_node_json_serde
def test_simple_class_node_json_serde(serve_instance):
    hello_actor = ClassHello.bind()
    original_dag_node = hello_actor.hello.bind()
    _test_deployment_json_serde_helper(
        original_dag_node,
        expected_num_deployments=1,
    )

    model_actor = Model.bind(1)
    original_dag_node = model_actor.forward.bind(1)
    _test_deployment_json_serde_helper(
        original_dag_node,
        expected_num_deployments=1,
    )

    model_actor = Model.bind(1, ratio=0.5)
    original_dag_node = model_actor.forward.bind(1)
    _test_deployment_json_serde_helper(
        original_dag_node,
        expected_num_deployments=1,
    )
89b0b82c13568254d179e63cb1a43b95fe632bd9
9
test_json_serde.py
142
[Deployment Graph] Move `Deployment` creation outside to build function (#26129)
27,557
0
114
91
16
124,273
33
ray
13
python/ray/serve/tests/test_json_serde.py
Python
19
{ "docstring": "\n Test the following behavior\n 1) Ray DAG node can go through full JSON serde cycle\n 2) Ray DAG node and deserialized DAG node produces same actor instances\n with same method call output\n 3) Ray DAG node can go through multiple rounds of JSON serde and still\n provides the same value as if it's only JSON serde once\n Against following test cases\n - Simple class with no args\n - Simple class with only args, all primitive types\n - Simple class with args + kwargs, all primitive types\n - Simple chain of class method calls, all primitive types\n ", "language": "en", "n_whitespaces": 177, "n_words": 96, "vocab_size": 54 }
https://github.com/ray-project/ray.git
1
evaluate
def evaluate(self, system_id=1, rouge_args=None):
    self.write_config(system_id=system_id)
    options = self.__get_options(rouge_args)
    command = [self._bin_path] + options
    self.log.info("Running ROUGE with command {}".format(" ".join(command)))
    rouge_output = check_output(command).decode("UTF-8")
    return rouge_output
487162262196bead8d9b4c2306f313b8f64edf9b
12
bs_pyrouge.py
119
Add model Prohetnet (#1698) * add Prohetnet model * update prohetnet * update format * pre commit * add prophetnet example * update tokenizer.py,run_train.sh,train_prophetnet.py * remove evaluate/gigaword/__init__.py Co-authored-by: smallv0221 <33639025+smallv0221@users.noreply.github.com>
118,144
0
73
70
19
322,359
24
PaddleNLP
16
examples/text_summarization/prophetnet/evaluate/cnndm/bs_pyrouge.py
Python
7
{ "docstring": "\n Run ROUGE to evaluate the system summaries in system_dir against\n the model summaries in model_dir. The summaries are assumed to\n be in the one-sentence-per-line HTML format ROUGE understands.\n\n system_id: Optional system ID which will be printed in\n ROUGE's output.\n\n Returns: Rouge output as string.\n\n ", "language": "en", "n_whitespaces": 115, "n_words": 44, "vocab_size": 33 }
https://github.com/PaddlePaddle/PaddleNLP.git
3
execute_test_case
def execute_test_case(self, test_case, kill_time=None):
    # type: (AutomotiveTestCaseABC, Optional[float]) -> None

    test_case.pre_execute(
        self.socket, self.target_state, self.configuration)

    try:
        test_case_kwargs = self.configuration[test_case.__class__.__name__]
    except KeyError:
        test_case_kwargs = dict()

    if kill_time:
        max_execution_time = max(int(kill_time - time.time()), 5)
        cur_execution_time = test_case_kwargs.get("execution_time", 1200)
        test_case_kwargs["execution_time"] = min(max_execution_time,
                                                 cur_execution_time)

    log_interactive.debug("[i] Execute test_case %s with args %s",
                          test_case.__class__.__name__, test_case_kwargs)

    test_case.execute(self.socket, self.target_state, **test_case_kwargs)
    test_case.post_execute(
        self.socket, self.target_state, self.configuration)

    self.check_new_states(test_case)
    self.check_new_testcases(test_case)
e6eaa484b8fa3d10051e82f5a784fe8dedbd5592
15
executor.py
234
Add assert to GMLAN Scanner to enforce fast fail on to many open TestSockets Fix bugs in TestSocket Fix bugs in the AutomotiveScanner execution_time handling Simplify test code for UDS_Scanner and reuse ObjectPipes to avoid mass creation
52,688
0
292
148
48
209,484
57
scapy
26
scapy/contrib/automotive/scanner/executor.py
Python
19
{ "docstring": "\n This function ensures the correct execution of a testcase, including\n the pre_execute, execute and post_execute.\n Finally the testcase is asked if a new edge or a new testcase was\n generated.\n\n :param test_case: A test case to be executed\n :param kill_time: If set, this defines the maximum execution time for\n the current test_case\n :return: None\n ", "language": "en", "n_whitespaces": 136, "n_words": 54, "vocab_size": 44 }
https://github.com/secdev/scapy.git
7
update
def update(self):
    if not self.ax.get_visible() or self.ax.figure._cachedRenderer is None:
        return False
    if self.useblit:
        if self.background is not None:
            self.canvas.restore_region(self.background)
        else:
            self.update_background(None)
        for artist in self.artists + self._get_animated_artists():
            if artist.stale:
                self.ax.draw_artist(artist)
        self.canvas.blit(self.ax.bbox)
    else:
        self.canvas.draw_idle()
    return False
da31ed386482845629a8505f81810ddb341514fb
14
widgets.py
177
Fix drawing animated artists changed in selector callback
22,575
0
200
108
26
107,061
35
matplotlib
19
lib/matplotlib/widgets.py
Python
15
{ "docstring": "Draw using blit() or draw_idle(), depending on ``self.useblit``.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/matplotlib/matplotlib.git
3
synchronize_labels
def synchronize_labels(self, axis=None):
    if axis is None:
        self._deferred_index = True
        self._deferred_column = True
    elif axis == 0:
        self._deferred_index = True
    else:
        self._deferred_column = True
3c740dbfcdd69ddc3ab45a42be996e5c61104342
10
dataframe.py
70
FEAT-#3111: Ensure relabeling Modin Frame does not lose partition shape (#3662) Co-authored-by: Devin Petersohn <devin.petersohn@gmail.com> Signed-off-by: Naren Krishna <naren@ponder.io>
35,205
0
96
42
15
152,959
24
modin
5
modin/core/dataframe/pandas/dataframe/dataframe.py
Python
8
{ "docstring": "\n Set the deferred axes variables for the ``PandasDataframe``.\n\n Parameters\n ----------\n axis : int, default: None\n The deferred axis.\n 0 for the index, 1 for the columns.\n ", "language": "en", "n_whitespaces": 84, "n_words": 26, "vocab_size": 20 }
https://github.com/modin-project/modin.git
3
_impute_values
def _impute_values(self, features):
    if self.verbosity > 1:
        print("Imputing missing values in feature set")

    if self._fitted_imputer is None:
        self._fitted_imputer = SimpleImputer(strategy="median")
        self._fitted_imputer.fit(features)

    return self._fitted_imputer.transform(features)
388616b6247ca4ea8de4e2f340d6206aee523541
12
base.py
91
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,601
0
84
53
21
181,817
23
tpot
10
tpot/base.py
Python
7
{ "docstring": "Impute missing values in a feature set.\n\n Parameters\n ----------\n features: array-like {n_samples, n_features}\n A feature matrix\n\n Returns\n -------\n array-like {n_samples, n_features}\n ", "language": "en", "n_whitespaces": 81, "n_words": 21, "vocab_size": 17 }
https://github.com/EpistasisLab/tpot.git
2
_multi_dot_three
def _multi_dot_three(A, B, C, precision):
    a0, a1b0 = A.shape
    b1c0, c1 = C.shape
    # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
    cost1 = a0 * b1c0 * (a1b0 + c1)
    # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
    cost2 = a1b0 * c1 * (a0 + b1c0)

    if cost1 < cost2:
        return jnp.dot(jnp.dot(A, B, precision=precision), C, precision=precision)
    else:
        return jnp.dot(A, jnp.dot(B, C, precision=precision), precision=precision)
2416d154355f19e77b5c1ddf1de1f8552e4a98ad
13
linalg.py
155
Call _check_arraylike for jnp.linalg & jnp.fft functions
27,182
0
79
103
39
122,415
64
jax
14
jax/_src/third_party/numpy/linalg.py
Python
9
{ "docstring": "\n Find the best order for three arrays and do the multiplication.\n For three arguments `_multi_dot_three` is approximately 15 times faster\n than `_multi_dot_matrix_chain_order`\n ", "language": "en", "n_whitespaces": 27, "n_words": 22, "vocab_size": 20 }
https://github.com/google/jax.git
1
test_dqn
def test_dqn(self):
    config = dqn.DQNConfig()
    # Soft-Q for DQN.
    config.exploration(exploration_config={"type": "SoftQ", "temperature": 0.5})
    config.debugging(seed=42)
    do_test_log_likelihood(dqn.DQN, config)
2ed09c54459cc3f74e2dab13406018698559856c
11
test_compute_log_likelihoods.py
81
[RLlib] Move all config validation logic into AlgorithmConfig classes. (#29854)
30,746
0
58
47
16
135,845
16
ray
11
rllib/policy/tests/test_compute_log_likelihoods.py
Python
5
{ "docstring": "Tests, whether DQN correctly computes logp in soft-q mode.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/ray-project/ray.git
3
_extract_future_flags
def _extract_future_flags(globs):
    flags = 0
    for fname in __future__.all_feature_names:
        feature = globs.get(fname, None)
        if feature is getattr(__future__, fname):
            flags |= feature.compiler_flag
    return flags
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
doctest.py
69
add python 3.10.4 for windows
56,906
0
60
43
19
223,448
23
XX-Net
10
python3.10.4/Lib/doctest.py
Python
7
{ "docstring": "\n Return the compiler-flags associated with the future features that\n have been imported into the given namespace (globs).\n ", "language": "en", "n_whitespaces": 27, "n_words": 17, "vocab_size": 15 }
https://github.com/XX-net/XX-Net.git
1
test_send_html_email
def test_send_html_email(self):
    send_mail(
        "Test HTML subject",
        "TEXT content",
        ["has.html@email.com"],
        html_message="<h2>Test HTML content</h2>",
    )
    send_mail("Test TEXT subject", "TEXT content", ["mr.plain.text@email.com"])

    # Check that the emails were sent
    self.assertEqual(len(mail.outbox), 2)

    # check that the first email is the HTML email
    email_message = mail.outbox[0]
    self.assertEqual(email_message.subject, "Test HTML subject")
    self.assertEqual(
        email_message.alternatives, [("<h2>Test HTML content</h2>", "text/html")]
    )
    self.assertEqual(
        email_message.body, "TEXT content"
    )  # note: plain text will always be added to body, even with alternatives
    self.assertEqual(email_message.to, ["has.html@email.com"])

    # confirm that without html_message kwarg we do not get 'alternatives'
    email_message = mail.outbox[1]
    self.assertEqual(email_message.subject, "Test TEXT subject")
    self.assertEqual(email_message.alternatives, [])
    self.assertEqual(email_message.body, "TEXT content")
    self.assertEqual(email_message.to, ["mr.plain.text@email.com"])
d10f15e55806c6944827d801cd9c2d53f5da4186
10
tests.py
261
Reformat with black
15,875
0
304
151
67
72,341
97
wagtail
13
wagtail/admin/tests/tests.py
Python
23
{ "docstring": "Test that the kwarg 'html_message' works as expected on send_mail by creating 'alternatives' on the EmailMessage object", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 15 }
https://github.com/wagtail/wagtail.git
1
set_expired_status
def set_expired_status():
    # filter out submitted non expired quotations whose validity has been ended
    cond = "`tabQuotation`.docstatus = 1 and `tabQuotation`.status != 'Expired' and `tabQuotation`.valid_till < %s"
    # check if those QUO have SO against it
    so_against_quo =

    # if not exists any SO, set status as Expired
    frappe.db.multisql(
        {
            "mariadb": .format(
                cond=cond, so_against_quo=so_against_quo
            ),
            "postgres": .format(
                cond=cond, so_against_quo=so_against_quo
            ),
        },
        (nowdate()),
    )


@frappe.whitelist()
74a782d81d8f8c4a4d9214a9c06377e5e6e464dd
@frappe.whitelist()
12
quotation.py
111
refactor: DB independent quoting and truthy/falsy values (#31358) * refactor: DB independent quoting and truthy/falsy values * style: reformat to black spec * fix: ifnull -> coalesce * fix: coalesce -> Coalesce * fix: revert pypika comparison * refactor: convert queries to QB * fix: incorrect value types for query `=` query makes no sense with list of values * fix: remove warehouse docstatus condition * fix: keep using base rate as rate Co-authored-by: Ankush Menat <ankush@frappe.io>
14,922
1
47
56
54
68,856
64
erpnext
9
erpnext/selling/doctype/quotation/quotation.py
Python
20
{ "docstring": "\n\t\tSELECT\n\t\t\tso.name FROM `tabSales Order` so, `tabSales Order Item` so_item\n\t\tWHERE\n\t\t\tso_item.docstatus = 1 and so.docstatus = 1\n\t\t\tand so_item.parent = so.name\n\t\t\tand so_item.prevdoc_docname = `tabQuotation`.nameUPDATE `tabQuotation` SET `tabQuotation`.status = 'Expired' WHERE {cond} and not exists({so_against_quo})UPDATE `tabQuotation` SET status = 'Expired' FROM `tabSales Order`, `tabSales Order Item` WHERE {cond} and not exists({so_against_quo})", "language": "en", "n_whitespaces": 47, "n_words": 52, "vocab_size": 28 }
https://github.com/frappe/erpnext.git
6
check_freezing_date
def check_freezing_date(posting_date, adv_adj=False):
    if not adv_adj:
        acc_frozen_upto = frappe.db.get_value("Accounts Settings", None, "acc_frozen_upto")
        if acc_frozen_upto:
            frozen_accounts_modifier = frappe.db.get_value(
                "Accounts Settings", None, "frozen_accounts_modifier"
            )
            if getdate(posting_date) <= getdate(acc_frozen_upto) and (
                frozen_accounts_modifier not in frappe.get_roles() or frappe.session.user == "Administrator"
            ):
                frappe.throw(
                    _("You are not authorized to add or update entries before {0}").format(
                        formatdate(acc_frozen_upto)
                    )
                )
494bd9ef78313436f0424b918f200dab8fc7c20b
18
general_ledger.py
157
style: format code with black
13,794
0
37
92
41
65,117
52
erpnext
16
erpnext/accounts/general_ledger.py
Python
15
{ "docstring": "\n\tNobody can do GL Entries where posting date is before freezing date\n\texcept authorized person\n\n\tAdministrator has all the roles so this check will be bypassed if any role is allowed to post\n\tHence stop admin to bypass if accounts are freezed\n\t", "language": "en", "n_whitespaces": 38, "n_words": 42, "vocab_size": 38 }
https://github.com/frappe/erpnext.git
1
test_run_binary
def test_run_binary(self, df, flex, comparison_op):
    arith = comparison_op.__name__
    with option_context("compute.use_numexpr", False):
        other = df.copy() + 1

    expr._MIN_ELEMENTS = 0
    expr.set_test_mode(True)

    result, expected = self.call_op(df, other, flex, arith)

    used_numexpr = expr.get_test_result()
    assert used_numexpr, "Did not use numexpr as expected."

    tm.assert_equal(expected, result)

    # FIXME: dont leave commented-out
    # series doesn't uses vec_compare instead of numexpr...
    # for i in range(len(df.columns)):
    #     binary_comp = other.iloc[:, i] + 1
    #     self.run_binary(df.iloc[:, i], binary_comp, flex)
2e5b05e5773f02b653d66373787b493bc3cf3abc
11
test_expressions.py
134
STYLE: use option_context almost always (#45407)
39,456
0
187
80
58
163,539
70
pandas
20
pandas/tests/test_expressions.py
Python
10
{ "docstring": "\n tests solely that the result is the same whether or not numexpr is\n enabled. Need to test whether the function does the correct thing\n elsewhere.\n ", "language": "en", "n_whitespaces": 55, "n_words": 25, "vocab_size": 20 }
https://github.com/pandas-dev/pandas.git
2
wagtail_site
def wagtail_site(context):
    try:
        request = context["request"]
    except KeyError:
        return None

    return Site.find_for_request(request=request)
d10f15e55806c6944827d801cd9c2d53f5da4186
10
wagtailcore_tags.py
50
Reformat with black
16,194
0
38
28
11
73,961
12
wagtail
6
wagtail/core/templatetags/wagtailcore_tags.py
Python
6
{ "docstring": "\n Returns the Site object for the given request\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 7 }
https://github.com/wagtail/wagtail.git
3
get_user_configured_option_names
def get_user_configured_option_names(self) -> Set[str]:
    return {
        field for field, value in self.dict().items() if value is not DEFAULT.VALUE
    }


@PublicAPI(stability="beta")
18b38c5e23fc54bf0baf7e7dbfcb07640e81f5ef
@PublicAPI(stability="beta")
11
schema.py
72
[Serve] Track user-configured options in Serve deployments (#28313)
28,630
1
50
36
18
128,216
19
ray
12
python/ray/serve/schema.py
Python
8
{ "docstring": "Get set of names for all user-configured options.\n\n Any field not set to DEFAULT.VALUE is considered a user-configured option.\n ", "language": "en", "n_whitespaces": 33, "n_words": 19, "vocab_size": 17 }
https://github.com/ray-project/ray.git
2
setmodulation
def setmodulation(self, modu):
    # type: (int) -> bool
    # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11  # noqa: E501
    self._check_npcap_requirement()
    _modus = {
        0: "dsss",
        1: "fhss",
        2: "irbaseband",
        3: "ofdm",
        4: "hrdss",
        5: "erp",
        6: "ht",
        7: "vht",
        8: "ihv",
        9: "mimo-ofdm",
        10: "mimo-ofdm",
    }
    m = _modus.get(modu, "unknown") if isinstance(modu, int) else modu
    return self._npcap_set("modu", str(m))
a2b7a28faff1db058dd22ce097a268e0ad5d1d33
10
__init__.py
159
[Hinty] Core typing: windows (#3684) * Core typing: windows Co-authored-by: Pierre <pierre@droids-corp.org>
52,781
0
232
92
50
209,791
54
scapy
11
scapy/arch/windows/__init__.py
Python
17
{ "docstring": "Set the interface modulation. It can be:\n - 0: dsss\n - 1: fhss\n - 2: irbaseband\n - 3: ofdm\n - 4: hrdss\n - 5: erp\n - 6: ht\n - 7: vht\n - 8: ihv\n - 9: mimo-ofdm\n - 10: mimo-ofdm\n - the value directly\n Only available with Npcap.", "language": "en", "n_whitespaces": 174, "n_words": 48, "vocab_size": 35 }
https://github.com/secdev/scapy.git
3
make_distribution_for_install_requirement
def make_distribution_for_install_requirement(install_req):
    # type: (InstallRequirement) -> AbstractDistribution
    # Editable requirements will always be source distributions. They use the
    # legacy logic until we create a modern standard for them.
    if install_req.editable:
        return SourceDistribution(install_req)

    # If it's a wheel, it's a WheelDistribution
    if install_req.is_wheel:
        return WheelDistribution(install_req)

    # Otherwise, a SourceDistribution
    return SourceDistribution(install_req)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
9
__init__.py
58
upd; format
12,243
0
92
31
39
60,680
51
transferlearning
6
.venv/lib/python3.8/site-packages/pip/_internal/distributions/__init__.py
Python
6
{ "docstring": "Returns a Distribution for the given InstallRequirement", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/jindongwang/transferlearning.git
5
_discover_all_providers_from_packages
def _discover_all_providers_from_packages(self) -> None:
    for entry_point, dist in entry_points_with_dist('apache_airflow_provider'):
        package_name = dist.metadata['name']
        if self._provider_dict.get(package_name) is not None:
            continue
        log.debug("Loading %s from package %s", entry_point, package_name)
        version = dist.version
        provider_info = entry_point.load()()
        self._provider_schema_validator.validate(provider_info)
        provider_info_package_name = provider_info['package-name']
        if package_name != provider_info_package_name:
            raise Exception(
                f"The package '{package_name}' from setuptools and "
                f"{provider_info_package_name} do not match. Please make sure they are aligned"
            )
        if package_name not in self._provider_dict:
            self._provider_dict[package_name] = ProviderInfo(version, provider_info, 'package')
        else:
            log.warning(
                "The provider for package '%s' could not be registered from because providers for that "
                "package name have already been registered",
                package_name,
            )
b5a786b38148295c492da8ab731d5e2f6f86ccf7
14
providers_manager.py
221
Suppress import errors for providers from sources (#22579) When we are running airflow locally with providers installed from sources, often many providers will be discovered which we haven't installed the deps for. This generally results in a very large amount of traceback logging, which has a very negative effect on usefulness of terminal output. Here we suppress this error logging for providers that are installed from sources.
8,934
0
403
126
72
46,606
94
airflow
20
airflow/providers_manager.py
Python
31
{ "docstring": "\n Discovers all providers by scanning packages installed. The list of providers should be returned\n via the 'apache_airflow_provider' entrypoint as a dictionary conforming to the\n 'airflow/provider_info.schema.json' schema. Note that the schema is different at runtime\n than provider.yaml.schema.json. The development version of provider schema is more strict and changes\n together with the code. The runtime version is more relaxed (allows for additional properties)\n and verifies only the subset of fields that are needed at runtime.\n ", "language": "en", "n_whitespaces": 123, "n_words": 73, "vocab_size": 55 }
https://github.com/apache/airflow.git
1
load
def load(self, loader):
    loader.add_option(
        "modify_body", Sequence[str], [],
    )
fdde9ba3b3caaa2654048cec0af07bfcc3a6a3f8
8
modifybody.py
37
use Python 3.9+ typing
73,629
0
44
23
8
251,191
8
mitmproxy
6
mitmproxy/addons/modifybody.py
Python
9
{ "docstring": "\n Replacement pattern of the form \"[/flow-filter]/regex/[@]replacement\", where\n the separator can be any character. The @ allows to provide a file path that\n is used to read the replacement string.\n ", "language": "en", "n_whitespaces": 74, "n_words": 29, "vocab_size": 26 }
https://github.com/mitmproxy/mitmproxy.git
3
get_ready
def get_ready(self) -> Dict[ActorHandle, List[Any]]:
    ready_requests_dict = defaultdict(list)
    ready_requests, self._pending_remotes = ray.wait(
        self._pending_remotes,
        timeout=self._ray_wait_timeout_s,
        num_returns=len(self._pending_remotes),
    )
    if not self._return_object_refs:
        objs = ray.get(ready_requests)
    else:
        objs = ready_requests

    for req, obj in zip(ready_requests, objs):
        actor = self._pending_to_actor[req]
        self._remote_requests_in_flight[actor].remove(req)
        ready_requests_dict[actor].append(obj)
        del self._pending_to_actor[req]
    del ready_requests
    return dict(ready_requests_dict)
eaed256d6863c529b8ada42514f7fba12d146f22
12
parallel_requests.py
193
[RLlib] Async parallel execution manager. (#24423)
31,919
0
205
125
35
140,298
43
ray
29
rllib/execution/parallel_requests.py
Python
26
{ "docstring": "Get results that are ready to be returned\n\n Returns:\n A dictionary of actor handles to lists of returns from tasks that were\n previously submitted to this actor pool that are now ready to be returned.\n If return_object_refs\n\n ", "language": "en", "n_whitespaces": 86, "n_words": 37, "vocab_size": 27 }
https://github.com/ray-project/ray.git
1
set_link_objects
def set_link_objects(self, objects):
    self.objects = objects[:]


# -- Private utility methods --------------------------------------
# (here for the convenience of subclasses)

# Helper method to prep compiler in subclass compile() methods
8198943edd73a363c266633e1aa5b2a9e9c9f526
8
ccompiler.py
31
add python 3.10.4 for windows
56,651
0
52
16
26
222,578
29
XX-Net
3
python3.10.4/Lib/distutils/ccompiler.py
Python
2
{ "docstring": "Set the list of object files (or analogues) to be included in\n every link to 'objects'. This does not affect any standard object\n files that the linker may include by default (such as system\n libraries).\n ", "language": "en", "n_whitespaces": 64, "n_words": 35, "vocab_size": 31 }
https://github.com/XX-net/XX-Net.git
2
get_serve_client
def get_serve_client(self):
    from ray.serve.context import get_global_client
    from ray.serve.exceptions import RayServeException

    try:
        return get_global_client(_health_check_controller=True)
    except RayServeException:
        logger.debug("There's no Serve app running on this Ray cluster.")
        return None
4ab97399cda80be1e146946c43d6fb2926248b28
11
serve_agent.py
70
[Serve] Only start Serve in the CLI through the `serve deploy` command (#27063) These Serve CLI commands start Serve if it's not already running: * `serve deploy` * `serve config` * `serve status` * `serve shutdown` #27026 introduces the ability to specify a `host` and `port` in the Serve config file. However, once Serve starts running, changing these options requires tearing down the entire Serve application and relaunching it. This limitation is an issue because users can inadvertently start Serve by running one of the `GET`-based CLI commands (i.e. `serve config` or `serve status`) before running `serve deploy`. This change makes `serve deploy` the only CLI command that can start a Serve application on a Ray cluster. The other commands have updated behavior when Serve is not yet running on the cluster. * `serve config`: prints an empty config body. ```yaml import_path: '' runtime_env: {} deployments: [] ``` * `serve status`: prints an empty status body, with a new `app_status` `status` value: `NOT_STARTED`. ```yaml app_status: status: NOT_STARTED message: '' deployment_timestamp: 0 deployment_statuses: [] ``` * `serve shutdown`: performs a no-op.
28,078
0
94
42
23
126,155
26
ray
11
dashboard/modules/serve/serve_agent.py
Python
8
{ "docstring": "Gets the ServeControllerClient to the this cluster's Serve app.\n\n return: If Serve is running on this Ray cluster, returns a client to\n the Serve controller. If Serve is not running, returns None.\n ", "language": "en", "n_whitespaces": 57, "n_words": 32, "vocab_size": 22 }
https://github.com/ray-project/ray.git
6
resize_image_type0
def resize_image_type0(self, img):
    limit_side_len = self.max_side_len
    h, w, _ = img.shape

    # limit the max side
    if max(h, w) > limit_side_len:
        if h > w:
            ratio = float(limit_side_len) / h
        else:
            ratio = float(limit_side_len) / w
    else:
        ratio = 1.
    resize_h = int(h * ratio)
    resize_w = int(w * ratio)

    resize_h = int(round(resize_h / 32) * 32)
    resize_w = int(round(resize_w / 32) * 32)

    try:
        if int(resize_w) <= 0 or int(resize_h) <= 0:
            return None, (None, None)
        img = cv2.resize(img, (int(resize_w), int(resize_h)))
    except:
        print(img.shape, resize_w, resize_h)
        sys.exit(0)
    ratio_h = resize_h / float(h)
    ratio_w = resize_w / float(w)
    # return img, np.array([h, w])
    return img, [ratio_h, ratio_w]
9b3119dfb63c4cbb7acfb9f1f1c09ac24e6d68d2
14
processor.py
302
add module
9,744
0
340
190
66
49,439
106
PaddleHub
23
modules/image/text_recognition/ppocrv3_det_ch/processor.py
Python
24
{ "docstring": "\n resize image to a size multiple of 32 which is required by the network\n args:\n img(array): array with shape [h, w, c]\n return(tuple):\n img, (ratio_h, ratio_w)\n ", "language": "en", "n_whitespaces": 77, "n_words": 26, "vocab_size": 26 }
https://github.com/PaddlePaddle/PaddleHub.git
1
test_finds_message
def test_finds_message(self) -> None:
    # The other user sends some messages
    self.helper.send(self.room, body="Hi!", tok=self.other_access_token)
    self.helper.send(self.room, body="There!", tok=self.other_access_token)

    channel = self.make_request(
        "POST",
        "/search?access_token=%s" % (self.access_token,),
        {
            "search_categories": {
                "room_events": {"keys": ["content.body"], "search_term": "Hi"}
            }
        },
    )

    # Check we get the results we expect -- one search result, of the sent
    # messages
    self.assertEqual(channel.code, 200)
    results = channel.json_body["search_categories"]["room_events"]
    self.assertEqual(results["count"], 1)
    self.assertEqual(results["results"][0]["result"]["content"]["body"], "Hi!")

    # No context was requested, so we should get none.
    self.assertEqual(results["results"][0]["context"], {})
2ffaf30803f93273a4d8a65c9e6c3110c8433488
16
test_rooms.py
277
Add type hints to `tests/rest/client` (#12108) * Add type hints to `tests/rest/client` * newsfile * fix imports * add `test_account.py` * Remove one type hint in `test_report_event.py` * change `on_create_room` to `async` * update new functions in `test_third_party_rules.py` * Add `test_filter.py` * add `test_rooms.py` * change to `assertEquals` to `assertEqual` * lint
71,583
0
264
159
60
247,312
73
synapse
15
tests/rest/client/test_rooms.py
Python
21
{ "docstring": "\n The search functionality will search for content in messages if asked to\n do so.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
https://github.com/matrix-org/synapse.git
2
conv_block
def conv_block(cls, inputs, filters, idx, recursions):
    name = f"conv{idx}"
    var_x = inputs
    for i in range(1, recursions + 1):
        rec_name = f"{name}_{i}"
        var_x = ZeroPadding2D(1, name=f"{rec_name}.zeropad")(var_x)
        var_x = Conv2D(filters,
                       kernel_size=3,
                       strides=1,
                       activation="relu",
                       name=rec_name)(var_x)
    return var_x
aa39234538a8f83e6aa2b60b8275a570e8876ac2
14
s3fd.py
135
Update all Keras Imports to be conditional (#1214) * Remove custom keras importer * first round keras imports fix * launcher.py: Remove KerasFinder references * 2nd round keras imports update (lib and extract) * 3rd round keras imports update (train) * remove KerasFinder from tests * 4th round keras imports update (tests)
19,921
0
207
78
28
100,443
35
faceswap
16
plugins/extract/detect/s3fd.py
Python
12
{ "docstring": " First round convolutions with zero padding added.\n\n Parameters\n ----------\n inputs: tensor\n The input tensor to the convolution block\n filters: int\n The number of filters\n idx: int\n The layer index for naming\n recursions: int\n The number of recursions of the block to perform\n\n Returns\n -------\n tensor\n The output tensor from the convolution block\n ", "language": "en", "n_whitespaces": 178, "n_words": 52, "vocab_size": 34 }
https://github.com/deepfakes/faceswap.git
4
get_bazel_path
def get_bazel_path(bazel_path_flag):
    for path in filter(None, get_bazel_paths(bazel_path_flag)):
        version = get_bazel_version(path)
        if version is not None and version >= (5, 0, 0):
            return path, ".".join(map(str, version))

    print("Cannot find or download a suitable version of bazel."
          "Please install bazel >= 5.0.0.")
    sys.exit(-1)
2388e353da9768a5e714a83b360bac1e920ff7ae
14
build.py
112
Increase bazel version to 5.0.0 to match TensorFlow (https://github.com/tensorflow/tensorflow/commit/8871926b0aa9d5b0e819d12f6945bce752fde610).
26,549
0
62
67
36
119,172
40
jax
13
build/build.py
Python
8
{ "docstring": "Returns the path to a Bazel binary, downloading Bazel if not found. Also,\n checks Bazel's version is at least newer than 5.0.0\n\n A manual version check is needed only for really old bazel versions.\n Newer bazel releases perform their own version check against .bazelversion\n (see for details\n https://blog.bazel.build/2019/12/19/bazel-2.0.html#other-important-changes).\n ", "language": "en", "n_whitespaces": 54, "n_words": 48, "vocab_size": 41 }
https://github.com/google/jax.git
3
build_ddp
def build_ddp(model, device='cuda', *args, **kwargs):
    assert device in ['cuda', 'mlu'], 'Only available for cuda or mlu devices.'
    if device == 'cuda':
        model = model.cuda()
    elif device == 'mlu':
        from mmcv.device.mlu import MLUDistributedDataParallel
        ddp_factory['mlu'] = MLUDistributedDataParallel
        model = model.mlu()

    return ddp_factory[device](model, *args, **kwargs)
24f2fdb38481e6c013a588660c044e410148ce1e
11
util_distribution.py
133
fix lint (#7793)
70,288
0
85
78
34
244,223
42
mmdetection
10
mmdet/utils/util_distribution.py
Python
9
{ "docstring": "Build DistributedDataParallel module by device type.\n\n If device is cuda, return a MMDistributedDataParallel model;\n if device is mlu, return a MLUDistributedDataParallel model.\n\n Args:\n model (:class:`nn.Module`): module to be parallelized.\n device (str): device type, mlu or cuda.\n\n Returns:\n :class:`nn.Module`: the module to be parallelized\n\n References:\n .. [1] https://pytorch.org/docs/stable/generated/torch.nn.parallel.\n DistributedDataParallel.html\n ", "language": "en", "n_whitespaces": 114, "n_words": 48, "vocab_size": 37 }
https://github.com/open-mmlab/mmdetection.git
4
detect_current_mode
def detect_current_mode(cls) -> _FaultTolerantMode:
    env_value = os.getenv("PL_FAULT_TOLERANT_TRAINING", "0").lower()
    # the int values are kept for backwards compatibility, but long-term we want to keep only the strings
    if env_value in ("0", "disabled"):
        return _FaultTolerantMode.DISABLED
    elif env_value in ("1", "automatic"):
        return _FaultTolerantMode.AUTOMATIC
    elif env_value in ("2", "manual"):
        return _FaultTolerantMode.MANUAL
    raise MisconfigurationException(
        "The environment flag `PL_FAULT_TOLERANT_TRAINING` should be either 'disabled', 'automatic', or 'manual'."
    )
a610e043d797ca0bae1ce186829fece79077407a
11
enums.py
122
Add typing for utilities/enums.py (#11298)
69,581
0
161
66
52
241,553
61
lightning
11
pytorch_lightning/utilities/enums.py
Python
12
{ "docstring": "This classmethod detects if `Fault Tolerant` is activated and maps its value to `_FaultTolerantMode`.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
https://github.com/Lightning-AI/lightning.git
5
_path_from_module
def _path_from_module(self, module):
        # See #21874 for extended discussion of the behavior of this method in
        # various cases.
        # Convert to list because __path__ may not support indexing.
        paths = list(getattr(module, "__path__", []))
        if len(paths) != 1:
            filename = getattr(module, "__file__", None)
            if filename is not None:
                paths = [os.path.dirname(filename)]
            else:
                # For unknown reasons, sometimes the list returned by __path__
                # contains duplicates that must be removed (#25246).
                paths = list(set(paths))
        if len(paths) > 1:
            raise ImproperlyConfigured(
                "The app module %r has multiple filesystem locations (%r); "
                "you must configure this app with an AppConfig subclass "
                "with a 'path' class attribute." % (module, paths)
            )
        elif not paths:
            raise ImproperlyConfigured(
                "The app module %r has no filesystem location, "
                "you must configure this app with an AppConfig subclass "
                "with a 'path' class attribute." % module
            )
        return paths[0]
9c19aff7c7561e3a82978a272ecdaad40dda5c00
15
config.py
191
Refs #33476 -- Reformatted code with Black.
50,287
0
432
108
91
203,297
142
django
13
django/apps/config.py
Python
21
{ "docstring": "Attempt to determine app's filesystem path from its module.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/django/django.git
4
value_as_datetime
def value_as_datetime(self) -> tp.Tuple[datetime, datetime] | None:
        if self.value is None:
            return None
        v1, v2 = self.value
        if isinstance(v1, numbers.Number):
            d1 = datetime.utcfromtimestamp(v1 / 1000)
        else:
            d1 = v1
        if isinstance(v2, numbers.Number):
            d2 = datetime.utcfromtimestamp(v2 / 1000)
        else:
            d2 = v2
        return d1, d2

    value = NonNullable(Tuple(Datetime, Datetime), help=)

    value_throttled = Readonly(NonNullable(Tuple(Datetime, Datetime)), help=)

    start = NonNullable(Datetime, help=)

    end = NonNullable(Datetime, help=)

    step = Int(default=3_600_000, help=)

    format = Override(default="%d %b %Y %H:%M:%S")

#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
c9751009161f092b2e403d8cccccf5252c0dce1a
11
sliders.py
267
Add DatetimeRangeSlider (#12034) * Add DatetimeRangeSlider * Add tests * Add docs
53,206
0
204
87
49
212,222
81
bokeh
26
bokeh/models/widgets/sliders.py
Python
16
{ "docstring": " Convenience property to retrieve the value tuple as a tuple of\n datetime objects.\n \n Initial or selected range.\n \n Initial or selected value, throttled to report only on mouseup.\n \n The minimum allowable value.\n \n The maximum allowable value.\n \n The step between consecutive values, in units of milliseconds.\n Default is one hour.\n ", "language": "en", "n_whitespaces": 101, "n_words": 48, "vocab_size": 38 }
https://github.com/bokeh/bokeh.git
5
spectral_graph_forge
def spectral_graph_forge(G, alpha, transformation="identity", seed=None):
    import numpy as np
    import scipy as sp
    import scipy.stats  # call as sp.stats

    available_transformations = ["identity", "modularity"]
    alpha = np.clip(alpha, 0, 1)
    A = nx.to_numpy_array(G)
    n = A.shape[1]
    level = int(round(n * alpha))

    if transformation not in available_transformations:
        msg = f"{transformation!r} is not a valid transformation. "
        msg += f"Transformations: {available_transformations}"
        raise nx.NetworkXError(msg)

    K = np.ones((1, n)) @ A

    B = A
    if transformation == "modularity":
        B -= K.T @ K / K.sum()

    # Compute low-rank approximation of B
    evals, evecs = np.linalg.eigh(B)
    k = np.argsort(np.abs(evals))[::-1]  # indices of evals in descending order
    evecs[:, k[np.arange(level, n)]] = 0  # set smallest eigenvectors to 0
    B = evecs @ np.diag(evals) @ evecs.T

    if transformation == "modularity":
        B += K.T @ K / K.sum()

    B = np.clip(B, 0, 1)
    np.fill_diagonal(B, 0)

    for i in range(n - 1):
        B[i, i + 1 :] = sp.stats.bernoulli.rvs(B[i, i + 1 :], random_state=seed)
        B[i + 1 :, i] = np.transpose(B[i, i + 1 :])

    H = nx.from_numpy_array(B)

    return H
8bea55e3071ed71eab4fb6650a45f0cdf5c911d4
13
spectral_graph_forge.py
494
Remove `_mat_spect_approx` in favor of simpler procedure (#5624) * Replace _mat_spect_approx func internal usage. * Rm _mat_spect_approx helper function.
41,994
0
293
306
105
176,595
169
networkx
45
networkx/generators/spectral_graph_forge.py
Python
30
{ "docstring": "Returns a random simple graph with spectrum resembling that of `G`\n\n This algorithm, called Spectral Graph Forge (SGF), computes the\n eigenvectors of a given graph adjacency matrix, filters them and\n builds a random graph with a similar eigenstructure.\n SGF has been proved to be particularly useful for synthesizing\n realistic social networks and it can also be used to anonymize\n graph sensitive data.\n\n Parameters\n ----------\n G : Graph\n alpha : float\n Ratio representing the percentage of eigenvectors of G to consider,\n values in [0,1].\n transformation : string, optional\n Represents the intended matrix linear transformation, possible values\n are 'identity' and 'modularity'\n seed : integer, random_state, or None (default)\n Indicator of numpy random number generation state.\n See :ref:`Randomness<randomness>`.\n\n Returns\n -------\n H : Graph\n A graph with a similar eigenvector structure of the input one.\n\n Raises\n ------\n NetworkXError\n If transformation has a value different from 'identity' or 'modularity'\n\n Notes\n -----\n Spectral Graph Forge (SGF) generates a random simple graph resembling the\n global properties of the given one.\n It leverages the low-rank approximation of the associated adjacency matrix\n driven by the *alpha* precision parameter.\n SGF preserves the number of nodes of the input graph and their ordering.\n This way, nodes of output graphs resemble the properties of the input one\n and attributes can be directly mapped.\n\n It considers the graph adjacency matrices which can optionally be\n transformed to other symmetric real matrices (currently transformation\n options include *identity* and *modularity*).\n The *modularity* transformation, in the sense of Newman's modularity matrix\n allows the focusing on community structure related properties of the graph.\n\n SGF applies a low-rank approximation whose fixed rank is computed from the\n ratio *alpha* of the input graph adjacency matrix dimension.\n This step performs a filtering on the input eigenvectors similar to the low\n pass filtering common in telecommunications.\n\n The filtered values (after truncation) are used as input to a Bernoulli\n sampling for constructing a random adjacency matrix.\n\n References\n ----------\n .. [1] L. Baldesi, C. T. Butts, A. Markopoulou, \"Spectral Graph Forge:\n Graph Generation Targeting Modularity\", IEEE Infocom, '18.\n https://arxiv.org/abs/1801.01715\n .. [2] M. Newman, \"Networks: an introduction\", Oxford university press,\n 2010\n\n Examples\n --------\n >>> G = nx.karate_club_graph()\n >>> H = nx.spectral_graph_forge(G, 0.3)\n >>>\n ", "language": "en", "n_whitespaces": 582, "n_words": 358, "vocab_size": 213 }
https://github.com/networkx/networkx.git
3
find_loader
def find_loader(self, fullname):
        _warnings.warn("FileFinder.find_loader() is deprecated and "
                       "slated for removal in Python 3.12; use find_spec() instead",
                       DeprecationWarning)
        spec = self.find_spec(fullname)
        if spec is None:
            return None, []
        return spec.loader, spec.submodule_search_locations or []
8198943edd73a363c266633e1aa5b2a9e9c9f526
9
_bootstrap_external.py
79
add python 3.10.4 for windows
55,131
0
123
46
29
218,101
33
XX-Net
10
python3.10.4/Lib/importlib/_bootstrap_external.py
Python
8
{ "docstring": "Try to find a loader for the specified module, or the namespace\n package portions. Returns (loader, list-of-portions).\n\n This method is deprecated. Use find_spec() instead.\n\n ", "language": "en", "n_whitespaces": 46, "n_words": 24, "vocab_size": 23 }
https://github.com/XX-net/XX-Net.git
5
sys_path
def sys_path(self):
        # type: () -> List[str]
        from .vendor.vistir.compat import JSONDecodeError

        current_executable = Path(sys.executable).as_posix()
        if not self.python or self.python == current_executable:
            return sys.path
        elif any([sys.prefix == self.prefix, not self.is_venv]):
            return sys.path
        cmd_args = [self.python, "-c", "import json, sys; print(json.dumps(sys.path))"]
        path, _ = vistir.misc.run(
            cmd_args,
            return_object=False,
            nospin=True,
            block=True,
            combine_stderr=False,
            write_to_stdout=False,
        )
        try:
            path = json.loads(path.strip())
        except JSONDecodeError:
            path = sys.path
        return path
9a3b3ce70621af6f9adaa9eeac9cf83fa149319c
12
environment.py
212
Issue 4993 Add standard pre commit hooks and apply linting. (#4994) * Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else.
3,055
0
255
134
48
19,671
61
pipenv
28
pipenv/environment.py
Python
21
{ "docstring": "\n The system path inside the environment\n\n :return: The :data:`sys.path` from the environment\n :rtype: list\n ", "language": "en", "n_whitespaces": 43, "n_words": 14, "vocab_size": 11 }
https://github.com/pypa/pipenv.git
5
parquet_to_arrow
def parquet_to_arrow(sources, destination):
    stream = None if isinstance(destination, str) else destination
    disable = not utils.is_progress_bar_enabled()
    with ArrowWriter(path=destination, stream=stream) as writer:
        for source in utils.tqdm(sources, unit="sources", disable=disable):
            pf = pa.parquet.ParquetFile(source)
            for i in utils.tqdm(range(pf.num_row_groups), unit="row_groups", leave=False, disable=disable):
                df = pf.read_row_group(i).to_pandas()
                for col in df.columns:
                    df[col] = df[col].apply(json.loads)
                reconstructed_table = pa.Table.from_pandas(df)
                writer.write_table(reconstructed_table)
    return destination
6ed6ac9448311930557810383d2cfd4fe6aae269
17
arrow_writer.py
236
Better TQDM output (#3654) * Show progress bar when generating examples * Consistent utils.is_progress_bar_enabled calls * Fix tqdm in notebook * Add missing params to DatasetDict.map * Specify total in tqdm progress bar in map * Fix total computation * Small fix * Add desc to map_nested * Add more precise descriptions to download * Address comments * Fix docstring * Final changes * Minor change
21,787
0
175
150
41
104,228
52
datasets
35
src/datasets/arrow_writer.py
Python
13
{ "docstring": "Convert parquet files to arrow file. Inputs can be str paths or file-like objects", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
https://github.com/huggingface/datasets.git
3
cursor_width
def cursor_width(self) -> int:
        if self.placeholder and not self.value:
            return cell_len(self.placeholder)
        return self._position_to_cell(len(self.value)) + 1
e61eaf7597a1050e80e7ce029737b5544743a2f5
11
_input.py
65
replace TextInput with Input
44,947
0
47
39
14
185,240
15
textual
8
src/textual/widgets/_input.py
Python
5
{ "docstring": "Get the width of the input (with extra space for cursor at the end).", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 12 }
https://github.com/Textualize/textual.git
1
test_bundled_aggregations_with_filter
def test_bundled_aggregations_with_filter(self) -> None:
        self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")

        # Note that the sync filter does not include "unsigned" as a field.
        filter = urllib.parse.quote_plus(
            b'{"event_fields": ["content", "event_id"], "room": {"timeline": {"limit": 3}}}'
        )
        channel = self.make_request(
            "GET", f"/sync?filter={filter}", access_token=self.user_token
        )
        self.assertEqual(200, channel.code, channel.json_body)

        # Ensure the timeline is limited, find the parent event.
        room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"]
        self.assertTrue(room_timeline["limited"])
        parent_event = self._find_event_in_chunk(room_timeline["events"])

        # Ensure there's bundled aggregations on it.
        self.assertIn("unsigned", parent_event)
        self.assertIn("m.relations", parent_event["unsigned"])
96274565ff0dbb7d21b02b04fcef115330426707
11
test_relations.py
211
Fix bundling aggregations if unsigned is not a returned event field. (#12234) An error occured if a filter was supplied with `event_fields` which did not include `unsigned`. In that case, bundled aggregations are still added as the spec states it is allowed for servers to add additional fields.
71,871
0
196
120
59
247,719
69
synapse
22
tests/rest/client/test_relations.py
Python
21
{ "docstring": "\n If \"unsigned\" is an omitted field (due to filtering), adding the bundled\n aggregations should not break.\n\n Note that the spec allows for a server to return additional fields beyond\n what is specified.\n ", "language": "en", "n_whitespaces": 68, "n_words": 32, "vocab_size": 29 }
https://github.com/matrix-org/synapse.git
2
sample_crop_box
def sample_crop_box(self, img_size, results):
        assert isinstance(img_size, tuple)
        h, w = img_size[:2]

        key_masks = results['polys']

        x_valid_array = np.ones(w, dtype=np.int32)
        y_valid_array = np.ones(h, dtype=np.int32)

        selected_mask = key_masks[np.random.randint(0, len(key_masks))]
        selected_mask = selected_mask.reshape((-1, 2)).astype(np.int32)
        max_x_start = max(np.min(selected_mask[:, 0]) - 2, 0)
        min_x_end = min(np.max(selected_mask[:, 0]) + 3, w - 1)
        max_y_start = max(np.min(selected_mask[:, 1]) - 2, 0)
        min_y_end = min(np.max(selected_mask[:, 1]) + 3, h - 1)

        # for key in results.get('mask_fields', []):
        #     if len(results[key].masks) == 0:
        #         continue
        #     masks = results[key].masks
        for mask in key_masks:
            # assert len(mask) == 1
            mask = mask.reshape((-1, 2)).astype(np.int32)
            clip_x = np.clip(mask[:, 0], 0, w - 1)
            clip_y = np.clip(mask[:, 1], 0, h - 1)
            min_x, max_x = np.min(clip_x), np.max(clip_x)
            min_y, max_y = np.min(clip_y), np.max(clip_y)

            x_valid_array[min_x - 2:max_x + 3] = 0
            y_valid_array[min_y - 2:max_y + 3] = 0

        min_w = int(w * self.min_side_ratio)
        min_h = int(h * self.min_side_ratio)

        x1, x2 = self.sample_valid_start_end(x_valid_array, min_w, max_x_start,
                                             min_x_end)
        y1, y2 = self.sample_valid_start_end(y_valid_array, min_h, max_y_start,
                                             min_y_end)

        return np.array([x1, y1, x2, y2])
9f62b610dea6161627200ed85d92e19b1923279a
14
fce_aug.py
561
add fcenet
4,526
0
507
370
98
23,176
161
PaddleOCR
45
ppocr/data/imaug/fce_aug.py
Python
27
{ "docstring": "Generate crop box and make sure not to crop the polygon instances.\n\n Args:\n img_size (tuple(int)): The image size (h, w).\n results (dict): The results dict.\n ", "language": "en", "n_whitespaces": 61, "n_words": 25, "vocab_size": 22 }
https://github.com/PaddlePaddle/PaddleOCR.git
7
dt64arr_to_periodarr
def dt64arr_to_periodarr(data, freq, tz=None):
    if not isinstance(data.dtype, np.dtype) or data.dtype.kind != "M":
        raise ValueError(f"Wrong dtype: {data.dtype}")

    if freq is None:
        if isinstance(data, ABCIndex):
            data, freq = data._values, data.freq
        elif isinstance(data, ABCSeries):
            data, freq = data._values, data.dt.freq

    elif isinstance(data, (ABCIndex, ABCSeries)):
        data = data._values

    reso = get_unit_from_dtype(data.dtype)
    freq = Period._maybe_convert_freq(freq)
    base = freq._period_dtype_code
    return c_dt64arr_to_periodarr(data.view("i8"), base, tz, reso=reso), freq
25749d29dbbb8c6ae7a05f4661948d03c17b20ae
14
period.py
229
ENH: DTA.to_period support non-nano (#47324) * ENH: DTA.to_period support non-nano * update test
39,943
0
132
142
42
167,144
58
pandas
21
pandas/core/arrays/period.py
Python
14
{ "docstring": "\n Convert an datetime-like array to values Period ordinals.\n\n Parameters\n ----------\n data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]\n freq : Optional[Union[str, Tick]]\n Must match the `freq` on the `data` if `data` is a DatetimeIndex\n or Series.\n tz : Optional[tzinfo]\n\n Returns\n -------\n ordinals : ndarray[int64]\n freq : Tick\n The frequency extracted from the Series or DatetimeIndex if that's\n used.\n\n ", "language": "en", "n_whitespaces": 117, "n_words": 55, "vocab_size": 44 }
https://github.com/pandas-dev/pandas.git
2
get_2d_sincos_pos_embed
def get_2d_sincos_pos_embed(embed_dim, grid_size, add_cls_token=False):
    grid_h = tf.range(grid_size, dtype=tf.float32)
    grid_w = tf.range(grid_size, dtype=tf.float32)
    grid = tf.meshgrid(grid_w, grid_h)  # here w goes first
    grid = tf.stack(grid, axis=0)

    grid = tf.reshape(grid, [2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if add_cls_token:
        pos_embed = tf.concat([tf.zeros((1, embed_dim)), pos_embed], axis=0)
    return pos_embed
5b40a37bc4da9dc6cd33876ce9bb3f7f48450a03
14
modeling_tf_vit_mae.py
176
Add TF ViT MAE (#16255) * ported TFViTMAEIntermediate and TFViTMAEOutput. * added TFViTMAEModel and TFViTMAEDecoder. * feat: added a noise argument in the implementation for reproducibility. * feat: vit mae models with an additional noise argument for reproducibility. Co-authored-by: ariG23498 <aritra.born2fly@gmail.com> Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
6,659
0
81
118
32
36,650
46
transformers
19
src/transformers/models/vit_mae/modeling_tf_vit_mae.py
Python
10
{ "docstring": "\n Create 2D sin/cos positional embeddings.\n\n Args:\n embed_dim (`int`):\n Embedding dimension.\n grid_size (`int`):\n The grid height and width.\n add_cls_token (`bool`, *optional*, defaults to `False`):\n Whether or not to add a classification (CLS) token.\n\n Returns:\n (`tf.Tensor` of shape (grid_size*grid_size, embed_dim) or (1+grid_size*grid_size, embed_dim): the position\n embeddings (with or without classification token)\n ", "language": "en", "n_whitespaces": 130, "n_words": 49, "vocab_size": 44 }
https://github.com/huggingface/transformers.git
1
df_update
def df_update(self, other, **kwargs):  # noqa: PR02
        return BinaryDefault.register(pandas.DataFrame.update, inplace=True)(
            self, other=other, **kwargs
        )
57e29bc5d82348006c5170ef9ac0a9eedcd9acf9
10
query_compiler.py
56
REFACTOR-#4513: Fix spelling mistakes in docs and docstrings (#4514) Co-authored-by: Rehan Sohail Durrani <rdurrani@berkeley.edu> Signed-off-by: jeffreykennethli <jkli@ponder.io>
35,634
0
47
36
14
153,819
14
modin
10
modin/core/storage_formats/base/query_compiler.py
Python
4
{ "docstring": "\n Update values of `self` using non-NA values of `other` at the corresponding positions.\n\n If axes are not equal, perform frames alignment first.\n\n Parameters\n ----------\n other : BaseQueryCompiler\n Frame to grab replacement values from.\n join : {\"left\"}\n Specify type of join to align frames if axes are not equal\n (note: currently only one type of join is implemented).\n overwrite : bool\n Whether to overwrite every corresponding value of self, or only if it's NAN.\n filter_func : callable(pandas.Series, pandas.Series) -> numpy.ndarray<bool>\n Function that takes column of the self and return bool mask for values, that\n should be overwritten in the self frame.\n errors : {\"raise\", \"ignore\"}\n If \"raise\", will raise a ``ValueError`` if `self` and `other` both contain\n non-NA data in the same place.\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with updated values.\n ", "language": "en", "n_whitespaces": 351, "n_words": 142, "vocab_size": 100 }
https://github.com/modin-project/modin.git
1
total_seconds
def total_seconds(self) -> npt.NDArray[np.float64]:
        return self._maybe_mask_results(1e-9 * self.asi8, fill_value=None)
3350f95c017a68644e8577651af743413532356f
9
timedeltas.py
48
REF: share DTA/TDA/PA arithmetic methods (#47205) * REF: share DTA/TDA/PA arithmetic methods * troubleshoot npdev build
39,917
0
23
32
9
167,007
9
pandas
9
pandas/core/arrays/timedeltas.py
Python
56
{ "docstring": "\n Return total duration of each element expressed in seconds.\n\n This method is available directly on TimedeltaArray, TimedeltaIndex\n and on Series containing timedelta values under the ``.dt`` namespace.\n\n Returns\n -------\n seconds : [ndarray, Float64Index, Series]\n When the calling object is a TimedeltaArray, the return type\n is ndarray. When the calling object is a TimedeltaIndex,\n the return type is a Float64Index. When the calling object\n is a Series, the return type is Series of type `float64` whose\n index is the same as the original.\n\n See Also\n --------\n datetime.timedelta.total_seconds : Standard library version\n of this method.\n TimedeltaIndex.components : Return a DataFrame with components of\n each Timedelta.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))\n >>> s\n 0 0 days\n 1 1 days\n 2 2 days\n 3 3 days\n 4 4 days\n dtype: timedelta64[ns]\n\n >>> s.dt.total_seconds()\n 0 0.0\n 1 86400.0\n 2 172800.0\n 3 259200.0\n 4 345600.0\n dtype: float64\n\n **TimedeltaIndex**\n\n >>> idx = pd.to_timedelta(np.arange(5), unit='d')\n >>> idx\n TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],\n dtype='timedelta64[ns]', freq=None)\n\n >>> idx.total_seconds()\n Float64Index([0.0, 86400.0, 172800.0, 259200.00000000003, 345600.0],\n dtype='float64')\n ", "language": "en", "n_whitespaces": 569, "n_words": 172, "vocab_size": 105 }
https://github.com/pandas-dev/pandas.git
1
batch_shuffle
def batch_shuffle(index_array, batch_size):
    batch_count = int(len(index_array) / batch_size)
    # to reshape we need to be cleanly divisible by batch size
    # we stash extra items and reappend them after shuffling
    last_batch = index_array[batch_count * batch_size :]
    index_array = index_array[: batch_count * batch_size]
    index_array = index_array.reshape((batch_count, batch_size))
    np.random.shuffle(index_array)
    index_array = index_array.flatten()
    return np.append(index_array, last_batch)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
11
training_utils_v1.py
119
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,870
0
83
73
42
271,853
53
keras
13
keras/engine/training_utils_v1.py
Python
8
{ "docstring": "Shuffles an array in a batch-wise fashion.\n\n Useful for shuffling HDF5 arrays\n (where one cannot access arbitrary indices).\n\n Args:\n index_array: array of indices to be shuffled.\n batch_size: integer.\n\n Returns:\n The `index_array` array, shuffled in a batch-wise fashion.\n ", "language": "en", "n_whitespaces": 73, "n_words": 37, "vocab_size": 32 }
https://github.com/keras-team/keras.git
31
_intervals
def _intervals(self, sym):
        from sympy.solvers.inequalities import _solve_inequality

        assert isinstance(self, Piecewise)
498015021131af4dbb07eb110e5badaba8250c7b
7
piecewise.py
36
Updated import locations
47,746
0
31
577
10
196,246
10
sympy
9
sympy/functions/elementary/piecewise.py
Python
82
{ "docstring": "Return a list of unique tuples, (a, b, e, i), where a and b\n are the lower and upper bounds in which the expression e of\n argument i in self is defined and $a < b$ (when involving\n numbers) or $a \\le b$ when involving symbols.\n\n If there are any relationals not involving sym, or any\n relational cannot be solved for sym, NotImplementedError is\n raised. The calling routine should have removed such\n relationals before calling this routine.\n\n The evaluated conditions will be returned as ranges.\n Discontinuous ranges will be returned separately with\n identical expressions. The first condition that evaluates to\n True will be returned as the last tuple with a, b = -oo, oo.\n ", "language": "en", "n_whitespaces": 198, "n_words": 114, "vocab_size": 84 }
https://github.com/sympy/sympy.git
4
get
def get(identifier):
  if isinstance(identifier, dict):
    return deserialize(identifier)
  elif isinstance(identifier, str):
    return deserialize(str(identifier))
  elif callable(identifier):
    return identifier
  else:
    raise ValueError(
        f'Could not interpret metric identifier: {identifier}')
b4dca51d0558e788f62a96d1009a07f773a202f4
12
__init__.py
89
Refactor disparate metrics-related files into a single metrics folder. Further work may be needed to split up the long file with individual metric definitions. However having a single file per metric may be too granular. TBD. PiperOrigin-RevId: 425248502
79,746
0
49
51
21
268,879
25
keras
8
keras/metrics/__init__.py
Python
10
{ "docstring": "Retrieves a Keras metric as a `function`/`Metric` class instance.\n\n The `identifier` may be the string name of a metric function or class.\n\n >>> metric = tf.keras.metrics.get(\"categorical_crossentropy\")\n >>> type(metric)\n <class 'function'>\n >>> metric = tf.keras.metrics.get(\"CategoricalCrossentropy\")\n >>> type(metric)\n <class '...metrics.CategoricalCrossentropy'>\n\n You can also specify `config` of the metric to this function by passing dict\n containing `class_name` and `config` as an identifier. Also note that the\n `class_name` must map to a `Metric` class\n\n >>> identifier = {\"class_name\": \"CategoricalCrossentropy\",\n ... \"config\": {\"from_logits\": True}}\n >>> metric = tf.keras.metrics.get(identifier)\n >>> type(metric)\n <class '...metrics.CategoricalCrossentropy'>\n\n Args:\n identifier: A metric identifier. One of None or string name of a metric\n function/class or metric configuration dictionary or a metric function or\n a metric class instance\n\n Returns:\n A Keras metric as a `function`/ `Metric` class instance.\n\n Raises:\n ValueError: If `identifier` cannot be interpreted.\n ", "language": "en", "n_whitespaces": 184, "n_words": 132, "vocab_size": 72 }
https://github.com/keras-team/keras.git
2
get_attrtext
def get_attrtext(value):
    m = _non_attribute_end_matcher(value)
    if not m:
        raise errors.HeaderParseError(
            "expected attrtext but found {!r}".format(value))
    attrtext = m.group()
    value = value[len(attrtext):]
    attrtext = ValueTerminal(attrtext, 'attrtext')
    _validate_xtext(attrtext)
    return attrtext, value
8198943edd73a363c266633e1aa5b2a9e9c9f526
12
_header_value_parser.py
106
add python 3.10.4 for windows
56,942
0
71
61
23
223,513
29
XX-Net
12
python3.10.4/Lib/email/_header_value_parser.py
Python
10
{ "docstring": "attrtext = 1*(any non-ATTRIBUTE_ENDS character)\n\n We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the\n token's defects list if we find non-attrtext characters. We also register\n defects for *any* non-printables even though the RFC doesn't exclude all of\n them, because we follow the spirit of RFC 5322.\n\n ", "language": "en", "n_whitespaces": 64, "n_words": 48, "vocab_size": 39 }
https://github.com/XX-net/XX-Net.git
4
from_dict
def from_dict(data, npartitions, orient="columns", dtype=None, columns=None):
    collection_types = {type(v) for v in data.values() if is_dask_collection(v)}
    if collection_types:
        raise NotImplementedError(
            "from_dict doesn't currently support Dask collections as inputs. "
            f"Objects of type {collection_types} were given in the input dict."
        )

    return from_pandas(
        pd.DataFrame.from_dict(data, orient, dtype, columns),
        npartitions,
    )
c4d35f5515191409913827fd4faa3b69a3d7399a
12
io.py
116
Backend library dispatching for IO in Dask-Array and Dask-DataFrame (#9475)
36,860
0
112
72
43
157,120
47
dask
15
dask/dataframe/io/io.py
Python
11
{ "docstring": "\n Construct a Dask DataFrame from a Python Dictionary\n\n Parameters\n ----------\n data : dict\n Of the form {field : array-like} or {field : dict}.\n npartitions : int\n The number of partitions of the index to create. Note that depending on\n the size and index of the dataframe, the output may have fewer\n partitions than requested.\n orient : {'columns', 'index', 'tight'}, default 'columns'\n The \"orientation\" of the data. If the keys of the passed dict\n should be the columns of the resulting DataFrame, pass 'columns'\n (default). Otherwise if the keys should be rows, pass 'index'.\n If 'tight', assume a dict with keys\n ['index', 'columns', 'data', 'index_names', 'column_names'].\n dtype: bool\n Data type to force, otherwise infer.\n columns: string, optional\n Column labels to use when ``orient='index'``. Raises a ValueError\n if used with ``orient='columns'`` or ``orient='tight'``.\n\n Examples\n --------\n >>> import dask.dataframe as dd\n >>> ddf = dd.from_dict({\"num1\": [1, 2, 3, 4], \"num2\": [7, 8, 9, 10]}, npartitions=2)\n ", "language": "en", "n_whitespaces": 276, "n_words": 152, "vocab_size": 111 }
https://github.com/dask/dask.git
2
_is_unpacked_egg
def _is_unpacked_egg(path):
    return (
        _is_egg_path(path) and
        os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
    )
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
12
__init__.py
57
upd; format
13,197
0
33
33
10
63,195
10
transferlearning
6
.venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py
Python
5
{ "docstring": "\n Determine if given path appears to be an unpacked egg.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
https://github.com/jindongwang/transferlearning.git
4
shuffle
def shuffle(self, items, key):
        hashes = {}
        for item in items:
            hashed = self._hash_item(item, key)
            if hashed in hashes:
                msg = "item {!r} has same hash {!r} as item {!r}".format(
                    item,
                    hashed,
                    hashes[hashed],
                )
                raise RuntimeError(msg)
            hashes[hashed] = item
        return [hashes[hashed] for hashed in sorted(hashes)]
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
runner.py
115
Refs #33476 -- Reformatted code with Black.
51,514
0
208
75
34
206,404
45
django
12
django/test/runner.py
Python
13
{ "docstring": "\n Return a new list of the items in a shuffled order.\n\n The `key` is a function that accepts an item in `items` and returns\n a string unique for that item that can be viewed as a string id. The\n order of the return value is deterministic. It depends on the seed\n and key function but not on the original order.\n ", "language": "en", "n_whitespaces": 103, "n_words": 60, "vocab_size": 41 }
https://github.com/django/django.git
1
test_transform_inverse_transform_round_trip
def test_transform_inverse_transform_round_trip(SPCA):
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 5
    X = rng.randn(n_samples, n_features)

    n_components = n_features
    spca = SPCA(
        n_components=n_components, alpha=1e-12, ridge_alpha=1e-12, random_state=0
    )
    X_trans_spca = spca.fit_transform(X)
    assert_allclose(spca.inverse_transform(X_trans_spca), X)
01fcf8a0acc7e6517faa4fc6887eb45f5d2ea77b
9
test_sparse_pca.py
118
ENH add inverse_transform in *SparsePCA (#23905)
76,298
0
64
79
24
260,503
30
scikit-learn
19
sklearn/decomposition/tests/test_sparse_pca.py
Python
10
{ "docstring": "Check the `transform` and `inverse_transform` round trip with no loss of\n information.\n ", "language": "en", "n_whitespaces": 18, "n_words": 12, "vocab_size": 12 }
https://github.com/scikit-learn/scikit-learn.git
3
extract_operations
def extract_operations(self) -> List[str]:
        if not self.step:
            return []
        try:
            operations = re.findall(r'[-+*^/]', self.step)
        except TypeError as e:
            print(f"TYPE: {type(self.step)}")
            print(f"STEP: {self.step}")
            raise e
        return operations
0f129e9c38b6b10d80982ecc412785db62842938
15
step_by_step.py
111
ROSCOE suite of metrics (#4839) * ROSCOE suite of metrics * updating tests * lint * fixing protobuf version to stop cleaninstall failures * updating requirements * convert to absolute path * moving tests because of the dependency issues * adding new dependencies in tests * add test dependencies * fixing deps * updating task list * checklist deps can't be installed on circleci * actually fix protobuf version * protobuf range * protobuf conflict with google-api-core * return tests * convert imports to absolute path * trying checklist again * trying to avoid checklist failures * checklist to teacher tests * add user option to avoid installation failure * jupiter as well * typo * moving into virtual env setup * user param not allowed in virtual env * move spacy to circleCI because it's big * replace local model with HF * fixes based on comments * remove unused nli scores, fix tests * Added path to BART model Co-authored-by: Spencer Poff <spencerpoff@gmail.com>
47,260
0
116
54
24
195,437
26
ParlAI
12
parlai/tasks/reasoning/reason_types/step_by_step.py
Python
13
{ "docstring": "\n Finds all instances of the math operations: -, +, *, ^, / in the step.\n ", "language": "en", "n_whitespaces": 30, "n_words": 15, "vocab_size": 14 }
https://github.com/facebookresearch/ParlAI.git
1
less_equal
def less_equal(x, y):
    return tf.less_equal(x, y)


@keras_export("keras.backend.maximum")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.backend.maximum") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
7
backend.py
57
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,224
1
12
17
9
269,604
9
keras
10
keras/backend.py
Python
2
{ "docstring": "Element-wise truth value of (x <= y).\n\n Args:\n x: Tensor or variable.\n y: Tensor or variable.\n\n Returns:\n A bool tensor.\n ", "language": "en", "n_whitespaces": 50, "n_words": 20, "vocab_size": 17 }
https://github.com/keras-team/keras.git
7
update_reference_in_journal_entry
def update_reference_in_journal_entry(d, journal_entry, do_not_save=False):
	jv_detail = journal_entry.get("accounts", {"name": d["voucher_detail_no"]})[0]

	if flt(d["unadjusted_amount"]) - flt(d["allocated_amount"]) != 0:
		# adjust the unreconciled balance
		amount_in_account_currency = flt(d["unadjusted_amount"]) - flt(d["allocated_amount"])
		amount_in_company_currency = amount_in_account_currency * flt(jv_detail.exchange_rate)
		jv_detail.set(d["dr_or_cr"], amount_in_account_currency)
		jv_detail.set(
			"debit" if d["dr_or_cr"] == "debit_in_account_currency" else "credit",
			amount_in_company_currency,
		)
	else:
		journal_entry.remove(jv_detail)

	# new row with references
	new_row = journal_entry.append("accounts")
	new_row.update((frappe.copy_doc(jv_detail)).as_dict())

	new_row.set(d["dr_or_cr"], d["allocated_amount"])
	new_row.set(
		"debit" if d["dr_or_cr"] == "debit_in_account_currency" else "credit",
		d["allocated_amount"] * flt(jv_detail.exchange_rate),
	)

	new_row.set(
		"credit_in_account_currency"
		if d["dr_or_cr"] == "debit_in_account_currency"
		else "debit_in_account_currency",
		0,
	)
	new_row.set("credit" if d["dr_or_cr"] == "debit_in_account_currency" else "debit", 0)

	new_row.set("reference_type", d["against_voucher_type"])
	new_row.set("reference_name", d["against_voucher"])

	new_row.against_account = cstr(jv_detail.against_account)
	new_row.is_advance = cstr(jv_detail.is_advance)
	new_row.docstatus = 1

	# will work as update after submit
	journal_entry.flags.ignore_validate_update_after_submit = True
	if not do_not_save:
		journal_entry.save(ignore_permissions=True)
494bd9ef78313436f0424b918f200dab8fc7c20b
13
utils.py
495
style: format code with black
13,890
0
75
283
76
65,410
112
erpnext
26
erpnext/accounts/utils.py
Python
34
{ "docstring": "\n\tUpdates against document, if partial amount splits into rows\n\t", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/frappe/erpnext.git
3
test_sync_call_healthy_only
def test_sync_call_healthy_only(self):
        actors = [Actor.remote(i) for i in range(4)]
        manager = FaultTolerantActorManager(actors=actors)

        results = []
        for _ in range(10):
            results.extend(
                manager.foreach_actor(
                    lambda w: w.call(), healthy_only=True
                ).ignore_errors()
            )
            # Wait for actors to recover.
            wait_for_restore()

        # Notice that since we only fire calls against healthy actors,
        # we wouldn't be aware that the actors have been recovered.
        # So once an actor is taken out of the lineup (10% chance),
        # it will not go back in, and we should have few results here.
        # Basically takes us 10 calls to kill all the actors.
        # Note that we can hardcode 10 here because we are using deterministic
        # sequences of random numbers.
        self.assertEqual(len(results), 10)
d329147ae28c57b290f6b932f9f3044523f67c4e
16
test_actor_manager.py
144
[RLlib] Introduce FaultTolerantActorManager (#29703) Signed-off-by: Jun Gong <jungong@anyscale.com>
30,658
0
298
83
86
135,567
114
ray
20
rllib/utils/tests/test_actor_manager.py
Python
12
{ "docstring": "Test synchronous remote calls to only healthy actors.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/ray-project/ray.git
1
test_base_form_class_used
def test_base_form_class_used(self):
        edit_url = reverse(
            "wagtailadmin_pages:add",
            args=("tests", "formclassadditionalfieldpage", self.test_page.id),
        )
        response = self.client.get(edit_url)
        self.assertContains(
            response,
            '<input type="text" name="code" required id="id_code" maxlength="5" />',
            html=True,
        )
d10f15e55806c6944827d801cd9c2d53f5da4186
12
test_revisions.py
84
Reformat with black
15,715
0
121
50
22
71,720
24
wagtail
12
wagtail/admin/tests/pages/test_revisions.py
Python
11
{ "docstring": "First ensure that the non-model field is appearing in edit.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/wagtail/wagtail.git
3
components
def components(self) -> Dict[str, BaseComponent]:
        all_components = self._find_all_components()
        return {component.name: component for component in all_components if component.name is not None}
f6e3a639063887f9f5b27f574a04c7fe602b3185
9
base.py
61
Prevent losing names of utilized components when loaded from config (#2525) * Prevent losing names of utilized components when loaded from config * Update Documentation & Code Style * update test * fix failing tests * Update Documentation & Code Style * fix even more tests * Update Documentation & Code Style * incorporate review feedback Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
75,070
0
41
39
18
257,346
20
haystack
9
haystack/pipelines/base.py
Python
7
{ "docstring": "\n Returns all components used by this pipeline.\n Note that this also includes such components that are being utilized by other components only and are not being used as a pipeline node directly.\n ", "language": "en", "n_whitespaces": 54, "n_words": 32, "vocab_size": 24 }
https://github.com/deepset-ai/haystack.git
5
_ensure_iterable_column_indexer
def _ensure_iterable_column_indexer(self, column_indexer):
        ilocs: Sequence[int]
        if is_integer(column_indexer):
            ilocs = [column_indexer]
        elif isinstance(column_indexer, slice):
            ilocs = np.arange(len(self.obj.columns))[column_indexer]
        elif isinstance(column_indexer, np.ndarray) and is_bool_dtype(
            column_indexer.dtype
        ):
            ilocs = np.arange(len(column_indexer))[column_indexer]
        else:
            ilocs = column_indexer
        return ilocs
b5c6e4713ae4397cd047cb41f11aca4d27fb6096
16
indexing.py
143
CLN: suppress warnings (#45212)
39,403
0
143
89
23
163,213
32
pandas
17
pandas/core/indexing.py
Python
13
{ "docstring": "\n Ensure that our column indexer is something that can be iterated over.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
https://github.com/pandas-dev/pandas.git
1
_start
def _start(self) -> int:
        warnings.warn(
            self._deprecation_message.format("_start", "start"),
            FutureWarning,
            stacklevel=find_stack_level(inspect.currentframe()),
        )
        return self.start
2f8d0a36703e81e4dca52ca9fe4f58c910c1b304
12
range.py
69
PERF cache find_stack_level (#48023) cache stacklevel
40,259
0
73
41
12
168,252
12
pandas
13
pandas/core/indexes/range.py
Python
13
{ "docstring": "\n The value of the `start` parameter (``0`` if this was not supplied).\n\n .. deprecated:: 0.25.0\n Use ``start`` instead.\n ", "language": "en", "n_whitespaces": 52, "n_words": 18, "vocab_size": 18 }
https://github.com/pandas-dev/pandas.git
1
test_update_notice_user_name_when_changed
def test_update_notice_user_name_when_changed(self) -> None:
        server_notice_request_content = {
            "user_id": self.other_user,
            "content": {"msgtype": "m.text", "body": "test msg one"},
        }

        self.make_request(
            "POST",
            self.url,
            access_token=self.admin_user_tok,
            content=server_notice_request_content,
        )

        # simulate a change in server config after a server restart.
        new_display_name = "new display name"
        self.server_notices_manager._config.servernotices.server_notices_mxid_display_name = (
            new_display_name
        )
        self.server_notices_manager.get_or_create_notice_room_for_user.cache.invalidate_all()

        self.make_request(
            "POST",
            self.url,
            access_token=self.admin_user_tok,
            content=server_notice_request_content,
        )

        invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)
        notice_room_id = invited_rooms[0].room_id
        self.helper.join(
            room=notice_room_id, user=self.other_user, tok=self.other_user_token
        )

        notice_user_state_in_room = self.helper.get_state(
            notice_room_id,
            "m.room.member",
            self.other_user_token,
            state_key="@notices:test",
        )
        self.assertEqual(notice_user_state_in_room["displayname"], new_display_name)
2e2d8cc2f9b9af5f8b48d75e22c474e08feca236
11
test_server_notice.py
282
Update the server notices user profile in room if changed. (#12115)
72,024
0
383
175
57
247,957
74
synapse
31
tests/rest/admin/test_server_notice.py
Python
38
{ "docstring": "\n Tests that existing server notices user name in room is updated after\n server notice config changes.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 15 }
https://github.com/matrix-org/synapse.git
1
_create_basic_room
def _create_basic_room(self) -> Tuple[int, object]:
        channel = self.make_request(
            "POST",
            "/createRoom",
            {},
        )
        return channel.code, channel.json_body
6a6e1e8c0711939338f25d8d41d1e4d33d984949
9
test_rooms.py
58
Fix room creation being rate limited too aggressively since Synapse v1.69.0. (#14314) * Introduce a test for the old behaviour which we want to restore * Reintroduce the old behaviour in a simpler way * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) <oliverw@matrix.org> * Use 1 credit instead of 2 for creating a room: be more lenient than before Notably, the UI in Element Web was still broken after restoring to prior behaviour. After discussion, we agreed that it would be sensible to increase the limit. Signed-off-by: Olivier Wilkinson (reivilibre) <oliverw@matrix.org>
73,110
0
76
35
15
249,764
15
synapse
9
tests/rest/client/test_rooms.py
Python
10
{ "docstring": "\n Tries to create a basic room and returns the response code.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
https://github.com/matrix-org/synapse.git
15
boundary_slice
def boundary_slice(df, start, stop, right_boundary=True, left_boundary=True, kind=None):
    if len(df.index) == 0:
        return df

    if PANDAS_GT_131:
        if kind is not None:
            warnings.warn(
                "The `kind` argument is no longer used/supported. "
                "It will be dropped in a future release.",
                category=FutureWarning,
            )
        kind_opts = {}
        kind = "loc"
    else:
        kind = kind or "loc"
        kind_opts = {"kind": kind}

    if kind == "loc" and not df.index.is_monotonic_increasing:
        # Pandas treats missing keys differently for label-slicing
        # on monotonic vs. non-monotonic indexes
        # If the index is monotonic, `df.loc[start:stop]` is fine.
        # If it's not, `df.loc[start:stop]` raises when `start` is missing
        if start is not None:
            if left_boundary:
                df = df[df.index >= start]
            else:
                df = df[df.index > start]
        if stop is not None:
            if right_boundary:
                df = df[df.index <= stop]
            else:
                df = df[df.index < stop]
        return df

    result = getattr(df, kind)[start:stop]
    if not right_boundary and stop is not None:
        right_index = result.index.get_slice_bound(stop, "left", **kind_opts)
        result = result.iloc[:right_index]
    if not left_boundary and start is not None:
        left_index = result.index.get_slice_bound(start, "right", **kind_opts)
        result = result.iloc[left_index:]

    return result
5e8a4813cf948250608b16747773a7dc52088eb6
16
methods.py
378
Deprecate `is_monotonic` (#8653) This PR deprecates `is_monotonic` to follow what `pandas` is doing upstream. This also resolves some test failures in our `upstream` build
36,453
0
488
233
95
155,711
171
dask
22
dask/dataframe/methods.py
Python
35
{ "docstring": "Index slice start/stop. Can switch include/exclude boundaries.\n\n Examples\n --------\n >>> df = pd.DataFrame({'x': [10, 20, 30, 40, 50]}, index=[1, 2, 2, 3, 4])\n >>> boundary_slice(df, 2, None)\n x\n 2 20\n 2 30\n 3 40\n 4 50\n >>> boundary_slice(df, 1, 3)\n x\n 1 10\n 2 20\n 2 30\n 3 40\n >>> boundary_slice(df, 1, 3, right_boundary=False)\n x\n 1 10\n 2 20\n 2 30\n\n Empty input DataFrames are returned\n\n >>> df_empty = pd.DataFrame()\n >>> boundary_slice(df_empty, 1, 3)\n Empty DataFrame\n Columns: []\n Index: []\n ", "language": "en", "n_whitespaces": 184, "n_words": 80, "vocab_size": 49 }
https://github.com/dask/dask.git
3
_ray
def _ray(self) -> "ray":
        global ray

        if ray is None:
            try:
                import ray
            except ImportError as exc:
                raise RuntimeError(
                    "Using the `RayTaskRunner` requires `ray` to be installed."
                ) from exc

        return ray
f97603bba836c215e153d7d3d5b3b9de4d0ae822
13
task_runners.py
61
First draft `RayTaskRunner` implementation
10,926
0
146
33
29
53,859
32
prefect
6
src/prefect/task_runners.py
Python
14
{ "docstring": "\n Delayed import of `ray` allowing configuration of the task runner\n without the extra installed and improves `prefect` import times.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 16 }
https://github.com/PrefectHQ/prefect.git
1
preprocess_WEBP
def preprocess_WEBP(self, image):
        save_kwargs = {
            "quality": self.WEBP_QUAL,
            "lossless": self.LOSSLESS_WEBP,
            "icc_profile": image.info.get("icc_profile", ""),
        }

        return (image, save_kwargs)
5d1a36b9aaf408016957db04f86397b2e53c2500
11
utils.py
74
Better media thumbnails including WebP support (#9988) * Add thumbnail app * Update get_thumbnail_size method and add tests * Add logic for creating thumbnails * Update logic for getting thumbnail * Allow defining format for tumbnail generation * Clear handle_thumbnail views * Add prepare_image_proxy_url method * Use ImageField for user avatar * Allow defining thumbnail format when querying user avatar * Use ImageField for category backgound_image * Use ImageField for Collection backgound_image * Use ImageField for ProductMedia image * Ensure that thumbnails are deleted when category background_image is changed or deleted * Ensure that thumbnails are deleted when collection background_image is changed or deleted * Update product media deleteion task and failing tests * Delete thumbnail from storage when thumbnail objects is deleted * Fix import in product test_bulk_delete * Drop create_thumbnails command * Update Product.thumbnail resolver * Update OrderLine thumbnail resolver * Add missing ADDED_IN_35 and PREVIEW_FEATURE labels * Update account and product signals - ensure the image is deleted from storage * Refactor product_images methods * Add signal for product media image delete * Drop create_thumbnails method and not longer valid settings fields * Clean the ProcessedImage class * Drop versatileimagefield from INSTALLED_APPS * Update changelog * Drop comments from ThumbnailFormat * Add get_image_or_proxy_url method * Apply reiew suggestions - add ThumbnailField and use get_image_or_proxy_ur when it's possible * Update changelog * Replace ADDED_IN_35 with ADDED_IN_36 label * Update changelog Co-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>
5,150
0
78
43
17
27,993
17
saleor
8
saleor/thumbnail/utils.py
Python
7
{ "docstring": "Receive a PIL Image instance of a WEBP and return 2-tuple.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
https://github.com/saleor/saleor.git
3
spherical_bessel_fn
def spherical_bessel_fn(n, x=None, polys=False):
    if x is None:
        x = Dummy("x")
    f = dup_spherical_bessel_fn_minus if n < 0 else dup_spherical_bessel_fn
    return named_poly(abs(n), f, ZZ, "", (QQ(1)/x,), polys)
d1d46df73ebaad94089847558d00a8b7269f554d
11
orthopolys.py
93
Run orthopolys and appellseqs through a common interface Including unifying the two Chebyshev generators into one function. There are also two kinds of Hermite polynomials, and they too share the same recurrence, but the second type He_n(x) (aka the probabilist, reduced or small polynomials) will not be added here.
49,353
0
46
60
24
199,697
27
sympy
12
sympy/polys/orthopolys.py
Python
5
{ "docstring": "\n Coefficients for the spherical Bessel functions.\n\n These are only needed in the jn() function.\n\n The coefficients are calculated from:\n\n fn(0, z) = 1/z\n fn(1, z) = 1/z**2\n fn(n-1, z) + fn(n+1, z) == (2*n+1)/z * fn(n, z)\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n\n Examples\n ========\n\n >>> from sympy.polys.orthopolys import spherical_bessel_fn as fn\n >>> from sympy import Symbol\n >>> z = Symbol(\"z\")\n >>> fn(1, z)\n z**(-2)\n >>> fn(2, z)\n -1/z + 3/z**3\n >>> fn(3, z)\n -6/z**2 + 15/z**4\n >>> fn(4, z)\n 1/z - 45/z**3 + 105/z**5\n\n ", "language": "en", "n_whitespaces": 195, "n_words": 105, "vocab_size": 75 }
https://github.com/sympy/sympy.git
4
_try_import_backends
def _try_import_backends(self) -> _BackendImports:
        # pylint: disable=unused-import
        results = _BackendImports()

        try:
            from qutebrowser.qt import webkit as QtWebKit
            from qutebrowser.qt.webkit import qWebKitVersion
            from qutebrowser.qt import webkit as QtWebKitWidgets
        except (ImportError, ValueError) as e:
            results.webkit_error = str(e)
        else:
            if not qtutils.is_new_qtwebkit():
                results.webkit_error = "Unsupported legacy QtWebKit found"

        try:
            from qutebrowser.qt import webengine as QtWebEngineWidgets
        except (ImportError, ValueError) as e:
            results.webengine_error = str(e)

        return results
d47cfd99d7374a41b4c228c21221c7850e65d0b1
12
backendproblem.py
166
Run scripts/dev/rewrite_qt_imports.sh
117,494
0
217
100
37
321,054
62
qutebrowser
20
qutebrowser/misc/backendproblem.py
Python
17
{ "docstring": "Check whether backends can be imported and return BackendImports.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/qutebrowser/qutebrowser.git
3
test_sync_in_area
async def test_sync_in_area(area_on_device, hass, registries):
    area = registries.area.async_create("Living Room")

    device = registries.device.async_get_or_create(
        config_entry_id="1234",
        manufacturer="Someone",
        model="Some model",
        sw_version="Some Version",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    registries.device.async_update_device(
        device.id, area_id=area.id if area_on_device else None
    )

    entity = registries.entity.async_get_or_create(
        "light",
        "test",
        "1235",
        suggested_object_id="demo_light",
        device_id=device.id,
    )
    entity = registries.entity.async_update_entity(
        entity.entity_id, area_id=area.id if not area_on_device else None
    )

    light = DemoLight(
        None,
        "Demo Light",
        state=False,
        hs_color=(180, 75),
        effect_list=LIGHT_EFFECT_LIST,
        effect=LIGHT_EFFECT_LIST[0],
    )
    light.hass = hass
    light.entity_id = entity.entity_id
    await light.async_update_ha_state()

    config = MockConfig(should_expose=lambda _: True, entity_config={})

    events = async_capture_events(hass, EVENT_SYNC_RECEIVED)

    result = await sh.async_handle_message(
        hass,
        config,
        "test-agent",
        {"requestId": REQ_ID, "inputs": [{"intent": "action.devices.SYNC"}]},
        const.SOURCE_CLOUD,
    )

    assert result == {
        "requestId": REQ_ID,
        "payload": {
            "agentUserId": "test-agent",
            "devices": [
                {
                    "id": "light.demo_light",
                    "name": {"name": "Demo Light"},
                    "traits": [
                        trait.TRAIT_BRIGHTNESS,
                        trait.TRAIT_ONOFF,
                        trait.TRAIT_COLOR_SETTING,
                        trait.TRAIT_MODES,
                    ],
                    "type": const.TYPE_LIGHT,
                    "willReportState": False,
                    "attributes": {
                        "availableModes": [
                            {
                                "name": "effect",
                                "name_values": [
                                    {"lang": "en", "name_synonym": ["effect"]}
                                ],
                                "ordered": False,
                                "settings": [
                                    {
                                        "setting_name": "rainbow",
                                        "setting_values": [
                                            {"lang": "en", "setting_synonym": ["rainbow"]}
                                        ],
                                    },
                                    {
                                        "setting_name": "none",
                                        "setting_values": [
                                            {"lang": "en", "setting_synonym": ["none"]}
                                        ],
                                    },
                                ],
                            }
                        ],
                        "colorModel": "hsv",
                        "colorTemperatureRange": {
                            "temperatureMinK": 2000,
                            "temperatureMaxK": 6535,
                        },
                    },
                    "deviceInfo": {
                        "manufacturer": "Someone",
                        "model": "Some model",
                        "swVersion": "Some Version",
                    },
                    "roomHint": "Living Room",
                }
            ],
        },
    }
    await hass.async_block_till_done()

    assert len(events) == 1
    assert events[0].event_type == EVENT_SYNC_RECEIVED
    assert events[0].data == {"request_id": REQ_ID, "source": "cloud"}
1bc8770b51658f0dc1bd076b392d70be5a7433bc
26
test_smart_home.py
798
Remove area_id from entity_registry.async_get_or_create (#77700) * Remove area_id from entity_registry.async_get_or_create * Adjust tests * Fix lying comment in test
104,292
0
1,944
466
139
305,503
213
core
55
tests/components/google_assistant/test_smart_home.py
Python
105
{ "docstring": "Test a sync message where room hint comes from area.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/home-assistant/core.git
1
get_document_chosen_response
def get_document_chosen_response(request, document):
    return render_modal_workflow(
        request,
        None,
        None,
        None,
        json_data={
            "step": "document_chosen",
            "result": {
                "id": document.id,
                "title": document.title,
                "url": document.url,
                "filename": document.filename,
                "edit_link": reverse("wagtaildocs:edit", args=(document.id,)),
            },
        },
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
17
chooser.py
121
Reformat with black
16,329
0
187
74
25
74,901
28
wagtail
11
wagtail/documents/views/chooser.py
Python
17
{ "docstring": "\n helper function: given a document, return the modal workflow response that returns that\n document back to the calling page\n ", "language": "en", "n_whitespaces": 29, "n_words": 19, "vocab_size": 17 }
https://github.com/wagtail/wagtail.git
1
call
def call(self, *args, **kwargs):
        warnings.warn(
            "'call()' method is deprecated. " + "Use '__call__()' instead",
            DeprecationWarning,
        )
        return self.__call__(*args, **kwargs)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
9
__init__.py
58
upd; format
13,507
0
69
34
19
63,798
19
transferlearning
8
.venv/lib/python3.8/site-packages/pip/_vendor/tenacity/__init__.py
Python
6
{ "docstring": "Use ``__call__`` instead because this method is deprecated.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/jindongwang/transferlearning.git
5
execute
def execute(filters=None):
	filters = filters if isinstance(filters, frappe._dict) else frappe._dict(filters)
	if not filters:
		filters.setdefault('fiscal_year', get_fiscal_year(nowdate())[0])
		filters.setdefault('company', frappe.db.get_default("company"))

	region = frappe.db.get_value("Company",
		filters={"name": filters.company},
		fieldname=["country"])

	if region != 'United States':
		return [], []

	columns = get_columns()
	conditions = ""
	if filters.supplier_group:
		conditions += "AND s.supplier_group = %s" %frappe.db.escape(filters.get("supplier_group"))

	data = frappe.db.sql(.format(conditions=conditions), {
		"fiscal_year": filters.fiscal_year,
		"company": filters.company
	}, as_dict=True)

	return columns, data
fe4b6771b5fd935ed278cf553c864a18e3356a33
14
irs_1099.py
283
refactor: Remove dead code (#30140)
13,666
0
40
171
46
64,559
59
erpnext
25
erpnext/regional/report/irs_1099/irs_1099.py
Python
41
{ "docstring": "\n\t\tSELECT\n\t\t\ts.supplier_group as \"supplier_group\",\n\t\t\tgl.party AS \"supplier\",\n\t\t\ts.tax_id as \"tax_id\",\n\t\t\tSUM(gl.debit_in_account_currency) AS \"payments\"\n\t\tFROM\n\t\t\t`tabGL Entry` gl\n\t\t\t\tINNER JOIN `tabSupplier` s\n\t\tWHERE\n\t\t\ts.name = gl.party\n\t\t\t\tAND s.irs_1099 = 1\n\t\t\t\tAND gl.fiscal_year = %(fiscal_year)s\n\t\t\t\tAND gl.party_type = \"Supplier\"\n\t\t\t\tAND gl.company = %(company)s\n\t\t\t\t{conditions}\n\n\t\tGROUP BY\n\t\t\tgl.party\n\n\t\tORDER BY\n\t\t\tgl.party DESC", "language": "en", "n_whitespaces": 30, "n_words": 49, "vocab_size": 36 }
https://github.com/frappe/erpnext.git
1
get_student_group_strength
def get_student_group_strength(student_group):
	student_group_strength = frappe.db.sql(
		,
		student_group,
	)[0][0]
	return student_group_strength
494bd9ef78313436f0424b918f200dab8fc7c20b
11
student_batch_wise_attendance.py
41
style: format code with black
14,064
0
4
26
9
65,944
10
erpnext
6
erpnext/education/report/student_batch_wise_attendance/student_batch_wise_attendance.py
Python
7
{ "docstring": "select count(*) from `tabStudent Group Student`\n\t\twhere parent = %s and active=1", "language": "en", "n_whitespaces": 10, "n_words": 12, "vocab_size": 12 }
https://github.com/frappe/erpnext.git
1
source
def source(self) -> "np.ndarray":
    assert self._source is not None
    return self._source
7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5
7
preview_tk.py
34
Training - Use custom preview pop-out
20,979
0
32
19
10
101,569
11
faceswap
3
lib/training/preview_tk.py
Python
4
{ "docstring": " :class:`PIL.Image.Image`: The current source preview image ", "language": "en", "n_whitespaces": 7, "n_words": 6, "vocab_size": 6 }
https://github.com/deepfakes/faceswap.git
1
summer
def summer():
    set_cmap('summer')


# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
032316bc6c7798fca6c82de24167c975f237687f
8
pyplot.py
22
Cleanup documentation generation for pyplot - remove the awkward `pyplot.plotting()` function, which only served as a namespace to take up the docs for pyplot and output them via `.. autofunction` - Instead generate the same information using `.. autosummary::`. We have to list the desired methods here explicitly. I've added a test that these are the same as previously auto-generated in the `plotting()` docstring. If we change anything in pyplot, we'll be notified through the test failure that we have to adapt the autosummary list. - Removed the docstring generation logic `_setup_pyplot_info_docstrings()`. Apart from generating the `plotting()` docstring, this added docstrings to the pyplot colormap setters. Instead, we now add these docstrings directly via boilerplate.py Co-authored-by: Elliott Sales de Andrade <quantum.analyst@gmail.com>
23,227
0
21
9
15
108,516
15
matplotlib
2
lib/matplotlib/pyplot.py
Python
2
{ "docstring": "\n Set the colormap to 'summer'.\n\n This changes the default colormap as well as the colormap of the current\n image if there is one. See ``help(colormaps)`` for more information.\n ", "language": "en", "n_whitespaces": 41, "n_words": 28, "vocab_size": 22 }
https://github.com/matplotlib/matplotlib.git
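A short usage sketch for the `summer` helper above; standard matplotlib calls, with random data used only for illustration:

import matplotlib.pyplot as plt
import numpy as np

data = np.random.rand(10, 10)
plt.imshow(data)   # drawn with the current default colormap
plt.summer()       # switches the default and the current image to 'summer'
plt.colorbar()
plt.show()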
1
test_all_users
def test_all_users(self) -> None:
    self._create_users(2)

    channel = self.make_request(
        "GET",
        self.url + "?deactivated=true",
        {},
        access_token=self.admin_user_tok,
    )

    self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
    self.assertEqual(3, len(channel.json_body["users"]))
    self.assertEqual(3, channel.json_body["total"])

    # Check that all fields are available
    self._check_fields(channel.json_body["users"])
901b264c0c88f39cbfb8b2229e0dc57968882658
11
test_user.py
157
Add type hints to `tests/rest/admin` (#11851)
71,062
0
137
96
29
246,168
30
synapse
16
tests/rest/admin/test_user.py
Python
15
{ "docstring": "\n List all users, including deactivated users.\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
https://github.com/matrix-org/synapse.git
2
test_ssh_sync
def test_ssh_sync():
    experiment_name = "cloud_ssh_sync"
    indicator_file = f"/tmp/{experiment_name}_indicator"
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
8
run_cloud_test.py
30
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
30,062
0
17
70
7
133,619
8
ray
3
release/tune_tests/cloud_tests/workloads/run_cloud_test.py
Python
16
{ "docstring": "\n SSH syncing, so:\n\n syncer=\"auto\"\n upload_dir=None\n\n Expected results after first checkpoint:\n\n - 4 trials are running\n - At least one trial ran on the head node\n - At least one trial ran remotely\n - Driver has trial checkpoints from head node trial\n - Driver has trial checkpoints from remote node trials\n - Remote trial dirs only have data for one trial\n - Remote trial dirs have checkpoints for node-local trials\n\n Then, remote checkpoint directories are cleaned up.\n\n Expected results after second checkpoint:\n\n - 4 trials are running\n - All trials progressed with training\n\n ", "language": "en", "n_whitespaces": 185, "n_words": 92, "vocab_size": 47 }
https://github.com/ray-project/ray.git
3
parse_time_mapped_ti_count
def parse_time_mapped_ti_count(self) -> Optional[int]:
    total = 0
    for value in self._get_expansion_kwargs().values():
        if not isinstance(value, MAPPABLE_LITERAL_TYPES):
            # None literal type encountered, so give up
            return None
        total += len(value)
    return total
91832a42d8124b040073481fd93c54e9e64c2609
10
mappedoperator.py
77
Expand mapped tasks at DagRun.Veriy_integrity (#22679) Create the necessary task instances for a mapped task at dagrun.verify_integrity Co-authored-by: Ash Berlin-Taylor <ash@apache.org>
9,032
0
110
46
26
46,884
30
airflow
11
airflow/models/mappedoperator.py
Python
13
{ "docstring": "\n Number of mapped TaskInstances that can be created at DagRun create time.\n\n :return: None if non-literal mapped arg encountered, or else total number of mapped TIs this task\n should have\n ", "language": "en", "n_whitespaces": 63, "n_words": 30, "vocab_size": 27 }
https://github.com/apache/airflow.git
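A plain-Python sketch of the literal/non-literal decision made above; the dict stands in for `_get_expansion_kwargs()` and `(list, dict)` stands in for `MAPPABLE_LITERAL_TYPES`:

expansion_kwargs = {"x": [1, 2, 3], "y": ["a", "b"]}   # all literals -> 3 + 2 = 5 per the summation above

total = 0
for value in expansion_kwargs.values():
    if not isinstance(value, (list, dict)):   # e.g. an upstream task's output, length unknown until runtime
        total = None
        break
    total += len(value)
print(total)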
7
convert_path
def convert_path(pathname):
    if os.sep == '/':
        return pathname
    if not pathname:
        return pathname
    if pathname[0] == '/':
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname[-1] == '/':
        raise ValueError("path '%s' cannot end with '/'" % pathname)

    paths = pathname.split('/')
    while os.curdir in paths:
        paths.remove(os.curdir)
    if not paths:
        return os.curdir
    return os.path.join(*paths)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
11
util.py
163
upd; format
12,879
0
122
93
32
62,143
53
transferlearning
11
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/util.py
Python
15
{ "docstring": "Return 'pathname' as a name that will work on the native filesystem.\n\n The path is split on '/' and put back together again using the current\n directory separator. Needed because filenames in the setup script are\n always supplied in Unix style, and have to be converted to the local\n convention before we can actually use them in the filesystem. Raises\n ValueError on non-Unix-ish systems if 'pathname' either starts or\n ends with a slash.\n ", "language": "en", "n_whitespaces": 96, "n_words": 73, "vocab_size": 60 }
https://github.com/jindongwang/transferlearning.git
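Illustrative calls for the helper above, assuming a Windows-style `os.sep`; on POSIX every path is returned unchanged:

convert_path("foo/bar/baz.txt")   # -> "foo\\bar\\baz.txt" when os.sep is "\\"
convert_path("")                  # -> "" (empty paths pass through)
convert_path("/abs/path")         # raises ValueError: path cannot be absolute
convert_path("pkg/data/")         # raises ValueError: path cannot end with '/'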
5
_async_update_data
async def _async_update_data(self) -> _DataT:
    interval = self.update_interval
    if (
        interval is not None
        and self.last_update_success
        and self.data
        and self.push_coordinator.active(interval)
    ):
        data = self.data
    else:
        data = await super()._async_update_data()
    return data
653805584bac4907ba6da527071bceb9d7313019
14
coordinator.py
96
Improve `lookin` generic typing (#84636)
97,031
0
139
57
23
298,084
31
core
10
homeassistant/components/lookin/coordinator.py
Python
13
{ "docstring": "Fetch data only if we have not received a push inside the interval.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
https://github.com/home-assistant/core.git
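A plain-Python sketch of the guard implemented above; the function name and arguments are illustrative, not part of the coordinator:

def should_poll(interval, last_update_success, cached_data, push_active):
    # Skip the network fetch only when an interval is configured, a push arrived
    # inside that interval, and there is trustworthy cached data to reuse.
    if interval is not None and last_update_success and cached_data and push_active:
        return False
    return True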
3
gs_distill
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
    if eps:
        paper_option = "-dEPSCrop"
    else:
        paper_option = "-sPAPERSIZE=%s" % ptype

    psfile = tmpfile + '.ps'
    dpi = mpl.rcParams['ps.distiller.res']

    cbook._check_and_log_subprocess(
        [mpl._get_executable_info("gs").executable,
         "-dBATCH", "-dNOPAUSE", "-r%d" % dpi, "-sDEVICE=ps2write",
         paper_option, "-sOutputFile=%s" % psfile, tmpfile],
        _log)

    os.remove(tmpfile)
    shutil.move(psfile, tmpfile)

    # While it is best if above steps preserve the original bounding
    # box, there seem to be cases when it is not. For those cases,
    # the original bbox can be restored during the pstoeps step.

    if eps:
        # For some versions of gs, above steps result in a ps file where the
        # original bbox is no more correct. Do not adjust bbox for now.
        pstoeps(tmpfile, bbox, rotated=rotated)
383de519505964ed879c40b23ef36e90c17ebe0d
12
backend_ps.py
189
[Doc] fix more spelling and grammar
24,070
0
213
110
85
110,331
112
matplotlib
21
lib/matplotlib/backends/backend_ps.py
Python
16
{ "docstring": "\n Use ghostscript's pswrite or epswrite device to distill a file.\n This yields smaller files without illegal encapsulated postscript\n operators. The output is low-level, converting text to outlines.\n ", "language": "en", "n_whitespaces": 40, "n_words": 27, "vocab_size": 26 }
https://github.com/matplotlib/matplotlib.git
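A hypothetical call to the function above; the file name and bounding box are made up. It distills an intermediate PostScript file in place, cropping to the EPS bounding box instead of a fixed paper size:

gs_distill("/tmp/figure.ps", eps=True, bbox=(0, 0, 360, 252), rotated=False)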
2
_get_const_info
def _get_const_info(const_index, const_list):
    argval = const_index
    if const_list is not None:
        argval = const_list[const_index]
    return argval, repr(argval)
8198943edd73a363c266633e1aa5b2a9e9c9f526
9
dis.py
49
add python 3.10.4 for windows
56,617
0
36
30
15
222,526
17
XX-Net
5
python3.10.4/Lib/dis.py
Python
5
{ "docstring": "Helper to get optional details about const references\n\n Returns the dereferenced constant and its repr if the constant\n list is defined.\n Otherwise returns the constant index and its repr().\n ", "language": "en", "n_whitespaces": 50, "n_words": 29, "vocab_size": 23 }
https://github.com/XX-net/XX-Net.git
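Illustrative calls showing both branches of the helper above (it is private to `dis`):

_get_const_info(1, ("spam", 42))   # -> (42, '42'): dereferences into the constant list
_get_const_info(1, None)           # -> (1, '1'): falls back to the raw index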
4
draw
def draw(self, subs_dict=None):
    if not numpy:
        raise ImportError("To use this function numpy module is required")

    x = Symbol('x')

    markers = []
    annotations = []
    rectangles = []

    node_markers = self._draw_nodes(subs_dict)
    markers += node_markers

    member_rectangles = self._draw_members()
    rectangles += member_rectangles

    support_markers = self._draw_supports()
    markers += support_markers

    load_annotations = self._draw_loads()
    annotations += load_annotations

    xmax = -INF
    xmin = INF
    ymax = -INF
    ymin = INF

    for node in list(self._node_coordinates):
        xmax = max(xmax, self._node_coordinates[node][0])
        xmin = min(xmin, self._node_coordinates[node][0])
        ymax = max(ymax, self._node_coordinates[node][1])
        ymin = min(ymin, self._node_coordinates[node][1])

    lim = max(xmax*1.1-xmin*0.8+1, ymax*1.1-ymin*0.8+1)

    if lim==xmax*1.1-xmin*0.8+1:
        sing_plot = plot(1, (x, 1, 1), markers=markers, show=False, annotations=annotations,
                         xlim=(xmin-0.05*lim, xmax*1.1), ylim=(xmin-0.05*lim, xmax*1.1),
                         axis=False, rectangles=rectangles)
    else:
        sing_plot = plot(1, (x, 1, 1), markers=markers, show=False, annotations=annotations,
                         xlim=(ymin-0.05*lim, ymax*1.1), ylim=(ymin-0.05*lim, ymax*1.1),
                         axis=False, rectangles=rectangles)

    return sing_plot
2ddc46704dffa81a5a2a8df4348bf98dff07ebd5
15
truss.py
495
subs_dict added to the draw method
49,498
0
360
359
69
200,032
122
sympy
35
sympy/physics/continuum_mechanics/truss.py
Python
30
{ "docstring": "\n Returns a plot object of the Truss with all its nodes, members,\n supports and loads.\n\n .. note::\n The user must be careful while entering load values in their\n directions. The draw function assumes a sign convention that\n is used for plotting loads.\n\n Given a right-handed coordinate system with XYZ coordinates,\n the supports are assumed to be such that the reaction forces of a\n pinned support is in the +X and +Y direction while those of a\n roller support is in the +Y direction. For the load, the range\n of angles, one can input goes all the way to 360 degrees which, in the\n the plot is the angle that the load vector makes with the positive x-axis in the anticlockwise direction.\n\n For example, for a 90-degree angle, the load will be a vertically\n directed along +Y while a 270-degree angle denotes a vertical\n load as well but along -Y.\n\n Examples\n ========\n\n .. plot::\n :context: close-figs\n :format: doctest\n :include-source: True\n\n >>> from sympy.physics.continuum_mechanics.truss import Truss\n >>> import math\n >>> t = Truss()\n >>> t.add_node(\"A\", -4, 0)\n >>> t.add_node(\"B\", 0, 0)\n >>> t.add_node(\"C\", 4, 0)\n >>> t.add_node(\"D\", 8, 0)\n >>> t.add_node(\"E\", 6, 2/math.sqrt(3))\n >>> t.add_node(\"F\", 2, 2*math.sqrt(3))\n >>> t.add_node(\"G\", -2, 2/math.sqrt(3))\n >>> t.add_member(\"AB\",\"A\",\"B\")\n >>> t.add_member(\"BC\",\"B\",\"C\")\n >>> t.add_member(\"CD\",\"C\",\"D\")\n >>> t.add_member(\"AG\",\"A\",\"G\")\n >>> t.add_member(\"GB\",\"G\",\"B\")\n >>> t.add_member(\"GF\",\"G\",\"F\")\n >>> t.add_member(\"BF\",\"B\",\"F\")\n >>> t.add_member(\"FC\",\"F\",\"C\")\n >>> t.add_member(\"CE\",\"C\",\"E\")\n >>> t.add_member(\"FE\",\"F\",\"E\")\n >>> t.add_member(\"DE\",\"D\",\"E\")\n >>> t.apply_support(\"A\",\"pinned\")\n >>> t.apply_support(\"D\",\"roller\")\n >>> t.apply_load(\"G\", 3, 90)\n >>> t.apply_load(\"E\", 3, 90)\n >>> t.apply_load(\"F\", 2, 90)\n >>> p = t.draw()\n >>> p\n Plot object containing:\n [0]: cartesian line: 1 for x over (1.0, 1.0)\n >>> p.show()\n ", "language": "en", "n_whitespaces": 813, "n_words": 257, "vocab_size": 156 }
https://github.com/sympy/sympy.git
17
center_crop
def center_crop(self, image, size):
    self._ensure_format_supported(image)

    if not isinstance(size, tuple):
        size = (size, size)

    # PIL Image.size is (width, height) but NumPy array and torch Tensors have (height, width)
    if is_torch_tensor(image) or isinstance(image, np.ndarray):
        if image.ndim == 2:
            image = self.expand_dims(image)
        image_shape = image.shape[1:] if image.shape[0] in [1, 3] else image.shape[:2]
    else:
        image_shape = (image.size[1], image.size[0])

    top = (image_shape[0] - size[0]) // 2
    bottom = top + size[0]  # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result.
    left = (image_shape[1] - size[1]) // 2
    right = left + size[1]  # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result.

    # For PIL Images we have a method to crop directly.
    if isinstance(image, PIL.Image.Image):
        return image.crop((left, top, right, bottom))

    # Check if image is in (n_channels, height, width) or (height, width, n_channels) format
    channel_first = True if image.shape[0] in [1, 3] else False

    # Transpose (height, width, n_channels) format images
    if not channel_first:
        if isinstance(image, np.ndarray):
            image = image.transpose(2, 0, 1)
        if is_torch_tensor(image):
            image = image.permute(2, 0, 1)

    # Check if cropped area is within image boundaries
    if top >= 0 and bottom <= image_shape[0] and left >= 0 and right <= image_shape[1]:
        return image[..., top:bottom, left:right]

    # Otherwise, we may need to pad if the image is too small. Oh joy...
    new_shape = image.shape[:-2] + (max(size[0], image_shape[0]), max(size[1], image_shape[1]))
    if isinstance(image, np.ndarray):
        new_image = np.zeros_like(image, shape=new_shape)
    elif is_torch_tensor(image):
        new_image = image.new_zeros(new_shape)

    top_pad = (new_shape[-2] - image_shape[0]) // 2
    bottom_pad = top_pad + image_shape[0]
    left_pad = (new_shape[-1] - image_shape[1]) // 2
    right_pad = left_pad + image_shape[1]
    new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image

    top += top_pad
    bottom += top_pad
    left += left_pad
    right += left_pad

    new_image = new_image[
        ..., max(0, top) : min(new_image.shape[-2], bottom), max(0, left) : min(new_image.shape[-1], right)
    ]

    return new_image
49becbaa5549b477b0d96c55f207614773c0ab42
12
image_utils.py
714
Enable crop_center method to handle (W, H, C) images (#17626) * enable crop_center method to handle (W, H, C) images * minor style and comment edits
5,704
0
707
474
156
31,221
301
transformers
34
src/transformers/image_utils.py
Python
42
{ "docstring": "\n Crops `image` to the given size using a center crop. Note that if the image is too small to be cropped to the\n size given, it will be padded (so the returned result has the size asked).\n\n Args:\n image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape (n_channels, height, width) or (height, width, n_channels)):\n The image to resize.\n size (`int` or `Tuple[int, int]`):\n The size to which crop the image.\n\n Returns:\n new_image: A center cropped `PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape: (n_channels,\n height, width).\n ", "language": "en", "n_whitespaces": 194, "n_words": 84, "vocab_size": 55 }
https://github.com/huggingface/transformers.git
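A usage sketch for the method above, assuming `extractor` is an instance of a feature-extraction class that mixes it in; the file name and sizes are illustrative:

import numpy as np
from PIL import Image

image = Image.open("cat.png")                           # e.g. 640x480
cropped = extractor.center_crop(image, 224)             # 224x224 PIL image
padded = extractor.center_crop(np.zeros((3, 100, 100)), (224, 224))  # smaller input is zero-padded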
2
input
def input(self):
    if not self._inbound_nodes:
        raise AttributeError(
            "Layer " + self.name + " is not connected, no input to return."
        )
    return self._get_node_attribute_at_index(0, "input_tensors", "input")
84afc5193d38057e2e2badf9c889ea87d80d8fbf
12
base_layer.py
63
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,534
0
83
34
22
270,710
25
keras
6
keras/engine/base_layer.py
Python
6
{ "docstring": "Retrieves the input tensor(s) of a layer.\n\n Only applicable if the layer has exactly one input,\n i.e. if it is connected to one incoming layer.\n\n Returns:\n Input tensor or list of input tensors.\n\n Raises:\n RuntimeError: If called in Eager mode.\n AttributeError: If no inbound nodes are found.\n ", "language": "en", "n_whitespaces": 111, "n_words": 47, "vocab_size": 40 }
https://github.com/keras-team/keras.git
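A minimal sketch: `.input` is only defined once the layer is connected in a functional graph; otherwise it raises `AttributeError` as shown above.

import tensorflow as tf

inputs = tf.keras.Input(shape=(4,))
dense = tf.keras.layers.Dense(2)
outputs = dense(inputs)
print(dense.input)   # the tensor that feeds this layer, i.e. `inputs`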
4
decrypt_file
def decrypt_file(self, file, key):
    # precondition
    assert isinstance(file, str) and isinstance(key, int)

    try:
        with open(file, "r") as fin:
            with open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
    except:
        return False

    return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print crypt.encrypt("hallo welt",key)
# # test decrypt
# print crypt.decrypt(crypt.encrypt("hallo welt",key), key)

# # test encrypt_string
# print crypt.encrypt_string("hallo welt",key)

# # test decrypt_string
# print crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)

# if (crypt.encrypt_file("test.txt",key)):
#     print "encrypt successful"
# else:
#     print "encrypt unsuccessful"

# if (crypt.decrypt_file("encrypt.out",key)):
#     print "decrypt successful"
# else:
#     print "decrypt unsuccessful"
f0af0c43340763724f139fa68aa1e5a9ffe458b4
17
XOR_cipher.py
141
refactor: clean code Signed-off-by: slowy07 <slowy.arfy@gmail.com>
4,357
0
227
70
60
22,541
106
Python
13
XORcipher/XOR_cipher.py
Python
10
{ "docstring": "\n input: filename (str) and a key (int)\n output: returns true if decrypt process was\n successful otherwise false\n if key not passed the method uses the key by the constructor.\n otherwise key = 1\n ", "language": "en", "n_whitespaces": 76, "n_words": 33, "vocab_size": 26 }
https://github.com/geekcomputers/Python.git
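A round-trip sketch mirroring the commented tests above; `test.txt` is an assumed input file, `encrypt_file` writes `encrypt.out` and `decrypt_file` writes `decrypt.out`:

crypt = XORCipher()
key = 67
if crypt.encrypt_file("test.txt", key) and crypt.decrypt_file("encrypt.out", key):
    print("round trip ok")
else:
    print("round trip failed")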