Dataset schema (one record per function; each record below lists its field values in this column order):

complexity      int64    min 1, max 56
n_identifiers   int64    min 1, max 114
code            string   lengths 19 to 12.7k
path            string   lengths 8 to 134
n_ast_nodes     int64    min 12, max 2.35k
ast_errors      string   lengths 0 to 4.01k
repo            string   lengths 3 to 28
documentation   dict
n_words         int64    min 2, max 866
language        string   1 class (Python)
vocab_size      int64    min 2, max 323
commit_id       string   lengths 40 to 40
file_name       string   lengths 5 to 79
id              int64    min 243, max 338k
nloc            int64    min 1, max 228
token_counts    int64    min 5, max 1.4k
fun_name        string   lengths 1 to 77
url             string   lengths 31 to 60
commit_message  string   lengths 3 to 15.3k
n_whitespaces   int64    min 1, max 3.23k
n_ast_errors    int64    min 0, max 20
d_id            int64    min 74, max 121k
ast_levels      int64    min 4, max 29
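For orientation, here is a minimal sketch of how records with this schema could be loaded and inspected. It assumes the records are available as a local JSON Lines export ("code_records.jsonl" is a hypothetical file name) and that the `datasets` package is installed; the field names match the columns above.

# Minimal sketch: load records with the schema above and inspect a few fields.
# Assumptions: local JSON Lines export named "code_records.jsonl" (hypothetical)
# and the Hugging Face `datasets` package installed.
from datasets import load_dataset

ds = load_dataset("json", data_files="code_records.jsonl", split="train")

for record in ds.select(range(3)):
    # Each record pairs a source function with commit and AST metadata.
    print(record["repo"], record["path"], record["fun_name"])
    print("  nloc:", record["nloc"], " complexity:", record["complexity"])
    print("  commit:", record["commit_id"][:12], record["url"])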
1
9
def get_train_dataset() -> ray.data.Dataset:
    data_raw = load_breast_cancer(as_frame=True)
    df = data_raw["data"]
    df["target"] = data_raw["target"]
    return ray.data.from_pandas(df)
python/ray/ml/examples/upload_to_wandb.py
76
ray
{ "docstring": "Return the \"Breast cancer\" dataset as a Ray dataset.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
15
Python
13
5d9bf4234a038e16eaa73fed7d4ec8cef3f0038f
upload_to_wandb.py
139,252
6
43
get_train_dataset
https://github.com/ray-project/ray.git
[air] Example to track runs with Weights & Biases (#24459) This PR - adds an example on how to run Ray Train and log results to weights & biases - adds functionality to the W&B plugin to store checkpoints - fixes a bug introduced in #24017 - Adds a CI utility script to setup credentials - Adds a CI utility script to remove test state from external services cc @simon-mo
30
0
31,643
9
16
63
def where(self, other, cond, _downcast="infer") -> list[Block]:
    assert cond.ndim == self.ndim
    assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame))

    transpose = self.ndim == 2

    # EABlocks override where
    values = cast(np.ndarray, self.values)
    orig_other = other
    if transpose:
        values = values.T

    icond, noop = validate_putmask(values, ~cond)
    if noop:
        # GH-39595: Always return a copy; short-circuit up/downcasting
        return [self.copy()]

    if other is lib.no_default:
        other = self.fill_value

    other = self._standardize_fill_value(other)

    try:
        # try/except here is equivalent to a self._can_hold_element check,
        # but this gets us back 'casted' which we will re-use below;
        # without using 'casted', expressions.where may do unwanted upcasts.
        casted = np_can_hold_element(values.dtype, other)
    except (ValueError, TypeError, LossySetitemError):
        # we cannot coerce, return a compat dtype
        if self.ndim == 1 or self.shape[0] == 1:
            # no need to split columns
            block = self.coerce_to_target_dtype(other)
            blocks = block.where(orig_other, cond)
            return self._maybe_downcast(blocks, downcast=_downcast)

        else:
            # since _maybe_downcast would split blocks anyway, we
            # can avoid some potential upcast/downcast by splitting
            # on the front end.
            is_array = isinstance(other, (np.ndarray, ExtensionArray))

            res_blocks = []
            nbs = self._split()
            for i, nb in enumerate(nbs):
                oth = other
                if is_array:
                    # we have a different value per-column
                    oth = other[:, i : i + 1]

                submask = cond[:, i : i + 1]
                rbs = nb.where(oth, submask, _downcast=_downcast)
                res_blocks.extend(rbs)
            return res_blocks

    else:
        other = casted
        alt = setitem_datetimelike_compat(values, icond.sum(), other)
        if alt is not other:
            if is_list_like(other) and len(other) < len(values):
                # call np.where with other to get the appropriate ValueError
                np.where(~icond, values, other)
                raise NotImplementedError(
                    "This should not be reached; call to np.where above is "
                    "expected to raise ValueError. Please report a bug at "
                    "github.com/pandas-dev/pandas"
                )
            result = values.copy()
            np.putmask(result, icond, alt)
        else:
            # By the time we get here, we should have all Series/Index
            # args extracted to ndarray
            if (
                is_list_like(other)
                and not isinstance(other, np.ndarray)
                and len(other) == self.shape[-1]
            ):
                # If we don't do this broadcasting here, then expressions.where
                # will broadcast a 1D other to be row-like instead of
                # column-like.
                other = np.array(other).reshape(values.shape)
                # If lengths don't match (or len(other)==1), we will raise
                # inside expressions.where, see test_series_where

            # Note: expressions.where may upcast.
            result = expressions.where(~icond, values, other)
            # The np_can_hold_element check _should_ ensure that we always
            # have result.dtype == self.dtype here.

    if transpose:
        result = result.T

    return [self.make_block(result)]
pandas/core/internals/blocks.py
680
pandas
{ "docstring": "\n evaluate the block; return result block(s) from the result\n\n Parameters\n ----------\n other : a ndarray/object\n cond : np.ndarray[bool], SparseArray[bool], or BooleanArray\n _downcast : str or None, default \"infer\"\n Private because we only specify it when calling from fillna.\n\n Returns\n -------\n List[Block]\n ", "language": "en", "n_whitespaces": 123, "n_words": 41, "vocab_size": 35 }
377
Python
229
769fc54897953d366d573d4e45cf13177a5f582b
blocks.py
164,842
71
422
where
https://github.com/pandas-dev/pandas.git
REF: dispatch Block.fillna to putmask/where (#45911)
1,483
0
39,610
19
4
29
def download_ffmpeg() -> Path:
    os_name = platform.system().lower()
    os_arch = platform.machine().lower()
    ffmpeg_url = FFMPEG_URLS.get(os_name, {}).get(os_arch)
    ffmpeg_path = Path(
        os.path.join(
            get_spotdl_path(), "ffmpeg" + (".exe" if os_name == "windows" else "")
        )
    )

    if ffmpeg_url is None:
        raise FFmpegError("FFmpeg binary is not available for your system.")

    # Download binary and save it to a file in spotdl directory
    ffmpeg_binary = requests.get(ffmpeg_url, allow_redirects=True, timeout=10).content
    with open(ffmpeg_path, "wb") as ffmpeg_file:
        ffmpeg_file.write(ffmpeg_binary)

    # Set executable permission on linux and mac
    if os_name in ["linux", "darwin"]:
        ffmpeg_path.chmod(ffmpeg_path.stat().st_mode | stat.S_IEXEC)

    return ffmpeg_path
spotdl/utils/ffmpeg.py
249
spotify-downloader
{ "docstring": "\n Download ffmpeg binary to spotdl directory.\n\n ### Returns\n - Path to ffmpeg binary.\n\n ### Notes\n - ffmpeg is downloaded from github releases\n for current platform and architecture.\n - executable permission is set for ffmpeg binary.\n ", "language": "en", "n_whitespaces": 64, "n_words": 35, "vocab_size": 25 }
84
Python
68
06a84e0400b7f7f847a7a7d06eedba766cdbced3
ffmpeg.py
30,469
28
143
download_ffmpeg
https://github.com/spotDL/spotify-downloader.git
added option to preserve original audio
169
0
5,607
15
14
29
def dis(x=None, *, file=None, depth=None):
    if x is None:
        distb(file=file)
        return
    # Extract functions from methods.
    if hasattr(x, '__func__'):
        x = x.__func__
    # Extract compiled code objects from...
    if hasattr(x, '__code__'):
        # ...a function, or
        x = x.__code__
    elif hasattr(x, 'gi_code'):
        #...a generator object, or
        x = x.gi_code
    elif hasattr(x, 'ag_code'):
        #...an asynchronous generator object, or
        x = x.ag_code
    elif hasattr(x, 'cr_code'):
        #...a coroutine.
        x = x.cr_code
    # Perform the disassembly.
    if hasattr(x, '__dict__'):
        # Class or module
        items = sorted(x.__dict__.items())
        for name, x1 in items:
            if isinstance(x1, _have_code):
                print("Disassembly of %s:" % name, file=file)
                try:
                    dis(x1, file=file, depth=depth)
                except TypeError as msg:
                    print("Sorry:", msg, file=file)
                print(file=file)
    elif hasattr(x, 'co_code'):
        # Code object
        _disassemble_recursive(x, file=file, depth=depth)
    elif isinstance(x, (bytes, bytearray)):
        # Raw bytecode
        _disassemble_bytes(x, file=file)
    elif isinstance(x, str):
        # Source code
        _disassemble_str(x, file=file, depth=depth)
    else:
        raise TypeError("don't know how to disassemble %s objects" %
                        type(x).__name__)
python3.10.4/Lib/dis.py
413
XX-Net
{ "docstring": "Disassemble classes, methods, functions, and other compiled objects.\n\n With no argument, disassemble the last traceback.\n\n Compiled objects currently include generator objects, async generator\n objects, and coroutine objects, all of which store their code object\n in a special attribute.\n ", "language": "en", "n_whitespaces": 53, "n_words": 38, "vocab_size": 34 }
145
Python
96
8198943edd73a363c266633e1aa5b2a9e9c9f526
dis.py
222,529
33
249
dis
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
421
0
56,619
17
1
15
def test_loadtxt_converter_with_unicode_dtype():
    txt = StringIO('abc,def\nrst,xyz')
    conv = bytes.upper
    res = np.loadtxt(txt, dtype=np.dtype("U3"), converters=conv, delimiter=",")
    expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']])
    assert_equal(res, expected)
numpy/lib/tests/test_io.py
118
numpy
{ "docstring": "\n With the default 'bytes' encoding, tokens are encoded prior to being passed\n to the converter. This means that the output of the converter may be bytes\n instead of unicode as expected by `read_rows`.\n\n This test checks that outputs from the above scenario are properly decoded\n prior to parsing by `read_rows`.\n ", "language": "en", "n_whitespaces": 69, "n_words": 50, "vocab_size": 37 }
22
Python
19
66a61b03658f3c9f312505dcf7eab07e4cf91ac6
test_io.py
159,776
6
67
test_loadtxt_converter_with_unicode_dtype
https://github.com/numpy/numpy.git
Port over tests from npreadtext test suite - Add test for parsing scientific notation. - Add multiple-char comment test. - Port over tests for structured dtypes. - Add tests for exceptions on skiprows/max_rows. - port over ndmin tests. - Make structured data reusable, add unpack tests. - Port over delimiter tests. - Port over maxrows test w/ various dtypes. - Port over test of exception msg on parse failure. - Port over test for converters w/neg indices. - Port over usecols tests - Port over unicode tests. - Port over more converter tests. - Port over test for large rows. - Port over test for string-len discovery. - Port over float conversion accuracy test. - Port over bool test. - Add test for implicit float->int conversion. - Port over complex parsing tests. - Port over tests for reading from generator. - Port over object cleanup test. - Port over bytes incompat test. - Port over converters tests. Co-authored-by: Warren Weckesser <warren.weckesser@gmail.com> Co-authored-by: Sebastian Berg <sebastian@sipsolutions.net>
40
0
38,421
12
10
35
def get_batch(self, queue):
    exhausted = False
    batch = {}
    idx = 0
    while idx < self.batchsize:
        item = self._collect_item(queue)
        if item == "EOF":
            logger.trace("EOF received")
            exhausted = True
            break
        # Put frames with no faces into the out queue to keep TQDM consistent
        if not item.detected_faces:
            self._queues["out"].put(item)
            continue

        converted_image = item.get_image_copy(self.color_format)
        for f_idx, face in enumerate(item.detected_faces):
            batch.setdefault("image", []).append(converted_image)
            batch.setdefault("detected_faces", []).append(face)
            batch.setdefault("filename", []).append(item.filename)
            idx += 1
            if idx == self.batchsize:
                frame_faces = len(item.detected_faces)
                if f_idx + 1 != frame_faces:
                    self._rollover = ExtractMedia(
                        item.filename,
                        item.image,
                        detected_faces=item.detected_faces[f_idx + 1:])
                    logger.trace("Rolled over %s faces of %s to next batch for '%s'",
                                 len(self._rollover.detected_faces), frame_faces,
                                 item.filename)
                break
    if batch:
        logger.trace("Returning batch: %s",
                     {k: v.shape if isinstance(v, np.ndarray) else v
                      for k, v in batch.items()})
    else:
        logger.trace(item)
    return exhausted, batch
plugins/extract/align/_base.py
416
faceswap
{ "docstring": " Get items for inputting into the aligner from the queue in batches\n\n Items are returned from the ``queue`` in batches of\n :attr:`~plugins.extract._base.Extractor.batchsize`\n\n Items are received as :class:`~plugins.extract.pipeline.ExtractMedia` objects and converted\n to ``dict`` for internal processing.\n\n To ensure consistent batch sizes for aligner the items are split into separate items for\n each :class:`~lib.align.DetectedFace` object.\n\n Remember to put ``'EOF'`` to the out queue after processing\n the final batch\n\n Outputs items in the following format. All lists are of length\n :attr:`~plugins.extract._base.Extractor.batchsize`:\n\n >>> {'filename': [<filenames of source frames>],\n >>> 'image': [<source images>],\n >>> 'detected_faces': [[<lib.align.DetectedFace objects]]}\n\n Parameters\n ----------\n queue : queue.Queue()\n The ``queue`` that the plugin will be fed from.\n\n Returns\n -------\n exhausted, bool\n ``True`` if queue is exhausted, ``False`` if not\n batch, dict\n A dictionary of lists of :attr:`~plugins.extract._base.Extractor.batchsize`:\n ", "language": "en", "n_whitespaces": 309, "n_words": 126, "vocab_size": 86 }
123
Python
91
5e73437be47f2410439a3c6716de96354e6a0c94
_base.py
101,242
36
255
get_batch
https://github.com/deepfakes/faceswap.git
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
721
0
20,662
20
2
7
def test_sent_event_end_up_in_room_state(self) -> None:
    event_type = "org.matrix.test_state"

    # This content will be updated later on, and since we actually use a reference on
    # the dict it does the right thing. It's a bit hacky but a handy way of making
    # sure the state actually gets updated.
    event_content = {"i": -1}

    api = self.hs.get_module_api()

    # Define a callback that sends a custom event on power levels update.
tests/rest/client/test_third_party_rules.py
57
synapse
{ "docstring": "Tests that a state event sent by a module while processing another state event\n doesn't get dropped from the state of the room. This is to guard against a bug\n where Synapse has been observed doing so, see https://github.com/matrix-org/synapse/issues/10830\n ", "language": "en", "n_whitespaces": 60, "n_words": 39, "vocab_size": 33 }
68
Python
55
2ffaf30803f93273a4d8a65c9e6c3110c8433488
test_third_party_rules.py
247,332
20
118
test_sent_event_end_up_in_room_state
https://github.com/matrix-org/synapse.git
Add type hints to `tests/rest/client` (#12108) * Add type hints to `tests/rest/client` * newsfile * fix imports * add `test_account.py` * Remove one type hint in `test_report_event.py` * change `on_create_room` to `async` * update new functions in `test_third_party_rules.py` * Add `test_filter.py` * add `test_rooms.py` * change to `assertEquals` to `assertEqual` * lint
124
0
71,598
9
1
6
def image2hmap(self, image_tensor):
    return self.model.forward(image_tensor, training=False)
easyocr/DBNet/DBNet.py
34
EasyOCR
{ "docstring": "\n Run the model to obtain a heatmap tensor from a image tensor. The heatmap\n tensor indicates the probability of each pixel being a part of text area.\n\n Parameters\n ----------\n image_tensor : torch.tensor\n Image tensor.\n\n Returns\n -------\n torch.tensor\n Probability heatmap tensor.\n ", "language": "en", "n_whitespaces": 126, "n_words": 40, "vocab_size": 30 }
6
Python
6
803b90729d25fda253011c505d0189e8e63cc039
DBNet.py
123,112
2
21
image2hmap
https://github.com/JaidedAI/EasyOCR.git
add dbnet
28
0
27,289
8
3
10
def get_permission_required(self):
    if self.permission_required is None:
        raise ImproperlyConfigured(
            "{0} is missing the permission_required attribute. Define {0}.permission_required, or override "
            "{0}.get_permission_required().".format(self.__class__.__name__)
        )
    if isinstance(self.permission_required, str):
        perms = (self.permission_required,)
    else:
        perms = self.permission_required
    return perms
django/contrib/auth/mixins.py
94
django
{ "docstring": "\n Override this method to override the permission_required attribute.\n Must return an iterable.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
33
Python
27
9c19aff7c7561e3a82978a272ecdaad40dda5c00
mixins.py
203,671
11
55
get_permission_required
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
142
0
50,504
13
1
4
def pre_validation(self, config, key_name):
mkdocs/config/base.py
17
mkdocs
{ "docstring": "\n Before all options are validated, perform a pre-validation process.\n\n The pre-validation process method should be implemented by subclasses.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
4
Python
4
9c0a8e50b11b70f803500cd73e7256b63f64b5e3
base.py
224,546
1
10
pre_validation
https://github.com/mkdocs/mkdocs.git
Move some documentation into code, add misc API docs page (#2934)
11
0
57,334
6
6
7
def withEnvironmentVarOverridden(env_var_name, value):
    if env_var_name in os.environ:
        old_value = os.environ[env_var_name]
    else:
        old_value = None

    if value is not None:
        os.environ[env_var_name] = value
    elif old_value is not None:
        del os.environ[env_var_name]

    yield

    if old_value is None:
        if value is not None:
            del os.environ[env_var_name]
    else:
        os.environ[env_var_name] = old_value


@contextmanager
nuitka/utils/Execution.py
137
@contextmanager
Nuitka
{ "docstring": "Change an environment and restore it after context.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
46
Python
20
bf42800f1ddfae3563a20d9e4fbc79265e8d5209
Execution.py
178,518
15
84
withEnvironmentVarOverridden
https://github.com/Nuitka/Nuitka.git
Minor cleanups * Typo cleanups
122
1
42,726
11
3
25
def forward(self, data, optimizer=None, return_loss=False, **kwargs):
    batch_inputs, batch_data_samples = self.preprocss_data(data)

    if torch.onnx.is_in_onnx_export():
        # TODO: Delete
        assert len(batch_inputs) == 1
        return self.onnx_export(batch_inputs, batch_data_samples)

    if return_loss:
        losses = self.forward_train(batch_inputs, batch_data_samples, **kwargs)
        loss, log_vars = self._parse_losses(losses)

        outputs = dict(
            loss=loss,
            log_vars=log_vars,
            num_samples=len(batch_data_samples))

        return outputs
    else:
        # TODO: refactor and support aug test later
        assert isinstance(data[0]['inputs'], torch.Tensor), \
            'Only support simple test currently. Aug-test is ' \
            'not supported yet'
        return self.forward_simple_test(batch_inputs, batch_data_samples, **kwargs)
mmdet/models/detectors/base.py
209
mmdetection
{ "docstring": "The iteration step during training and testing. This method defines\n an iteration step during training and testing, except for the back\n propagation and optimizer updating during training, which are done in\n an optimizer hook.\n\n Args:\n data (list[dict]): The output of dataloader.\n optimizer (:obj:`torch.optim.Optimizer`, dict, Optional): The\n optimizer of runner. This argument is unused and reserved.\n Default to None.\n return_loss (bool): Whether to return loss. In general,\n it will be set to True during training and False\n during testing. Default to False.\n\n Returns:\n during training\n dict: It should contain at least 3 keys: ``loss``,\n ``log_vars``, ``num_samples``.\n - ``loss`` is a tensor for back propagation, which can be a\n weighted sum of multiple losses.\n - ``log_vars`` contains all the variables to be sent to the\n logger.\n - ``num_samples`` indicates the batch size (when the model\n is DDP, it means the batch size on each GPU), which is\n used for averaging the logs.\n\n during testing\n list(obj:`DetDataSample`): Detection results of the\n input images. Each DetDataSample usually contains\n ``pred_instances`` or ``pred_panoptic_seg`` or\n ``pred_sem_seg``.\n ", "language": "en", "n_whitespaces": 562, "n_words": 168, "vocab_size": 111 }
70
Python
55
924c381a78eb70cede198e042ef34e038e05c15a
base.py
244,473
20
135
forward
https://github.com/open-mmlab/mmdetection.git
Modify RetinaNet model interface
372
0
70,395
13
10
18
def matches(self, expr, repl_dict=None, old=False):
    expr = sympify(expr)
    if not isinstance(expr, self.__class__):
        return None

    if repl_dict is None:
        repl_dict = {}
    else:
        repl_dict = repl_dict.copy()

    if self == expr:
        return repl_dict

    if len(self.args) != len(expr.args):
        return None

    d = repl_dict  # already a copy
    for arg, other_arg in zip(self.args, expr.args):
        if arg == other_arg:
            continue
        if arg.is_Relational:
            try:
                d = arg.xreplace(d).matches(other_arg, d, old=old)
            except TypeError:  # Should be InvalidComparisonError when introduced
                d = None
        else:
            d = arg.xreplace(d).matches(other_arg, d, old=old)
        if d is None:
            return None
    return d
sympy/core/basic.py
260
sympy
{ "docstring": "\n Helper method for match() that looks for a match between Wild symbols\n in self and expressions in expr.\n\n Examples\n ========\n\n >>> from sympy import symbols, Wild, Basic\n >>> a, b, c = symbols('a b c')\n >>> x = Wild('x')\n >>> Basic(a + x, x).matches(Basic(a + b, c)) is None\n True\n >>> Basic(a + x, x).matches(Basic(a + b + c, b + c))\n {x_: b + c}\n ", "language": "en", "n_whitespaces": 151, "n_words": 66, "vocab_size": 45 }
88
Python
52
9d58006fc0a23afcba38f641c9472917c436428a
basic.py
198,467
26
164
matches
https://github.com/sympy/sympy.git
Code cleanup
375
0
48,951
16
1
3
def supported_features(self):
    return self._attr_supported_features
homeassistant/components/xiaomi_miio/vacuum.py
19
core
{ "docstring": "Flag vacuum cleaner robot features that are supported.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
4
Python
4
424731863423f7b7cd2e29f418ddbce01418828b
vacuum.py
298,521
2
10
supported_features
https://github.com/home-assistant/core.git
Use VacuumEntityFeature in xiaomi_miio (#70564)
18
0
97,465
6
1
2
def overlaying(self):
    return self["overlaying"]
packages/python/plotly/plotly/graph_objs/layout/_xaxis.py
22
plotly.py
{ "docstring": "\n If set a same-letter axis id, this axis is overlaid on top of\n the corresponding same-letter axis, with traces and axes\n visible for both axes. If False, this axis does not overlay any\n same-letter axes. In this case, for axes with overlapping\n domains only the highest-numbered axis will be visible.\n\n The 'overlaying' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['free']\n - A string that matches one of the following regular expressions:\n ['^x([2-9]|[1-9][0-9]+)?( domain)?$',\n '^y([2-9]|[1-9][0-9]+)?( domain)?$']\n\n Returns\n -------\n Any\n ", "language": "en", "n_whitespaces": 221, "n_words": 87, "vocab_size": 64 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_xaxis.py
231,821
2
11
overlaying
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
63,265
7
19
24
def match_hostname(cert, hostname):
    if not cert:
        raise ValueError(
            "empty or no certificate, match_hostname needs a "
            "SSL socket or SSL context with either "
            "CERT_OPTIONAL or CERT_REQUIRED"
        )
    try:
        # Divergence from upstream: ipaddress can't handle byte str
        host_ip = ipaddress.ip_address(_to_unicode(hostname))
    except (UnicodeError, ValueError):
        # ValueError: Not an IP address (common case)
        # UnicodeError: Divergence from upstream: Have to deal with ipaddress not taking
        # byte strings. addresses should be all ascii, so we consider it not
        # an ipaddress in this case
        host_ip = None
    except AttributeError:
        # Divergence from upstream: Make ipaddress library optional
        if ipaddress is None:
            host_ip = None
        else:  # Defensive
            raise
    dnsnames = []
    san = cert.get("subjectAltName", ())
    for key, value in san:
        if key == "DNS":
            if host_ip is None and _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
        elif key == "IP Address":
            if host_ip is not None and _ipaddress_match(value, host_ip):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get("subject", ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == "commonName":
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    if len(dnsnames) > 1:
        raise CertificateError(
            "hostname %r "
            "doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames)))
        )
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0]))
    else:
        raise CertificateError(
            "no appropriate commonName or subjectAltName fields were found"
        )
pipenv/patched/notpip/_vendor/urllib3/util/ssl_match_hostname.py
401
pipenv
{ "docstring": "Verify that *cert* (in decoded format as returned by\n SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125\n rules are followed, but IP addresses are not accepted for *hostname*.\n\n CertificateError is raised on failure. On success, the function\n returns nothing.\n ", "language": "en", "n_whitespaces": 56, "n_words": 40, "vocab_size": 36 }
244
Python
147
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
ssl_match_hostname.py
21,627
45
230
match_hostname
https://github.com/pypa/pipenv.git
Vendor in pip 22.1.2
715
0
3,963
15
3
26
def test_include(self) -> None:
    # Thread 1 has the user as the root event.
    thread_1 = self.parent_id
    self._send_relation(
        RelationTypes.THREAD, "m.room.test", access_token=self.user2_token
    )

    # Thread 2 has the user replying.
    res = self.helper.send(self.room, body="Thread Root!", tok=self.user2_token)
    thread_2 = res["event_id"]
    self._send_relation(RelationTypes.THREAD, "m.room.test", parent_id=thread_2)

    # Thread 3 has the user not participating in.
    res = self.helper.send(self.room, body="Another thread!", tok=self.user2_token)
    thread_3 = res["event_id"]
    self._send_relation(
        RelationTypes.THREAD,
        "m.room.test",
        access_token=self.user2_token,
        parent_id=thread_3,
    )

    # All threads in the room.
    channel = self.make_request(
        "GET",
        f"/_matrix/client/unstable/org.matrix.msc3856/rooms/{self.room}/threads",
        access_token=self.user_token,
    )
    self.assertEquals(200, channel.code, channel.json_body)
    thread_roots = [ev["event_id"] for ev in channel.json_body["chunk"]]
    self.assertEqual(
        thread_roots, [thread_3, thread_2, thread_1], channel.json_body
    )

    # Only participated threads.
    channel = self.make_request(
        "GET",
        f"/_matrix/client/unstable/org.matrix.msc3856/rooms/{self.room}/threads?include=participated",
        access_token=self.user_token,
    )
    self.assertEquals(200, channel.code, channel.json_body)
    thread_roots = [ev["event_id"] for ev in channel.json_body["chunk"]]
    self.assertEqual(thread_roots, [thread_2, thread_1], channel.json_body)
tests/rest/client/test_relations.py
409
synapse
{ "docstring": "Filtering threads to all or participated in should work.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
120
Python
70
3bbe532abb7bfc41467597731ac1a18c0331f539
test_relations.py
249,645
35
252
test_include
https://github.com/matrix-org/synapse.git
Add an API for listing threads in a room. (#13394) Implement the /threads endpoint from MSC3856. This is currently unstable and behind an experimental configuration flag. It includes a background update to backfill data, results from the /threads endpoint will be partial until that finishes.
441
0
73,041
11
2
9
def shutdown(self):
    logger.info("Stopping FreqAI")
    self._stop_event.set()

    logger.info("Waiting on Training iteration")
    for _thread in self._threads:
        _thread.join()
freqtrade/freqai/freqai_interface.py
68
freqtrade
{ "docstring": "\n Cleans up threads on Shutdown, set stop event. Join threads to wait\n for current training iteration.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 15 }
14
Python
14
dae3b3d86adde0f6c7065ce1d083d9ceac62e5ef
freqai_interface.py
151,037
6
37
shutdown
https://github.com/freqtrade/freqtrade.git
support shutting down freqai
60
0
34,928
9
6
16
def render_annotated(self, context):
    try:
        return self.render(context)
    except Exception as e:
        if context.template.engine.debug:
            # Store the actual node that caused the exception.
            if not hasattr(e, "_culprit_node"):
                e._culprit_node = self
            if (
                not hasattr(e, "template_debug")
                and context.render_context.template.origin == e._culprit_node.origin
            ):
                e.template_debug = (
                    context.render_context.template.get_exception_info(
                        e,
                        e._culprit_node.token,
                    )
                )
        raise
django/template/base.py
154
django
{ "docstring": "\n Render the node. If debug is True and an exception occurs during\n rendering, the exception is annotated with contextual line information\n where it occurred in the template. For internal usage this method is\n preferred over using the render method directly.\n ", "language": "en", "n_whitespaces": 76, "n_words": 40, "vocab_size": 33 }
47
Python
39
9c19aff7c7561e3a82978a272ecdaad40dda5c00
base.py
206,196
18
94
render_annotated
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
356
0
51,403
18
8
15
def filter_empty_gradients(grads_and_vars):
    grads_and_vars = tuple(grads_and_vars)
    if not grads_and_vars:
        return grads_and_vars

    filtered = []
    vars_with_empty_grads = []
    for grad, var in grads_and_vars:
        if grad is None:
            vars_with_empty_grads.append(var)
        else:
            filtered.append((grad, var))
    filtered = tuple(filtered)

    if not filtered:
        variable = ([v.name for _, v in grads_and_vars],)
        raise ValueError(
            f"No gradients provided for any variable: {variable}. "
            f"Provided `grads_and_vars` is {grads_and_vars}."
        )
    if vars_with_empty_grads:
        logging.warning(
            (
                "Gradients do not exist for variables %s when minimizing the loss. "
                "If you're using `model.compile()`, did you forget to provide a `loss`"
                "argument?"
            ),
            ([v.name for v in vars_with_empty_grads]),
        )
    return filtered
keras/optimizers/optimizer_v2/utils.py
203
keras
{ "docstring": "Filter out `(grad, var)` pairs that have a gradient equal to `None`.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
95
Python
69
84afc5193d38057e2e2badf9c889ea87d80d8fbf
utils.py
275,623
28
118
filter_empty_gradients
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
303
0
81,435
13
1
3
def expected_parameters(self):
    raise NotImplementedError(
        "subclasses of ListFilter must provide an expected_parameters() method"
    )
django/contrib/admin/filters.py
23
django
{ "docstring": "\n Return the list of parameter names that are expected from the\n request's query string and that will be used by this filter.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 20 }
13
Python
13
9c19aff7c7561e3a82978a272ecdaad40dda5c00
filters.py
203,385
4
11
expected_parameters
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
45
0
50,351
8
1
7
def euler_poly(n, x=None, polys=False):
    r
    return named_poly(n, dup_euler, QQ, "Euler polynomial", (x,), polys)
sympy/polys/appellseqs.py
47
sympy
{ "docstring": "Generates the Euler polynomial `\\operatorname{E}_n(x)`.\n\n These are scaled and reindexed versions of the Genocchi polynomials:\n\n .. math :: \\operatorname{E}_n(x) = -\\frac{\\operatorname{G}_{n+1}(x)}{n+1}\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n\n See Also\n ========\n\n sympy.functions.combinatorial.numbers.euler\n ", "language": "en", "n_whitespaces": 98, "n_words": 51, "vocab_size": 44 }
13
Python
13
75e3143a934c3427f39b82613d77f6d6f55e00b4
appellseqs.py
199,851
22
33
euler_poly
https://github.com/sympy/sympy.git
Varying Dirichlet L-series for evalfing the André function
18
0
49,404
8
1
15
def forward(self, img):
    i, j, h, w = self.get_params(img, self.scale, self.ratio)
    return F.resized_crop(img, i, j, h, w, self.size, self.interpolation, antialias=self.antialias)
torchvision/transforms/transforms.py
82
vision
{ "docstring": "\n Args:\n img (PIL Image or Tensor): Image to be cropped and resized.\n\n Returns:\n PIL Image or Tensor: Randomly cropped and resized image.\n ", "language": "en", "n_whitespaces": 66, "n_words": 22, "vocab_size": 17 }
20
Python
17
a5536de95d8e703645e391f4bd885ef489ab35bd
transforms.py
192,920
3
59
forward
https://github.com/pytorch/vision.git
Added antialias arg to resized crop transform and op (#6193)
41
0
46,921
9
1
2
def pullsrc(self):
    return self["pullsrc"]
packages/python/plotly/plotly/graph_objs/_pie.py
22
plotly.py
{ "docstring": "\n Sets the source reference on Chart Studio Cloud for `pull`.\n\n The 'pullsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "language": "en", "n_whitespaces": 77, "n_words": 27, "vocab_size": 25 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_pie.py
227,572
2
11
pullsrc
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
59,245
7
3
35
def _generate_input_dict(B, T, obs_space, action_space):
    # generate deterministic inputs
    # obs
    obs = np.arange(B * T * obs_space.shape[0], dtype=np.float32).reshape(
        (B, T, obs_space.shape[0])
    )
    # actions
    if isinstance(action_space, gym.spaces.Box):
        act = np.arange(B * T * action_space.shape[0], dtype=np.float32).reshape(
            (B, T, action_space.shape[0])
        )
    else:
        act = np.mod(np.arange(B * T, dtype=np.int32).reshape((B, T)), action_space.n)
    # returns to go
    rtg = np.arange(B * (T + 1), dtype=np.float32).reshape((B, T + 1, 1))
    # timesteps
    timesteps = np.stack([np.arange(T, dtype=np.int32) for _ in range(B)], axis=0)
    # attention mask
    mask = np.ones((B, T), dtype=np.float32)

    input_dict = SampleBatch(
        {
            SampleBatch.OBS: obs,
            SampleBatch.ACTIONS: act,
            SampleBatch.RETURNS_TO_GO: rtg,
            SampleBatch.T: timesteps,
            SampleBatch.ATTENTION_MASKS: mask,
        }
    )
    input_dict = convert_to_torch_tensor(input_dict)
    return input_dict
rllib/algorithms/dt/tests/test_dt_model.py
395
ray
{ "docstring": "Generate input_dict that has completely fake values.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
105
Python
70
61880591e97c2760d929c16263e039d503b1b9a9
test_dt_model.py
127,144
24
266
_generate_input_dict
https://github.com/ray-project/ray.git
[RLlib] Add DTTorchModel (#27872)
267
0
28,371
17
1
3
async def test_empty_integrations_list_is_only_sent_at_the_end_of_bootstrap(hass):
    order = []
tests/test_bootstrap.py
21
core
{ "docstring": "Test empty integrations list is only sent at the end of bootstrap.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
6
Python
6
58d531841bc2197616e7285d0561a9935d9bda73
test_bootstrap.py
288,740
35
168
test_empty_integrations_list_is_only_sent_at_the_end_of_bootstrap
https://github.com/home-assistant/core.git
Fix typo SIGNAL_BOOTSTRAP_INTEGRATONS -> SIGNAL_BOOTSTRAP_INTEGRATIONS (#79970)
12
0
87,893
7
2
19
def test_run_glue_no_trainer(self):
    tmp_dir = self.get_auto_remove_tmp_dir()
    testargs = f.split()

    if is_cuda_and_apex_available():
        testargs.append("--fp16")

    run_command(self._launch_args + testargs)
    result = get_results(tmp_dir)
    self.assertGreaterEqual(result["eval_accuracy"], 0.75)
    self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
    self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
examples/pytorch/test_accelerate_examples.py
180
transformers
{ "docstring": "\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ", "language": "en", "n_whitespaces": 145, "n_words": 16, "vocab_size": 16 }
23
Python
20
99eb9b523f9b9ea6096323ce5610ce6633acc88a
test_accelerate_examples.py
32,328
22
102
test_run_glue_no_trainer
https://github.com/huggingface/transformers.git
Fix `no_trainer` CI (#18242) * Fix all tests
89
0
5,902
12
6
12
def get_geom_placeholder(self, f, value, compiler):
    transform_func = self.spatial_function_name("Transform")
    if hasattr(value, "as_sql"):
        if value.field.srid == f.srid:
            placeholder = "%s"
        else:
            placeholder = "%s(%%s, %s)" % (transform_func, f.srid)
        return placeholder

    # Get the srid for this object
    if value is None:
        value_srid = None
    else:
        value_srid = value.srid

    # Adding Transform() to the SQL placeholder if the value srid
    # is not equal to the field srid.
    if value_srid is None or value_srid == f.srid:
        placeholder = "%s"
    else:
        placeholder = "%s(%%s, %s)" % (transform_func, f.srid)

    return placeholder
django/contrib/gis/db/backends/postgis/operations.py
174
django
{ "docstring": "\n Provide a proper substitution value for Geometries or rasters that are\n not in the SRID of the field. Specifically, this routine will\n substitute in the ST_Transform() function call.\n ", "language": "en", "n_whitespaces": 57, "n_words": 28, "vocab_size": 25 }
86
Python
45
9c19aff7c7561e3a82978a272ecdaad40dda5c00
operations.py
203,843
17
101
get_geom_placeholder
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
270
0
50,554
14
1
4
def truncate(self, size=None):
    # type: (Optional[int]) -> int
    return self._file.truncate(size)
.venv/lib/python3.8/site-packages/pip/_internal/network/lazy_wheel.py
33
transferlearning
{ "docstring": "Resize the stream to the given size in bytes.\n\n If size is unspecified resize to the current position.\n The current stream position isn't changed.\n\n Return the new file size.\n ", "language": "en", "n_whitespaces": 57, "n_words": 29, "vocab_size": 22 }
10
Python
10
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
lazy_wheel.py
60,887
2
19
truncate
https://github.com/jindongwang/transferlearning.git
upd; format
31
0
12,321
8
4
31
def test_dqn_compilation(self):
    num_iterations = 1
    config = dqn.dqn.DQNConfig().rollouts(num_rollout_workers=2)

    for _ in framework_iterator(config, with_eager_tracing=True):
        # Double-dueling DQN.
        print("Double-dueling")
        plain_config = deepcopy(config)
        trainer = dqn.DQNTrainer(config=plain_config, env="CartPole-v0")
        for i in range(num_iterations):
            results = trainer.train()
            check_train_results(results)
            print(results)

        check_compute_single_action(trainer)
        trainer.stop()

        # Rainbow.
        print("Rainbow")
        rainbow_config = deepcopy(config).training(
            num_atoms=10, noisy=True, double_q=True, dueling=True, n_step=5
        )
        trainer = dqn.DQNTrainer(config=rainbow_config, env="CartPole-v0")
        for i in range(num_iterations):
            results = trainer.train()
            check_train_results(results)
            print(results)

        check_compute_single_action(trainer)
        trainer.stop()
rllib/agents/dqn/tests/test_dqn.py
286
ray
{ "docstring": "Test whether a DQNTrainer can be built on all frameworks.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
62
Python
40
5c96e7223b468fed6b6db763c837728c721f78cd
test_dqn.py
139,548
24
172
test_dqn_compilation
https://github.com/ray-project/ray.git
[RLlib] SimpleQ (minor cleanups) and DQN TrainerConfig objects. (#24584)
360
0
31,730
12
2
6
def on_test_end(self, logs=None):
    logs = self._process_logs(logs)
    for callback in self.callbacks:
        callback.on_test_end(logs)
keras/callbacks.py
51
keras
{ "docstring": "Calls the `on_test_end` methods of its callbacks.\n\n Args:\n logs: Dict. Currently, no data is passed via this argument\n for this method, but that may change in the future.\n ", "language": "en", "n_whitespaces": 66, "n_words": 28, "vocab_size": 26 }
11
Python
11
84afc5193d38057e2e2badf9c889ea87d80d8fbf
callbacks.py
269,921
4
31
on_test_end
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
43
0
80,332
9
5
22
def _add_contourf_set(self, cset, zdir='z', offset=None):
    zdir = '-' + zdir

    midpoints = cset.levels[:-1] + np.diff(cset.levels) / 2
    # Linearly interpolate to get levels for any extensions
    if cset._extend_min:
        min_level = cset.levels[0] - np.diff(cset.levels[:2]) / 2
        midpoints = np.insert(midpoints, 0, min_level)
    if cset._extend_max:
        max_level = cset.levels[-1] + np.diff(cset.levels[-2:]) / 2
        midpoints = np.append(midpoints, max_level)

    for z, linec in zip(midpoints, cset.collections):
        if offset is not None:
            z = offset
        art3d.poly_collection_2d_to_3d(linec, z, zdir=zdir)
        linec.set_sort_zpos(z)
    return midpoints
lib/mpl_toolkits/mplot3d/axes3d.py
257
matplotlib
{ "docstring": "\n Returns\n -------\n levels : `numpy.ndarray`\n Levels at which the filled contours are added.\n ", "language": "en", "n_whitespaces": 53, "n_words": 13, "vocab_size": 13 }
73
Python
52
df6f95703b60348e01603f98a439b133da2938a0
axes3d.py
109,939
15
165
_add_contourf_set
https://github.com/matplotlib/matplotlib.git
Improve mpl_toolkit documentation
221
0
23,846
15
3
25
def forward(self, input_ids=None, attention_mask=None):
    if input_ids is None:
        raise ValueError("Input_ids cannot be None.")

    inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
    inputs_embed_pos = self.encoder_embed_positions(input_ids.shape)
    hidden_states = inputs_embeds + inputs_embed_pos
    hidden_states = self.encoder_layernorm_embedding(hidden_states)
    encoder_input = self.encoder_dropout(hidden_states)

    if attention_mask is None:
        attention_mask = paddle.cast(
            input_ids == self.pad_token_id,
            dtype=paddle.get_default_dtype()).unsqueeze([1, 2]) * -1e4
    attention_mask.stop_gradient = True

    encoder_output = self.encoder(encoder_input, src_mask=attention_mask)
    return encoder_output
paddlenlp/transformers/blenderbot_small/modeling.py
196
PaddleNLP
{ "docstring": "\n Returns:\n Tensor: The last hidden-states at the last layer of the encoder.\n It's data type should be `float` and has a shape of `(batch_size, seq_lens, hidden_size)`.\n ``seq_lens`` corresponds to the length of input sequence.\n ", "language": "en", "n_whitespaces": 82, "n_words": 34, "vocab_size": 29 }
55
Python
38
b0c35d5e1ff02a634fa26392b60d3885c2c78677
modeling.py
322,093
15
123
forward
https://github.com/PaddlePaddle/PaddleNLP.git
Fix the attention mask for fp16 (#1585)
188
0
118,053
16
1
2
def no_translations(handle_func):
django/core/management/base.py
13
django
{ "docstring": "Decorator that forces a command to run with translations deactivated.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
2
Python
2
9c19aff7c7561e3a82978a272ecdaad40dda5c00
base.py
204,594
3
10
no_translations
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
5
0
50,805
6
4
4
def get_return_data_type(func_name):
    if func_name.startswith("get_"):
        if func_name.endswith("_list"):
            return "List"
        elif func_name.endswith("_count"):
            return "Integer"
    return ""
django/contrib/admindocs/views.py
73
django
{ "docstring": "Return a somewhat-helpful data type given a function name", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
14
Python
11
9c19aff7c7561e3a82978a272ecdaad40dda5c00
views.py
203,580
7
36
get_return_data_type
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
59
0
50,457
11
1
27
def print_ir(*prototypes):
    def lower(f):
        inputs = tree_util.tree_map(np.array, prototypes)
        flat_inputs, _ = tree_util.tree_flatten(inputs)
        shape_strs = " ".join([f"{x.dtype.name}[{','.join(map(str, x.shape))}]"
                               for x in flat_inputs])
        name = f.func.__name__ if hasattr(f, "func") else f.__name__
        print(f"\nTEST: {name} {shape_strs}")
        print(jax.jit(f).lower(*inputs).compiler_ir())
    return lower
tests/filecheck/jax_filecheck_helpers.py
198
jax
{ "docstring": "Prints the MLIR IR that results from lowering `f`.\n\n The arguments to `f` are taken to be arrays shaped like `prototypes`.", "language": "en", "n_whitespaces": 23, "n_words": 21, "vocab_size": 20 }
35
Python
31
b8ae8e3fa10f9abe998459fac1513915acee776d
jax_filecheck_helpers.py
122,864
3
10
print_ir
https://github.com/google/jax.git
(NFC) Prepare for migration from producing MHLO to producing StableHLO This CL renames occurrences of "mhlo" in: 1) names, 2) tests, 3) prose in order to prepare for the upcoming migration. Unchanged occurrences: 1) Public API that contains "mhlo", e.g. XlaLowering.mhlo and the "mhlo" argument value in Lowering.as_text and Lowering.compiler_ir. 2) Documentation (changelog, JEPs, IR examples, etc). 3) One rare situation where prose says "StableHLO" and "MHLO" in one sentence, so both are necessary to disambiguate. PiperOrigin-RevId: 495771153
84
0
27,259
18
2
16
def test_extract_strings_with_rollout(rollout_option, option_value, expected, set_sentry_option):
    if rollout_option:
        set_sentry_option(rollout_option, option_value)
    outer_message = _construct_outer_message(
        [
            (counter_payload, []),
            (counter_payload_org_70, []),
            (distribution_payload, []),
            (set_payload, []),
        ]
    )
    batch = IndexerBatch(UseCaseKey.PERFORMANCE, outer_message, rollout_option)
    assert batch.extract_strings() == expected
tests/sentry/sentry_metrics/test_batch.py
111
sentry
{ "docstring": "\n Test that the indexer batch extracts the correct strings from the messages\n based on the rollout option name and the option value.\n ", "language": "en", "n_whitespaces": 32, "n_words": 22, "vocab_size": 17 }
32
Python
28
245b174c30ba3d814043221faa327b1a14d64859
test_batch.py
85,988
13
76
test_extract_strings_with_rollout
https://github.com/getsentry/sentry.git
feat(indexer): Allow mechanism to not index tag values (#38837) This PR adds mechanism to skip indexing tag values. The code is based on an option introduced in https://github.com/getsentry/sentry/pull/38758/ After the change, when the option is configured, the indexer on performance would send strings for tag values
115
0
18,068
11