Columns, dtypes, and observed ranges (for `stringlengths` columns the range is the string length; `stringclasses` columns take a fixed set of values — `language` has a single class, Python):

| Column | dtype | Range (min – max) |
| --- | --- | --- |
| id | int64 | 20 – 338k |
| vocab_size | int64 | 2 – 671 |
| ast_levels | int64 | 4 – 32 |
| nloc | int64 | 1 – 451 |
| n_ast_nodes | int64 | 12 – 5.6k |
| n_identifiers | int64 | 1 – 186 |
| n_ast_errors | int64 | 0 – 10 |
| n_words | int64 | 2 – 2.17k |
| n_whitespaces | int64 | 2 – 13.8k |
| fun_name | stringlengths | 2 – 73 |
| commit_message | stringlengths | 51 – 15.3k |
| url | stringlengths | 31 – 59 |
| code | stringlengths | 51 – 31k |
| ast_errors | stringlengths | 0 – 1.46k |
| token_counts | int64 | 6 – 3.32k |
| file_name | stringlengths | 5 – 56 |
| language | stringclasses | 1 value |
| path | stringlengths | 7 – 134 |
| commit_id | stringlengths | 40 – 40 |
| repo | stringlengths | 3 – 28 |
| complexity | int64 | 1 – 153 |
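As a minimal sketch of how a dataset with this schema could be loaded and inspected with the Hugging Face `datasets` library (the identifier `your-org/python-commit-functions` is a placeholder, not the actual repository name):

```python
from datasets import load_dataset

# Placeholder dataset identifier; substitute the real repository name.
ds = load_dataset("your-org/python-commit-functions", split="train")

print(ds.features)                # column names and dtypes, matching the table above
row = ds[0]                       # a single record as a plain dict
print(row["fun_name"], row["repo"], row["complexity"])
print(row["code"][:200])          # first 200 characters of the function source
```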
Sample rows (one record per function, fields as in the table above):

**Example — fun_name: `channels`** (id 294,047)
- Metrics: vocab_size 11, ast_levels 8, nloc 4, n_ast_nodes 45, n_identifiers 6, n_ast_errors 0, n_words 12, n_whitespaces 33
- commit_message:
Seperate emonitor extra_state_attributes into their own sensors (#68479)
- url: https://github.com/home-assistant/core.git
- code:
def channels(self) -> dict[int, EmonitorChannel]: channels: dict[int, EmonitorChannel] = self.emonitor_status.channels return channels
- token_counts: 29, complexity: 1
- file_name: sensor.py (language: Python), path: homeassistant/components/emonitor/sensor.py
- repo: core, commit_id: cb011570e81c2e9fcbf39e30b76dd9f441ce5264
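The per-function metrics in each record (nloc, token_counts, complexity) match what a static code analyzer such as lizard reports; whether that exact tool produced these fields is an assumption. A minimal sketch of recomputing them for a simplified variant of the function above:

```python
import lizard  # third-party static-analysis tool (pip install lizard); its use here is an assumption

# Simplified variant of the `channels` property shown above (type hints dropped).
source = (
    "def channels(self):\n"
    "    channels = self.emonitor_status.channels\n"
    "    return channels\n"
)

# analyze_source_code() takes a file name (used for language detection) and the source text.
info = lizard.analyze_file.analyze_source_code("sensor.py", source)
for func in info.function_list:
    print(func.name, func.nloc, func.token_count, func.cyclomatic_complexity)
```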

**Example — fun_name: `in1d`** (id 160,620)
- Metrics: vocab_size 225, ast_levels 21, nloc 67, n_ast_nodes 908, n_identifiers 57, n_ast_errors 0, n_words 421, n_whitespaces 1,175
- commit_message:
TST: Extend np.in1d tests to old algorithm - Add flag ``_slow_integer`` to np.isin/np.in1d to force the use of the old isin/in1d algorithm for integers.
- url: https://github.com/numpy/numpy.git
- code:
def in1d(ar1, ar2, assume_unique=False, invert=False, _slow_integer=None): # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # Ensure that iteration through object arrays yields size-1 arrays if ar2.dtype == object: ar2 = ar2.reshape(-1, 1) # Convert booleans to uint8 so we can use the fast integer algorithm if ar1.dtype == np.bool_: ar1 = ar1.view(np.uint8) if ar2.dtype == np.bool_: ar2 = ar2.view(np.uint8) # Check if we can use a fast integer algorithm: integer_arrays = (np.issubdtype(ar1.dtype, np.integer) and np.issubdtype(ar2.dtype, np.integer)) if integer_arrays and _slow_integer in [None, False]: ar2_min = np.min(ar2) ar2_max = np.max(ar2) ar2_size = ar2.size # Check for integer overflow with np.errstate(over='raise'): try: ar2_range = ar2_max - ar2_min # Optimal performance is for approximately # log10(size) > (log10(range) - 2.27) / 0.927, see discussion on # https://github.com/numpy/numpy/pull/12065 optimal_parameters = ( np.log10(ar2_size) > ((np.log10(ar2_range + 1.0) - 2.27) / 0.927) ) except FloatingPointError: optimal_parameters = False # Use the fast integer algorithm if optimal_parameters or _slow_integer == False: if invert: outgoing_array = np.ones_like(ar1, dtype=np.bool_) else: outgoing_array = np.zeros_like(ar1, dtype=np.bool_) # Make elements 1 where the integer exists in ar2 if invert: isin_helper_ar = np.ones(ar2_range + 1, dtype=np.bool_) isin_helper_ar[ar2 - ar2_min] = 0 else: isin_helper_ar = np.zeros(ar2_range + 1, dtype=np.bool_) isin_helper_ar[ar2 - ar2_min] = 1 # Mask out elements we know won't work basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] - ar2_min] return outgoing_array # Check if one of the arrays may contain arbitrary objects contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject # This code is run when # a) the first condition is true, making the code significantly faster # b) the second condition is true (i.e. `ar1` or `ar2` may contain # arbitrary objects), since then sorting is not guaranteed to work if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: if invert: mask = np.ones(len(ar1), dtype=bool) for a in ar2: mask &= (ar1 != a) else: mask = np.zeros(len(ar1), dtype=bool) for a in ar2: mask |= (ar1 == a) return mask # Otherwise use sorting if not assume_unique: ar1, rev_idx = np.unique(ar1, return_inverse=True) ar2 = np.unique(ar2) ar = np.concatenate((ar1, ar2)) # We need this to be a stable sort, so always use 'mergesort' # here. The values from the first array should always come before # the values from the second array. order = ar.argsort(kind='mergesort') sar = ar[order] if invert: bool_ar = (sar[1:] != sar[:-1]) else: bool_ar = (sar[1:] == sar[:-1]) flag = np.concatenate((bool_ar, [invert])) ret = np.empty(ar.shape, dtype=bool) ret[order] = flag if assume_unique: return ret[:len(ar1)] else: return ret[rev_idx]
- token_counts: 576, complexity: 21
- file_name: arraysetops.py (language: Python), path: numpy/lib/arraysetops.py
- repo: numpy, commit_id: d6437066f27f81c4a78fb377ef1c61b4969f8159

**Example — fun_name: `test_change_toggle_property`** (id 299,430)
- Metrics: vocab_size 45, ast_levels 17, nloc 21, n_ast_nodes 207, n_identifiers 32, n_ast_errors 0, n_words 57, n_whitespaces 220
- commit_message:
Insteon Device Control Panel (#70834) Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io>
- url: https://github.com/home-assistant/core.git
- code:
async def test_change_toggle_property(hass, hass_ws_client, kpl_properties_data): ws_client, devices = await _setup( hass, hass_ws_client, "33.33.33", kpl_properties_data ) device = devices["33.33.33"] prop_name = f"{TOGGLE_BUTTON}_c" toggle_prop = device.configuration[prop_name] assert toggle_prop.value == ToggleMode.TOGGLE with patch.object(insteon.api.properties, "devices", devices): await ws_client.send_json( { ID: 2, TYPE: "insteon/properties/change", DEVICE_ADDRESS: "33.33.33", PROPERTY_NAME: prop_name, PROPERTY_VALUE: str(ToggleMode.ON_ONLY).lower(), } ) msg = await ws_client.receive_json() assert msg["success"] assert toggle_prop.new_value == ToggleMode.ON_ONLY
- token_counts: 125, complexity: 1
- file_name: test_api_properties.py (language: Python), path: tests/components/insteon/test_api_properties.py
- repo: core, commit_id: a9ca774e7ed1d8fe502a53d5b765c1d9b393a524

**Example — fun_name: `_mask_lengths`** (id 262,688)
- Metrics: vocab_size 13, ast_levels 8, nloc 6, n_ast_nodes 62, n_identifiers 8, n_ast_errors 0, n_words 23, n_whitespaces 65
- commit_message:
Adding OverFlow (#2183) * Adding encoder * currently modifying hmm * Adding hmm * Adding overflow * Adding overflow setting up flat start * Removing runs * adding normalization parameters * Fixing models on same device * Training overflow and plotting evaluations * Adding inference * At the end of epoch the test sentences are coming on cpu instead of gpu * Adding figures from model during training to monitor * reverting tacotron2 training recipe * fixing inference on gpu for test sentences on config * moving helpers and texts within overflows source code * renaming to overflow * moving loss to the model file * Fixing the rename * Model training but not plotting the test config sentences's audios * Formatting logs * Changing model name to camelcase * Fixing test log * Fixing plotting bug * Adding some tests * Adding more tests to overflow * Adding all tests for overflow * making changes to camel case in config * Adding information about parameters and docstring * removing compute_mel_statistics moved statistic computation to the model instead * Added overflow in readme * Adding more test cases, now it doesn't saves transition_p like tensor and can be dumped as json
- url: https://github.com/coqui-ai/TTS.git
- code:
def _mask_lengths(mel_lens, log_c, log_alpha_scaled): mask_log_c = sequence_mask(mel_lens) log_c = log_c * mask_log_c mask_log_alpha_scaled = mask_log_c.unsqueeze(2) log_alpha_scaled = log_alpha_scaled * mask_log_alpha_scaled return log_c, log_alpha_scaled
- token_counts: 38, complexity: 1
- file_name: neural_hmm.py (language: Python), path: TTS/tts/layers/overflow/neural_hmm.py
- repo: TTS, commit_id: 3b8b105b0d6539ac12972de94e0b2a5077fa1ce2

**Example — fun_name: `test_kb_set_entities`** (id 111,516)
- Metrics: vocab_size 48, ast_levels 12, nloc 18, n_ast_nodes 336, n_identifiers 18, n_ast_errors 0, n_words 77, n_whitespaces 155
- commit_message:
Refactor KB for easier customization (#11268) * Add implementation of batching + backwards compatibility fixes. Tests indicate issue with batch disambiguation for custom singular entity lookups. * Fix tests. Add distinction w.r.t. batch size. * Remove redundant and add new comments. * Adjust comments. Fix variable naming in EL prediction. * Fix mypy errors. * Remove KB entity type config option. Change return types of candidate retrieval functions to Iterable from Iterator. Fix various other issues. * Update spacy/pipeline/entity_linker.py Co-authored-by: Paul O'Leary McCann <polm@dampfkraft.com> * Update spacy/pipeline/entity_linker.py Co-authored-by: Paul O'Leary McCann <polm@dampfkraft.com> * Update spacy/kb_base.pyx Co-authored-by: Paul O'Leary McCann <polm@dampfkraft.com> * Update spacy/kb_base.pyx Co-authored-by: Paul O'Leary McCann <polm@dampfkraft.com> * Update spacy/pipeline/entity_linker.py Co-authored-by: Paul O'Leary McCann <polm@dampfkraft.com> * Add error messages to NotImplementedErrors. Remove redundant comment. * Fix imports. * Remove redundant comments. * Rename KnowledgeBase to InMemoryLookupKB and BaseKnowledgeBase to KnowledgeBase. * Fix tests. * Update spacy/errors.py Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com> * Move KB into subdirectory. * Adjust imports after KB move to dedicated subdirectory. * Fix config imports. * Move Candidate + retrieval functions to separate module. Fix other, small issues. * Fix docstrings and error message w.r.t. class names. Fix typing for candidate retrieval functions. * Update spacy/kb/kb_in_memory.pyx Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com> * Update spacy/ml/models/entity_linker.py Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com> * Fix typing. * Change typing of mentions to be Span instead of Union[Span, str]. * Update docs. * Update EntityLinker and _architecture docs. * Update website/docs/api/entitylinker.md Co-authored-by: Paul O'Leary McCann <polm@dampfkraft.com> * Adjust message for E1046. * Re-add section for Candidate in kb.md, add reference to dedicated page. * Update docs and docstrings. * Re-add section + reference for KnowledgeBase.get_alias_candidates() in docs. * Update spacy/kb/candidate.pyx * Update spacy/kb/kb_in_memory.pyx * Update spacy/pipeline/legacy/entity_linker.py * Remove canididate.md. Remove mistakenly added config snippet in entity_linker.py. Co-authored-by: Paul O'Leary McCann <polm@dampfkraft.com> Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>
- url: https://github.com/explosion/spaCy.git
- code:
def test_kb_set_entities(nlp): v = [5, 6, 7, 8] v1 = [1, 1, 1, 0] v2 = [2, 2, 2, 3] kb1 = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=4) kb1.set_entities(["E0"], [1], [v]) assert kb1.get_entity_strings() == ["E0"] kb1.set_entities(["E1", "E2"], [1, 9], [v1, v2]) assert set(kb1.get_entity_strings()) == {"E1", "E2"} assert kb1.get_vector("E1") == v1 assert kb1.get_vector("E2") == v2 with make_tempdir() as d: kb1.to_disk(d / "kb") kb2 = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=4) kb2.from_disk(d / "kb") assert set(kb2.get_entity_strings()) == {"E1", "E2"} assert kb2.get_vector("E1") == v1 assert kb2.get_vector("E2") == v2
- token_counts: 204, complexity: 1
- file_name: test_entity_linker.py (language: Python), path: spacy/tests/pipeline/test_entity_linker.py
- repo: spaCy, commit_id: 1f23c615d7a7326ca5a38a7d768b8b70caaa0e17

**Example — fun_name: `eiou_loss`** (id 245,831)
- Metrics: vocab_size 95, ast_levels 12, nloc 38, n_ast_nodes 488, n_identifiers 35, n_ast_errors 1, n_words 181, n_whitespaces 289
- commit_message:
[Feats]: support EIoU Loss (#9086) * add eiou impl * update docstring * correct docstring
- url: https://github.com/open-mmlab/mmdetection.git
- code:
def eiou_loss(pred, target, smooth_point=0.1, eps=1e-7): r px1, py1, px2, py2 = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3] tx1, ty1, tx2, ty2 = target[:, 0], target[:, 1], target[:, 2], target[:, 3] # extent top left ex1 = torch.min(px1, tx1) ey1 = torch.min(py1, ty1) # intersection coordinates ix1 = torch.max(px1, tx1) iy1 = torch.max(py1, ty1) ix2 = torch.min(px2, tx2) iy2 = torch.min(py2, ty2) # extra xmin = torch.min(ix1, ix2) ymin = torch.min(iy1, iy2) xmax = torch.max(ix1, ix2) ymax = torch.max(iy1, iy2) # Intersection intersection = (ix2 - ex1) * (iy2 - ey1) + (xmin - ex1) * (ymin - ey1) - ( ix1 - ex1) * (ymax - ey1) - (xmax - ex1) * ( iy1 - ey1) # Union union = (px2 - px1) * (py2 - py1) + (tx2 - tx1) * ( ty2 - ty1) - intersection + eps # IoU ious = 1 - (intersection / union) # Smooth-EIoU smooth_sign = (ious < smooth_point).detach().float() loss = 0.5 * smooth_sign * (ious**2) / smooth_point + (1 - smooth_sign) * ( ious - 0.5 * smooth_point) return loss @MODELS.register_module()
- ast_errors: @MODELS.register_module()
- token_counts: 326, complexity: 1
- file_name: iou_loss.py (language: Python), path: mmdet/models/losses/iou_loss.py
- repo: mmdetection, commit_id: 4510c77a48b264ea12389ecf766604ed1b04b694

**Example — fun_name: `stdev`** (id 175,193)
- Metrics: vocab_size 32, ast_levels 10, nloc 8, n_ast_nodes 111, n_identifiers 15, n_ast_errors 0, n_words 37, n_whitespaces 69
- commit_message:
bpo-46257: Convert statistics._ss() to a single pass algorithm (GH-30403)
- url: https://github.com/python/cpython.git
- code:
def stdev(data, xbar=None): T, ss, n = _ss(data, xbar) if n < 2: raise StatisticsError('stdev requires at least two data points') mss = ss / (n - 1) if issubclass(T, Decimal): return _decimal_sqrt_of_frac(mss.numerator, mss.denominator) return _float_sqrt_of_frac(mss.numerator, mss.denominator)
- token_counts: 71, complexity: 3
- file_name: statistics.py (language: Python), path: Lib/statistics.py
- repo: cpython, commit_id: 43aac29cbbb8a963a22c334b5b795d1e43417d6b

**Example — fun_name: `test_mute_volume`** (id 300,191)
- Metrics: vocab_size 24, ast_levels 11, nloc 14, n_ast_nodes 146, n_identifiers 11, n_ast_errors 0, n_words 41, n_whitespaces 95
- commit_message:
Add ws66i core integration (#56094) * Add ws66i core integration * Remove all ws66i translations * Update ws66i unit tests to meet minimum code coverage * Update ws66i based on @bdraco review * General improvements after 2nd PR review * Disable entities if amp shutoff, set default source names, set 30sec polling * Add _attr_ and change async_on_unload * Improve entity generation * Implement coordinator * Made options fields required, retry connection on failed attempts, use ZoneStatus for attributes * Refactor WS66i entity properties, raise HomeAssistantError on restore service if no snapshot * Update to pyws66i v1.1 * Add quality scale of silver to manifest * Update config_flow test
- url: https://github.com/home-assistant/core.git
- code:
async def test_mute_volume(hass): ws66i = MockWs66i() await _setup_ws66i(hass, ws66i) await _call_media_player_service( hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.5} ) await _call_media_player_service( hass, SERVICE_VOLUME_MUTE, {"entity_id": ZONE_1_ID, "is_volume_muted": False} ) assert not ws66i.zones[11].mute await _call_media_player_service( hass, SERVICE_VOLUME_MUTE, {"entity_id": ZONE_1_ID, "is_volume_muted": True} ) assert ws66i.zones[11].mute
- token_counts: 90, complexity: 1
- file_name: test_media_player.py (language: Python), path: tests/components/ws66i/test_media_player.py
- repo: core, commit_id: 5e737bfe4fbc5a724f5fdf04ea9319c2224cb114

**Example — fun_name: `handle_401`** (id 22,057)
- Metrics: vocab_size 88, ast_levels 13, nloc 25, n_ast_nodes 359, n_identifiers 40, n_ast_errors 0, n_words 126, n_whitespaces 435
- commit_message:
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
- url: https://github.com/pypa/pipenv.git
- code:
def handle_401(self, r, **kwargs): # If response is not 4xx, do not auth # See https://github.com/psf/requests/issues/3772 if not 400 <= r.status_code < 500: self._thread_local.num_401_calls = 1 return r if self._thread_local.pos is not None: # Rewind the file position indicator of the body to where # it was to resend the request. r.request.body.seek(self._thread_local.pos) s_auth = r.headers.get("www-authenticate", "") if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2: self._thread_local.num_401_calls += 1 pat = re.compile(r"digest ", flags=re.IGNORECASE) self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1)) # Consume content and release the original connection # to allow our new request to reuse the same one. r.content r.close() prep = r.request.copy() extract_cookies_to_jar(prep._cookies, r.request, r.raw) prep.prepare_cookies(prep._cookies) prep.headers["Authorization"] = self.build_digest_header( prep.method, prep.url ) _r = r.connection.send(prep, **kwargs) _r.history.append(r) _r.request = prep return _r self._thread_local.num_401_calls = 1 return r
- token_counts: 221, complexity: 5
- file_name: auth.py (language: Python), path: pipenv/patched/pip/_vendor/requests/auth.py
- repo: pipenv, commit_id: cd5a9683be69c86c8f3adcd13385a9bc5db198ec

**Example — fun_name: `_kernel_constraint`** (id 270,117)
- Metrics: vocab_size 49, ast_levels 14, nloc 26, n_ast_nodes 300, n_identifiers 21, n_ast_errors 0, n_words 77, n_whitespaces 244
- commit_message:
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
- url: https://github.com/keras-team/keras.git
- code:
def _kernel_constraint(self, kernel): padding = backend.constant([[1, 1], [1, 1]], dtype="int32") kernel_shape = backend.shape(kernel)[0] start = backend.cast(kernel_shape / 2, "int32") kernel_new = backend.switch( backend.cast(tf.math.floormod(kernel_shape, 2), "bool"), lambda: kernel[start - 1 : start, start - 1 : start], lambda: kernel[start - 1 : start, start - 1 : start] + backend.zeros( # pylint: disable=g-long-lambda (2, 2), dtype=kernel.dtype ), ) index = backend.switch( backend.cast(tf.math.floormod(kernel_shape, 2), "bool"), lambda: backend.constant(0, dtype="int32"), lambda: backend.constant(1, dtype="int32"), ) while_condition = lambda index, *args: backend.less(index, start)
- token_counts: 246, complexity: 1
- file_name: constraints.py (language: Python), path: keras/constraints.py
- repo: keras, commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf

**Example — fun_name: `permutedims`** (id 197,380)
- Metrics: vocab_size 79, ast_levels 17, nloc 29, n_ast_nodes 463, n_identifiers 49, n_ast_errors 0, n_words 120, n_whitespaces 261
- commit_message:
PermuteDims and permutedims now have an easier API based on index orders. The corresponding permutation is then derived internally
- url: https://github.com/sympy/sympy.git
- code:
def permutedims(expr, perm=None, index_order_old=None, index_order_new=None): from sympy.tensor.array import SparseNDimArray from sympy.tensor.array.expressions.array_expressions import _ArrayExpr from sympy.tensor.array.expressions.array_expressions import _CodegenArrayAbstract from sympy.tensor.array.expressions.array_expressions import _permute_dims from sympy.matrices.expressions.matexpr import MatrixSymbol from sympy.tensor.array.expressions import PermuteDims from sympy.tensor.array.expressions.array_expressions import get_rank perm = PermuteDims._get_permutation_from_arguments(perm, index_order_old, index_order_new, get_rank(expr)) if isinstance(expr, (_ArrayExpr, _CodegenArrayAbstract, MatrixSymbol)): return _permute_dims(expr, perm) if not isinstance(expr, NDimArray): expr = ImmutableDenseNDimArray(expr) from sympy.combinatorics import Permutation if not isinstance(perm, Permutation): perm = Permutation(list(perm)) if perm.size != expr.rank(): raise ValueError("wrong permutation size") # Get the inverse permutation: iperm = ~perm new_shape = perm(expr.shape) if isinstance(expr, SparseNDimArray): return type(expr)({tuple(perm(expr._get_tuple_index(k))): v for k, v in expr._sparse_array.items()}, new_shape) indices_span = perm([range(i) for i in expr.shape]) new_array = [None]*len(expr) for i, idx in enumerate(itertools.product(*indices_span)): t = iperm(idx) new_array[i] = expr[t] return type(expr)(new_array, new_shape)
- token_counts: 312, complexity: 9
- file_name: arrayop.py (language: Python), path: sympy/tensor/array/arrayop.py
- repo: sympy, commit_id: 34d9ba9bd176605cb60acafc906a36607cd76061

**Example — fun_name: `_get`** (id 155,114)
- Metrics: vocab_size 19, ast_levels 12, nloc 15, n_ast_nodes 95, n_identifiers 14, n_ast_errors 0, n_words 22, n_whitespaces 75
- commit_message:
FIX-#5187: Fixed RecursionError in OmnisciLaunchParameters.get() (#5199) Signed-off-by: Andrey Pavlenko <andrey.a.pavlenko@gmail.com>
- url: https://github.com/modin-project/modin.git
- code:
def _get(cls) -> dict: custom_parameters = super().get() result = cls.default.copy() result.update( {key.replace("-", "_"): value for key, value in custom_parameters.items()} ) return result
- token_counts: 55, complexity: 2
- file_name: envvars.py (language: Python), path: modin/config/envvars.py
- repo: modin, commit_id: c51ab405efec920dbb4baa2e2389409df04e8d43

**Example — fun_name: `to_csv`** (id 156,090)
- Metrics: vocab_size 10, ast_levels 8, nloc 3, n_ast_nodes 44, n_identifiers 7, n_ast_errors 0, n_words 12, n_whitespaces 33
- commit_message:
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
- url: https://github.com/dask/dask.git
- code:
def to_csv(self, filename, **kwargs): from dask.dataframe.io import to_csv return to_csv(self, filename, **kwargs)
- token_counts: 29, complexity: 1
- file_name: core.py (language: Python), path: dask/dataframe/core.py
- repo: dask, commit_id: cccb9d8d8e33a891396b1275c2448c352ef40c27

**Example — fun_name: `preprocess_datasets`** (id 146,622)
- Metrics: vocab_size 63, ast_levels 13, nloc 26, n_ast_nodes 181, n_identifiers 17, n_ast_errors 0, n_words 86, n_whitespaces 243
- commit_message:
[ml] Trainer implementation (#22969) Implementation for base Trainer Co-authored-by: Eric Liang <ekhliang@gmail.com> Co-authored-by: Richard Liaw <rliaw@berkeley.edu>
- url: https://github.com/ray-project/ray.git
- code:
def preprocess_datasets(self) -> None: # Evaluate all datasets. self.datasets = {k: d() if callable(d) else d for k, d in self.datasets.items()} if self.preprocessor: train_dataset = self.datasets.get(TRAIN_DATASET_KEY, None) if train_dataset and not self.preprocessor.check_is_fitted(): self.preprocessor.fit(train_dataset) # Execute dataset transformations serially for now. # Cannot execute them in remote tasks due to dataset ownership model: # if datasets are created on a remote node, then if that node fails, # we cannot recover the dataset. new_datasets = {} for key, dataset in self.datasets.items(): new_datasets[key] = self.preprocessor.transform(dataset) self.datasets = new_datasets
- token_counts: 110, complexity: 7
- file_name: trainer.py (language: Python), path: python/ray/ml/trainer.py
- repo: ray, commit_id: 2548083dcb3928fce145ab75976c7aecbb8a0d51

**Example — fun_name: `clip_to_bbox`** (id 110,061)
- Metrics: vocab_size 16, ast_levels 9, nloc 4, n_ast_nodes 68, n_identifiers 11, n_ast_errors 0, n_words 18, n_whitespaces 46
- commit_message:
Simplify some patches path definitions. - When a Path ends with a CLOSEPOLY, it is not necessary to put a LINETO to the closing position before it (in fact that can result in an incorrect line join at that position), and the xy position associated with the CLOSEPOLY can just be (0, 0), as it is irrelevant. - For defining the codes arrays, for short paths (such as the patch shapes here), one can just use list unpacking for shorter definitions. - Rename the _path and _fillable lists in ArrowStyle to plural names. - Rely on the default tolerance of split_bezier_intersecting_with_closedpath (which is 0.01) rather than re-specifying the same magic value everywhere. - Remove inapplicable comment re: make_compound_path_from_polys (which only applies to polygons all of with the same number of sides, which is not the case when clipping to a bbox).
- url: https://github.com/matplotlib/matplotlib.git
- code:
def clip_to_bbox(self, bbox, inside=True): verts = _path.clip_path_to_rect(self, bbox, inside) paths = [Path(poly) for poly in verts] return self.make_compound_path(*paths)
- token_counts: 44, complexity: 2
- file_name: path.py (language: Python), path: lib/matplotlib/path.py
- repo: matplotlib, commit_id: 73622a0173916bfcb4cb7b9b393929be025e18c9

**Example — fun_name: `get_all_exported_dataset_infos`** (id 104,904)
- Metrics: vocab_size 13, ast_levels 11, nloc 16, n_ast_nodes 96, n_identifiers 13, n_ast_errors 0, n_words 14, n_whitespaces 53
- commit_message:
Add API code examples for Builder classes (#4313) * 📝 add examples for builder classes * 📝 apply quentin review
- url: https://github.com/huggingface/datasets.git
- code:
def get_all_exported_dataset_infos(cls) -> dict: dset_infos_file_path = os.path.join(cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME) if os.path.exists(dset_infos_file_path): return DatasetInfosDict.from_directory(cls.get_imported_module_dir()) return {}
- token_counts: 50, complexity: 2
- file_name: builder.py (language: Python), path: src/datasets/builder.py
- repo: datasets, commit_id: d1d4f1065fd4ab91b2c8682643dbd12f86d66fcd

**Example — fun_name: `enable_telemetry`** (id 256,959)
- Metrics: vocab_size 9, ast_levels 8, nloc 3, n_ast_nodes 38, n_identifiers 6, n_ast_errors 0, n_words 9, n_whitespaces 18
- commit_message:
Add basic telemetry features (#2314) * add basic telemetry features * change pipeline_config to _component_config * Update Documentation & Code Style * add super().__init__() calls to error classes * make posthog mock work with python 3.7 * Update Documentation & Code Style * update link to docs web page * log exceptions, send event for raised HaystackErrors, refactor Path(CONFIG_PATH) * add comment on send_event in BaseComponent.init() and fix mypy * mock NonPrivateParameters and fix pylint undefined-variable * Update Documentation & Code Style * check model path contains multiple / * add test for writing to file * add test for en-/disable telemetry * Update Documentation & Code Style * merge file deletion methods and ignore pylint global statement * Update Documentation & Code Style * set env variable in demo to activate telemetry * fix mock of HAYSTACK_TELEMETRY_ENABLED * fix mypy and linter * add CI as env variable to execution contexts * remove threading, add test for custom error event * Update Documentation & Code Style * simplify config/log file deletion * add test for final event being sent * force writing config file in test * make test compatible with python 3.7 * switch to posthog production server * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
- url: https://github.com/deepset-ai/haystack.git
- code:
def enable_telemetry(): os.environ[HAYSTACK_TELEMETRY_ENABLED] = "True" logger.info("Telemetry has been enabled.")
- token_counts: 19, complexity: 1
- file_name: telemetry.py (language: Python), path: haystack/telemetry.py
- repo: haystack, commit_id: ac5617e757e9ace6f30b7291686d9dbbc339f433

**Example — fun_name: `__add__`** (id 105,813)
- Metrics: vocab_size 49, ast_levels 11, nloc 13, n_ast_nodes 143, n_identifiers 14, n_ast_errors 0, n_words 59, n_whitespaces 175
- commit_message:
refactor: replace AssertionError with more meaningful exceptions (#5074) (#5079) * refactor: replace AssertionError with more meaningful exceptions (#5074) * refactor: change FileNotFoundError to ValueError (#5074) Co-authored-by: Mario Šaško <mariosasko777@gmail.com> Co-authored-by: Mario Šaško <mariosasko777@gmail.com>
- url: https://github.com/huggingface/datasets.git
- code:
def __add__(self, other): if not isinstance(other, ReadInstruction): msg = "ReadInstruction can only be added to another ReadInstruction obj." raise TypeError(msg) self_ris = self._relative_instructions other_ris = other._relative_instructions # pylint: disable=protected-access if ( self_ris[0].unit != "abs" and other_ris[0].unit != "abs" and self._relative_instructions[0].rounding != other_ris[0].rounding ): raise ValueError("It is forbidden to sum ReadInstruction instances with different rounding values.") return self._read_instruction_from_relative_instructions(self_ris + other_ris)
- token_counts: 86, complexity: 5
- file_name: arrow_reader.py (language: Python), path: src/datasets/arrow_reader.py
- repo: datasets, commit_id: 31d6ba08a4dd4eb0cc763ed6c20423258ccbdee8

**Example — fun_name: `test_direct_deferral_wrapping_override`** (id 156,973)
- Metrics: vocab_size 34, ast_levels 10, nloc 8, n_ast_nodes 128, n_identifiers 16, n_ast_errors 0, n_words 42, n_whitespaces 69
- commit_message:
Added `flake8-bugbear` to `pre-commit` hooks (#9457)
- url: https://github.com/dask/dask.git
- code:
def test_direct_deferral_wrapping_override(): a = da.from_array(np.arange(4)) b = WrappedArray(np.arange(4)) assert a.__add__(b) is NotImplemented # Note: remove dask_graph to be able to wrap b in a dask array b.__dask_graph__ = None res = a + da.from_array(b) assert isinstance(res, da.Array) assert_eq(res, 2 * np.arange(4), check_type=False)
- token_counts: 79, complexity: 1
- file_name: test_dispatch.py (language: Python), path: dask/array/tests/test_dispatch.py
- repo: dask, commit_id: 5f11ba94cd5b1e0284c5246c4583e3a898d6447d
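Records like these can be sliced by the metadata columns; as a follow-up to the loading sketch above (the dataset identifier remains a placeholder), filtering to low-complexity functions from a single repository might look like:

```python
from datasets import load_dataset

ds = load_dataset("your-org/python-commit-functions", split="train")  # placeholder name

# Keep only simple functions (cyclomatic complexity <= 2) from the home-assistant/core repo.
simple_core = ds.filter(lambda row: row["repo"] == "core" and row["complexity"] <= 2)
print(len(simple_core), "matching functions")
```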