Dataset schema (column: dtype, observed range; ranges for string columns are character lengths):

n_words: int64, 3 to 1.95k
n_ast_errors: int64, 0 to 2
complexity: int64, 1 to 151
nloc: int64, 2 to 546
path: string, length 8 to 125
id: int64, 280 to 339k
commit_message: string, length 3 to 18.1k
repo: string, length 3 to 28
ast_levels: int64, 4 to 28
language: string, 1 class (Python)
vocab_size: int64, 3 to 677
file_name: string, length 5 to 67
code: string, length 101 to 24k
commit_id: string, length 40 (fixed)
ast_errors: string, length 0 to 2.76k
token_counts: int64, 7 to 3.77k
url: string, length 31 to 61
n_whitespaces: int64, 4 to 13.9k
random_cut: string, length 21 to 13.9k
n_identifiers: int64, 1 to 157
n_ast_nodes: int64, 10 to 3.6k
fun_name: string, length 3 to 72

Each record below lists one field value per line, in the same order as the schema above; the ast_errors line appears only when a record has AST errors (n_ast_errors > 0). A new record begins at each n_words value. The code and random_cut fields are shown with their original line breaks collapsed onto a single line.
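The schema above describes one function-level record per row. As a rough illustration of how a table with these columns could be queried, here is a minimal pandas sketch; the file name code_metrics.jsonl and the JSON-lines layout are assumptions, and only the column names are taken from the schema.

```python
# Minimal sketch (assumed file name and layout): load records like the ones
# below into pandas and filter on the schema columns.
import pandas as pd

# Assume each record is stored as one JSON object per line.
df = pd.read_json("code_metrics.jsonl", lines=True)

# Keep small, low-complexity functions with no AST errors.
subset = df[
    (df["n_ast_errors"] == 0)
    & (df["complexity"] <= 5)
    & (df["nloc"] <= 20)
]

# Inspect the most token-dense functions per repository.
top = (
    subset.sort_values("token_counts", ascending=False)
    .groupby("repo")
    .head(3)[["repo", "fun_name", "path", "token_counts", "complexity"]]
)
print(top.to_string(index=False))
```

If the data is instead loaded through the Hugging Face datasets library, the same conditions should map directly onto datasets.Dataset.filter.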
n_words: 30
n_ast_errors: 0
complexity: 1
nloc: 16
path: tests/sentry/snuba/metrics/test_fields.py
id: 97,350
feat(metrics): Adds support for CompositeEntityDerivedMetrics [INGEST-924 INGEST-1044 INGEST-1064] (#32829) * feat(metrics): Adds support for CompositeEntityDerivedMetrics Adds support for CompositeEntityDerivedMetrics, Adds derived metric for sessions.errored, renames RawMetric class to RawAggregatedMetric. Modifies QueryBuilder to always perform post query operations * Incorporate PR feedback
sentry
12
Python
17
test_fields.py
def test_generate_bottom_up_derived_metrics_dependencies(self): assert list(self.sessions_errored.generate_bottom_up_derived_metrics_dependencies()) == [ (None, "session.errored_set"), (None, "session.errored_preaggregated"), (None, "session.errored"), ] assert list( MOCKED_DERIVED_METRICS[ "random_composite" ].generate_bottom_up_derived_metrics_dependencies() ) == [ (None, "session.errored_set"), (None, "session.errored_preaggregated"), (None, "session.errored"), (None, "random_composite"), ]
b52d8e5fa16670e5d4b071ca72457e187ed7eeeb
76
https://github.com/getsentry/sentry.git
178
def test_generate_bottom_up_derived_metrics_dependencies(self): assert list(self.sessions_errored.generate_bottom_up_derived_metrics_dependencies()) == [ (None, "session.errored_set"), (None, "session.errored_preaggregated"), (None, "session.errored"), ] assert list( MOCKED_DERIVED_METRICS[ "random_composite" ].generate_bottom_up_derived_metrics_dependencies() ) == [ (None, "session.errored_set"), (None, "session.errored_preaggregated"), (None, "session.errore
6
121
test_generate_bottom_up_derived_metrics_dependencies
n_words: 35
n_ast_errors: 0
complexity: 1
nloc: 9
path: jaxlib/cusparse.py
id: 120,094
[MHLO] Add direct MHLO lowerings for sparse primitives. PiperOrigin-RevId: 440374054
jax
9
Python
28
cusparse.py
def _validate_csr_mhlo(data, indices, indptr, shape): data_type = ir.RankedTensorType(data.type) indices_type = ir.RankedTensorType(indices.type) indptr_type = ir.RankedTensorType(indptr.type) nnz, = data_type.shape assert indices_type.shape == [nnz] assert indptr_type.element_type == indices_type.element_type assert indptr_type.shape == [shape[0] + 1] return data_type.element_type, indices_type.element_type, nnz
648a512488a5184caa8dc1bced58e9f8ab7269f2
86
https://github.com/google/jax.git
42
def _validate_csr_mhlo(data, indices, indptr, shape): data_type = ir.RankedTensorType(data.type) indices_type = ir.RankedTensorType(indices.type) indptr_type = ir.RankedTensorType(indptr.type) nnz, = data_type.shape assert indices_type.shape == [nnz] assert indptr_type.element_type == indices_type.element_type assert indptr_type.shape == [shape[0] + 1] return da
13
130
_validate_csr_mhlo
n_words: 34
n_ast_errors: 0
complexity: 5
nloc: 7
path: src/calibre/ebooks/css_transform_rules.py
id: 188,915
Automated upgrade of code to python 3.7+ Done by https://github.com/asottile/pyupgrade Consists mainly of moving string formatting to f-strings and removing encoding declarations
calibre
14
Python
27
css_transform_rules.py
def export_rules(serialized_rules): lines = [] for rule in serialized_rules: lines.extend('# ' + l for l in rule_to_text(rule).splitlines()) lines.extend('{}: {}'.format(k, v.replace('\n', ' ')) for k, v in iteritems(rule) if k in allowed_keys) lines.append('') return '\n'.join(lines).encode('utf-8')
eb78a761a99ac20a6364f85e12059fec6517d890
84
https://github.com/kovidgoyal/calibre.git
63
def export_rules(serialized_rules): lines =
17
147
export_rules
n_words: 35
n_ast_errors: 0
complexity: 1
nloc: 11
path: python/ray/serve/tests/test_deployment_state.py
id: 144,680
[serve] Introduce DeploymentStatus, poll for statuses instead of using async goals (#22121)
ray
9
Python
25
test_deployment_state.py
def test_deploy_with_consistent_constructor_failure(mock_deployment_state): deployment_state, timer = mock_deployment_state b_info_1, b_version_1 = deployment_info(num_replicas=2) updating = deployment_state.deploy(b_info_1) assert updating assert deployment_state.curr_status_info.status == DeploymentStatus.UPDATING _constructor_failure_loop_two_replica(deployment_state, 3) assert deployment_state._replica_constructor_retry_counter == 6 assert deployment_state.curr_status_info.status == DeploymentStatus.FAILED check_counts(deployment_state, total=0) assert deployment_state.curr_status_info.message != ""
48adb6f7bb335b28fb0fb0d1190bd6c5dfc8ddfa
79
https://github.com/ray-project/ray.git
68
def test_deploy_with_consistent_constructor_failure(mock_deployment_state): deployment_state, timer = mock_deployment_state b_info_1, b_version_1 = deployment_info(num_replicas=2) updating = deployment_state.deploy(b_info_1) assert updating assert deployment_state.curr_status_info.status == DeploymentStatus.UPDATING _constructor_failure_loop_two_replica(deployment_state, 3) assert deployment_state._replica_constructor_retry_counter == 6 assert deployment_state.curr_status_info.status == DeploymentStatus.FAILED check_counts(deployment_state, total=0) assert deployment_state.curr_status_info.message != ""
20
127
test_deploy_with_consistent_constructor_failure
n_words: 49
n_ast_errors: 0
complexity: 1
nloc: 22
path: kubernetes_tests/test_kubernetes_pod_operator_backcompat.py
id: 42,802
Use KubernetesHook to create api client in KubernetesPodOperator (#20578) Add support for k8s hook in KPO; use it always (even when no conn id); continue to consider the core k8s settings that KPO already takes into account but emit deprecation warning about them. KPO historically takes into account a few settings from core airflow cfg (e.g. verify ssl, tcp keepalive, context, config file, and in_cluster). So to use the hook to generate the client, somehow the hook has to take these settings into account. But we don't want the hook to consider these settings in general. So we read them in KPO and if necessary patch the hook and warn.
airflow
13
Python
41
test_kubernetes_pod_operator_backcompat.py
def test_envs_from_configmaps(self, mock_monitor, mock_start): # GIVEN configmap = 'test-configmap' # WHEN k = KubernetesPodOperator( namespace='default', image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels={"foo": "bar"}, name="test", task_id="task", in_cluster=False, do_xcom_push=False, configmaps=[configmap], ) # THEN mock_pod = MagicMock() mock_pod.status.phase = 'Succeeded' mock_monitor.return_value = mock_pod context = create_context(k) k.execute(context) assert mock_start.call_args[1]['pod'].spec.containers[0].env_from == [ k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name=configmap)) ]
60eb9e106f5915398eafd6aa339ec710c102dc09
135
https://github.com/apache/airflow.git
260
def test_envs_from_configmaps(self, mock_monitor, mock_start): # GIVEN configmap = 'test-configmap' # WHEN k = KubernetesPodOperator( namespace='default', image="ubuntu:16.04", cmds=["bash", "-cx"], arguments=["echo 10"], labels={"foo": "bar"}, name="test", task_id="task", in_cluster=False, do_xcom_push=False, c
33
224
test_envs_from_configmaps
n_words: 37
n_ast_errors: 0
complexity: 1
nloc: 27
path: keras/engine/data_adapter_test.py
id: 271,153
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
15
Python
28
data_adapter_test.py
def setUp(self): super().setUp() self.batch_size = 5 self.numpy_input = np.zeros((50, 10)) self.numpy_target = np.ones(50) self.tensor_input = tf.constant(2.0, shape=(50, 10)) self.tensor_target = tf.ones((50,)) self.arraylike_input = DummyArrayLike(self.numpy_input) self.arraylike_target = DummyArrayLike(self.numpy_target) self.dataset_input = ( tf.data.Dataset.from_tensor_slices( (self.numpy_input, self.numpy_target) ) .shuffle(50) .batch(self.batch_size) )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
218
https://github.com/keras-team/keras.git
165
def setUp(self): super().setUp() self.batch_size =
23
200
setUp
n_words: 19
n_ast_errors: 0
complexity: 2
nloc: 5
path: lib/matplotlib/tri/_triangulation.py
id: 109,990
Make all matplotlib.tri submodules private Users should access all elements through the outer namespace matplotlib.tri. Back-compatibility for the old module names will be added in a separate commit. If done in the same commit, git would interpret this as a modified file plus a new file and not as a rename. With the separation and the rename we keep the history.
matplotlib
10
Python
17
_triangulation.py
def get_trifinder(self): if self._trifinder is None: # Default TriFinder class. from matplotlib.tri._trifinder import TrapezoidMapTriFinder self._trifinder = TrapezoidMapTriFinder(self) return self._trifinder
cf8e04ddc1686dd285afdcc6e3ea8d9f29ff869b
33
https://github.com/matplotlib/matplotlib.git
73
def get_trifinder(self): if self._trifinder is None: # Default TriFinder class. from matplotlib.tri._trifinder import TrapezoidMapTriFinder self._trifinder = TrapezoidMapTriFinder(self) return self._trifinder
6
55
get_trifinder
n_words: 34
n_ast_errors: 0
complexity: 4
nloc: 23
path: homeassistant/components/ibeacon/coordinator.py
id: 287,742
Handle iBeacons that broadcast multiple different uuids (#79011) * Handle iBeacons that broadcast multiple different uuids * fix flip-flopping between uuids * naming
core
13
Python
28
coordinator.py
def _async_update_rssi(self) -> None: for ( unique_id, ibeacon_advertisement, ) in self._last_ibeacon_advertisement_by_unique_id.items(): address = unique_id.split("_")[-1] if ( service_info := bluetooth.async_last_service_info( self.hass, address, connectable=False ) ) and service_info.rssi != ibeacon_advertisement.rssi: ibeacon_advertisement.update_rssi(service_info.rssi) async_dispatcher_send( self.hass, signal_seen(unique_id), ibeacon_advertisement, )
02731efc4cb3f7ee94b0c08aecc10e3a5209dbf4
86
https://github.com/home-assistant/core.git
261
def _async_update_rssi(self) -> None: for ( unique_id, ibeacon_advertisement, ) in self._last_ibeacon_advertisement_by_unique_id.items(): address = unique_id.split("_")[-1] if ( servic
17
134
_async_update_rssi
n_words: 43
n_ast_errors: 0
complexity: 5
nloc: 10
path: bootloader/waflib/Tools/c_preproc.py
id: 263,293
Bootloader: Building: Unpack waf's lib archive. Doing so makes it easier to modify. This is a temporary measure until the next waf version is released (although I'm tempted to keep it since it's much more IDE completion friendly).
pyinstaller
13
Python
33
c_preproc.py
def eval_macro(lst, defs): reduce_tokens(lst, defs, []) if not lst: raise PreprocError('missing tokens to evaluate') if lst: p, v = lst[0] if p == IDENT and v not in defs: raise PreprocError('missing macro %r' % lst) p, v = reduce_eval(lst) return int(v) != 0
64ccb7aea824fbec57f7ed1bbe483ec486183c13
68
https://github.com/pyinstaller/pyinstaller.git
89
def eval_macro(lst, defs): reduce_tokens(lst, defs, []) if not lst: raise PreprocError('missing tokens to evaluate') if lst: p, v = lst[0] if p == IDENT and v not in defs: raise PreprocError('missing macro %r' % lst) p, v = reduce_eval(lst) re
10
110
eval_macro
n_words: 27
n_ast_errors: 0
complexity: 1
nloc: 20
path: tests/snuba/api/endpoints/test_organization_events.py
id: 94,836
fix(tests): Fix dnd backend test flakes (#37916) This PR fixes 3 major flakes: Fixes SENTRY-TESTS-3J5: Just sort the project id order Fixes SENTRY-TESTS-3HQ: Flakes because we calculate the retention in the test once and the value returned in the response is calculated a little while after. We don't need to test for seconds granularity so replacing seconds to 0. Fixes SENTRY-TESTS-3J0: Successively calling before_now results in some flakes particularly in tests that are calculating aggregates on transaction.duration. Introduced a load_data method that takes a datetime object timestamp and a timedelta duration calculates the offset based on timestamp to get start_timestamp.
sentry
14
Python
18
test_organization_events.py
def test_in_query_events_stack(self): test_js = self.store_event( self.load_data( platform="javascript", timestamp=before_now(minutes=10), duration=timedelta(seconds=5), ), project_id=self.project.id, ) test_java = self.store_event( self.load_data( platform="java", timestamp=before_now(minutes=10), duration=timedelta(seconds=5), ), project_id=self.project.id, ) self.run_test_in_query( "stack.filename:[../../sentry/scripts/views.js]", [test_js], [test_java] )
ab993b32614bb83d17d10e1041817e43dd6f5980
105
https://github.com/getsentry/sentry.git
235
def test_in_query_events_stack(self): test_js = self.store_event( self.load_data( platform="javascript", timestamp=before_now(minutes=10), duration=timedelta(seconds=5), ), project_id=self.project.id, ) test_java = self.store_event( self.load_data( platform="java", timestamp=before_now(minutes=10), duration=timedelt
17
161
test_in_query_events_stack
n_words: 49
n_ast_errors: 0
complexity: 1
nloc: 13
path: pandas/tests/arrays/test_datetimes.py
id: 169,899
REF: _reso->_creso (#49107)
pandas
11
Python
25
test_datetimes.py
def test_add_timedeltalike_scalar_mismatched_reso(self, dta_dti, scalar): dta, dti = dta_dti td = pd.Timedelta(scalar) exp_reso = max(dta._creso, td._creso) exp_unit = npy_unit_to_abbrev(exp_reso) expected = (dti + td)._data._as_unit(exp_unit) result = dta + scalar tm.assert_extension_array_equal(result, expected) result = scalar + dta tm.assert_extension_array_equal(result, expected) expected = (dti - td)._data._as_unit(exp_unit) result = dta - scalar tm.assert_extension_array_equal(result, expected)
90b4add77859d1349530fff3c8cadeef95f36f39
107
https://github.com/pandas-dev/pandas.git
132
def test_add_timedeltalike_scalar_mismatched_reso(self, dta_dti, scalar): dta, dti = dta_dti td = pd.Timedelta(scalar) exp_reso = max(dta._creso, td._creso) exp_unit = npy_un
20
166
test_add_timedeltalike_scalar_mismatched_reso
n_words: 211
n_ast_errors: 0
complexity: 12
nloc: 42
path: PPOCRLabel/PPOCRLabel.py
id: 23,691
new
PaddleOCR
20
Python
126
PPOCRLabel.py
def cellreRecognition(self): img = cv2.imread(self.filePath) for shape in self.canvas.selectedShapes: box = [[int(p.x()), int(p.y())] for p in shape.points] if len(box) > 4: box = self.gen_quad_from_poly(np.array(box)) assert len(box) == 4 # pad around bbox for better text recognition accuracy _box = boxPad(box, img.shape, 6) img_crop = get_rotate_crop_image(img, np.array(_box, np.float32)) if img_crop is None: msg = 'Can not recognise the detection box in ' + self.filePath + '. Please change manually' QMessageBox.information(self, "Information", msg) return # merge the text result in the cell texts = '' probs = 0. # the probability of the cell is avgerage prob of every text box in the cell bboxes = self.ocr.ocr(img_crop, det=True, rec=False, cls=False) if len(bboxes) > 0: bboxes.reverse() # top row text at first for _bbox in bboxes: patch = get_rotate_crop_image(img_crop, np.array(_bbox, np.float32)) rec_res = self.ocr.ocr(patch, det=False, rec=True, cls=False) text = rec_res[0][0] if text != '': texts += text + (' ' if text[0].isalpha() else '') # add space between english word probs += rec_res[0][1] probs = probs / len(bboxes) result = [(texts.strip(), probs)] if result[0][0] != '': result.insert(0, box) print('result in reRec is ', result) if result[1][0] == shape.label: print('label no change') else: shape.label = result[1][0] else: print('Can not recognise the box') if self.noLabelText == shape.label: print('label no change') else: shape.label = self.noLabelText self.singleLabel(shape) self.setDirty()
8b228a1f9b011aba935963431cadb81c7fe361d5
378
https://github.com/PaddlePaddle/PaddleOCR.git
827
def cellreRecognition(self): img = cv2.imread(self.filePath) for shape in self.canvas.selectedShapes: box = [[int(p.x()), int(p.y())] for p in shape.points] if len(box) > 4: box = self.gen_quad_from_poly(np.array(box)) assert len(box) == 4 # pad around bbox for better text recognition accuracy _box = boxPad(box, img.shape, 6) img_crop = get_rotate_crop_image(img, np.array(_box, np.float32)) if img_crop is None: msg = 'Can not recognise the detection box in ' + self.filePath + '. Please change manually' QMessageBox.information(self, "Information", msg) return # merge the text result in the cell texts = '' probs = 0. # the probability of the cell is avgerage prob of every text box in the cell bboxes = self.ocr.ocr(img_crop, det=True, rec=False, cls=False) if len(bboxes) > 0: bboxes.reverse() # top row text at first for _bbox in bboxes: patch = get_rotate_crop_image(img_crop, np.array(_bbox, np.float32)) rec_res = self.ocr.ocr(patch, det=False, rec=True, cls=False) text = rec_res[0][0] if text != '': texts += text + (' ' if text[0].isalpha() else '') # add space between english word probs += rec_res[0][1] probs = probs / len(bboxes) result = [(texts.strip(), probs)] if result[0][0] != '': result.insert(0, box) print('resu
48
610
cellreRecognition
n_words: 36
n_ast_errors: 1
complexity: 2
nloc: 15
path: test/units/galaxy/test_collection.py
id: 266,413
ansible-galaxy - fix the --ignore-certs flag for the implicit galaxy server (#76735) * ansible-galaxy - fix the --ignore-certs flag for the implicit galaxy server * changelog * Add a test without the server config * Fix respecting --ignore-certs for individual --server URLs also * Update changelogs/fragments/76735-ansible-galaxy-fix-ignore-certs.yaml
ansible
10
Python
33
test_collection.py
def test_validate_certs(global_ignore_certs, monkeypatch): cli_args = [ 'ansible-galaxy', 'collection', 'install', 'namespace.collection:1.0.0', ] if global_ignore_certs: cli_args.append('--ignore-certs') galaxy_cli = GalaxyCLI(args=cli_args) mock_execute_install = MagicMock() monkeypatch.setattr(galaxy_cli, '_execute_install_collection', mock_execute_install) galaxy_cli.run() assert len(galaxy_cli.api_servers) == 1 assert galaxy_cli.api_servers[0].validate_certs is not global_ignore_certs @pytest.mark.parametrize('global_ignore_certs', [True, False])
76220c4a7bf90c97113fe104ea33957a9881b8a9
@pytest.mark.parametrize('global_ignore_certs', [True, False])
77
https://github.com/ansible/ansible.git
96
def test_validate_certs(global_ignore_certs, monkeypatch): cli_args = [ 'ansible-galaxy', 'collection', 'install', 'namespace.collection:1.0.0', ] if global_ignore_certs: cli_args.append('--ignore-certs') galaxy_cli = Galaxy
18
153
test_validate_certs
n_words: 254
n_ast_errors: 0
complexity: 14
nloc: 47
path: .venv/lib/python3.8/site-packages/pip/_internal/cli/parser.py
id: 60,539
upd; format
transferlearning
18
Python
148
parser.py
def _update_defaults(self, defaults): # type: (Dict[str, Any]) -> Dict[str, Any] # Accumulate complex default state. self.values = optparse.Values(self.defaults) late_eval = set() # Then set the options with those values for key, val in self._get_ordered_configuration_items(): # '--' because configuration supports only long names option = self.get_option("--" + key) # Ignore options not present in this parser. E.g. non-globals put # in [global] by users that want them to apply to all applicable # commands. if option is None: continue assert option.dest is not None if option.action in ("store_true", "store_false"): try: val = strtobool(val) except ValueError: self.error( "{} is not a valid value for {} option, " # noqa "please specify a boolean value like yes/no, " "true/false or 1/0 instead.".format(val, key) ) elif option.action == "count": with suppress(ValueError): val = strtobool(val) with suppress(ValueError): val = int(val) if not isinstance(val, int) or val < 0: self.error( "{} is not a valid value for {} option, " # noqa "please instead specify either a non-negative integer " "or a boolean value like yes/no or false/true " "which is equivalent to 1/0.".format(val, key) ) elif option.action == "append": val = val.split() val = [self.check_default(option, key, v) for v in val] elif option.action == "callback": assert option.callback is not None late_eval.add(option.dest) opt_str = option.get_opt_string() val = option.convert_value(opt_str, val) # From take_action args = option.callback_args or () kwargs = option.callback_kwargs or {} option.callback(option, opt_str, val, self, *args, **kwargs) else: val = self.check_default(option, key, val) defaults[option.dest] = val for key in late_eval: defaults[key] = getattr(self.values, key) self.values = None return defaults
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
308
https://github.com/jindongwang/transferlearning.git
1,029
def _update_defaults(self, defaults): # type: (Dict[str, Any]) -> Dict[str, Any] # Accumulate complex default state. self.values = optparse.Values(self.defaults) late_eval = set()
35
518
_update_defaults
n_words: 24
n_ast_errors: 0
complexity: 2
nloc: 8
path: ppdet/modeling/backbones/mobileone.py
id: 210,982
Add SIoU and MobileOne block (#6312) * Add SIoU and MobileOne block * add paddle copyright * mobileone block k>1 bugfix * format code style
PaddleDetection
13
Python
22
mobileone.py
def _pad_1x1_to_3x3_tensor(self, kernel1x1): if kernel1x1 is None: return 0 else: padding_size = (self.kernel_size - 1) // 2 return nn.functional.pad( kernel1x1, [padding_size, padding_size, padding_size, padding_size])
6d91289fc71f4b7440515c7eed4302066a524a22
45
https://github.com/PaddlePaddle/PaddleDetection.git
100
def _pad_1x1_to_3x3_tensor(self, kernel1x1): if kernel1x1 is None: return 0 else: padding_size = (self.kernel_size - 1) // 2 return nn.functional.pad( kernel1x1,
8
68
_pad_1x1_to_3x3_tensor
n_words: 492
n_ast_errors: 0
complexity: 19
nloc: 112
path: sklearn/ensemble/_hist_gradient_boosting/grower.py
id: 261,256
ENH FEA add interaction constraints to HGBT (#21020) Co-authored-by: Loïc Estève <loic.esteve@ymail.com>
scikit-learn
16
Python
251
grower.py
def split_next(self): # Consider the node with the highest loss reduction (a.k.a. gain) node = heappop(self.splittable_nodes) tic = time() ( sample_indices_left, sample_indices_right, right_child_pos, ) = self.splitter.split_indices(node.split_info, node.sample_indices) self.total_apply_split_time += time() - tic depth = node.depth + 1 n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes) n_leaf_nodes += 2 left_child_node = TreeNode( depth, sample_indices_left, node.split_info.sum_gradient_left, node.split_info.sum_hessian_left, value=node.split_info.value_left, ) right_child_node = TreeNode( depth, sample_indices_right, node.split_info.sum_gradient_right, node.split_info.sum_hessian_right, value=node.split_info.value_right, ) node.right_child = right_child_node node.left_child = left_child_node # set start and stop indices left_child_node.partition_start = node.partition_start left_child_node.partition_stop = node.partition_start + right_child_pos right_child_node.partition_start = left_child_node.partition_stop right_child_node.partition_stop = node.partition_stop # set interaction constraints (the indices of the constraints sets) if self.interaction_cst is not None: # Calculate allowed_features and interaction_cst_indices only once. Child # nodes inherit them before they get split. ( left_child_node.allowed_features, left_child_node.interaction_cst_indices, ) = self._compute_interactions(node) right_child_node.interaction_cst_indices = ( left_child_node.interaction_cst_indices ) right_child_node.allowed_features = left_child_node.allowed_features if not self.has_missing_values[node.split_info.feature_idx]: # If no missing values are encountered at fit time, then samples # with missing values during predict() will go to whichever child # has the most samples. 
node.split_info.missing_go_to_left = ( left_child_node.n_samples > right_child_node.n_samples ) self.n_nodes += 2 self.n_categorical_splits += node.split_info.is_categorical if self.max_leaf_nodes is not None and n_leaf_nodes == self.max_leaf_nodes: self._finalize_leaf(left_child_node) self._finalize_leaf(right_child_node) self._finalize_splittable_nodes() return left_child_node, right_child_node if self.max_depth is not None and depth == self.max_depth: self._finalize_leaf(left_child_node) self._finalize_leaf(right_child_node) return left_child_node, right_child_node if left_child_node.n_samples < self.min_samples_leaf * 2: self._finalize_leaf(left_child_node) if right_child_node.n_samples < self.min_samples_leaf * 2: self._finalize_leaf(right_child_node) if self.with_monotonic_cst: # Set value bounds for respecting monotonic constraints # See test_nodes_values() for details if ( self.monotonic_cst[node.split_info.feature_idx] == MonotonicConstraint.NO_CST ): lower_left = lower_right = node.children_lower_bound upper_left = upper_right = node.children_upper_bound else: mid = (left_child_node.value + right_child_node.value) / 2 if ( self.monotonic_cst[node.split_info.feature_idx] == MonotonicConstraint.POS ): lower_left, upper_left = node.children_lower_bound, mid lower_right, upper_right = mid, node.children_upper_bound else: # NEG lower_left, upper_left = mid, node.children_upper_bound lower_right, upper_right = node.children_lower_bound, mid left_child_node.set_children_bounds(lower_left, upper_left) right_child_node.set_children_bounds(lower_right, upper_right) # Compute histograms of children, and compute their best possible split # (if needed) should_split_left = not left_child_node.is_leaf should_split_right = not right_child_node.is_leaf if should_split_left or should_split_right: # We will compute the histograms of both nodes even if one of them # is a leaf, since computing the second histogram is very cheap # (using histogram subtraction). n_samples_left = left_child_node.sample_indices.shape[0] n_samples_right = right_child_node.sample_indices.shape[0] if n_samples_left < n_samples_right: smallest_child = left_child_node largest_child = right_child_node else: smallest_child = right_child_node largest_child = left_child_node # We use the brute O(n_samples) method on the child that has the # smallest number of samples, and the subtraction trick O(n_bins) # on the other one. tic = time() smallest_child.histograms = self.histogram_builder.compute_histograms_brute( smallest_child.sample_indices ) largest_child.histograms = ( self.histogram_builder.compute_histograms_subtraction( node.histograms, smallest_child.histograms ) ) self.total_compute_hist_time += time() - tic tic = time() if should_split_left: self._compute_best_split_and_push(left_child_node) if should_split_right: self._compute_best_split_and_push(right_child_node) self.total_find_split_time += time() - tic # Release memory used by histograms as they are no longer needed # for leaf nodes since they won't be split. for child in (left_child_node, right_child_node): if child.is_leaf: del child.histograms # Release memory used by histograms as they are no longer needed for # internal nodes once children histograms have been computed. del node.histograms return left_child_node, right_child_node
5ceb8a6a031ddff26a7ede413db1b53edb64166a
642
https://github.com/scikit-learn/scikit-learn.git
1,959
def split_next(self): # Consider the node with the highest loss reduction (a.k.a. gain) node = heappop(self.splittable_nodes) tic = time() ( sample_indices_left, sample_indices_right, right_child_pos, ) = self.splitter.split_indices(node.split_info, node.sample_indices) self.total_apply_split_time += time() - tic depth = node.depth + 1 n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes) n_leaf_nodes += 2 left_child_node = TreeNode( depth, sample_indices_left, node.split_info.sum_gradient_left, node.split_info.sum_hessian_left, value=node.split_info.value_left, ) right_child_node = TreeNode( depth, sample_indices_right, node.split_info.sum_gradient_right, node.split_info.sum_hessian_right, value=node.split_info.value_right, ) node.right_child = right_child_node node.left_child = left_child_node # set start and stop indices left_child_node.partition_start = node.partition_start left_child_node.partition_stop = node.partition_start + right_child_pos right_child_node.partition_start = left_child_node.partition_stop right_child_node.partition_stop = node.partition_stop # set interaction constraints (the indices of the constraints sets) if self.interaction_cst is not None: # Calculate allowed_features and interaction_cst_indices only once. Child # nodes inherit them before they get split. ( left_child_node.allowed_features, left_child_node.interaction_cst_indices, ) = self._compute_interactions(node) right_child_node.interaction_cst_indices = ( left_child_node.interaction_cst_indices ) right_child_node.allowed_features = left_child_node.allowed_features if not self.has_missing_values[node.split_info.feature_idx]: # If no missing values are encountered at fit time, then samples # with missing values during predict() will go to whichever child # has the most samples. node.split_info.missing_go_to_left = ( left_child_node.n_samples > right_child_node.n_samples ) self.n_nodes += 2 self.n_categorical_splits += node.split_info.is_categorical if self.max_leaf_nodes is not None and n_leaf_nodes == self.max_leaf_nodes: self._finalize_leaf(left_child_node) self._finalize_leaf(right_child_node) self._finalize_splittable_nodes() return left_child_node, right_child_node if self.max_depth is not None and depth == self.max_depth: self._finalize_leaf(left_child_node) self._finalize_leaf(right_child_node) return left_child_node, right_child_node if left_child_node.n_samples < self.min_samples_leaf * 2: self._finalize_leaf(left_child_node) if right_child_node.n_samples < self.min_samples_leaf * 2: self._finalize_leaf(right_child_node) if self.with_monotonic_cst: # Set value bounds for respecting monotonic constraints # See test_nodes_values() for details if ( self.monotonic_cst[node.split_info.feature_idx] == MonotonicConstraint.NO_CST ): lower_left = lower_right = node.children_lower_bound upper_left = upper_right = node.children_upper_bound else: mid = (left_child_node.value + right_child_node.value) / 2 if ( self.monotonic_cst[node.split_info.feature_idx] == MonotonicConstraint.POS ): lower_left, upper_left = node.children_lower_bound, mid lower_right, upper_right = mid, node.children_upper_bound else: # NEG lower_left, upper_left = mid, node.children_upper_bound lower_right, upper_right = node.children_lower_bound, mid left_child_node.set_children_bounds(lower_left, upp
78
1,022
split_next
n_words: 6
n_ast_errors: 0
complexity: 1
nloc: 2
path: tests/test_relations.py
id: 48,688
Fix Pytest's deprecation warnings about nose usage (#8758) Pytest 7.2.0 deprecated plain `setup` and `teardown` functions and methods as nose idioms: https://docs.pytest.org/en/latest/changelog.html#pytest-7-2-0-2022-10-23 `setup` can be safely replaced with `setup_method`: https://docs.pytest.org/en/stable/deprecations.html#setup-teardown Fixes: https://github.com/encode/django-rest-framework/issues/8757 Signed-off-by: Stanislav Levin <slev@altlinux.org> Signed-off-by: Stanislav Levin <slev@altlinux.org>
django-rest-framework
9
Python
6
test_relations.py
def setup_method(self): self.default_hyperlink = serializers.Hyperlink('http://example.com', 'test')
78cdae69997c9fd95211ec15fb4e21f4cd45e30a
17
https://github.com/encode/django-rest-framework.git
12
def setup_method(self): self.defaul
5
31
setup_method
n_words: 17
n_ast_errors: 0
complexity: 1
nloc: 7
path: homeassistant/components/demo/mailbox.py
id: 307,676
Add demo to strict-typing (#77596) * Add demo to strict-typing * Adjust component * Adjust PR * Update homeassistant/components/demo/mailbox.py Co-authored-by: Marc Mueller <30130371+cdce8p@users.noreply.github.com>
core
12
Python
17
mailbox.py
async def async_get_messages(self) -> list[dict[str, Any]]: return sorted( self._messages.values(), key=lambda item: item["info"]["origtime"], # type: ignore[no-any-return] reverse=True, )
efb482fb1dcf29468e50fca98f046d551d6355c7
45
https://github.com/home-assistant/core.git
72
async def async_get_messages(self) -> list[dict[str, Any]]:
12
74
async_get_messages
n_words: 20
n_ast_errors: 0
complexity: 1
nloc: 12
path: rllib/offline/estimators/tests/test_dr_learning.py
id: 126,931
[RLlib] Fix test_ope flakiness (#27676)
ray
9
Python
19
test_dr_learning.py
def test_dr_expert_policy_mixed_data(self): print("Test DoublyRobust on expert policy on mixed dataset") check_estimate( estimator_cls=DoublyRobust, gamma=self.gamma, q_model_config=self.q_model_config, policy=self.expert_policy, batch=self.mixed_batch, mean_ret=self.expert_reward, std_ret=self.expert_std, seed=SEED, )
4607e788c1277f9396d7f45ea112b2d551383499
56
https://github.com/ray-project/ray.git
128
def test_dr_expert_policy_mixed_data(self): print("Test DoublyRobust on expert policy on mixed dataset") check_estimate( estimator_cls=DoublyRobust, gamma=self.gamma, q_model_config=self.q_mo
18
82
test_dr_expert_policy_mixed_data
n_words: 29
n_ast_errors: 1
complexity: 4
nloc: 9
path: erpnext/e_commerce/shopping_cart/cart.py
id: 65,808
style: format code with black
erpnext
10
Python
26
cart.py
def get_shipping_addresses(party=None): if not party: party = get_party() addresses = get_address_docs(party=party) return [ {"name": address.name, "title": address.address_title, "display": address.display} for address in addresses if address.address_type == "Shipping" ] @frappe.whitelist()
494bd9ef78313436f0424b918f200dab8fc7c20b
@frappe.whitelist()
56
https://github.com/frappe/erpnext.git
19
def get_shipping_addresses(party=None): if not party: party = get_party() addresses = get_address_docs(party=party)
12
105
get_shipping_addresses
n_words: 85
n_ast_errors: 0
complexity: 1
nloc: 13
path: pandas/tests/tools/test_to_datetime.py
id: 172,096
PDEP0004: implementation (#49024) * :wastebasket: deprecate infer_datetime_format, make strict * :rotating_light: add warning about dayfirst * :white_check_mark: add/update tests * :rotating_light: add warning if format cant be guessed * :goal_net: catch warnings * :memo: update docs * :memo: add example of reading csv file with mixed formats * :wastebasket: removed now outdated tests / clean inputs * :memo: clarify whatsnew and user-guide * :art: * guess %Y-%m format * Detect format from first non-na, but also exclude now and today * :white_check_mark: fixup tests based on now and today parsing * fixup after merge * fixup after merge * fixup test * remove outdated doctest * xfail test based on issue 49767 * wip * add back examples of formats which can be guessed * start fixing up * fixups from reviews * lint * put tests back * shorten diff * add example of string which cannot be guessed * add deprecated directive, construct expected explicitly, explicit UserWarning, reword row-wise and column-wise * remove redundant example * restore newline * double backticks around False, explicitly raise UserWarning * reword warning * test both dayfirst True and False * postmerge fixup * unimportant typo to restart CI Co-authored-by: MarcoGorelli <>
pandas
11
Python
57
test_to_datetime.py
def test_parsers_timestring(self, date_str, exp_def): # must be the same as dateutil result exp_now = parse(date_str) result1, _ = parsing.parse_time_string(date_str) with tm.assert_produces_warning(UserWarning, match="Could not infer format"): result2 = to_datetime(date_str) result3 = to_datetime([date_str]) result4 = Timestamp(date_str) result5 = DatetimeIndex([date_str])[0] # parse time string return time string based on default date # others are not, and can't be changed because it is used in # time series plot assert result1 == exp_def assert result2 == exp_now assert result3 == exp_now assert result4 == exp_now assert result5 == exp_now
1d5f05c33c613508727ee7b971ad56723d474446
88
https://github.com/pandas-dev/pandas.git
204
def test_parsers_timestring(self, date_str, exp_def): # must be the same as dateutil result exp_now = parse(date_str) result1, _ = parsing.parse_time_string(date_str) with tm.assert_produces_warning(UserWarning, match="Could not infer format"): result2 = to_datetime(date_str) result3 = to_datetime([date_str]) result4 = Timestamp(date_str) result5 = DatetimeIndex([date_str])[0] # parse time string return time string based on default date # others are not, and can't be changed because it is used in # time series plot assert result1 == exp_def assert result2 == exp_now
21
145
test_parsers_timestring
n_words: 29
n_ast_errors: 0
complexity: 1
nloc: 15
path: keras/dtensor/layers_test.py
id: 270,583
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
13
Python
26
layers_test.py
def setUp(self): super().setUp() backend.enable_tf_random_generator() tf_utils.set_random_seed(1337) global_ids = test_util.create_device_ids_array((2, 2)) local_device_ids = np.ravel(global_ids).tolist() mesh_dict = { "CPU": dtensor.Mesh( ["X", "Y"], global_ids, local_device_ids, test_util.create_device_list((2, 2), "CPU"), ) } self.mesh = self.configTestMesh(mesh_dict)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
91
https://github.com/keras-team/keras.git
166
def setUp(self): super().
20
149
setUp
n_words: 75
n_ast_errors: 0
complexity: 2
nloc: 12
path: src/pip/_internal/models/installation_report.py
id: 174,577
install report: add version field Also, affirm the experimental status of the feature.
pip
13
Python
64
installation_report.py
def to_dict(self) -> Dict[str, Any]: return { "version": "0", "pip_version": __version__, "install": { canonicalize_name(ireq.metadata["Name"]): self._install_req_to_dict( ireq ) for ireq in self._install_requirements }, # https://peps.python.org/pep-0508/#environment-markers # TODO: currently, the resolver uses the default environment to evaluate # environment markers, so that is what we report here. In the future, it # should also take into account options such as --python-version or # --platform, perhaps under the form of an environment_override field? # https://github.com/pypa/pip/issues/11198 "environment": default_environment(), }
1fbfdc44233486299db4d4364cf8cc8ef98ceacb
56
https://github.com/pypa/pip.git
273
def to_dict(self) -> Dict[str, Any]:
12
99
to_dict
n_words: 84
n_ast_errors: 0
complexity: 8
nloc: 22
path: src/sentry/api/fields/actor.py
id: 88,065
ref(hybrid-cloud): Add user services. Start tagging some model tests as stable (#40614) Notifications uses new hybrid cloud APIUser Co-authored-by: Mike Ihbe <mike.ihbe@sentry.io> Co-authored-by: Zachary Collins <zachary.collins@sentry.io> Co-authored-by: Zach Collins <recursive.cookie.jar@gmail.com>
sentry
16
Python
59
actor.py
def to_internal_value(self, data): if not data: return None try: actor = ActorTuple.from_actor_identifier(data) except Exception: raise serializers.ValidationError( "Could not parse actor. Format should be `type:id` where type is `team` or `user`." ) try: obj: APIUser | Team = actor.resolve() except (Team.DoesNotExist, User.DoesNotExist): raise serializers.ValidationError(f"{actor.type.__name__} does not exist") if actor.type == Team: if obj.organization != self.context["organization"]: raise serializers.ValidationError("Team is not a member of this organization") elif actor.type == User: if not OrganizationMember.objects.filter( organization=self.context["organization"], user_id=obj.id ).exists(): raise serializers.ValidationError("User is not a member of this organization") return actor
b38f59d9f6d9eedd7ce0606805df7c072addb000
135
https://github.com/getsentry/sentry.git
298
def to_internal_value(self, data): if not data: return None try: actor = Act
25
233
to_internal_value
n_words: 32
n_ast_errors: 0
complexity: 3
nloc: 31
path: erpnext/patches/v8_7/sync_india_custom_fields.py
id: 68,999
fix: remove HR/Payroll patches
erpnext
12
Python
28
sync_india_custom_fields.py
def execute(): company = frappe.get_all("Company", filters={"country": "India"}) if not company: return frappe.reload_doc("accounts", "doctype", "tax_category") for doctype in ["Sales Invoice", "Delivery Note", "Purchase Invoice"]: frappe.db.sql( , doctype, ) make_custom_fields() frappe.db.sql( ) frappe.db.sql( )
930e557fc6e6bdd515984e2f66ab5cea29101bae
126
https://github.com/frappe/erpnext.git
17
def execute(): company = frappe.get_all("Company", filters={"country": "India"}) if not company:
10
141
execute
n_words: 104
n_ast_errors: 0
complexity: 1
nloc: 25
path: tests/rest/client/test_sync.py
id: 248,164
Implement changes to MSC2285 (hidden read receipts) (#12168) * Changes hidden read receipts to be a separate receipt type (instead of a field on `m.read`). * Updates the `/receipts` endpoint to accept `m.fully_read`.
synapse
11
Python
69
test_sync.py
def test_knock_room_state(self) -> None: # Knock on a room channel = self.make_request( "POST", f"/_matrix/client/r0/knock/{self.room_id}", b"{}", self.knocker_tok, ) self.assertEqual(200, channel.code, channel.result) # We expect to see the knock event in the stripped room state later self.expected_room_state[EventTypes.Member] = { "content": {"membership": "knock", "displayname": "knocker"}, "state_key": "@knocker:test", } # Check that /sync includes stripped state from the room channel = self.make_request( "GET", self.url % self.next_batch, access_token=self.knocker_tok, ) self.assertEqual(channel.code, 200, channel.json_body) # Extract the stripped room state events from /sync knock_entry = channel.json_body["rooms"]["knock"] room_state_events = knock_entry[self.room_id]["knock_state"]["events"] # Validate that the knock membership event came last self.assertEqual(room_state_events[-1]["type"], EventTypes.Member) # Validate the stripped room state events self.check_knock_room_state_against_room_state( room_state_events, self.expected_room_state )
116a4c8340b729ffde43be33df24d417384cb28b
157
https://github.com/matrix-org/synapse.git
354
def test_knock_room_state(self) -> None: # Knock on a room channel = self.make_request( "POST", f"/_matrix/client/r0/knock/{self.room_id}", b"{}", self.knocker_tok, ) self.assertEqual(200, channel.code, channel.result) # We expect to see the knock event in the stripped room state later self.expected_room_state[EventTypes.Member] = { "content": {"membership": "knock", "displayname": "knocker"}, "state_key": "@knocker:te
19
271
test_knock_room_state
n_words: 41
n_ast_errors: 0
complexity: 1
nloc: 15
path: tests/sentry/snuba/metrics/test_query.py
id: 85,939
feat(metrics): Make metrics layer accept MRI directly [TET-321] (#39003) The metrics layer entrypoint which is the `MetricsQuery` object used to accept public names. As public names is not the naming contract we guarantee not to change, this PR allows `MetricQuery` object to directly accept MRI as that is the naming contract we guarantee
sentry
16
Python
36
test_query.py
def test_validate_distribution_functions_in_orderby(): # Validate no exception is raised when all orderBy fields are presented the select metric_field_1 = MetricField(op="avg", metric_mri=TransactionMRI.DURATION.value) metric_field_2 = MetricField(op="p50", metric_mri=TransactionMRI.DURATION.value) metrics_query_dict = ( MetricsQueryBuilder() .with_select([metric_field_1, metric_field_2]) .with_orderby( [ OrderBy(field=metric_field_1, direction=Direction.ASC), OrderBy(field=metric_field_2, direction=Direction.ASC), ] ) .to_metrics_query_dict() ) MetricsQuery(**metrics_query_dict)
04077133ca6e56647aca948e5ac21d3260b81f3f
93
https://github.com/getsentry/sentry.git
145
def test_validate_distribution_functions_in_orderby(): # Validate no exception is raised when all orderBy fields are presented the select metric_field_1 = MetricField(op="avg", metric_mri=TransactionMRI.DURATION.value) metric_field_2 = MetricField(op="p50", metric_mri=TransactionMRI.DURATION.value) metrics_query_dict = ( MetricsQueryBuilder() .with_select([metri
20
148
test_validate_distribution_functions_in_orderby
n_words: 102
n_ast_errors: 0
complexity: 3
nloc: 18
path: lib/ansible/modules/git.py
id: 266,557
Bypass fragile git ssh wrapper (#73404) git module now uses env vars exclusively - updated docs to clarify usage - now env vars append instead of overwrite to allow existing custom setups to keep working fixes #38104, #64673, #64674 - added note for hostkeychecking more securely fixes #69846 - keep script cause old versions still choke on env - env var cannot hold more than 'command' for older versions - all ssh_opts in one place
ansible
15
Python
83
git.py
def write_ssh_wrapper(module): try: # make sure we have full permission to the module_dir, which # may not be the case if we're sudo'ing to a non-root user if os.access(module.tmpdir, os.W_OK | os.R_OK | os.X_OK): fd, wrapper_path = tempfile.mkstemp(prefix=module.tmpdir + '/') else: raise OSError except (IOError, OSError): fd, wrapper_path = tempfile.mkstemp() # use existing git_ssh/ssh_command, fallback to 'ssh' template = b( % os.environ.get('GIT_SSH', os.environ.get('GIT_SSH_COMMAND', 'ssh'))) # write it with os.fdopen(fd, 'w+b') as fh: fh.write(template) # set execute st = os.stat(wrapper_path) os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC) module.debug('Wrote temp git ssh wrapper (%s): %s' % (wrapper_path, template)) # ensure we cleanup after ourselves module.add_cleanup_file(path=wrapper_path) return wrapper_path
b493c590bcee9b64e8ae02c17d4fde2331e0598b
154
https://github.com/ansible/ansible.git
208
def write_ssh_wrapper(module): try: # make sure we have full permission to the module_dir, which # may not be the case if we're sudo'ing to a non-root user if os.access(module.tmpdir, os.W_OK | os.R_OK | os.X_OK): fd, wrapper_path = tempfile.mkstemp(prefix=module.tmpdir + '/') else: raise OSError except (IOError, OSError): fd, wrapper_path = tempfile.mkstemp() # use existing git_ssh/ssh_command, fallback to 'ssh' template = b( % os.environ.get('GIT_SSH', os.environ.get('GIT_SSH_COMMAND', 'ss
30
265
write_ssh_wrapper
n_words: 120
n_ast_errors: 0
complexity: 1
nloc: 15
path: sympy/integrals/tests/test_integrals.py
id: 198,654
fix(integrals): fix degeneracy checking in heurisch Previously heurisch used solve with a single equation rather than a list containing that equation i.e. solve(eq) rather than solve([eq]). This takes different codepaths in solve and the [eq] codepath is more robust. This commit changes heurisch to use [eq] and also changes the Piecewise handling routine to produce deterministic output when there are multiple degenerate cases to handle.
sympy
19
Python
82
test_integrals.py
def test_issue_23718(): f = 1/(b*cos(x) + a*sin(x)) Fpos = (-log(-a/b + tan(x/2) - sqrt(a**2 + b**2)/b)/sqrt(a**2 + b**2) +log(-a/b + tan(x/2) + sqrt(a**2 + b**2)/b)/sqrt(a**2 + b**2)) F = Piecewise( # XXX: The zoo case here is for a=b=0 so it should just be zoo or maybe # it doesn't really need to be included at all given that the original # integrand is really undefined in that case anyway. (zoo*(-log(tan(x/2) - 1) + log(tan(x/2) + 1)), Eq(a, 0) & Eq(b, 0)), (log(tan(x/2))/a, Eq(b, 0)), (-I/(-I*b*sin(x) + b*cos(x)), Eq(a, -I*b)), (I/(I*b*sin(x) + b*cos(x)), Eq(a, I*b)), (Fpos, True), ) assert integrate(f, x) == F ap, bp = symbols('a, b', positive=True) rep = {a: ap, b: bp} assert integrate(f.subs(rep), x) == Fpos.subs(rep)
790c4cef5e61644bbb6c467db1b902a8c482ee4b
298
https://github.com/sympy/sympy.git
321
def test_issue_23718(): f = 1/(b*cos(x) + a*sin(x)) Fpos = (-log(-a/b + tan(x/2) - sqrt(a**2 + b**2)/b)/sqrt(a**2 + b**2) +log(-a/b + tan(x/2) + sqrt(a**2 + b**2)/b)/sqrt(a**2 + b**2)) F = Piecewise( # XXX: The zoo case here is for a=b=0 so it should just be zoo or maybe # it doesn't really need to be included at all given that the original # integrand is really undefined in that case anyway. (zoo*(-log(tan(x/2) - 1) + log(tan(x/2) + 1)), Eq(a, 0) & Eq(b, 0)), (log(tan(x/2))/a, Eq(b, 0)), (-I/(-I*b*sin(
23
457
test_issue_23718
n_words: 145
n_ast_errors: 0
complexity: 8
nloc: 32
path: rllib/algorithms/pg/pg.py
id: 135,811
[RLlib] Move all config validation logic into AlgorithmConfig classes. (#29854)
ray
16
Python
91
pg.py
def validate(self) -> None: # Call super's validation method. super().validate() # Check for mismatches between `train_batch_size` and # `rollout_fragment_length` (if not "auto").. # Note: Only check this if `train_batch_size` > 0 (DDPPO sets this # to -1 to auto-calculate the actual batch size later). if ( self.rollout_fragment_length != "auto" and not self.in_evaluation and self.train_batch_size > 0 ): min_batch_size = ( max(self.num_rollout_workers, 1) * self.num_envs_per_worker * self.rollout_fragment_length ) batch_size = min_batch_size while batch_size < self.train_batch_size: batch_size += min_batch_size if ( batch_size - self.train_batch_size > 0.1 * self.train_batch_size or batch_size - min_batch_size - self.train_batch_size > (0.1 * self.train_batch_size) ): suggested_rollout_fragment_length = self.train_batch_size // ( self.num_envs_per_worker * (self.num_rollout_workers or 1) ) raise ValueError( f"Your desired `train_batch_size` ({self.train_batch_size}) or a " "value 10% off of that cannot be achieved with your other " f"settings (num_rollout_workers={self.num_rollout_workers}; " f"num_envs_per_worker={self.num_envs_per_worker}; " f"rollout_fragment_length={self.rollout_fragment_length})! " "Try setting `rollout_fragment_length` to 'auto' OR " f"{suggested_rollout_fragment_length}." )
2ed09c54459cc3f74e2dab13406018698559856c
136
https://github.com/ray-project/ray.git
616
def validate(self) -> None: # Call super's validation method. super().validate() # Check for mismatches between `train_batch_size` and # `rollout_fragment_length` (if not "auto").. # Note: Only check t
13
251
validate
n_words: 37
n_ast_errors: 0
complexity: 1
nloc: 31
path: keras/engine/training_generator_test.py
id: 271,688
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
11
Python
25
training_generator_test.py
def test_evaluate_generator_method(self): model = test_utils.get_small_mlp( num_hidden=3, num_classes=4, input_dim=2 ) model.compile( loss="mse", optimizer=rmsprop.RMSprop(1e-3), metrics=["mae", metrics_module.CategoricalAccuracy()], run_eagerly=test_utils.should_run_eagerly(), ) model.evaluate_generator( custom_generator_threads(), steps=5, max_queue_size=10, workers=2, verbose=1, use_multiprocessing=True, ) model.evaluate_generator( custom_generator(), steps=5, max_queue_size=10, use_multiprocessing=False, ) model.evaluate_generator( custom_generator(), steps=5, max_queue_size=10, use_multiprocessing=False, workers=0, )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
138
https://github.com/keras-team/keras.git
326
def test_evaluate_generator_method(self): model = test_utils.get_small_mlp( num_hidden=3, num_classes=4, input_dim=2 ) model.compile( loss="mse", optimizer=rmsprop.RMSprop(1e-3), metrics=["mae", metrics_module.CategoricalAccuracy()], run_eagerly=test_utils.should_run_eagerly(), ) model.evaluate_generator( custom_generator_threads(), steps=5, max_queue_size=10, workers=2, verbose=1, use_multiprocessing=True, )
26
200
test_evaluate_generator_method
n_words: 91
n_ast_errors: 0
complexity: 1
nloc: 19
path: sympy/printing/tests/test_pycode.py
id: 196,442
printing: ArrayExpr support Better support for numpy-style arrays in `TensorflowPrinter` and `NumPyPrinter`. Printing methods are now collected in the `ArrayPrinter` class to avoid code duplications/maintainance errors. Printing for `ZeroArray` and `OneArray` has been added. `ArrayDiagonal` printing now also works for multiple diagonals and diagonals spanning more than two indices. `ArrayContractiong` printing now also works when its base is not a `ArrayTensorProduct`.
sympy
11
Python
37
test_pycode.py
def test_array_printer(): A = ArraySymbol('A', (4,4,6,6,6)) I = IndexedBase('I') prntr = NumPyPrinter() assert prntr.doprint(ZeroArray(5)) == 'numpy.zeros((5,))' assert prntr.doprint(OneArray(5)) == 'numpy.ones((5,))' assert prntr.doprint(ArrayContraction(A, [2,3])) == 'numpy.einsum("abccd->abd", A)' assert prntr.doprint(I) == 'I' assert prntr.doprint(ArrayDiagonal(A, [2,3,4])) == 'numpy.einsum("abccc->abc", A)' assert prntr.doprint(ArrayDiagonal(A, [0,1], [2,3])) == 'numpy.einsum("aabbc->cab", A)' assert prntr.doprint(ArrayContraction(A, [2], [3])) == 'numpy.einsum("abcde->abe", A)' prntr = TensorflowPrinter() assert prntr.doprint(ZeroArray(5)) == 'tensorflow.zeros((5,))' assert prntr.doprint(OneArray(5)) == 'tensorflow.ones((5,))' assert prntr.doprint(ArrayContraction(A, [2,3])) == 'tensorflow.linalg.einsum("abccd->abd", A)' assert prntr.doprint(I) == 'I' assert prntr.doprint(ArrayDiagonal(A, [2,3,4])) == 'tensorflow.linalg.einsum("abccc->abc", A)' assert prntr.doprint(ArrayDiagonal(A, [0,1], [2,3])) == 'tensorflow.linalg.einsum("aabbc->cab", A)' assert prntr.doprint(ArrayContraction(A, [2], [3])) == 'tensorflow.linalg.einsum("abcde->abe", A)'
8fe2c879fe862d9ab6547130e4ff65010eecb549
268
https://github.com/sympy/sympy.git
144
def test_array_printer(): A = ArraySymbol('A', (4,4,6,6,6)) I = IndexedBase('I') prntr = NumPyPrinter() assert prntr.doprint(ZeroArray(5)) == 'numpy
13
427
test_array_printer
n_words: 21
n_ast_errors: 0
complexity: 4
nloc: 6
path: keras/engine/data_adapter.py
id: 271,122
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
10
Python
17
data_adapter.py
def _is_list_of_scalars(inp): if isinstance(inp, (float, int, str, bytes, bytearray)): return True if isinstance(inp, (list, tuple)) and inp: return ListsOfScalarsDataAdapter._is_list_of_scalars(inp[0]) return False
84afc5193d38057e2e2badf9c889ea87d80d8fbf
51
https://github.com/keras-team/keras.git
63
def _is_list_of_scalars(inp): if isinstance(inp, (float,
11
73
_is_list_of_scalars
n_words: 42
n_ast_errors: 0
complexity: 1
nloc: 10
path: tests/integration_tests/test_torchscript.py
id: 8,778
Fix TorchText version in tokenizers ahead of torch 1.13.0 upgrade (#2838) * fix torchtext version in tokenizers ahead of torch 1.13.0 upgrade * add truncation test to torchscript * check version before adding hf tokenizer to triton test * revert triton in case the changes affected tests? * cleanup
ludwig
13
Python
36
test_torchscript.py
def test_torchscript_e2e_text_hf_tokenizer_truncated_sequence(tmpdir, csv_filename): data_csv_path = os.path.join(tmpdir, csv_filename) input_features = [text_feature(encoder={"vocab_size": 3, "type": "bert"}, preprocessing={"max_sequence_length": 3})] output_features = [ text_feature(decoder={"vocab_size": 3}), ] backend = LocalTestBackend() config = {"input_features": input_features, "output_features": output_features, TRAINER: {"epochs": 2}} training_data_csv_path = generate_data(input_features, output_features, data_csv_path) validate_torchscript_outputs(tmpdir, config, backend, training_data_csv_path)
51e763580a130801e4af64221614777761d8b364
104
https://github.com/ludwig-ai/ludwig.git
72
def test_torchscript_e2e_text_hf_tokenizer_truncated_sequence(tmpdir, csv_filename): data_csv_path = os.path.join(tmpdir, csv_filename) input_features = [text_feature(encoder={"vocab_size": 3, "type": "bert"}, preprocessing={"max_sequence_length": 3})] output_features = [ text_feature(decoder={"vocab_size": 3}), ] backend = LocalTestBackend() config = {"input_features": input_features, "output_features": output_features, TRAINER: {"epochs": 2}} training_data_csv_path = generate_data(input_features, output_features, d
20
169
test_torchscript_e2e_text_hf_tokenizer_truncated_sequence
n_words: 24
n_ast_errors: 0
complexity: 1
nloc: 16
path: keras/callbacks_test.py
id: 270,083
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
10
Python
23
callbacks_test.py
def test_default_callbacks_no_warning(self): # Test that without the callback no warning is raised model = sequential.Sequential() model.add(keras.layers.Dense(1)) model.compile( "sgd", loss="mse", run_eagerly=test_utils.should_run_eagerly() ) warning_messages = []
84afc5193d38057e2e2badf9c889ea87d80d8fbf
119
https://github.com/keras-team/keras.git
76
def test_default_callbacks_no_warning(self): # Test that without the callback no warning is raised model = seq
15
81
test_default_callbacks_no_warning
n_words: 37
n_ast_errors: 0
complexity: 1
nloc: 16
path: corporate/tests/test_stripe.py
id: 83,907
typing: Access url via key "Location" instead of attribute "url". This is a part of #18777. Signed-off-by: Zixuan James Li <359101898@qq.com>
zulip
9
Python
21
test_stripe.py
def test_redirect_for_billing_home(self) -> None: user = self.example_user("iago") self.login_user(user) response = self.client_get("/billing/") self.assertEqual(response.status_code, 302) self.assertEqual("/upgrade/", response["Location"]) user.realm.plan_type = Realm.PLAN_TYPE_STANDARD_FREE user.realm.save() response = self.client_get("/billing/") self.assertEqual(response.status_code, 200) user.realm.plan_type = Realm.PLAN_TYPE_LIMITED user.realm.save() Customer.objects.create(realm=user.realm, stripe_customer_id="cus_123") response = self.client_get("/billing/") self.assertEqual(response.status_code, 302) self.assertEqual("/upgrade/", response["Location"])
c34ac1fcd428b469e85bcd3070938e4f59e60b18
145
https://github.com/zulip/zulip.git
141
def test_redirect_for_billing_home(self) -> None: user = self.example_user("iago") self.login_u
19
245
test_redirect_for_billing_home
n_words: 8
n_ast_errors: 0
complexity: 1
nloc: 2
path: homeassistant/components/amcrest/camera.py
id: 310,599
Migrate amcrest integration to new async API (#56294)
core
9
Python
8
camera.py
async def _async_get_motion_recording(self) -> bool: return await self._api.async_is_record_on_motion_detection()
7781e308cd7b28c67b6cf339f9b115c7190456fe
16
https://github.com/home-assistant/core.git
14
async def _async_get_motion_recording(self) -> bool: return await self._api.async_is_record_on_motion_detection()
5
28
_async_get_motion_recording
n_words: 17
n_ast_errors: 1
complexity: 1
nloc: 2
path: pandas/tests/io/parser/dtypes/test_dtypes_basic.py
id: 164,083
TST: Remove unused fixtures (#45692) * TST: Remove unused fixtures * Undo a removed fixture * Add back other fixtures * Undo a file * Try undoing this? * Revert "Try undoing this?" This reverts commit 0e56cb04f5e8cb1f7b2ac4c5e6191485bb2fe1ab.
pandas
8
Python
16
test_dtypes_basic.py
def test_decimal_and_exponential(python_parser_only, numeric_decimal, thousands): # GH#31920 decimal_number_check(python_parser_only, numeric_decimal, thousands) @pytest.mark.parametrize("thousands", ["_", None]) @pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
f46df091df3afea25a273f491d1f6b2c7d20b32c
@pytest.mark.parametrize("thousands", ["_", None]) @pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
17
https://github.com/pandas-dev/pandas.git
20
def test_decimal_and_exponential(python_parser_only, numeric_decimal, thousands):
8
84
test_decimal_and_exponential
18
0
1
7
tests/builtin_server/tests.py
201,908
Refs #33476 -- Reformatted code with Black.
django
10
Python
15
tests.py
def test_file_wrapper_uses_sendfile(self): env = {"SERVER_PROTOCOL": "HTTP/1.0"} handler = FileWrapperHandler(None, BytesIO(), BytesIO(), env) handler.run(wsgi_app_file_wrapper) self.assertTrue(handler._used_sendfile) self.assertEqual(handler.stdout.getvalue(), b"") self.assertEqual(handler.stderr.getvalue(), b"")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
72
https://github.com/django/django.git
59
def test_file_wrapper_uses_sendfile(self): env = {"SERVER_PROTOCOL": "HTTP/1.0"} ha
14
119
test_file_wrapper_uses_sendfile
189
0
4
19
PyInstaller/building/api.py
264,011
building: delay merging of reference path and name in DEPENDENCY TOC entry Within MERGE, do not combine the reference path and target file name into a single string and store it as the destination name (the first TOC element). Instead, store the target file name as destination name (the first TOC element) and the reference path into the source name (the second TOC element, which is otherwise left unused for DEPENDENCY TOC entries). Have the CArchive writer perform the final merge, before writing the entry to the PKG file. This ensures that the target name remains unchanged within the TOC, making it subject of de-duplication codepaths and duplication checks. Previously, an entry for DEPENDENCY may end up duplicating another entry (e.g., EXTENSION) at run-time, due to target name containing the reference path prefix. We can also get rid of DEPENDENCY-specific handling in `checkCache` (which returns without any processing if `fnm` contains a colon); this crutch was needed because `PKG.assemble` incorrectly handled DEPENDENCY entries and unnecessarily tried running them through `checkCache`. So we rework that part of `PKG.assemble` to process DEPENDENCY entries as part of general entry handling. At this point, this becomes necessary, because even if we kept the hack in `checkCache`, there is no colon in the `fnm` anymore, so the check would fail, leading to error...
pyinstaller
15
Python
130
api.py
def _process_toc(self, toc, path): # NOTE: unfortunately, these need to keep two separate lists. See the comment in `_merge_dependencies` on why # this is so. toc_keep = [] toc_refs = [] for i, tpl in enumerate(toc): if not tpl[1] in self._dependencies: logger.debug("Adding dependency %s located in %s", tpl[1], path) self._dependencies[tpl[1]] = path # Add entry to list of kept TOC entries toc_keep.append(tpl) else: dep_path = self._get_relative_path(path, self._dependencies[tpl[1]]) # Ignore references that point to the origin package. This can happen if the same resource is listed # multiple times in TOCs (e.g., once as binary and once as data). if dep_path.endswith(path): logger.debug( "Ignoring self-reference of %s for %s, located in %s - duplicated TOC entry?", tpl[1], path, dep_path ) # The entry is a duplicate, and should be ignored (i.e., do not add it to either of output TOCs). continue logger.debug("Referencing %s to be a dependency for %s, located in %s", tpl[1], path, dep_path) # Create new DEPENDENCY entry; under destination path (first element), we store the original destination # path, while source path contains the relative reference path. toc_refs.append((tpl[0], dep_path, "DEPENDENCY")) return toc_keep, toc_refs # TODO: use pathlib.Path.relative_to() instead.
8bd9c6726280aa0094c5e83ffcf31a0dbc7a0336
147
https://github.com/pyinstaller/pyinstaller.git
565
def _process_toc(self, toc, path): # NOTE: unfortunately, these need to keep two separate lists. See the comment in `_merge_dependencies` on why # this is so. toc_keep = [] toc_refs = [] for i, tpl in enumerate(toc): if not tpl[1] in self._dependencies: logger.debug("Adding dependency %s located in %s", tpl[1], path) self._dependencies[tpl[1]] = path # Add entry to list of kept TOC entries toc_keep.append(tpl) else: dep_path = self._get_relative_path(path, self._dependencies[tpl[1]]) # Ignore references that point to the origin package. This can happen if the same resource is listed # multiple times in TOCs (e.g., once as binary and once as data). if dep_path.endswith(path): logger.debug( "Ignoring self-reference of %s for %s, located in %s - duplicated TOC entry?", tpl[1], path, dep_path ) # The entry is a duplicate, and should be ignored (i.e., do not add it to either of output TOCs). continue logger.debug("Referencing %s to be a dependency for %s, located in %s", tpl[1], path, dep_path) # Create new DEPENDENCY entry; under destination path (first element), we store the original destination # path, while source path contains the relative reference path. toc_refs.append((tpl[0], dep_path, "DEPENDENCY")) return toc_keep, toc_refs # TODO: use pathlib.Path.relative_to() instead.
16
236
_process_toc
22
0
3
7
erpnext/hr/doctype/exit_interview/exit_interview.py
66,109
style: format code with black
erpnext
12
Python
20
exit_interview.py
def get_interviews(interviews): import json if isinstance(interviews, str): interviews = json.loads(interviews) if not len(interviews): frappe.throw(_("Atleast one interview has to be selected.")) return interviews
494bd9ef78313436f0424b918f200dab8fc7c20b
41
https://github.com/frappe/erpnext.git
15
def get_interviews(interviews): import json if isinstance(interviews, str): interviews = json.loads(interviews) if not len(interviews): frappe.throw(_("Atleast one interview has to be
10
70
get_interviews
78
0
2
12
pandas/tests/frame/indexing/test_indexing.py
163,444
DEPR: inconsistent series[i:j] slicing with Int64Index GH#45162 (#45324)
pandas
11
Python
60
test_indexing.py
def test_iloc_row_slice_view(self, using_array_manager): df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2)) original = df.copy() # verify slice is view # setting it makes it raise/warn subset = df.iloc[slice(4, 8)] assert np.shares_memory(df[2], subset[2]) msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame" with pytest.raises(com.SettingWithCopyError, match=msg): subset.loc[:, 2] = 0.0 exp_col = original[2].copy() # TODO(ArrayManager) verify it is expected that the original didn't change if not using_array_manager: exp_col._values[4:8] = 0.0 tm.assert_series_equal(df[2], exp_col)
51675d0839480ba7ada44cc93ba8a8df94d33de0
135
https://github.com/pandas-dev/pandas.git
183
def test_iloc_row_slice_view(self, using_array_manager): df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2)) original = df.copy() # verify slice is view # setting it makes it raise/warn subset = df.iloc[slice(4, 8)] assert np.shares_memory(df[2], subset[2]) msg = r"\nA value is tryi
27
202
test_iloc_row_slice_view
26
0
4
9
django/db/models/sql/compiler.py
205,821
Refs #33476 -- Reformatted code with Black.
django
10
Python
23
compiler.py
def _expr_refs_base_model(cls, expr, base_model): if isinstance(expr, Query): return expr.model == base_model if not hasattr(expr, "get_source_expressions"): return False return any( cls._expr_refs_base_model(source_expr, base_model) for source_expr in expr.get_source_expressions() )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
54
https://github.com/django/django.git
97
def _expr_refs_base_model(cls, expr, base_model): if isinstance(expr, Query):
11
83
_expr_refs_base_model
13
0
1
7
erpnext/templates/pages/partners.py
68,076
style: format code with black
erpnext
9
Python
13
partners.py
def get_context(context): partners = frappe.db.sql( , as_dict=True, ) return {"partners": partners, "title": page_title}
494bd9ef78313436f0424b918f200dab8fc7c20b
30
https://github.com/frappe/erpnext.git
7
def get_context(context): partners = frappe.db.sql( , as_dict=True, ) return {"partners": partners, "title": p
8
51
get_context
7
0
1
2
asv_bench/benchmarks/benchmarks.py
155,242
TEST-#5261: port indexing, reindex and fillna benchmarks from pandas github (#5244) Signed-off-by: arunjose696 <arunjose696@gmail.com> Co-authored-by: Anatoly Myachev <anatoliimyachev@mail.com>
modin
10
Python
7
benchmarks.py
def time_getitem_slice(self, shape, index, index_structure): execute(self.data[: self.index_to_query])
7c009c747caa90554607e30b9ac2bd1b190b8c7d
23
https://github.com/modin-project/modin.git
13
def time_getitem_slice(self, shape, index, index_structure): execute(self.data[: self.index_to_query])
8
34
time_getitem_slice
19
0
2
4
python3.10.4/Lib/encodings/punycode.py
217,166
add python 3.10.4 for windows
XX-Net
11
Python
19
punycode.py
def decode(self, input, final=False): if self.errors not in ('strict', 'replace', 'ignore'): raise UnicodeError("Unsupported error handling "+self.errors) return punycode_decode(input, self.errors)
8198943edd73a363c266633e1aa5b2a9e9c9f526
43
https://github.com/XX-net/XX-Net.git
43
def decode(self, input, final=False): if self.errors not in ('strict', 'replace', 'ignore'): raise UnicodeError("Unsupported error handlin
7
70
decode
69
0
1
2
src/prefect/settings.py
53,811
Fix display of settings in api reference
prefect
11
Python
58
settings.py
def unreduce_settings(json): return Settings.parse_raw(json) # Dynamically create a pydantic model that includes all of our settings SettingsFieldsMixin = create_model( "SettingsFieldsMixin", __base__=BaseSettings, **{setting.name: (setting.type, setting.field) for setting in SETTINGS.values()}, ) # Defining a class after this that inherits the dynamic class rather than setting # __base__ to the following class ensures that mkdocstrings properly generates # reference documentation. It does support module-level variables, even if they have # __doc__ set.
60e203e0eef82f49853fca133ed457f600044e8e
13
https://github.com/PrefectHQ/prefect.git
77
def unreduce_settings(json): return Settings.parse_raw(json) # Dynamically create a pydantic model that includes all of our settings SettingsFieldsMixin = create_model( "SettingsFieldsMixin", __base__=BaseSettings, **{setting.name: (setting.type, setting.field) f
14
83
unreduce_settings
6
0
1
3
apps/settings/models.py
188,451
Fix rbac (#7713) * fix: add protocol to token system users * fix: also clear the corresponding session_task when clearing orphan sessions * perf: modify connection token api * fix: fix failure to get system role bindings * perf: add db terminal and magnus components * perf: modify migrations * fix: fix AUTHENTICATION_BACKENDS-related logic * fix: modify backend authentication decision logic * fix: fix asset account password viewing skipping mfa * fix: fix user group authorization permission error * feat: support COS object storage * feat: upgrade dependency jms_storage==0.0.42 * fix: fix koko api issue * feat: modify storage translation info * perf: modify ticket permissions * fix: fix get_queryset for fetching asset-authorized system users * perf: extract ticket * perf: modify cmd filter permissions * fix: modify ticket perm * fix: fix oidc dependency issue Co-authored-by: Eric <xplzv@126.com> Co-authored-by: ibuler <ibuler@qq.com> Co-authored-by: 小冯 <xiaofeng@xiaofengdeMacBook-Pro.local> Co-authored-by: feng626 <1304903146@qq.com>
jumpserver
8
Python
6
models.py
def refresh_setting(self): setattr(settings, self.name, self.cleaned_value) self.refresh_keycloak_to_openid_if_need()
03afa4f9743fb8e6892be62a44b19dc48e0ed7f0
22
https://github.com/jumpserver/jumpserver.git
19
def refresh_setting(self): setattr(settings, self.name, self.cleaned_value) self.refresh_keycloak_to_openid
7
35
refresh_setting
13
0
1
11
src/prefect/client.py
55,904
Block capabilities (PrefectHQ/orion#1898) * Add capabilities to BlockSchemas * Remove type field from BlockSchemas * Create postgres migration, bump API version
prefect
11
Python
13
client.py
async def read_block_schemas(self) -> List[schemas.core.BlockSchema]: response = await self._client.post(f"/block_schemas/filter", json={}) return pydantic.parse_obj_as(List[schemas.core.BlockSchema], response.json())
168483e9cf038a3629f880f838b5aa9291a48411
52
https://github.com/PrefectHQ/prefect.git
34
async def read_block_schemas(self) -> List[schemas.core.BlockSchema]: response = await self._client.post(f"/block_sc
12
84
read_block_schemas
7
0
1
4
wagtail/snippets/views/snippets.py
77,938
Add RevisionsCompare view in snippets
wagtail
11
Python
7
snippets.py
def history_label(self): return _("{model_name} history").format( model_name=self.model._meta.verbose_name )
e0a604e227efbaed6b072d17132e7ca806ef4948
23
https://github.com/wagtail/wagtail.git
31
def history_label(self): return _("{model_name} history").format( model_name=self.model._meta.ver
8
39
history_label
11
0
1
2
doc/source/serve/doc_code/deploying_serve_example.py
127,014
[Serve][Doc] Update the doc code to use new api (#27689) Co-authored-by: Archit Kulkarni <architkulkarni@users.noreply.github.com>
ray
7
Python
11
deploying_serve_example.py
def hello(request): return "hello world" serve.run(hello.bind()) # __deploy_in_k8s_end__ subprocess.check_output(["ray", "stop", "--force"])
786c7f45cfb3495527894f81097712eb76f77e63
7
https://github.com/ray-project/ray.git
10
def hello(request): return "hello world" serve.run(hello.bind
7
55
hello
55
0
1
17
python/ray/train/tests/test_predictor.py
126,321
[AIR - Datasets] Hide tensor extension from UDFs. (#27019) We previously added automatic tensor extension casting on Datasets transformation outputs to allow the user to not have to worry about tensor column casting; however, this current state creates several issues: 1. Not all tensors are supported, which means that we’ll need to have an opaque object dtype (i.e. ndarray of ndarray pointers) fallback for the Pandas-only case. Known unsupported tensor use cases: a. Heterogeneous-shaped (i.e. ragged) tensors b. Struct arrays 2. UDFs will expect a NumPy column and won’t know what to do with our TensorArray type. E.g., torchvision transforms don’t respect the array protocol (which they should), and instead only support Torch tensors and NumPy ndarrays; passing a TensorArray column or a TensorArrayElement (a single item in the TensorArray column) fails. Implicit casting with object dtype fallback on UDF outputs can make the input type to downstream UDFs nondeterministic, where the user won’t know if they’ll get a TensorArray column or an object dtype column. 3. The tensor extension cast fallback warning spams the logs. This PR: 1. Adds automatic casting of tensor extension columns to NumPy ndarray columns for Datasets UDF inputs, meaning the UDFs will never have to see tensor extensions and that the UDF input column types will be consistent and deterministic; this fixes both (2) and (3). 2. No longer implicitly falls back to an opaque object dtype when TensorArray casting fails (e.g. for ragged tensors), and instead raises an error; this fixes (4) but removes our support for (1). 3. Adds a global enable_tensor_extension_casting config flag, which is True by default, that controls whether we perform this automatic casting. Turning off the implicit casting provides a path for (1), where the tensor extension can be avoided if working with ragged tensors in Pandas land. Turning off this flag also allows the user to explicitly control their tensor extension casting, if they want to work with it in their UDFs in order to reap the benefits of less data copies, more efficient slicing, stronger column typing, etc.
ray
12
Python
44
test_predictor.py
def test_predict(convert_to_pandas_mock, convert_from_pandas_mock): input = pd.DataFrame({"x": [1, 2, 3]}) expected_output = input * 4.0 convert_to_pandas_mock.return_value = input convert_from_pandas_mock.return_value = expected_output checkpoint = Checkpoint.from_dict( {"factor": 2.0, PREPROCESSOR_KEY: DummyPreprocessor()} ) predictor = DummyPredictor.from_checkpoint(checkpoint) actual_output = predictor.predict(input) pd.testing.assert_frame_equal(actual_output, expected_output) # Ensure the proper conversion functions are called. convert_to_pandas_mock.assert_called_once_with(input, False) convert_from_pandas_mock.assert_called_once() pd.testing.assert_frame_equal( convert_from_pandas_mock.call_args[0][0], expected_output ) assert convert_from_pandas_mock.call_args[1]["type"] == DataType.PANDAS
df124d0ad58ea7189e88f9fe42c1ee377ade9c8d
133
https://github.com/ray-project/ray.git
113
def test_predict(convert_to_pandas_mock, convert_from_pandas_mock): input = pd.DataFrame({"x": [1, 2, 3]})
25
204
test_predict
14
0
1
8
tests/rest/media/v1/test_filepath.py
247,366
Add type hints to `tests/rest` (#12146) * Add type hints to `tests/rest` * newsfile * change import from `SigningKey`
synapse
10
Python
14
test_filepath.py
def test_remote_media_thumbnail_legacy(self) -> None: self.assertEqual( self.filepaths.remote_media_thumbnail_rel_legacy( "example.com", "GerZNDnDZVjsOtardLuwfIBg", 800, 600, "image/jpeg" ), "remote_thumbnail/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg", )
7e91107be1a4287873266e588a3c5b415279f4c8
32
https://github.com/matrix-org/synapse.git
83
def test_remote_media_thumbnail_legacy(self) -> None: self.assertEqual(
5
56
test_remote_media_thumbnail_legacy
32
0
4
7
airbyte-integrations/connectors/source-hubspot/source_hubspot/api.py
3,378
Source Hubspot: Some incremental CRM objects and engagements (#8887)
airbyte
14
Python
24
api.py
def _update_state(self, latest_cursor): if latest_cursor: new_state = max(latest_cursor, self._state) if self._state else latest_cursor if new_state != self._state: logger.info(f"Advancing bookmark for {self.name} stream from {self._state} to {latest_cursor}") self._state = new_state self._start_date = self._state
25fb7e7fd744f3852ebe8152db5514513f8a2c9a
52
https://github.com/airbytehq/airbyte.git
105
def _update_state(self, latest_cursor): if latest_cursor: new_state = max(latest_cursor, self._state) if self._state else latest_cursor if new_state != self._state: logger.info(f"Advancing bookmark for {self.name} stream from {self._state} to {latest_cursor}") self._state = new_state self._start_date = self._state
10
99
_update_state
7
0
1
3
python/ray/tune/utils/resource_updater.py
146,811
[Tune] Move resource updater out of trial executor (#23178) * simplify trial executor * update test * fix: proper resource update before initialization * add test to BUILD * add doc for resource updater
ray
7
Python
7
resource_updater.py
def get_num_cpus(self) -> int: self.update_avail_resources() return self._avail_resources.cpu
cc1728120f7d49b0016d190971bc8056d3245c5d
18
https://github.com/ray-project/ray.git
20
def get_num_cpus(self) -> int: self.update_avail_resources() return self._avail_resources.
6
30
get_num_cpus
58
0
1
21
tests/sentry/api/endpoints/test_user_notification_fine_tuning.py
100,143
ref(tests): Remove `get_valid_response()` (#34822)
sentry
12
Python
42
test_user_notification_fine_tuning.py
def test_permissions(self): new_user = self.create_user(email="b@example.com") new_org = self.create_organization(name="New Org") new_team = self.create_team(name="New Team", organization=new_org, members=[new_user]) new_project = self.create_project( organization=new_org, teams=[new_team], name="New Project" ) data = {str(new_org.id): 0} self.get_error_response("me", "reports", status_code=403, **data) assert not UserOption.objects.filter( user=self.user, organization=new_org, key="reports" ).exists() data = {str(new_project.id): 1} self.get_error_response("me", "alerts", status_code=403, **data) value = NotificationSetting.objects.get_settings( ExternalProviders.EMAIL, NotificationSettingTypes.ISSUE_ALERTS, user=self.user, project=new_project, ) assert value == NotificationSettingOptionValues.DEFAULT
096b5511e244eecd8799b2a0324655207ce8985e
178
https://github.com/getsentry/sentry.git
221
def test_permissions(self): new_user = self.create_user(email="b@example.com") new_org = self.create_organization(name="New Org") new_team = self.create_team(name="New Team", organization=new_org, members=[new_user]) new_project = self.create_project( organization=new_org, teams=[new_team], name="New Project" ) data = {str(new_org.id): 0} self.get_error_response("me", "reports", status_code=403, **data) assert not UserOption.objects.filter( user=s
36
283
test_permissions
386
0
1
83
ivy_tests/test_core/test_container.py
213,790
renamed dev_str arg to dev for all methods.
ivy
17
Python
79
test_container.py
def test_container_structural_diff(dev, call): # all different keys or shapes container_0 = Container({'a': ivy.array([1], dev=dev), 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}) container_1 = Container({'a': ivy.array([[4]], dev=dev), 'b': {'c': ivy.array([[[5]]], dev=dev), 'e': ivy.array([3], dev=dev)}}) container_diff = ivy.Container.structural_diff(container_0, container_1) assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1])) assert np.equal(ivy.to_numpy(container_diff.a.diff_1), np.array([[4]])) assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2])) assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([[[5]]])) assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3])) assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([3])) container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only') assert container_diff_diff_only.to_dict() == container_diff.to_dict() container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only') assert container_diff_same_only.to_dict() == {} # some different shapes container_0 = Container({'a': ivy.array([1], dev=dev), 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}) container_1 = Container({'a': ivy.array([4], dev=dev), 'b': {'c': ivy.array([[5]], dev=dev), 'd': ivy.array([6], dev=dev)}}) container_diff = ivy.Container.structural_diff(container_0, container_1) assert np.equal(ivy.to_numpy(container_diff.a), np.array([1])) assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2])) assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5])) assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3])) container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only') assert 'a' not in container_diff_diff_only assert 'b' in container_diff_diff_only assert 'c' in container_diff_diff_only['b'] assert 'd' not in container_diff_diff_only['b'] container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only') assert 'a' in container_diff_same_only assert 'b' in container_diff_same_only assert 'c' not in container_diff_same_only['b'] assert 'd' in container_diff_same_only['b'] # all different keys container_0 = Container({'a': ivy.array([1], dev=dev), 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}) container_1 = Container({'e': ivy.array([4], dev=dev), 'f': {'g': ivy.array([5], dev=dev), 'h': ivy.array([6], dev=dev)}}) container_diff = ivy.Container.structural_diff(container_0, container_1) assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1])) assert np.equal(ivy.to_numpy(container_diff.b.diff_0.c), np.array([2])) assert np.equal(ivy.to_numpy(container_diff.b.diff_0.d), np.array([3])) assert np.equal(ivy.to_numpy(container_diff.e.diff_1), np.array([4])) assert np.equal(ivy.to_numpy(container_diff.f.diff_1.g), np.array([5])) assert np.equal(ivy.to_numpy(container_diff.f.diff_1.h), np.array([6])) container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only') assert container_diff_diff_only.to_dict() == container_diff.to_dict() container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only') assert container_diff_same_only.to_dict() == {} # some different keys container_0 = Container({'a': ivy.array([1], dev=dev), 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}) 
container_1 = Container({'a': ivy.array([4], dev=dev), 'b': {'c': ivy.array([5], dev=dev), 'e': ivy.array([6], dev=dev)}}) container_diff = ivy.Container.structural_diff(container_0, container_1) assert np.equal(ivy.to_numpy(container_diff.a), np.array([1])) assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2])) assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3])) assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([6])) container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only') assert 'a' not in container_diff_diff_only assert 'b' in container_diff_diff_only assert 'c' not in container_diff_diff_only['b'] assert 'd' in container_diff_diff_only['b'] assert 'e' in container_diff_diff_only['b'] container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only') assert 'a' in container_diff_same_only assert 'b' in container_diff_same_only assert 'c' in container_diff_same_only['b'] assert 'd' not in container_diff_same_only['b'] assert 'e' not in container_diff_same_only['b'] # all same container_0 = Container({'a': ivy.array([1], dev=dev), 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}) container_1 = Container({'a': ivy.array([4], dev=dev), 'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([6], dev=dev)}}) container_diff = ivy.Container.structural_diff(container_0, container_1) assert np.equal(ivy.to_numpy(container_diff.a), np.array([1])) assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2])) assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3])) container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only') assert container_diff_diff_only.to_dict() == {} container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only') assert container_diff_same_only.to_dict() == container_diff.to_dict()
d743336b1f3654cd0315f380f43eed4116997c1d
1,556
https://github.com/unifyai/ivy.git
896
def test_container_structural_diff(dev, call): # all different keys or shapes container_0 = Container({'a': ivy.array([1], dev=dev), 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}) container_1 = Container({'a': ivy.array([[4]], dev=dev), 'b': {'c': ivy.array([[[5]]], dev=dev), 'e': ivy.array([3], dev=dev)}}) container_diff = ivy.Container.structural_diff(container_0, container_1) assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1])) assert np.equal(ivy.to_numpy(container_diff.a.diff_1), np.array([[4]])) assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2])) assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([[[5]]])) assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3])) assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([3])) container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only') assert container_diff_diff_only.to_dict() == container_diff.to_dict() container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only') assert container_diff_same_only.to_dict() == {} # some different shapes container_0 = Container({'a': ivy.array([1], dev=dev), 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}) container_1 = Container({'a': ivy.array([4], dev=dev), 'b': {'c': ivy.array([[5]], dev=dev), 'd': ivy.array([6], dev=dev)}}) container_diff = ivy.Container.structural_diff(container_0, container_1) assert np.equal(ivy.to_numpy(container_diff.a), np.array([1])) assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2])) assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5])) assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3])) container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only') assert 'a' not in container_diff_diff_only assert 'b' in container_diff_diff_only assert 'c' in container_diff_diff_only['b'] assert 'd' not in container_diff_diff_only['b'] container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only') assert 'a' in container_diff_same_only assert 'b' in container_diff_same_only assert 'c' not in container_diff_same_only['b'] assert 'd' in container_diff_same_only['b'] # all different keys container_0 = Container({'a': ivy.array([1], dev=dev), 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}) container_1 = Container({'e': ivy.array([4], dev=dev), 'f': {'g': ivy.array([5], dev=dev), 'h': ivy.array([6], dev=dev)}}) container_diff = ivy.Container.structural_diff(container_0, container_1) assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1])) assert np.equal(ivy.to_numpy(container_diff.b.diff_0.c), np.array([2])) assert np.equal(ivy.to_numpy(container_diff.b.diff_0.d), np.array([3])) assert np.equal(ivy.to_numpy(container_diff.e.diff_1), np.array([4])) assert np.equal(ivy.to_numpy(container_diff.f.diff_1.g), np.array([5])) assert np.equal(ivy.to_numpy(container_diff.f.diff_1.h), np.array([6])) container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only') assert container_diff_diff_only.to_dict() == container_diff.to_dict() container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only') assert container_diff_same_only.to_dict() == {} # some different keys container_0 = Container({'a': ivy.array([1], dev=dev),
27
2,474
test_container_structural_diff
27
0
1
8
tests/test_api_validate.py
187,151
plugin.api.validate: implement ValidationError - Implement `ValidationError` - Inherit from `ValueError` to preserve backwards compatiblity - Allow collecting multiple errors (AnySchema) - Keep an error stack of parent `ValidationError`s or other exceptions - Format error stack when converting error to string - Raise `ValidationError` instead of `ValueError` - Add error contexts where it makes sense - Add schema names to error instances - Add and update tests
streamlink
11
Python
24
test_api_validate.py
def test_parse_json(self): assert validate(parse_json(), '{"a": ["b", true, false, null, 1, 2.3]}') == {"a": ["b", True, False, None, 1, 2.3]} with self.assertRaises(ValueError) as cm: validate(parse_json(), "invalid") assert_validationerror(cm.exception, )
3d44da082b3ba202b9d0557bfd8ce747a1d7960c
60
https://github.com/streamlink/streamlink.git
58
def test_parse_json(self): assert validate(parse_json(), '{"a": ["b", true, false, null, 1, 2.3]}') == {"a": ["b", True, False, None, 1, 2.3]} with self.assertRaises(ValueError) as cm: validate(parse_json(),
9
99
test_parse_json
129
0
3
13
python/ccxt/aax.py
15,128
add fetchdeposits
ccxt
10
Python
84
aax.py
def fetch_deposits(self, code=None, since=None, limit=None, params={}): self.load_markets() request = { # status Not required - Deposit status, "1: pending,2: confirmed, 3:failed" # currency: Not required - String Currency # startTime Not required Integer Default: 90 days from current timestamp. # endTime Not required Integer Default: present timestamp. } currency = None if code is not None: currency = self.currency(code) request['currency'] = currency['id'] if since is not None: request['startTime'] = since # default 90 days response = self.privateGetAccountDeposits(self.extend(request, params)) # { "code": 1, # "data": [{ # "currency": "USDT", # "network": "USDT", # "quantity": "19.000000000000", # "txHash": "75eb2e5f037b025c535664c49a0f7cc8f601dae218a5f4fe82290ff652c43f3d", # "address": "1GkB7Taf7uttcguKEb2DmmyRTnihskJ9Le", # "status": "2", # "createdTime": "2021-01-08T19:45:01.354Z", # "updatedTime": "2021-01-08T20:03:05.000Z", # }] # "message": "success", # "ts": 1573561743499 # } deposits = self.safe_value(response, 'data', []) return self.parse_transactions(deposits, code, since, limit)
9c7c3aab121a5e6be89197156432970625688a70
110
https://github.com/ccxt/ccxt.git
451
def fetch_deposits(self, code=None, since=None, limit=None, params={}): self.load_markets() request = { # status Not required - Deposit status, "1: pending,2: confirmed, 3:failed"
15
191
fetch_deposits
84
0
6
26
plugins/dbms/db2/fingerprint.py
123,567
Fixing DeprecationWarning (logger.warn)
sqlmap
15
Python
44
fingerprint.py
def checkDbms(self): if not conf.extensiveFp and Backend.isDbmsWithin(DB2_ALIASES): setDbms(DBMS.DB2) return True logMsg = "testing %s" % DBMS.DB2 logger.info(logMsg) result = inject.checkBooleanExpression("[RANDNUM]=(SELECT [RANDNUM] FROM SYSIBM.SYSDUMMY1)") if result: logMsg = "confirming %s" % DBMS.DB2 logger.info(logMsg) result = inject.checkBooleanExpression("JULIAN_DAY(CURRENT DATE) IS NOT NULL") if not result: warnMsg = "the back-end DBMS is not %s" % DBMS.DB2 logger.warning(warnMsg) return False version = self._versionCheck() if version: Backend.setVersion(version) setDbms("%s %s" % (DBMS.DB2, Backend.getVersion())) else: setDbms(DBMS.DB2) return True else: warnMsg = "the back-end DBMS is not %s" % DBMS.DB2 logger.warning(warnMsg) return False
df4293473d2fb6e887e31522cab5aff95e201581
149
https://github.com/sqlmapproject/sqlmap.git
358
def checkDbms(self): if not conf.extensiveFp and Backend.isDbmsWithin(DB2_ALIASES): setDbms(DBMS.DB2) return True logMsg = "testing %s" % DBMS.DB2 logger.info(logMsg) result = inject.checkBooleanExpression("[RANDNUM]=(SELECT [RANDNUM] FROM SYSIBM.SYSDUMMY1)") if result:
22
258
checkDbms
42
0
1
12
netbox/dcim/tests/test_natural_ordering.py
266,185
Clean up tests
netbox
10
Python
30
test_natural_ordering.py
def setUpTestData(cls): site = Site.objects.create(name='Test Site 1', slug='test-site-1') manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1') devicetype = DeviceType.objects.create( manufacturer=manufacturer, model='Test Device Type 1', slug='test-device-type-1' ) devicerole = DeviceRole.objects.create( name='Test Device Role 1', slug='test-device-role-1', color='ff0000' ) Device.objects.create( device_type=devicetype, device_role=devicerole, name='Test Device 1', site=site )
d4a231585ac9a25d9739552d8c9e433dbf9398af
99
https://github.com/netbox-community/netbox.git
130
def setUpTestData(cls): site = Site.objects.create(name='Test Site 1', slug='test-site-1') manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1') devicetype = DeviceType.objects.create( manufacturer=manufacturer, model='Test Device Type 1', slug='test-device-type-1' ) devicerole = DeviceRole.objects.create( name='Test Devic
19
166
setUpTestData
78
0
1
26
tests/ludwig/benchmarking/test_resource_usage_tracker.py
7,396
adding hardware usage and software packages tracker (#2195) * adding hardware usage and software packages tracker * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed stdout redirection to null during import * reverting * updated `tracker.py` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * improved docstring style * removing unnecessary `torch.cuda.synchronize()` call * using the `multiprocessing` library instead of the `@processify` wrapper to spawn the `Tracker` monitor process * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * style changes * adding s3fs to `requirements.txt` * name change to `resource_usage_tracker.py` * added test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tag name validation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * flake8 updates * fixed test file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update test file * fixing empty utilization (due to very short experiment) * added # noqa E402 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
ludwig
12
Python
59
test_resource_usage_tracker.py
def test_resource_usage_tracker(tmpdir): train_df = pd.DataFrame(np.random.normal(0, 1, size=(100, 3)), columns=["input_1", "input_2", "output_1"]) eval_df = pd.DataFrame(np.random.normal(0, 1, size=(20, 3)), columns=["input_1", "input_2", "output_1"]) config = { "input_features": [{"name": "input_1", "type": "number"}, {"name": "input_2", "type": "number"}], "output_features": [{"name": "output_1", "type": "number"}], "combiner": {"type": "concat", "output_size": 14}, TRAINER: {"epochs": 1}, } model = LudwigModel(config=config, backend="local") with ResourceUsageTracker(tag="train", output_dir=tmpdir, logging_interval=0.05, num_examples=len(train_df)): model.train( dataset=train_df, output_directory=tmpdir, skip_save_training_description=True, skip_save_training_statistics=True, skip_save_model=True, skip_save_progress=True, skip_save_log=True, skip_save_processed_input=True, ) with ResourceUsageTracker(tag="evaluate", output_dir=tmpdir, logging_interval=0.05, num_examples=len(eval_df)): model.evaluate(dataset=eval_df) assert os.path.exists(os.path.join(tmpdir, "train_resource_usage_metrics.json")) assert os.path.exists(os.path.join(tmpdir, "evaluate_resource_usage_metrics.json")) shutil.rmtree(tmpdir)
ae8de108e14111afef08a5e9c429bb19e368c0b3
286
https://github.com/ludwig-ai/ludwig.git
244
def test_resource_usage_tracker(tmpdir): train_df = pd.DataFrame(np.random.normal(0, 1, size=(100, 3)), columns=["input_1", "input_2", "output_1"]) eval_df = pd.DataFrame(np.random.normal(0, 1, size=(20, 3)), columns=["input_1", "input_2", "output_1"]) config = { "input_features": [{"name": "input_1", "type": "number"}, {"name": "input_2", "type": "number"}], "output_features": [{"name": "output_1", "typ
38
464
test_resource_usage_tracker
83
0
11
81
code/default/smart_router/local/ip_region.py
219,351
Roll back 4.6.8 from upgrade
XX-Net
13
Python
61
ip_region.py
def generate_db(self): keeprange = ( '0.0.0.0/8', # local network '10.0.0.0/8', # private network '100.64.0.0/10', # shared address space (carrier-grade NAT) '127.0.0.0/8', # loopback addresses '169.254.0.0/16', # link-local '172.16.0.0/12', # private network '192.0.0.0/24', # reserved addresses (IANA) '192.0.2.0/24', # TEST-NET-1 '192.88.99.0/24', # 6to4 relay '192.168.0.0/16', # private network '198.18.0.0/15', # network benchmark testing '198.51.100.0/24', # TEST-NET-2 '203.0.113.0/24', # TEST-NET-3 # contiguous addresses up to the end of the IPv4 space, handled specially # '224.0.0.0/4', # multicast addresses (Class D) # '240.0.0.0/4', # reserved addresses (Class E) ) keeplist = [] for iprange in keeprange: ip, mask = iprange.split('/') keeplist.append((utils.ip_string_to_num(ip), 32 - int(mask))) mask_dict = dict((str(2 ** i), i) for i in range(8, 25))
42dde73cebb1d524b6adfcde69fd947ed9b2440b
537
https://github.com/XX-net/XX-Net.git
487
def generate_db(self): keeprange = ( '0.0.0.0/8', # local network '10.0.0.0/8', # private network '100.64.0.0/10', # shared address space (carrier-grade NAT) '127.0.0.0/8', # loopback addresses '169.254.0.0/16', # link-local '172.16.0.0/12', # private network '192.0.
17
182
generate_db
12
0
2
4
python/ray/runtime_env.py
146,321
[runtime env] Deletes the proto cache on RuntimeEnv (#22944) Mainly the following things: - This PR deletes the proto cache on RuntimeEnv, ensuring that the user's modification of RuntimeEnv can take effect in the Proto message. - validate whole runtime env when serialize runtime_env. - overload method `__setitem__` to parse and validate field when it has to modify.
ray
9
Python
11
runtime_env.py
def py_container_image(self) -> Optional[str]: if not self.has_py_container(): return None return self["container"].get("image", "")
0c5440ee724a9f2b0fd94b7e6055c5be71968a84
32
https://github.com/ray-project/ray.git
36
def py_container_image(self) -> Optional[str]: if not self.has_py_container(): return None return self["container"].get("image", "")
6
56
py_container_image
99
0
2
28
sklearn/ensemble/tests/test_bagging.py
260,854
MAINT rename and deprecate `base_estimator` in favor of `estimator` in ensemble classes (#23819) Co-authored-by: Adrian Trujillo Duron <adrian.td96@gmail.com> Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
scikit-learn
13
Python
80
test_bagging.py
def test_oob_score_classification(): # Check that oob prediction is a good estimation of the generalization # error. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split( iris.data, iris.target, random_state=rng ) for estimator in [DecisionTreeClassifier(), SVC()]: clf = BaggingClassifier( estimator=estimator, n_estimators=100, bootstrap=True, oob_score=True, random_state=rng, ).fit(X_train, y_train) test_score = clf.score(X_test, y_test) assert abs(test_score - clf.oob_score_) < 0.1 # Test with few estimators warn_msg = ( "Some inputs do not have OOB scores. This probably means too few " "estimators were used to compute any reliable oob estimates." ) with pytest.warns(UserWarning, match=warn_msg): clf = BaggingClassifier( estimator=estimator, n_estimators=1, bootstrap=True, oob_score=True, random_state=rng, ) clf.fit(X_train, y_train)
306608e622bb3fb55095a97405b9ef0f1ad901d9
151
https://github.com/scikit-learn/scikit-learn.git
364
def test_oob_score_classification(): # Check that oob prediction is a good estimation of the generalization # error. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split( iris.data, iris.target, random_state=rng ) for estimator in [DecisionTreeClassifier(), SVC()]: clf = BaggingClassifier( estimator=estimator, n_estimators=100, bootstrap=True, oob_score=True, random_state=rng, ).fit(X_train, y_train) test_score = clf.score(X_test, y_test) assert abs(test_score - clf.oob_score_) < 0.1 # Test with few estimators warn_msg = ( "Some inputs do not have OOB scores. This probably means too few " "estimators were used to compute any reliable oob estimates." ) with pytest.
30
227
test_oob_score_classification
106
0
2
40
onnx/backend/test/case/node/if.py
254,806
Use Python type annotations rather than comments (#3962) * These have been supported since Python 3.5. ONNX doesn't support Python < 3.6, so we can use the annotations. Diffs generated by https://pypi.org/project/com2ann/. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Remove MYPY conditional logic in gen_proto.py It breaks the type annotations and shouldn't be needed. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Get rid of MYPY bool from more scripts Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * move Descriptors class above where its referenced in type annotation Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fixes Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * remove extra blank line Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotations Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotation in gen_docs Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix Operators.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix TestCoverage.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix protoc-gen-mypy.py Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
onnx
12
Python
71
if.py
def export_if() -> None: # Given a bool scalar input cond. # return constant tensor x if cond is True, otherwise return constant tensor y. then_out = onnx.helper.make_tensor_value_info('then_out', onnx.TensorProto.FLOAT, [5]) else_out = onnx.helper.make_tensor_value_info('else_out', onnx.TensorProto.FLOAT, [5]) x = np.array([1, 2, 3, 4, 5]).astype(np.float32) y = np.array([5, 4, 3, 2, 1]).astype(np.float32) then_const_node = onnx.helper.make_node( 'Constant', inputs=[], outputs=['then_out'], value=onnx.numpy_helper.from_array(x) ) else_const_node = onnx.helper.make_node( 'Constant', inputs=[], outputs=['else_out'], value=onnx.numpy_helper.from_array(y) ) then_body = onnx.helper.make_graph( [then_const_node], 'then_body', [], [then_out] ) else_body = onnx.helper.make_graph( [else_const_node], 'else_body', [], [else_out] ) if_node = onnx.helper.make_node( 'If', inputs=['cond'], outputs=['res'], then_branch=then_body, else_branch=else_body ) cond = np.array(1).astype(bool) res = x if cond else y expect(if_node, inputs=[cond], outputs=[res], name='test_if', opset_imports=[onnx.helper.make_opsetid("", 11)])
83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd
287
https://github.com/onnx/onnx.git
483
def export_if() -> None: # Given a bool scalar input cond. # return constant tensor x if cond is True, otherwise return constant tensor y. then_out = onnx.helper.make_tensor_value_info('then_out', onnx.TensorProto.FLOAT, [5]) else_out = onnx.helper.make_tensor_value_info('else_out', onnx.TensorProto.FLOAT, [5]) x = np.array([1, 2, 3, 4, 5]).astype(np.float32) y = np.array([5, 4, 3, 2, 1]).astyp
35
443
export_if
45
0
1
11
tests/api/common/test_mark_tasks.py
43,987
Use `DagRun.run_id` instead of `execution_date` when updating state of TIs(UI & REST API) (#18724) We can now use run_id as well as execution_date to update states of task instances Co-authored-by: Tzu-ping Chung <uranusjr@gmail.com> Co-authored-by: Ash Berlin-Taylor <ash_github@firemirror.com>
airflow
10
Python
37
test_mark_tasks.py
def test_set_running_dag_run_to_success(self): date = self.execution_dates[0] dr = self._create_test_dag_run(State.RUNNING, date) middle_time = timezone.utcnow() self._set_default_task_instance_states(dr) altered = set_dag_run_state_to_success(dag=self.dag1, run_id=dr.run_id, commit=True) # All except the SUCCESS task should be altered. expected = self._get_num_tasks_with_starting_state(State.SUCCESS, inclusion=False) assert len(altered) == expected self._verify_dag_run_state(self.dag1, date, State.SUCCESS) self._verify_task_instance_states(self.dag1, date, State.SUCCESS) self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
2b4bf7fe67fc656ceb7bdaad36453b7a5b83ef04
123
https://github.com/apache/airflow.git
121
def test_set_running_dag_run_to_success(self): date = self.execution_dates[0]
26
185
test_set_running_dag_run_to_success
339
1
14
50
networkx/algorithms/smallworld.py
176,777
Use isort with pre-commit to enforce import guidelines (#5659) * Add isort to pre-commit * Run isort on all python files (except __init__.py ones)
networkx
17
Python
216
smallworld.py
def lattice_reference(G, niter=5, D=None, connectivity=True, seed=None): import numpy as np from networkx.utils import cumulative_distribution, discrete_sequence local_conn = nx.connectivity.local_edge_connectivity if len(G) < 4: raise nx.NetworkXError("Graph has less than four nodes.") # Instead of choosing uniformly at random from a generated edge list, # this algorithm chooses nonuniformly from the set of nodes with # probability weighted by degree. G = G.copy() keys, degrees = zip(*G.degree()) # keys, degree cdf = cumulative_distribution(degrees) # cdf of degree nnodes = len(G) nedges = nx.number_of_edges(G) if D is None: D = np.zeros((nnodes, nnodes)) un = np.arange(1, nnodes) um = np.arange(nnodes - 1, 0, -1) u = np.append((0,), np.where(un < um, un, um)) for v in range(int(np.ceil(nnodes / 2))): D[nnodes - v - 1, :] = np.append(u[v + 1 :], u[: v + 1]) D[v, :] = D[nnodes - v - 1, :][::-1] niter = niter * nedges # maximal number of rewiring attempts per 'niter' max_attempts = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2)) for _ in range(niter): n = 0 while n < max_attempts: # pick two random edges without creating edge list # choose source node indices from discrete distribution (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed) if ai == ci: continue # same source, skip a = keys[ai] # convert index to label c = keys[ci] # choose target uniformly from neighbors b = seed.choice(list(G.neighbors(a))) d = seed.choice(list(G.neighbors(c))) bi = keys.index(b) di = keys.index(d) if b in [a, c, d] or d in [a, b, c]: continue # all vertices should be different # don't create parallel edges if (d not in G[a]) and (b not in G[c]): if D[ai, bi] + D[ci, di] >= D[ai, ci] + D[bi, di]: # only swap if we get closer to the diagonal G.add_edge(a, d) G.add_edge(c, b) G.remove_edge(a, b) G.remove_edge(c, d) # Check if the graph is still connected if connectivity and local_conn(G, a, b) == 0: # Not connected, revert the swap G.remove_edge(a, d) G.remove_edge(c, b) G.add_edge(a, b) G.add_edge(c, d) else: break n += 1 return G @py_random_state(3) @not_implemented_for("directed") @not_implemented_for("multigraph")
5c0b11afb4c0882a070d522ef3fa41482ba935d3
@py_random_state(3) @not_implemented_for("directed") @not_implemented_for("multigraph")
516
https://github.com/networkx/networkx.git
976
def lattice_reference(G, niter=5, D=None, connectivity=True, seed=None): import numpy as np from networkx.utils import cumulative_distribution, discrete_sequence local_conn = nx.connectivity.local_edge_connectivity if len(G) < 4: raise nx.NetworkXError("Graph has less than four nodes.") # Instead of choosing uniformly at random from a generated edge list, # this algorithm chooses nonuniformly from the set of nodes with # probability weighted by degree. G = G.copy() keys, degrees = zip(*G.degree()) # keys, degree cdf = cumulative_distribution(degrees) # cdf of degree nnodes = len(G) nedges = nx.number_of_edges(G) if D is None: D = np.zeros((nnodes, nnodes)) un = np.arange(1, nnodes) um = np.arange(nnodes - 1, 0, -1) u = np.append((0,), np.where(un < um, un, um)) for v in range(int(np.ceil(nnodes / 2))): D[nnodes - v - 1, :] = np.append(u[v + 1 :], u[: v + 1]) D[v, :] = D[nnodes - v - 1, :][::-1] niter = niter * nedges # maximal number of rewiring attempts per 'niter' max_attempts = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2)) for _ in range(niter): n = 0 while n < max_attempts: # pick two random edges without creating edge list # choose source node indices from discrete distribution (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed) if ai == ci: continue # same source, skip a = keys[ai] # convert index to label c = keys[ci] # choose target uniformly from neighbors b = seed.choice(list(G.neighbors(a))) d = seed.choice(list(G.neighbors(c))) bi = keys.index(b) di = keys.index(d) if b in [a, c, d] or d in [a, b, c]: continue # all vertices should be different # don't create parallel edges if (d not in G[a]) and (b not in G[c]): if D[ai, bi] + D[ci, di] >= D[ai, ci] + D[bi, di]: # only swap if we get closer to the diagonal
57
829
lattice_reference
69
1
1
16
tests/t5/test_modeling_tf_t5.py
36,213
TF: add beam search tests (#16202)
transformers
13
Python
62
test_modeling_tf_t5.py
def test_beam_search_generate(self): model = TFT5ForConditionalGeneration.from_pretrained("t5-small") tokenizer = T5Tokenizer.from_pretrained("t5-small") sentences = ["I really love my", "Translate English to German: the transformers are truly amazing"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids generation_kwargs = { "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids], "no_repeat_ngram_size": 3, "do_sample": False, "repetition_penalty": 2.2, "num_beams": 4, } output_ids = model.generate(input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) expected_output_string = ["Ich liebe es so sehr!", "die Transformatoren sind wirklich erstaunlich"] self.assertListEqual(expected_output_string, output_strings) @require_tf @require_sentencepiece @require_tokenizers
204c54d411c2b4c7f31405203533a51632f46ab1
@require_tf @require_sentencepiece @require_tokenizers
122
https://github.com/huggingface/transformers.git
190
def test_beam_search_generate(self): model = TFT5ForConditionalGeneration.from_pretrained("t5-small"
22
216
test_beam_search_generate
37
0
1
6
tests/cli/test_profile.py
57,678
Refactor tests for clarity
prefect
15
Python
30
test_profile.py
def authorized_cloud(self): # attempts to reach the Cloud 2 workspaces endpoint implies a good connection # to Prefect Cloud as opposed to a hosted Prefect Orion instance with respx.mock: authorized = respx.get( "https://mock-cloud.prefect.io/api/me/workspaces", ).mock(return_value=Response(200, json={})) yield authorized
a0b82ae203029e65ba4dad2a93e545960eaca6ab
36
https://github.com/PrefectHQ/prefect.git
105
def authorized_cloud(self): # attempts to reach the Cloud 2 workspaces endpoint implies a good connection # to Prefect Cloud as opposed to a hosted Prefect Orion instance with respx.mock: authorized = respx.get( "https://mock-cloud.prefect.io/api/me/workspaces", ).mock(return_value=Response(200, json={})) yield authorized
9
64
authorized_cloud
24
0
1
18
test/test_pipeline_yaml.py
257,130
Change YAML version exception into a warning (#2385) * Change exception into warning, add strict_version param, and remove compatibility between schemas * Simplify update_json_schema * Rename unstable into master * Prevent validate_config from changing the config to validate * Fix version validation and add tests * Rename master into ignore * Complete parameter rename Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
haystack
12
Python
21
test_pipeline_yaml.py
def test_load_yaml_missing_version(tmp_path): with open(tmp_path / "tmp_config.yml", "w") as tmp_file: tmp_file.write( ) with pytest.raises(PipelineConfigError, match="Validation failed") as e: Pipeline.load_from_yaml(path=tmp_path / "tmp_config.yml") assert "version" in str(e)
4eec2dc45ee60e8b8780aa4f956aea8ad3624da3
54
https://github.com/deepset-ai/haystack.git
69
def test_load_yaml_missing_version(tmp_path): with open(tmp_path / "tmp_config.yml", "w") as tmp_file: tmp_file.write( ) with pytest.raises(PipelineConfigError, match="Validation failed") as e: Pipeline.load_from_yaml(path=tmp_path / "tmp_config.yml") assert "version" in str(e)
14
103
test_load_yaml_missing_version
66
0
6
26
sympy/tensor/tensor.py
200,587
TensMul._dedupe_indices: rename variable
sympy
13
Python
44
tensor.py
def _dedupe_indices(new, exclude): exclude = set(exclude) dums_new = set(get_dummy_indices(new)) conflicts = dums_new.intersection(exclude) if len(conflicts) == 0: return None exclude.update(dums_new) exclude_for_gen = [(i, None) for i in exclude] gen = _IndexStructure._get_generator_for_dummy_indices(exclude_for_gen) repl = {} for d in conflicts: if -d in repl.keys(): continue newname = gen(d.tensor_index_type) new_d = d.func(newname, *d.args[1:]) repl[d] = new_d repl[-d] = -new_d if len(repl) == 0: return None new_renamed = new._replace_indices(repl) return new_renamed
3e01222efcf2cf445f441eddc71e1c8194cee216
148
https://github.com/sympy/sympy.git
257
def _dedupe_indices(new, exclude): exclude = set(exclude) dums_new = set(get_dummy_indices(new)) conflicts = dums_new.intersection(exclude) if len(confl
25
240
_dedupe_indices
23
0
1
10
tests/cli/test_deployment_preview.py
56,900
Fix path for deployments test files
prefect
13
Python
23
test_deployment_preview.py
def test_preview_works_for_unnamed_deployments(deployments_path): result = invoke_and_assert( [ "deployment", "preview", str(deployments_path / "single_unnamed_deployment.py"), ], expected_output_contains="kind: Job", ) assert "Preview for <unnamed deployment specification>" in result.stdout
d97eb751d3d526bae64b9d9580c75ebc0623121f
35
https://github.com/PrefectHQ/prefect.git
89
def test_preview_works_for_unnamed_deployments(deployments_path): result = invoke_and_assert( [ "deployment", "preview", str(deployments_path / "single_unnamed_deployment.py"), ], expected_
7
64
test_preview_works_for_unnamed_deployments
18
0
2
8
python3.10.4/Lib/bdb.py
221,122
add python 3.10.4 for windows
XX-Net
10
Python
17
bdb.py
def clear_bpbynumber(self, arg): try: bp = self.get_bpbynumber(arg) except ValueError as err: return str(err) bp.deleteMe() self._prune_breaks(bp.file, bp.line) return None
8198943edd73a363c266633e1aa5b2a9e9c9f526
47
https://github.com/XX-net/XX-Net.git
82
def clear_bpbynumber(self, arg): try: bp = self.get_b
12
79
clear_bpbynumber
17
1
1
7
tests/components/picnic/test_services.py
290,579
Add service for adding products to a Picnic order (#67877) * Add Picnic services for searching products and adding products to the cart * Improve the Picnic services implementation and add unit tests * Fix pre-commit check issues * Fix comments and example product name * Remove search service, update add_product service schema * Fix pylint suggestion * Add more tests and removed unused code * Remove code needed for the removed service, clean tests from obvious comments and add type hints * Remove unused import * Remove unnecessary comments and simplify getting the config entry id Co-authored-by: Allen Porter <allen.porter@gmail.com> * Don't use hass.data in tests, make device id mandatory for service * Rewrite all service tests so using lru.cache is not needed * Add test for uncovered line in _product_search() * Require a config entry id as service parameter instead of device id * Use explicit check in get_api_client() and raise HomeAssistantError * Fix HomeAssistantError import, fix services tests * Change HomeAssistantError to ValueError when config entry is not found Co-authored-by: Allen Porter <allen.porter@gmail.com>
core
11
Python
14
test_services.py
def picnic_api_client(): with patch( "homeassistant.components.picnic.create_picnic_client" ) as create_picnic_client_mock: picnic_client_mock = create_picnic_api_client(UNIQUE_ID) create_picnic_client_mock.return_value = picnic_client_mock yield picnic_client_mock @pytest.fixture
a848dc11556624f8ebf2a09aff7192b84ab4f66e
@pytest.fixture
26
https://github.com/home-assistant/core.git
53
def picnic_api_client(): with patch( "homeassistant.components.picnic.create_picnic_client" ) as create_picnic_client_mock: picnic_client_mock = create_picnic_api_client(UNIQUE_ID)
9
59
picnic_api_client
11
0
2
5
docs/examples/introduction/stopwatch.py
184,537
fix for call_later and scroll_to_widget
textual
11
Python
11
stopwatch.py
def action_remove_stopwatch(self) -> None: timers = self.query("#timers Stopwatch") if timers: timers.last().remove()
c891f6b70a0e885d2afe9a02bebb40e4af2864a6
28
https://github.com/Textualize/textual.git
43
def action_remove_stopwatch(self) -> None: timers = self.query("#timers Stopwatch")
6
52
action_remove_stopwatch
21
0
1
13
tests/openbb_terminal/common/behavioural_analysis/test_sentimentinvestor_view.py
285,232
Here we merge all API Refactor related branches (#2236) * Update api.py * Updated forex menu * refactor ycrv command * refactor ycrv command black * refactor ecocal command * Minh changes * Adding space to test pushing * title fix ecocal df * get economic calendar annotation * fix investingcom tests * refactor index command * refactor overview command * give defaults to wsj view function args * rename date args investincom * refacto bigmac command * fix ecocal typo * refactor rtps command * alphavantage gdp * alphavantage gdp per capita * alphavantage cpi * alphavantage tyld * alphavantage inf * refactor macro command * refactor macro command w helpers * refactor treasury command * fix macro on terminal * treasury labels * refactor maturities * update treasury maturities doc strings * refactor get economic calendar finhub * refactor map command api * display map filter choices * route economy api to performance map * route economy api to performance map * display group choices on valuation command * refactor performance and valuation commands * refactor spectrum model and view * add choices to spectrum controller * delete image after view * fix model tests finviz * fix finciz view tests * refactor futures * fix some tests * fix more tests * fix controller test * refactor fred series notes * update fred notes docstring * refacto fred series ids * fix pred and qa when empty datasets * refactor fred * uncomment stuff * refacto get series data * fix some tests * set defaults on args * refactor fred yield curve * black * fix spell and remove ecocal names * fix linting * linting * pylint fix * change dangerous defaults * Working through crypto fixes (#2256) * Working through crypto fixes * Continued adding crypto stuff * Added crypto overview * Added test fixes * Added fixtures * Fixed tests * Fixed charting issue * Removed broken APIs * Final adjustments * Added test fixes * map get groups and get ycrv countries into old api * exposed econdb helper funcs * remove helpers * refactor search indices * linting * refactor arg currency * pylint from currency * Started switching crpyto ascending to ascend * Merging * Portfolio model arguements, params, and docstring * Refactored for etf commands (#2292) * Refactored for etf commands * Fixed tests * Added load command * Fixed menu * Portfolio logic fixes * Added econometrics (#2260) * Added econometrics * Fixed tests * Simplified API * Added test fixes * Added test csv * Allowed examples to be loaded * Fund refactor (#2291) * Fund refactor * Changed fund_name and fund to name * Changed ascending to ascend * Stock menu refactoring for easier API usage (#2194) * Stocks refactoring for easier API usage * Linting * Refactor newly added features * Linting * Fixing tests * Refactor common files used by stocks menu * Fixing flake8 * Fix linting and tests * Linting * Fix flake8 * refactor insider_data * refactor mentions * refactor watchlist * refactor sentiment * refactor sentiment * fix yahoofinance tests * refactor load and candle * refactor get_news and display_news * refactor stocks.ins.act * candle default matplotlib * fix yahoofinance_view tests * fix ark model tests * fix ark view tests * fix business insider model * fix business insider view * refactor csimarket model * fix tests csi market model * update dd controller * fix get suppliers tests * fix dd controller tests * fix finhub tests * fix finviz tests * fix fmp tests * fix marketwatch tests * corrected argument keywords in test_bt_model * corrected argument keywords in test_bt_view * refactor fa 
controller * refactor marketwatch view * refactor gov controller * fix tests fa av * fix tests elect * fix dcf tests * fix polygon tests * fix fmp tests * fix quiverquant tests * fix yahoofinance fa tests * fix more fa tests * fix insider tests * fix more tests * fix more tests * fix options tests * fix stock gov tests * fix tests test_ba_controller * fix tests for test_finviz_compare_model.py * fixed 2 tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fix final tests * fixed tests * fixed tests * Fix tests * black * forgot to black tests * fixed tests * fixed tests * fixed tests * fixed tests * flakefix * Tests + code : Stocks / Discovery * fix tests * added recorder * fixed tests * fixed tests * black * black * remove unused imports * refactor display raw * sia dicts fix * pylint * linting * remove dangerous default * fix tests * fix beta model test * black * skip screener qa test * change sector path to sectors * update tests readme * fix metric defaults * black * substitute lost ticker * defaults cpic * another round on sia * refactor cramer * reduce default tweets on sentiment * refactor yf hist, corr, volume * arkorders default * refactor income, balance, cashflow * refacto scorr, screener, getfinnhub * refactor stockgrid * ibkr refactor * another round on stockgrid * add dividens end point * refactor discovery endpoints * update docstrings with similar input * refactor messages * refactor ba * refactor regioons * refactor twitter sentiment * refactor hist * refactor regions * give default to timeframe * refactor bunch of defaults and arg names * remove leftover imports * refactor vwap * let tests run * fix tests * fix stock tests * fix stockanalysis tests * flake * MYPY * Made important changes * added fixes * Fixed big issue * Added fixes to tests * fix qa tests * fix tests * fix 1 more test * last stocks failing * fix crypto test Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: montezdesousa <montezdesousa@gmail.com> Co-authored-by: hjoaquim <h.joaquim@campus.fct.unl.pt> Co-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com> Co-authored-by: colin99d <colin99delahunty@gmail.com> * fix portfolio tests * change period to window * update ca docstrings * refactor get_similar_companies func * Fixed * Update CI * Update CI 2 * Update CI 3 * Update dependencies Co-authored-by: colin99d <colin99delahunty@gmail.com> Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com> Co-authored-by: montezdesousa <montezdesousa@gmail.com> Co-authored-by: James Simmons <simmonsj330@gmail.com> Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com> Co-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com> Co-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com> Co-authored-by: northern-64bit <75195383+northern-64bit@users.noreply.github.com> Co-authored-by: hjoaquim <h.joaquim@campus.fct.unl.pt>
OpenBBTerminal
10
Python
20
test_sentimentinvestor_view.py
def test_display_trending_empty_df(mocker): view = "openbb_terminal.common.behavioural_analysis.sentimentinvestor_view" # MOCK GET_HISTORICAL mocker.patch( target=f"{view}.sentimentinvestor_model.get_trending", return_value=pd.DataFrame(), ) sentimentinvestor_view.display_trending( start_date=datetime(2021, 12, 21), hour=9, number=10, limit=10, export="", )
9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b
58
https://github.com/OpenBB-finance/OpenBBTerminal.git
87
def test_display_trending_empty_df(mocker): view = "o
16
94
test_display_trending_empty_df
28
0
2
9
src/prefect/agent.py
59,966
Agent: Add limit to control number of concurrent flow runs (#7361) Co-authored-by: Thomas Pedersen <tpe@neogrid.dk> Co-authored-by: Michael Adkins <michael@prefect.io>
prefect
11
Python
23
agent.py
async def start(self): self.started = True self.task_group = anyio.create_task_group() self.limiter = ( anyio.CapacityLimiter(self.limit) if self.limit is not None else None ) self.client = get_client() await self.client.__aenter__() await self.task_group.__aenter__()
045492f4d2205a0029514f5f00ec7560c06059a8
65
https://github.com/PrefectHQ/prefect.git
87
async def start(self): self.started = True self.task_group = anyio.create_task_group() self.limiter = ( anyio.CapacityLimiter(self
12
107
start
32
0
5
9
lib/matplotlib/axis.py
107,773
Refactor handling of tick and ticklabel visibility in Axis.clear() This is a follow-up to #20826, which makes the exceptions from clearing more explicit.
matplotlib
11
Python
27
axis.py
def _reset_major_tick_kw(self, keep_tick_and_label_visibility=False): backup = {name: value for name, value in self._major_tick_kw.items() if name in ['tick1On', 'tick2On', 'label1On', 'label2On']} self._major_tick_kw.clear() if keep_tick_and_label_visibility: self._major_tick_kw.update(backup) self._major_tick_kw['gridOn'] = ( mpl.rcParams['axes.grid'] and mpl.rcParams['axes.grid.which'] in ('both', 'major'))
2357c92d87d96d519c8470776e76180e71663d0b
87
https://github.com/matplotlib/matplotlib.git
125
def _reset_major_tick_kw(self, keep_tick_and_label_visibility=False): backup = {name: value for name, value in self._major_tick_kw.items() if name in ['tick1On', 'tick2On', 'label1On', 'label2On']} self._major_tick_kw.clear() if keep_tick_and_label_visibility: self._major_tick_kw.update(backup) self._major_tick_kw['gridOn'] = ( mpl.rcParams[
12
150
_reset_major_tick_kw
246
0
1
134
python/ccxt/async_support/bitbns.py
15,728
1.67.89 [ci skip]
ccxt
16
Python
161
bitbns.py
def describe(self): return self.deep_extend(super(bitbns, self).describe(), { 'id': 'bitbns', 'name': 'Bitbns', 'countries': ['IN'], # India 'rateLimit': 1000, 'certified': False, 'pro': False, 'version': 'v2', # new metainfo interface 'has': { 'spot': True, 'margin': None, 'swap': False, 'future': False, 'option': False, 'cancelOrder': True, 'createOrder': True, 'fetchBalance': True, 'fetchDepositAddress': True, 'fetchDeposits': True, 'fetchFundingHistory': False, 'fetchFundingRate': False, 'fetchFundingRateHistory': False, 'fetchFundingRates': False, 'fetchIndexOHLCV': False, 'fetchIsolatedPositions': False, 'fetchLeverage': False, 'fetchMarkets': True, 'fetchMarkOHLCV': False, 'fetchMyTrades': True, 'fetchOHLCV': None, 'fetchOpenOrders': True, 'fetchOrder': True, 'fetchOrderBook': True, 'fetchPositions': False, 'fetchPositionsRisk': False, 'fetchPremiumIndexOHLCV': False, 'fetchStatus': True, 'fetchTicker': 'emulated', 'fetchTickers': True, 'fetchTrades': True, 'fetchWithdrawals': True, 'reduceMargin': False, 'setLeverage': False, 'setPositionMode': False, }, 'timeframes': { }, 'urls': { 'logo': 'https://user-images.githubusercontent.com/1294454/117201933-e7a6e780-adf5-11eb-9d80-98fc2a21c3d6.jpg', 'api': { 'www': 'https://bitbns.com', 'v1': 'https://api.bitbns.com/api/trade/v1', 'v2': 'https://api.bitbns.com/api/trade/v2', }, 'www': 'https://bitbns.com', 'referral': 'https://ref.bitbns.com/1090961', 'doc': [ 'https://bitbns.com/trade/#/api-trading/', ], 'fees': 'https://bitbns.com/fees', }, 'api': { 'www': { 'get': [ 'order/fetchMarkets', 'order/fetchTickers', 'order/fetchOrderbook', 'order/getTickerWithVolume', 'exchangeData/ohlc', # ?coin=${coin_name}&page=${page} 'exchangeData/orderBook', 'exchangeData/tradedetails', ], }, 'v1': { 'get': [ 'platform/status', 'tickers', 'orderbook/sell/{symbol}', 'orderbook/buy/{symbol}', ], 'post': [ 'currentCoinBalance/EVERYTHING', 'getApiUsageStatus/USAGE', 'getOrderSocketToken/USAGE', 'currentCoinBalance/{symbol}', 'orderStatus/{symbol}', 'depositHistory/{symbol}', 'withdrawHistory/{symbol}', 'withdrawHistoryAll/{symbol}', 'depositHistoryAll/{symbol}', 'listOpenOrders/{symbol}', 'listOpenStopOrders/{symbol}', 'getCoinAddress/{symbol}', 'placeSellOrder/{symbol}', 'placeBuyOrder/{symbol}', 'buyStopLoss/{symbol}', 'sellStopLoss/{symbol}', 'placeSellOrder/{symbol}', 'cancelOrder/{symbol}', 'cancelStopLossOrder/{symbol}', 'listExecutedOrders/{symbol}', 'placeMarketOrder/{symbol}', 'placeMarketOrderQnty/{symbol}', ], }, 'v2': { 'post': [ 'orders', 'cancel', 'getordersnew', 'marginOrders', ], }, }, 'fees': { 'trading': { 'feeSide': 'quote', 'tierBased': False, 'percentage': True, 'taker': self.parse_number('0.0025'), 'maker': self.parse_number('0.0025'), }, }, 'exceptions': { 'exact': { '400': BadRequest, # {"msg":"Invalid Request","status":-1,"code":400} '409': BadSymbol, # {"data":"","status":0,"error":"coin name not supplied or not yet supported","code":409} '416': InsufficientFunds, # {"data":"Oops ! Not sufficient currency to sell","status":0,"error":null,"code":416} '417': OrderNotFound, # {"data":[],"status":0,"error":"Nothing to show","code":417} }, 'broad': {}, }, })
4e4e4e5d50f9a10f38d2aac5ea07696b84b365c4
434
https://github.com/ccxt/ccxt.git
2,545
def describe(self): return self.deep_extend(super(bitbns, self).describe(), { 'id': 'bitbns', 'name': 'Bitbns', 'countries': ['IN'], # India 'rateLimit': 1000, 'certified': False, 'pro': False, 'version': 'v2', # new metainfo interface 'has': { 'spot': True, 'margin': None, 'swap': False, 'future': False, 'option': False, 'cancelOrder': True, 'createOrder': True, 'fetchBalance': True, 'fetchDepositAddress': True, 'fetchDeposits': True, 'fetchFundingHistory': False, 'fetchFundingRate': False, 'fetchFundingRateHistory': False, 'fetchFundingRates': False, 'fetchIndexOHLCV': False, 'fetchIsolatedPositions': False, 'fetchLeverage': False, 'fetchMarkets': True, 'fetchMarkOHLCV': False, 'fetchMyTrades': True, 'fetchOHLCV': None, 'fetchOpenOrders': True, 'fetchOrder': True, 'fetchOrderBook': True, 'fetchPositions': False, 'fetchPositionsRisk': False, 'fetchPremiumIndexOHLCV': False, 'fetchStatus': True, 'fetchTicker': 'emulated', 'fetchTickers': True, 'fetchTrades': True, 'fetchWithdrawals': True, 'reduceMargin': False, 'setLeverage': False, 'setPositionMode': False, }, 'timeframes': { }, 'urls': { 'logo': 'https://user-images.githubusercontent.com/1294454/117201933-e7a6e780-adf5-11eb-9d80-98fc2a21c3d6.jpg', 'api': { 'www': 'https://bitbns.com', 'v1': 'https://api.bitbns.com/api/trade/v1', 'v2': 'https://api.bitbns.com/api/trade/v2', }, 'www': 'https://bitbns.com', 'referral': 'https://ref.bitbns.com/1090961', 'doc': [ 'https://bitbns.com/trade/#/api-trading/', ], 'fees': 'https://bitbns.com/fees', }, 'api': { 'www': { 'get': [ 'order/fetchMarkets', 'order/fetchTickers', 'order/fetchOrderbook', 'order/getTickerWit
10
814
describe
52
0
1
5
python3.10.4/Lib/ctypes/test/test_memfunctions.py
222,071
add python 3.10.4 for windows
XX-Net
12
Python
36
test_memfunctions.py
def test_overflow(self): # string_at and wstring_at must use the Python calling # convention (which acquires the GIL and checks the Python # error flag). Provoke an error and catch it; see also issue # #3554: <http://bugs.python.org/issue3554> self.assertRaises((OverflowError, MemoryError, SystemError), lambda: wstring_at(u"foo", sys.maxint - 1)) self.assertRaises((OverflowError, MemoryError, SystemError), lambda: string_at("foo", sys.maxint - 1))
8198943edd73a363c266633e1aa5b2a9e9c9f526
56
https://github.com/XX-net/XX-Net.git
144
def test_overflow(self): # string_at and wstring_at must use the Python calling # convention (which acquires the GIL and checks the Python # error flag). Provoke an error and catch it; see also issue # #3554: <http://b
10
87
test_overflow
15
0
1
6
trainer/craft/data/dataset.py
122,991
add CRAFT training code
EasyOCR
10
Python
13
dataset.py
def resize_to_half(self, ground_truth, interpolation): return cv2.resize( ground_truth, (self.output_size // 2, self.output_size // 2), interpolation=interpolation, )
f50a6a2867b77250cbd375217d0f7f32297891d8
26
https://github.com/JaidedAI/EasyOCR.git
61
def resize_to_half(self, ground_truth, interpolation): return cv2.resize( ground_truth, (self.output_size // 2, self
7
49
resize_to_half
8
0
1
4
tests/sentry/integrations/slack/test_requests.py
91,371
ref: replace self.assertRaises with pytest.raises (#35685) * add flake8 plugin to detect assertRaises * ref: replace self.assertRaises with pytest.raises * non-sed fixes
sentry
10
Python
8
test_requests.py
def test_validate_missing_event_type(self): self.request.data["event"] = {} with pytest.raises(SlackRequestError): self.slack_request.validate()
284e980df0018f8baee659999268bdd4c7d08255
31
https://github.com/getsentry/sentry.git
32
def test_validate_missing_event_type(self): self.request.data["ev
9
55
test_validate_missing_event_type
194
0
8
72
homeassistant/components/sensibo/coordinator.py
312,482
Bugfix temp step list out of range sensibo (#65782)
core
15
Python
153
coordinator.py
async def _async_update_data(self) -> dict[str, dict[str, Any]]: devices = [] try: for dev in await self.client.async_get_devices(): devices.append(dev) except (pysensibo.SensiboError) as error: raise UpdateFailed from error device_data: dict[str, dict[str, Any]] = {} for dev in devices: unique_id = dev["id"] name = dev["room"]["name"] temperature = dev["measurements"].get("temperature", 0.0) humidity = dev["measurements"].get("humidity", 0) ac_states = dev["acState"] target_temperature = ac_states.get("targetTemperature") hvac_mode = ac_states.get("mode") running = ac_states.get("on") fan_mode = ac_states.get("fanLevel") swing_mode = ac_states.get("swing") available = dev["connectionStatus"].get("isAlive", True) capabilities = dev["remoteCapabilities"] hvac_modes = list(capabilities["modes"]) if hvac_modes: hvac_modes.append("off") current_capabilities = capabilities["modes"][ac_states.get("mode")] fan_modes = current_capabilities.get("fanLevels") swing_modes = current_capabilities.get("swing") temperature_unit_key = dev.get("temperatureUnit") or ac_states.get( "temperatureUnit" ) temperatures_list = ( current_capabilities["temperatures"] .get(temperature_unit_key, {}) .get("values", [0, 1]) ) if temperatures_list: temperature_step = temperatures_list[1] - temperatures_list[0] features = list(ac_states) state = hvac_mode if hvac_mode else "off" fw_ver = dev["firmwareVersion"] fw_type = dev["firmwareType"] model = dev["productModel"] calibration_temp = dev["sensorsCalibration"].get("temperature", 0.0) calibration_hum = dev["sensorsCalibration"].get("humidity", 0.0) device_data[unique_id] = { "id": unique_id, "name": name, "ac_states": ac_states, "temp": temperature, "humidity": humidity, "target_temp": target_temperature, "hvac_mode": hvac_mode, "on": running, "fan_mode": fan_mode, "swing_mode": swing_mode, "available": available, "hvac_modes": hvac_modes, "fan_modes": fan_modes, "swing_modes": swing_modes, "temp_unit": temperature_unit_key, "temp_list": temperatures_list, "temp_step": temperature_step, "features": features, "state": state, "fw_ver": fw_ver, "fw_type": fw_type, "model": model, "calibration_temp": calibration_temp, "calibration_hum": calibration_hum, } return device_data
07edbc42a48a4ccedab660ec20fa0e93fe79ad46
454
https://github.com/home-assistant/core.git
1,071
async def _async_update_data(self) -> dict[str, dict[str, Any]]: devices = [] try: for dev in await self.client.async_get_devices(): devices.append(dev) except (pysensibo.SensiboError) as error: raise UpdateFailed from error device_data: dict[str, dict[str, Any]] = {} for dev in devices: unique_id = dev["id"] name = dev["room"]["name"] temperature = dev["measurements"].get("temperature", 0.0)
43
775
_async_update_data
28
0
2
13
label_studio/labels_manager/api.py
177,694
feat: DEV-1926: Add labels api (#2128) * feat: DEV-1926: Add labels api * Update DM to master branch Co-authored-by: hlomzik <hlomzik@gmail.com>
label-studio
12
Python
25
api.py
def post(self, request): serializer = LabelBulkUpdateSerializer(data=request.data) serializer.is_valid(raise_exception=True) project = serializer.validated_data['project'] if project is not None: self.check_object_permissions(self.request, project) updated_count = bulk_update_label( old_label=serializer.validated_data['old_label'], new_label=serializer.validated_data['new_label'], organization=self.request.user.active_organization, project=project, ) return Response({'annotations_updated': updated_count})
03bd7e0238b7c21d6276e0b927a1722ed7c0aedc
95
https://github.com/heartexlabs/label-studio.git
131
def post(self, request): serializer = LabelBulkUpdateSerializer(data=request.data) serializer.is_valid(raise_exception=True) project = serializer.validated_data['project'] if project is not
19
150
post
21
0
1
15
tests/providers/google/cloud/hooks/test_dataplex.py
47,006
Fix new MyPy errors in main (#22884) Those MyPy errors are a side effect of some new dependencies.
airflow
11
Python
21
test_dataplex.py
def test_list_tasks(self, mock_client): self.hook.list_tasks(project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID) parent = f'projects/{PROJECT_ID}/locations/{REGION}/lakes/{LAKE_ID}' mock_client.return_value.list_tasks.assert_called_once_with( request=dict( parent=parent, page_size=None, page_token=None, filter=None, order_by=None, ), retry=DEFAULT, timeout=None, metadata=(), )
6933022e94acf139b2dea9a589bb8b25c62a5d20
77
https://github.com/apache/airflow.git
178
def test_list_tasks(self, mock_client): self.hook.list_tasks(project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID)
24
122
test_list_tasks
117
1
2
35
tests/test_models.py
214,576
make `add_unk` optional and don't use it for ner
flair
12
Python
93
test_models.py
def test_sequence_tagger_transformer_finetune(results_base_path, tasks_base_path): flair.set_seed(123) # load dataset corpus: Corpus = ColumnCorpus( data_folder=tasks_base_path / "trivial" / "trivial_bioes", column_format={0: "text", 1: "ner"}, ) tag_dictionary = corpus.make_label_dictionary("ner", add_unk=False) # tagger without CRF tagger: SequenceTagger = SequenceTagger( hidden_size=64, embeddings=TransformerWordEmbeddings("distilbert-base-uncased", fine_tune=True), tag_dictionary=tag_dictionary, tag_type="ner", use_crf=False, use_rnn=False, reproject_embeddings=False, ) # train trainer = ModelTrainer(tagger, corpus) trainer.fine_tune( results_base_path, mini_batch_size=2, max_epochs=10, shuffle=True, learning_rate=0.5e-4, ) loaded_model: SequenceTagger = SequenceTagger.load(results_base_path / "final-model.pt") sentence = Sentence("this is New York") sentence_empty = Sentence(" ") loaded_model.predict(sentence) loaded_model.predict([sentence, sentence_empty]) loaded_model.predict([sentence_empty]) # check if loaded model can predict entities = [label.data_point.text for label in sentence.get_labels("ner")] assert "New York" in entities # check if loaded model successfully fit the training data result: Result = loaded_model.evaluate(corpus.test, gold_label_type="ner") assert result.classification_report["micro avg"]["f1-score"] == 1.0 del loaded_model @pytest.mark.integration
6ed3648502ddc7d44e8b6b3f9f8e6adcb15cf134
@pytest.mark.integration
231
https://github.com/flairNLP/flair.git
294
def test_sequence_tagger_transformer_finetune(results_base_path, tasks_base_path): flair.set_seed(123) # load dataset corpus: Corpus = ColumnCorpus( data_folder=tasks_base_path / "trivial" / "trivial_bioes", column_format={0: "text", 1: "ner"}, ) tag_dictionary = corpus.make_label_dictionary("ner", add_unk=False) # tagger without CR
49
377
test_sequence_tagger_transformer_finetune
20
0
1
10
tests/basic/tests.py
201,885
Refs #33476 -- Reformatted code with Black.
django
11
Python
19
tests.py
def test_objects_attribute_is_only_available_on_the_class_itself(self): with self.assertRaisesMessage( AttributeError, "Manager isn't accessible via Article instances" ): getattr( Article(), "objects", ) self.assertFalse(hasattr(Article(), "objects")) self.assertTrue(hasattr(Article, "objects"))
9c19aff7c7561e3a82978a272ecdaad40dda5c00
48
https://github.com/django/django.git
110
def test_objects_attribute_is_only_available_on_the_class_itself(self): with self.assertRaisesMessage( AttributeError, "Manager isn't accessible via Article instances" ): getattr
9
86
test_objects_attribute_is_only_available_on_the_class_itself
24
0
2
8
tests/components/lcn/test_cover.py
314,845
Add tests for LCN sensor and binary_sensor platforms (#67263)
core
11
Python
22
test_cover.py
async def test_setup_lcn_cover(hass, entry, lcn_connection): for entity_id in ( COVER_OUTPUTS, COVER_RELAYS, ): state = hass.states.get(entity_id) assert state is not None assert state.state == STATE_OPEN
b7b8feda0ffb7487954545c96c50e7f64e2195bc
41
https://github.com/home-assistant/core.git
68
async def test_setup_lcn_cover(hass, entry, lcn_connection): for entity_id in ( COVER_OUTPUTS, COVER_RELAYS, ): state = hass.states.get(entity_id) assert state is not None assert state.stat
11
63
test_setup_lcn_cover
25
0
4
8
src/sentry/integrations/jira/client.py
86,766
ref: type sentry/utils/assets.py and sentry/utils/http.py (#39624)
sentry
11
Python
20
client.py
def get_project_key_for_id(self, project_id) -> str: if not project_id: return "" projects = self.get_projects_list() for project in projects: if project["id"] == project_id: return project["key"] return ""
e9ce61066783c3601acd75fa74a9f4af6bd696c1
42
https://github.com/getsentry/sentry.git
89
def get_project_key_for_id(self, project_id) -> str: if not project_id: return "" projects = self.get_projects_list() for project in projects: if proj
7
73
get_project_key_for_id
15
0
2
5
wagtail/api/v2/tests/test_pages.py
72,791
Reformat with black
wagtail
13
Python
14
test_pages.py
def test_remove_id_field(self): response = self.get_response(fields="-id") content = json.loads(response.content.decode("UTF-8")) for page in content["items"]: self.assertEqual(set(page.keys()), {"meta", "title"})
d10f15e55806c6944827d801cd9c2d53f5da4186
57
https://github.com/wagtail/wagtail.git
46
def test_remove_id_field(self): response = self.get_response(fields="-id") content = json.loads(response.content.decode("UTF-8")) for
13
99
test_remove_id_field
6
0
1
3
modules/image/Image_editing/super_resolution/swinir_m_real_sr_x2/test.py
52,018
Add swinir_m_real_sr_x2 Module (#2074) * add swinir_m_real_sr_x2 * update README * fix typo * fix typo
PaddleHub
8
Python
6
test.py
def tearDownClass(cls) -> None: shutil.rmtree('tests') shutil.rmtree('swinir_m_real_sr_x2_output')
57d977303b4f6002eb8cc40ccb774146921c984a
19
https://github.com/PaddlePaddle/PaddleHub.git
19
def tearDownClass(cls) -> None: shutil.r
4
36
tearDownClass
44
0
1
8
pandas/tests/reshape/concat/test_index.py
167,627
BUG: concat losing columns dtypes for join=outer (#47586)
pandas
12
Python
32
test_index.py
def test_concat_index_keep_dtype(self, dtype): # GH#47329 df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype=dtype)) df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype=dtype)) result = concat([df1, df2], ignore_index=True, join="outer", sort=True) expected = DataFrame( [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype=dtype) ) tm.assert_frame_equal(result, expected)
1ac13910aabaabeec0f00319d14d31a08e294475
138
https://github.com/pandas-dev/pandas.git
103
def test_concat_index_keep_dtype(self, dtype): # GH#47329 df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype=dtype)) df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype=dtype)) result = concat([df1, df2]
18
190
test_concat_index_keep_dtype
145
0
9
30
jina/serve/networking.py
12,750
feat: do not await gather endpoints, simply schedule task (#5015)
jina
18
Python
94
networking.py
async def _get_next_connection(self, num_retries=3): try: connection = None for i in range(len(self._connections)): internal_rr_counter = (self._rr_counter + i) % len(self._connections) connection = self._connections[internal_rr_counter] # connection is None if it is currently being reset. In that case, try different connection if connection is not None: break all_connections_unavailable = connection is None and num_retries <= 0 if all_connections_unavailable: if num_retries <= 0: raise EstablishGrpcConnectionError( f'Error while resetting connections {self._connections}. Connections cannot be used.' ) elif connection is None: # give control back to async event loop so connection resetting can be completed; then retry self._logger.debug( f' No valid connection found, give chance for potential resetting of connection' ) try: await asyncio.wait_for( self._destroyed_event.wait(), timeout=GRACE_PERIOD_DESTROY_CONNECTION, ) finally: return await self._get_next_connection(num_retries=num_retries - 1) except IndexError: # This can happen as a race condition while _removing_ connections self._rr_counter = 0 connection = self._connections[self._rr_counter] self._rr_counter = (self._rr_counter + 1) % len(self._connections) return connection
6ba1d165a2aad8e863006be69c813b5cac3d8a21
168
https://github.com/jina-ai/jina.git
620
async def _get_next_connection(self, num_retries=3): try: connection = None for i in range(len(self._connections)): internal_rr_counter = (self._rr_counter + i) % len(self._connections) connection = self._connections[internal_rr_counter] # connection is None if it is currently being reset. In that case, try different connec
21
280
_get_next_connection
43
1
1
8
tests/gamestonk_terminal/stocks/research/test_res_controller.py
280,997
Tests : Stocks > Research + Screener (#1131) * Updating tests : stocks/research * Updating tests : stocks/screener * Updating tests : stocks/screener
OpenBBTerminal
12
Python
37
test_res_controller.py
def test_print_help(): controller = res_controller.ResearchController( ticker="MOCK_TICKER", start=datetime.strptime("2021-12-01", "%Y-%m-%d"), interval="MOCK_INTERVAL", queue=None, ) controller.print_help() @pytest.mark.vcr(record_mode="none") @pytest.mark.parametrize( "an_input, expected_queue", [ ("", []), ("/help", ["quit", "quit", "help"]), ("help/help", ["help"]), ("q", ["quit"]), ("h", []), ( "r", [ "quit", "quit", "reset", "stocks", "load MOCK_TICKER", "res", ], ), ], )
8f8147c3af76f03223943fe630a94dfb326b13c7
@pytest.mark.vcr(record_mode="none") @pytest.mark.parametrize( "an_input, expected_queue", [ ("", []), ("/help", ["quit", "quit", "help"]), ("help/help", ["help"]), ("q", ["quit"]), ("h", []), ( "r", [ "quit", "quit", "reset", "stocks", "load MOCK_TICKER", "res", ], ), ], )
39
https://github.com/OpenBB-finance/OpenBBTerminal.git
257
def test_print_help(): controller = res_controller.ResearchController( ticker="MOCK_TICKER", start=datetim
16
216
test_print_help
30
0
3
6
docs/img/plugin-events.py
224,966
Add plugin events that persist across builds in `mkdocs serve` "One-time events" `on_startup(command)`, `on_shutdown`. Their presence also shows that a plugin *wants* to persist across builds. Otherwise they will be re-created, to not change any existing behavior.
mkdocs
13
Python
29
plugin-events.py
def event(g, name, parameters): with cluster( g, f"cluster_{name}", href=f"#{name}", bgcolor="#ffff3388", pencolor="#00000088" ) as c: label = "|".join(f"<{p}>{p}" for p in parameters.split()) node(c, name, shape="record" if parameters else "point", label=label, fillcolor="#ffffff55")
a56ac6e0513bdea6860ed1fdc3debc10410638cd
72
https://github.com/mkdocs/mkdocs.git
56
def event(g, name, parameters): with cluster( g, f"cluster_{name}", href=f"#{
16
134
event
126
0
8
39
tools/infer_vqa_token_ser_re.py
24,211
add dygraph2static support of layoutlm series SER model
PaddleOCR
14
Python
81
infer_vqa_token_ser_re.py
def make_input(ser_inputs, ser_results): entities_labels = {'HEADER': 0, 'QUESTION': 1, 'ANSWER': 2} entities = ser_inputs[8][0] ser_results = ser_results[0] assert len(entities) == len(ser_results) # entities start = [] end = [] label = [] entity_idx_dict = {} for i, (res, entity) in enumerate(zip(ser_results, entities)): if res['pred'] == 'O': continue entity_idx_dict[len(start)] = i start.append(entity['start']) end.append(entity['end']) label.append(entities_labels[res['pred']]) entities = dict(start=start, end=end, label=label) # relations head = [] tail = [] for i in range(len(entities["label"])): for j in range(len(entities["label"])): if entities["label"][i] == 1 and entities["label"][j] == 2: head.append(i) tail.append(j) relations = dict(head=head, tail=tail) batch_size = ser_inputs[0].shape[0] entities_batch = [] relations_batch = [] entity_idx_dict_batch = [] for b in range(batch_size): entities_batch.append(entities) relations_batch.append(relations) entity_idx_dict_batch.append(entity_idx_dict) ser_inputs[8] = entities_batch ser_inputs.append(relations_batch) # remove ocr_info segment_offset_id and label in ser input ser_inputs.pop(7) ser_inputs.pop(6) ser_inputs.pop(5) return ser_inputs, entity_idx_dict_batch
8d46a1fbbe33d37fc858c53afd0e9fcd9cc185fa
310
https://github.com/PaddlePaddle/PaddleOCR.git
324
def make_input(ser_inputs, ser_results): entities_labels = {'HEADER': 0, 'QUESTION': 1, 'ANSWER': 2} entities = ser_inputs[8][0] ser_results = ser_results[0] assert len(entities) == len(ser_results) # entities start = [] end = [] label = [] entity_idx_dict = {} for i, (res, entity) in enumerate(zip(ser_results, entities)): if res['pred'] == 'O': continue entity_idx_dict[len(start)] = i
29
507
make_input
7
0
1
3
homeassistant/components/hunterdouglas_powerview/cover.py
301,684
Add support for topdown shades to hunterdouglas_powerview (#62788) Co-authored-by: J. Nick Koston <nick@koston.org>
core
9
Python
7
cover.py
def open_position(self) -> PowerviewShadeMove: return PowerviewShadeMove(self._shade.open_position, {})
45e4dd379b54847174b1f69ca138ba5fe73d24f9
20
https://github.com/home-assistant/core.git
21
def open_position(self) -> PowerviewShadeMove: return PowerviewShadeMove(self._shade.open_position, {})
4
34
open_position
178
1
5
48
keras/utils/composite_tensor_support_test.py
278,417
resolve line-too-long in utils
keras
14
Python
112
composite_tensor_support_test.py
def test_sparse_tensors(self, use_dict, use_dataset, action): data = [ ( tf.SparseTensor( [[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3] ), np.array([[[1, -1, -1]], [[2, 3, -1]]]), ), ( tf.SparseTensor( [[0, 0, 0], [1, 0, 0], [1, 0, 1], [2, 0, 1]], [5, 6, 7, 8], [3, 1, 4], ), np.array( [[[5, -1, -1, -1]], [[6, 7, -1, -1]], [[-1, 8, -1, -1]]] ), ), ] # Prepare the model to test. input_name = get_input_name(use_dict) model_input = input_layer.Input( shape=(1, None), sparse=True, name=input_name, dtype=tf.int32 ) layers = [ToDense(default_value=-1)] model = get_model_from_layers_with_input( layers, model_input=model_input ) model.compile( optimizer="sgd", loss="mse", metrics=["accuracy"], **get_test_mode_kwargs() ) kwargs = get_kwargs(use_dataset, action) # Prepare the input data for data_element in data: input_data, expected_output = prepare_inputs( data_element, use_dict, use_dataset, action, input_name ) # Perform the action. if action == "predict": result = model.predict(input_data, **kwargs) self.assertAllEqual(expected_output, result) if action == "evaluate": result = model.evaluate(input_data, expected_output, **kwargs) self.assertAllEqual(1.0, result[-1]) if action == "fit": # TODO(momernick): What's the best way of validating that fit # happened? _ = model.fit( input_data, expected_output, shuffle=False, **kwargs ) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes
80ee2fa4e1db2dda14370110830db82be3eb97b7
@test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes
392
https://github.com/keras-team/keras.git
803
def test_sparse_tensors(self, use_dict, use_dataset, action): data = [ ( tf.SparseTensor( [[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3] ), np.array([[[1, -1, -1]], [[2, 3, -1]]]), ), ( tf.SparseTensor( [[0, 0, 0], [1, 0, 0], [1, 0, 1], [2, 0, 1]], [5, 6, 7, 8], [3, 1, 4], ), np.array( [[[5, -1, -1, -1]], [[6, 7, -1, -1]], [[-1, 8, -1, -
46
564
test_sparse_tensors
13
0
2
5
test/prototype_transforms_kernel_infos.py
194,313
rename features._Feature to datapoints._Datapoint (#7002) * rename features._Feature to datapoints.Datapoint * _Datapoint to Datapoint * move is_simple_tensor to transforms.utils * fix CI * move Datapoint out of public namespace
vision
12
Python
13
prototype_transforms_kernel_infos.py
def sample_inputs_adjust_hue_image_tensor(): for image_loader in make_image_loaders( sizes=["random"], color_spaces=(datapoints.ColorSpace.GRAY, datapoints.ColorSpace.RGB) ): yield ArgsKwargs(image_loader, hue_factor=_ADJUST_HUE_FACTORS[0])
a8007dcdfb5159a711fa343d2ac4bb7df826975f
44
https://github.com/pytorch/vision.git
32
def sample_inputs_adjust_hue_image_tensor(): for image_loader in make_image_loaders( sizes=["random"], color_spaces=(datapoints.ColorSpace.GRAY, datapoints.ColorSp
12
68
sample_inputs_adjust_hue_image_tensor