column           dtype           range
n_words          int64           3 – 1.95k
n_ast_errors     int64           0 – 2
complexity       int64           1 – 151
nloc             int64           2 – 546
path             stringlengths   8 – 125
id               int64           280 – 339k
commit_message   stringlengths   3 – 18.1k
repo             stringlengths   3 – 28
ast_levels       int64           4 – 28
language         stringclasses   1 value
vocab_size       int64           3 – 677
file_name        stringlengths   5 – 67
code             stringlengths   101 – 24k
commit_id        stringlengths   40 – 40
ast_errors       stringlengths   0 – 2.76k
token_counts     int64           7 – 3.77k
url              stringlengths   31 – 61
n_whitespaces    int64           4 – 13.9k
random_cut       stringlengths   21 – 13.9k
n_identifiers    int64           1 – 157
n_ast_nodes      int64           10 – 3.6k
fun_name         stringlengths   3 – 72
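
Each row in the preview below is one Python function together with its source code, the commit it came from, and static-analysis counts. The cells appear one per line, in the column order of the schema above; an empty ast_errors cell is simply omitted from its row. As a minimal sketch of how such a preview could be queried, assuming the records were exported to a local Parquet file (the filename below is hypothetical, and only column names from the schema above are used):

```python
# Minimal sketch, not part of the original preview: it assumes the records shown
# below were exported to a local Parquet file ("functions.parquet" is a
# hypothetical name). Only column names from the schema above are used.
import pandas as pd

df = pd.read_parquet("functions.parquet")  # hypothetical export of this dataset

# Select short, low-complexity functions and see which repos they come from.
small = df[(df["nloc"] <= 30) & (df["complexity"] <= 5)]
print(small[["repo", "path", "fun_name", "token_counts"]].head())

# "code" holds the full function body; in the rows shown below, "random_cut" is
# a truncated prefix of it, and "commit_id"/"url" point back to the source commit.
print(small.iloc[0]["code"][:200])
```

The raw preview rows follow, one cell per line.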
62
0
1
15
tests/basic/tests.py
201,868
Refs #33476 -- Reformatted code with Black.
django
10
Python
46
tests.py
def test_does_not_exist(self): # Django raises an Article.DoesNotExist exception for get() if the # parameters don't match any object. with self.assertRaisesMessage( ObjectDoesNotExist, "Article matching query does not exist." ): Article.objects.get( id__exact=2000, ) # To avoid dict-ordering related errors check only one lookup # in single assert. with self.assertRaises(ObjectDoesNotExist): Article.objects.get(pub_date__year=2005, pub_date__month=8) with self.assertRaisesMessage( ObjectDoesNotExist, "Article matching query does not exist." ): Article.objects.get( pub_date__week_day=6, )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
69
https://github.com/django/django.git
231
def test_does_not_exist(self): # Django raises an Article.DoesNotExist exception for get() if the # parameters don't match any object. with self.assertRaisesMessage( ObjectDoesNotExist, "Article matching query does not exist." ): Article.objects.get( id__exact=2000, ) # To avoid dict-ordering related errors check only one lookup # in single assert. with self.assertRaises(ObjectDoesNotExist): Article.objects.get(pub_date__year=2005, pub_date__month=8) with self.assertRaisesMessage( ObjectDoesNotExist, "Article matching query does not exist." ):
12
121
test_does_not_exist
97
0
7
26
scripts/extract.py
101,299
Data Augmentation update (#1263) - lib.detected_face - Subclass Masks for Landmark based masks - Add training mask propery + methods to DetectedFace - lib.training_training - subclass TrainingDataGenerator for training and preview data - Split cache into own module - Reduce thread count to 1 to prevent image corruption + data re-use - Process on largest model input/output size rather than stored image size - Size and crop masks during caching stage - Implement ring buffer for data flow - Fix preview reload bug - augmentation - typing - switch color aug order - better initialization - Fix warp + landmark warp to correctly apply at different image scales - Slightly improved warp caching - Don't store whether image is_preview. Handle all data as training images implicitly - plugins.trainer: Typing and fixes to work with trainingdata refactor
faceswap
13
Python
69
extract.py
def _set_skip_list(self) -> None: if self._skip_num == 1 and not self._alignments.data: logger.debug("No frames to be skipped") return skip_list = [] for idx, filename in enumerate(self._images.file_list): if idx % self._skip_num != 0: logger.trace("Adding image '%s' to skip list due to " # type: ignore "extract_every_n = %s", filename, self._skip_num) skip_list.append(idx) # Items may be in the alignments file if skip-existing[-faces] is selected elif os.path.basename(filename) in self._alignments.data: self._existing_count += 1 logger.trace("Removing image: '%s' due to previously existing", # type: ignore filename) skip_list.append(idx) if self._existing_count != 0: logger.info("Skipping %s frames due to skip_existing/skip_existing_faces.", self._existing_count) logger.debug("Adding skip list: %s", skip_list) self._images.add_skip_list(skip_list)
2beceffad9b15c1fd78f06b9b272563321c5a41e
143
https://github.com/deepfakes/faceswap.git
368
def _set_skip_list(self) -> None: if self._skip_num == 1 and not self._al
21
242
_set_skip_list
26
0
1
2
tests/server.py
248,183
Use `getClientAddress` instead of `getClientIP`. (#12599) getClientIP was deprecated in Twisted 18.4.0, which also added getClientAddress. The Synapse minimum version for Twisted is currently 18.9.0, so all supported versions have the new API.
synapse
8
Python
25
server.py
def getPeer(self): # We give an address so that getClientAddress/getClientIP returns a non null entry, # causing us to record the MAU return address.IPv4Address("TCP", self._ip, 3423)
7fbf42499d92ec3c9a05d9f36ec5fecd1ab1f18c
18
https://github.com/matrix-org/synapse.git
46
def getPeer(self): # We give an address so that getClientAddress/getClientIP returns a non null entry, # causing us to record the MAU return address.IPv4Addre
5
31
getPeer
10
0
2
13
mkdocs/tests/config/config_options_tests.py
225,061
Refactor: use config_options module through a short alias 'c'
mkdocs
11
Python
10
config_options_tests.py
def test_incorrect_type_error(self): for cls in c.Dir, c.File, c.FilesystemObject: with self.subTest(cls):
f1da904a7fae401c5f96ef6494bfd2bbfcb8c29e
81
https://github.com/mkdocs/mkdocs.git
27
def test_incorrect_type_error(self): for cls in c.Dir, c.File, c.FilesystemObject: with self.subTest(cls):
8
45
test_incorrect_type_error
9
0
6
22
keras/saving/saved_model/save_impl.py
276,127
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
7
Python
9
save_impl.py
def _replace_child_layer_functions(layer, serialization_cache): # pylint: disable=protected-access original_fns = {}
84afc5193d38057e2e2badf9c889ea87d80d8fbf
106
https://github.com/keras-team/keras.git
18
def _replace_child_layer_functions(layer, serialization_cache): # pylint: disable=protected-access original_fns = {}
4
23
_replace_child_layer_functions
56
0
2
12
keras/mixed_precision/layer_test.py
274,995
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
14
Python
42
layer_test.py
def test_gradient(self, strategy_fn): x = tf.constant([1.0]) with strategy_fn().scope() as strategy: with policy.policy_scope("mixed_float16"): layer = mp_test_util.MultiplyLayer(assert_type=tf.float16) # Learning rate is small enough that if applied to a float16 variable, # the variable will not change. So this tests the learning rate is not # applied to a float16 value, but instead the float32 variable. opt = gradient_descent.SGD(2**-14)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
119
https://github.com/keras-team/keras.git
155
def test_gradient(self, strategy_fn): x = tf.constant([1.0]) with strategy_fn().scope() as strategy: with policy.policy_scope("mixed_float16"): layer = mp_test_util.MultiplyLayer(assert_type=tf.float16) # Learning rate is small enough that if applied to a float16 variable, # the variable will not change. So this tests the learning rate is not # applied to a float16 value, but instead
18
106
test_gradient
345
0
1
2
examples/mixture/plot_gmm_selection.py
261,545
DOC Rework Gaussian Mixture example (#24721) Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
scikit-learn
10
Python
221
plot_gmm_selection.py
def gmm_bic_score(estimator, X): # Make it negative since GridSearchCV expects a score to maximize return -estimator.bic(X) param_grid = { "n_components": range(1, 7), "covariance_type": ["spherical", "tied", "diag", "full"], } grid_search = GridSearchCV( GaussianMixture(), param_grid=param_grid, scoring=gmm_bic_score ) grid_search.fit(X) # %% # Plot the BIC scores # ------------------- # # To ease the plotting we can create a `pandas.DataFrame` from the results of # the cross-validation done by the grid search. We re-inverse the sign of the # BIC score to show the effect of minimizing it. import pandas as pd df = pd.DataFrame(grid_search.cv_results_)[ ["param_n_components", "param_covariance_type", "mean_test_score"] ] df["mean_test_score"] = -df["mean_test_score"] df = df.rename( columns={ "param_n_components": "Number of components", "param_covariance_type": "Type of covariance", "mean_test_score": "BIC score", } ) df.sort_values(by="BIC score").head() # %% import seaborn as sns sns.catplot( data=df, kind="bar", x="Number of components", y="BIC score", hue="Type of covariance", ) plt.show() # %% # In the present case, the model with 2 components and full covariance (which # corresponds to the true generative model) has the lowest BIC score and is # therefore selected by the grid search. # # Plot the best model # ------------------- # # We plot an ellipse to show each Gaussian component of the selected model. For # such purpose, one needs to find the eigenvalues of the covariance matrices as # returned by the `covariances_` attribute. The shape of such matrices depends # on the `covariance_type`: # # - `"full"`: (`n_components`, `n_features`, `n_features`) # - `"tied"`: (`n_features`, `n_features`) # - `"diag"`: (`n_components`, `n_features`) # - `"spherical"`: (`n_components`,) from matplotlib.patches import Ellipse from scipy import linalg color_iter = sns.color_palette("tab10", 2)[::-1] Y_ = grid_search.predict(X) fig, ax = plt.subplots() for i, (mean, cov, color) in enumerate( zip( grid_search.best_estimator_.means_, grid_search.best_estimator_.covariances_, color_iter, ) ): v, w = linalg.eigh(cov) if not np.any(Y_ == i): continue plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], 0.8, color=color) angle = np.arctan2(w[0][1], w[0][0]) angle = 180.0 * angle / np.pi # convert to degrees v = 2.0 * np.sqrt(2.0) * np.sqrt(v) ellipse = Ellipse(mean, v[0], v[1], angle=180.0 + angle, color=color) ellipse.set_clip_box(fig.bbox) ellipse.set_alpha(0.5) ax.add_artist(ellipse) plt.title( f"Selected GMM: {grid_search.best_params_['covariance_type']} model, " f"{grid_search.best_params_['n_components']} components" ) plt.axis("equal") plt.show()
fa4376a5815ce8b15f48f220bc353de0e06aa259
16
https://github.com/scikit-learn/scikit-learn.git
427
def gmm_bic_score(estimator, X): # Make it negative since GridSearchCV expects a score to maximize return -estimator.bic(X) param_grid = { "n_components": range(1, 7), "covariance_type": ["spherical", "tied", "diag", "full"], } grid_search = GridSearchCV( GaussianMixture(), param_grid=param_grid, scoring=gmm_bic_score ) grid_search.fit(X) # %% # Plot the BIC scores # ------------------- # # To ease the plotting we can create a `pandas.DataFrame` from the results of # the cross-validation done by the grid search. We re-inverse the sign of the # BIC score to show the effect of minimizing it. import pandas as pd df = pd.DataFrame(grid_search.cv_results_)[ ["param_n_components", "param_covariance_type", "mean_test_score"] ] df["mean_test_score"] = -df["mean_test_score"] df = df.rename( columns={ "param_n_components": "Number of components", "param_covariance_type": "Type of covariance", "mean_test_score": "BIC score", } ) df.sort_values(by="BIC score").head() # %% import seaborn as sns sns.catplot( data=df, kind="bar", x="Number of components", y="BIC score", hue="Type of covariance", ) plt.show() # %% # In the present case, the model with 2 components and full covariance (which # corresponds to the true generative model) has the lowest BIC score and is # therefore selected by the grid search. # # Plot the best model # ------------------- # # We plot an ellipse to show each Gaussian component of the selected model. For # such purpose, one needs to find the eigenvalues of the covariance matrices as # returned by the `covariances_` attribute. The shape of such matrices depends # on the `covariance_type`: # # - `"full"`: (`n_components`, `n_features`, `n_features`) # - `"tied"`: (`n_features`, `n_features`) # - `"diag"`: (`n_components`, `n_features`) # - `"spherical"`: (`n_components`,) from matplotlib.patches import Ellipse from scipy import linalg color_iter = sns.color_palette("tab10", 2)[::-1] Y_ = grid_search.predict(X) fig, ax = plt.subplots() for i, (mean, cov, color) in enumerate( zip( grid_search.best_estimator_.means_, grid_search.best_estimator_.covariances_, color_iter, ) ): v, w = linalg.eigh(cov) if not np.any(Y_ == i): continue plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], 0.8, color=color) angle = np.arctan2(w[0][1], w[0][0]) angle = 180.0 * angle / np.pi # convert to degrees v = 2.0 * np.sqrt(2.0) * np.sqrt(v) ellipse = Ellipse(mean, v[0], v[1], angle=180.0 + angle, color=c
70
708
gmm_bic_score
48
0
1
11
python/ray/tune/tests/test_logger.py
132,526
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
13
Python
39
test_logger.py
def testCSV(self): config = {"a": 2, "b": 5, "c": {"c": {"D": 123}, "e": None}} t = Trial(evaluated_params=config, trial_id="csv", logdir=self.test_dir) logger = CSVLoggerCallback() logger.on_trial_result(0, [], t, result(0, 4)) logger.on_trial_result(1, [], t, result(1, 5)) logger.on_trial_result( 2, [], t, result(2, 6, score=[1, 2, 3], hello={"world": 1}) ) logger.on_trial_complete(3, [], t) self._validate_csv_result()
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
143
https://github.com/ray-project/ray.git
121
def testCSV(self): config = {"a": 2, "b": 5, "c": {"c": {"D": 123}, "e": None}}
17
221
testCSV
29
0
1
8
tests/data/test_btanalysis.py
148,463
Update bt_results filename to new.json
freqtrade
10
Python
23
test_btanalysis.py
def test_analyze_trade_parallelism(testdatadir): filename = testdatadir / "backtest-result_new.json" bt_data = load_backtest_data(filename) res = analyze_trade_parallelism(bt_data, "5m") assert isinstance(res, DataFrame) assert 'open_trades' in res.columns assert res['open_trades'].max() == 3 assert res['open_trades'].min() == 0
28011a39076d41e6f1f2182215cbcb420bcb3fa5
59
https://github.com/freqtrade/freqtrade.git
49
def test_analyze_trade_parallelism(testdatadir): filename = testdatadir / "backtest-result_new.json" bt_data = load_backtest_data(filename) res = analyze_trade_parallelism(bt_data, "5m") assert isinstance(res, DataFrame) assert 'open_trades' in res.columns assert
12
102
test_analyze_trade_parallelism
32
0
3
11
src/diffusers/utils/logging.py
335,006
changes comments and env vars in `utils/logging` removes mentions of 🤗Transformers with 🤗Diffusers equivalent.
diffusers
19
Python
29
logging.py
def _get_default_logging_level(): env_level_str = os.getenv("DIFFUSERS_VERBOSITY", None) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"Unknown option DIFFUSERS_VERBOSITY={env_level_str}, " f"has to be one of: { ', '.join(log_levels.keys()) }" ) return _default_log_level
c3cc8eb23c8095217388d350409b454ea396c12b
45
https://github.com/huggingface/diffusers.git
121
def _get_default_logging_level(): env_level_str = os.getenv("DIFFUSERS_VERBOSITY", None) if env_level_str:
11
104
_get_default_logging_level
30
0
1
12
saleor/graphql/meta/tests/test_meta_mutations.py
29,971
Move checkout metadata to separate model (#11264) * seperate checkout matadata to CheckoutMetadata model * change naming of checkout metadata class field * fix tests after rebase * add dataloaders,move resolving to checkout type instead of meta type, fix review remarks * fix tests * clen up migrations * add missing migration, add missing metdata filter for checkout * fixes for cases when checkout has no metadata_storage, cosmetic changes after review * fixes for cases when checkout has no metadata_storage, cosmetic changes after review * update changelog * move comment to proper place * delete index from state * add using helper to ensure metadata exists * fix migration, add get_or_create fo metadata calls * fix tests after rebase, fixes after review * fix in migration queryset
saleor
11
Python
26
test_meta_mutations.py
def test_delete_public_metadata_for_checkout_by_token(api_client, checkout): # given checkout.metadata_storage.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE}) checkout.metadata_storage.save(update_fields=["metadata"]) checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk) # when response = execute_clear_public_metadata_for_item( api_client, None, checkout.token, "Checkout" ) # then assert item_without_public_metadata( response["data"]["deleteMetadata"]["item"], checkout.metadata_storage, checkout_id, )
10356eb2845766684a13cb3adcbde2ad0e5e07ec
80
https://github.com/saleor/saleor.git
87
def test_delete_public_metadata_for_checkout_by_token(api_client, checkout): # given checkout.metadata_storage.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE}) checkout.metadata_storage.save(update_fields=["metadata"]) checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk) # when response = execute_clear_public_metadata_for_item( api_client, None, checkout.token, "Checkout" ) # then assert item_without_public_metadata( response["data"]["deleteMetadata"]["item"], checkout.metadata_storage, checkout_id, )
18
132
test_delete_public_metadata_for_checkout_by_token
23
0
1
8
keras/legacy_tf_layers/convolutional_test.py
274,305
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
15
Python
17
convolutional_test.py
def testFunctionalConv1DNoReuse(self): with tf.Graph().as_default(): length = 10 data = tf.random.uniform((5, length, 3), seed=1) conv_layers.separable_conv1d(data, 32, 3) self.assertEqual(len(tf.compat.v1.trainable_variables()), 3) conv_layers.separable_conv1d(data, 32, 3) self.assertEqual(len(tf.compat.v1.trainable_variables()), 6)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
97
https://github.com/keras-team/keras.git
95
def testFunctionalConv1DNoReuse(self): with tf.Graph().as_defaul
17
151
testFunctionalConv1DNoReuse
75
0
2
24
keras/layers/normalization/batch_normalization_test.py
272,727
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
17
Python
56
batch_normalization_test.py
def test_batchnorm_convnet(self): if tf.test.is_gpu_available(cuda_only=True): with self.session(): model = keras.models.Sequential() norm = keras.layers.BatchNormalization( axis=1, input_shape=(3, 4, 4), momentum=0.8 ) model.add(norm) model.compile( loss="mse", optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01), run_eagerly=test_utils.should_run_eagerly(), ) # centered on 5.0, variance 10.0 x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4)) model.fit(x, x, epochs=4, verbose=0) out = model.predict(x) out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1)) out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1)) np.testing.assert_allclose( np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1 ) np.testing.assert_allclose( np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1 )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
278
https://github.com/keras-team/keras.git
446
def test_batchnorm_convnet(self): if tf.test.is_gpu_available(cuda_only=True): with self.session(): model = keras.models.Sequential() norm = keras.layers.BatchNormalization( axis=1, input_shape=(3, 4, 4), momentum=0.8 ) model.add(norm) model.compile( loss="mse", optimizer=tf
50
383
test_batchnorm_convnet
67
0
7
20
yt_dlp/extractor/musicdex.py
162,444
[Musicdex] Add extractors (#2421) Closes #2204 Authored by: Ashish0804
yt-dlp
17
Python
55
musicdex.py
def _return_info(self, track_json, album_json, id): return { 'id': str(id), 'title': track_json.get('name'), 'track': track_json.get('name'), 'description': track_json.get('description'), 'track_number': track_json.get('number'), 'url': format_field(track_json, 'url', 'https://www.musicdex.org/%s'), 'duration': track_json.get('duration'), 'genre': [genre.get('name') for genre in track_json.get('genres') or []], 'like_count': track_json.get('likes_count'), 'view_count': track_json.get('plays'), 'artist': [artist.get('name') for artist in track_json.get('artists') or []], 'album_artist': [artist.get('name') for artist in album_json.get('artists') or []], 'thumbnail': format_field(album_json, 'image', 'https://www.musicdex.org/%s'), 'album': album_json.get('name'), 'release_year': try_get(album_json, lambda x: date_from_str(unified_strdate(x['release_date'])).year), 'extractor_key': MusicdexSongIE.ie_key(), 'extractor': 'MusicdexSong', }
fb62afd6f047aea7e88a6b0df00b49f78ba16e84
219
https://github.com/yt-dlp/yt-dlp.git
267
def _return_info(self, track_json, album_json, id): return { 'id': str(id), 'title': track_json.get('name'), 'track': track_json.get('name'), 'description': track_json.get('description'), 'track_number': track_json.get('number'), 'url': format_field(track_json, 'url', 'https://www.musicdex.org/%s'), 'duration': track_json.get('duration'), 'genre': [genre.get('name') for genre in track_json.get('genres') or []], 'like_count': track_json.get('likes_count'), 'view_count': track_json.get('plays'), 'artist': [artist.get('name') for artist in track_json.get('artists') or []], 'album_artist': [artist.get('name') for artist in album_json.get('artists') or []], 'thumbnail': format_field(album_json, 'image', 'https://www.musicdex.org/%s'), 'album': album_json.get('name'),
17
389
_return_info
41
0
2
12
tests/sentry/api/endpoints/test_user_roles.py
95,384
feat: Add endpoints for managing user roles (#30993) - Add endpoints for creating, updating, deleting user roles. - Add endpoints for assigning and unassigning roles from users. - Add permission validation to endpoints (permissions now must be known). - Add sudo/superuser requirements to various endpoints involving permissions. - Add standard audit logs to all permission related endpoints. Additionally this cleans up the various permission endpoints, improving testing, adding various security (sudo, superuser-only).
sentry
11
Python
30
test_user_roles.py
def test_lookup_self(self): role = UserRole.objects.create(name="support", permissions=["broadcasts.admin"]) role.users.add(self.user) role2 = UserRole.objects.create(name="admin", permissions=["users.admin"]) role2.users.add(self.user) UserRole.objects.create(name="other", permissions=["users.edit"]) resp = self.get_response("me") assert resp.status_code == 200 assert len(resp.data) == 2, resp.data role_names = [r["name"] for r in resp.data] assert "support" in role_names assert "admin" in role_names
45750ab53007ebae64d7a82d5020e65ab94b6da7
126
https://github.com/getsentry/sentry.git
117
def test_lookup_self(self): role = UserRole.objects.create(name="support", permissions=["broadcasts.admin"]) role.users.add(self.user) role2 = UserRole.objects.create(name="admin", permissions=["users.admin"]) role2.users.add(self.user) UserRole.objects.create(name="other", permissions=["users.edit"]) resp = self.get_response("me") assert resp.status_code == 200 assert len(resp.data) == 2, resp.data role_names = [r
19
212
test_lookup_self
30
0
2
7
tests/orion/database/test_queries.py
58,792
Return a work queue ID
prefect
9
Python
28
test_queries.py
async def test_get_runs_in_queue_limit(self, session, db, fr_1, fr_2, fr_3): query = db.queries.get_scheduled_flow_runs_from_work_queues( db=db, limit_per_queue=1 ) result = await session.execute(query) runs = result.all() assert [r[0].id for r in runs] == [fr_1.id, fr_3.id]
da5115381e62b084922641a8b1270806f695055f
70
https://github.com/PrefectHQ/prefect.git
75
async def test_get_runs_in_queue_limit(self, session, db, fr_1, fr_2, fr_3): query = db.queries.get_scheduled_flow_runs_from_work_queues( db=db, limit_per_queue=1 ) result = await session.execute(query) runs = result.all() assert
17
103
test_get_runs_in_queue_limit
23
1
2
9
airflow/cli/commands/dag_command.py
45,966
Add `list-import-errors` to `airflow dags` command (#22084) This will help users to see the dags with import error and enable scripts process the output
airflow
12
Python
21
dag_command.py
def dag_list_import_errors(args): dagbag = DagBag(process_subdir(args.subdir)) data = [] for filename, errors in dagbag.import_errors.items(): data.append({"filepath": filename, "error": errors}) AirflowConsole().print_as( data=data, output=args.output, ) @cli_utils.action_cli @suppress_logs_and_warning
e1134590973355549272b1f3a213dbfa29698df7
@cli_utils.action_cli @suppress_logs_and_warning
65
https://github.com/apache/airflow.git
60
def dag_list_import_errors(args): dagbag = DagBag(process_subd
18
119
dag_list_import_errors
48
0
3
26
mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
114,781
before integration tests
mindsdb
15
Python
31
mysql_proxy.py
def process_query(self, sql): executor = Executor( session=self.session, sqlserver=self ) executor.query_execute(sql) if executor.error is not None: resp = SQLAnswer( answer_type = ANSWER_TYPE.ERROR, error_code=executor.error['code'], error_message=executor.error['message'] ) elif executor.data is None: resp = SQLAnswer( answer_type = ANSWER_TYPE.OK, state_track=executor.state_track, ) else: resp = SQLAnswer( answer_type=ANSWER_TYPE.TABLE, state_track=executor.state_track, columns=self.to_mysql_columns(executor.columns), data=executor.data, status=executor.server_status ) return resp
e8a8d9b71deae2c291efb49ff11573285f3aec35
130
https://github.com/mindsdb/mindsdb.git
334
def process_query(self, sql): executor = Executor( session=self.session, sqlserver=self ) executor.query_execute(sql) if executor.error is not None: resp = SQLAnswer( answer_type = ANSWER_TYPE.ERROR, error_code=executor.error['code'], error_message=executor.error['message'] ) elif executor.data is None: resp = SQLAnswer( answer_type = ANSWER_TYPE.OK, state_track=executor.state_track, ) else: resp = SQLAnswer( answer_type=ANSWER_TYPE.TABLE, state_track=executor.state_track, columns=self.to_mysql_columns(executor.columns), data=executor.data, stat
24
197
process_query
13
0
1
4
pandas/tests/frame/methods/test_to_csv.py
171,873
BUG/API: Indexes on empty frames/series should be RangeIndex (#49637) * BUG/API: ndexes on empty frames/series should be RangeIndex, are Index[object] * fix black * fix window stuff * Add docs * double ticks * unneeded line * update thatsnew text * update whatsnew text * fix rst * Update doc/source/whatsnew/v2.0.0.rst Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com> Co-authored-by: Terji Petersen <terjipetersen@Terjis-Air.fritz.box> Co-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
pandas
11
Python
12
test_to_csv.py
def test_to_csv_empty(self): df = DataFrame(index=np.arange(10)) result, expected = self._return_result_expected(df, 1000) tm.assert_frame_equal(result, expected, check_column_type=False)
e93ee07729afe0bc7661655755df6adad657c23b
42
https://github.com/pandas-dev/pandas.git
33
def test_to_csv_empty(self):
13
65
test_to_csv_empty
86
0
2
18
src/textual/_terminal_features.py
183,561
[terminal buffering] Add support for the "mode 2026" That task is definitely way more complicated that it seemed to be 😅
textual
12
Python
71
_terminal_features.py
def from_autodetect(cls) -> TerminalSupportedFeatures: # Using macOS, but not using the default terminal: let's assume we're on iTerm2 iterm2_synchronized_update = ( platform.system() == "Darwin" and os.environ.get("TERM_PROGRAM", "") != "Apple_Terminal" ) # Detecting "mode2026" is more complicated, as we have to use an async request/response # machinery with the terminal emulator - for now we should just assume it's not supported. # See the use of the Mode and ModeReportParameter classes in the Textual code to check this machinery. mode2026_synchronized_update = False return cls( iterm2_synchronized_update=iterm2_synchronized_update, mode2026_synchronized_update=mode2026_synchronized_update, )
d14659c1a3760eade2dd3479b66eb8b2e7711db0
47
https://github.com/Textualize/textual.git
200
def from_autodetect(cls) -> TerminalSupportedFeatures: # Using macOS, but not using the default terminal: let's assume we're on iTerm2 iterm2_synchronized_update = ( platform.system() == "Darwin" and os.environ.get("TERM_PROGRAM", "") != "Apple_Terminal" ) # Detecting "mode2026" is more complicated, as we have to use an async request/response # machinery with the terminal emulator - for now we should just assume it's not supported. # See the use of the Mode and ModeReportParameter classes in the Textual code to check this machinery. mode2026_synchronized_update = False return cls( iterm2_synchronized_update=iterm2_synchronized_update, mode2026_synchronized_update=mode2026_synchronized_update, )
10
87
from_autodetect
32
0
3
12
python/ray/autoscaler/_private/gcp/config.py
130,471
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
16
Python
27
config.py
def _get_service_account(account, config, iam): project_id = config["provider"]["project_id"] full_name = "projects/{project_id}/serviceAccounts/{account}" "".format( project_id=project_id, account=account ) try: service_account = iam.projects().serviceAccounts().get(name=full_name).execute() except errors.HttpError as e: if e.resp.status != 404: raise service_account = None return service_account
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
79
https://github.com/ray-project/ray.git
88
def _get_service_account(account, config, iam): project_id = config["provider"]["project_id"] full_name = "projects/{project_id}/serviceAccounts/{account}" "".format( project_id=project_id, account=account ) try: service_account = iam.projects().serviceAccounts().get(name=full_name).execute() except errors.HttpError as e: if e.resp.status != 404: raise service_account = None return service_account
18
134
_get_service_account
27
0
1
5
pandas/tests/indexes/categorical/test_reindex.py
170,773
DEPR: Index.reindex with non-unique Index (#49485)
pandas
11
Python
23
test_reindex.py
def test_reindex_list_non_unique_unused_category(self): msg = "cannot reindex on an axis with duplicate labels" ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) with pytest.raises(ValueError, match=msg): ci.reindex(["a", "c"])
a215264d472e79c48433fa3a04fa492abc41e38d
56
https://github.com/pandas-dev/pandas.git
58
def test_reindex_list_non_unique_unused_category(self): msg = "cannot reindex on an axis with duplicate labels" ci = CategoricalIndex(["a", "b", "c", "a"], categorie
11
104
test_reindex_list_non_unique_unused_category
21
0
2
7
networkx/utils/misc.py
177,053
make lazy_import private and remove its internal use (#5878) * make lazy_import private and remove its internal use * add back the non-lazy imports of numpy to utils.misc
networkx
13
Python
18
misc.py
def choice(self, seq): import numpy as np if isinstance(self._rng, np.random.Generator): idx = self._rng.integers(0, len(seq)) else: idx = self._rng.randint(0, len(seq)) return seq[idx]
afef7ebde11cbe9d92d6a98319fe431a219d9f8c
62
https://github.com/networkx/networkx.git
70
def choice(self, seq): import numpy as np if isinstance(self._rng, np.random.Generator): idx = self.
13
96
choice
25
0
2
9
tests/sentry/api/endpoints/test_accept_organization_invite.py
89,542
chore(hybrid-cloud): use organization_slug in AcceptOrganizationInvite API (#42138)
sentry
11
Python
23
test_accept_organization_invite.py
def test_not_needs_authentication(self): self.login_as(self.user) om = OrganizationMember.objects.create( email="newuser@example.com", token="abc", organization=self.organization ) for path in self._get_paths([om.id, om.token]): resp = self.client.get(path) assert resp.status_code == 200 assert not resp.data["needsAuthentication"]
e94d7cd092d813d88c2216fca3ca6bd48e0747a3
77
https://github.com/getsentry/sentry.git
96
def test_not_needs_authentication(self): self.login_as(self.user)
19
123
test_not_needs_authentication
9
0
1
7
wagtail/documents/widgets.py
74,928
Reformat with black
wagtail
12
Python
9
widgets.py
def media(self): return forms.Media( js=[ versioned_static("wagtaildocs/js/document-chooser-modal.js"), versioned_static("wagtaildocs/js/document-chooser.js"), ] )
d10f15e55806c6944827d801cd9c2d53f5da4186
25
https://github.com/wagtail/wagtail.git
74
def media(self): return forms.Media( js=[ versioned_static("wagtaildocs/js/document-chooser-modal.js"), versioned_static("wagtaildocs/js/document-choose
6
43
media
31
0
5
13
glances/globals.py
70,088
Improve code quality
glances
18
Python
20
globals.py
def json_dumps_dictlist(data, item): if isinstance(data, dict): try: return json_dumps({item: data[item]}) except: return None elif isinstance(data, list): try: # Source: # http://stackoverflow.com/questions/4573875/python-get-index-of-dictionary-item-in-list return json_dumps({item: map(itemgetter(item), data)}) except: return None else: return None
004288eac5b4ffbcced7149113150d7cc42df28e
68
https://github.com/nicolargo/glances.git
140
def json_dumps_dictlist(data, item): if isinstance(
9
110
json_dumps_dictlist
42
0
1
5
pandas/tests/indexing/test_loc.py
163,363
BUG: can_hold_element size checks on ints/floats (#45273)
pandas
11
Python
33
test_loc.py
def test_loc_setitem_uint8_upcast(): # GH#26049 df = DataFrame([1, 2, 3, 4], columns=["col1"], dtype="uint8") df.loc[2, "col1"] = 300 # value that can't be held in uint8 # TODO: would be better to get uint16? expected = DataFrame([1, 2, 300, 4], columns=["col1"], dtype="int64") tm.assert_frame_equal(df, expected)
37c33438837cbb7e41a949b44a20c82b82289498
70
https://github.com/pandas-dev/pandas.git
60
def test_loc_setitem_uint8_upcast(): # GH#26049
9
111
test_loc_setitem_uint8_upcast
37
0
6
10
homeassistant/helpers/template.py
296,640
Make `this` variable available in template entities (#65201) * feat: make this variable available in template entities This makes the variable `this` available in template entities. It will simplify the use of self-referencing template entities. Because, without this, we have to repeat the entity id every time. If we can solve this without explicitly spelling the entity id, code can be re-used much better. As a side-effect, this will allow to use `variables`-like patterns, where attributes can be used as variables to calculate subsequent attributes or state. Example: ```yaml template: sensor: - name: test state: "{{ this.attributes.test }}" # not: "{{ state_attr('sensor.test', 'test' }}" attributes: test: "{{ now() }}" ``` * expose entity_id instead of this * add test * Refactor to expose this variable * Tweak repr dunder Co-authored-by: Erik <erik@montnemery.com>
core
15
Python
28
template.py
def __getitem__(self, item): if item in _COLLECTABLE_STATE_ATTRIBUTES: # _collect_state inlined here for performance if self._collect and _RENDER_INFO in self._hass.data: self._hass.data[_RENDER_INFO].entities.add(self._entity_id) return getattr(self._state, item) if item == "entity_id": return self._entity_id if item == "state_with_unit": return self.state_with_unit raise KeyError
d20a620590bc2df74eb2f9c0d35e1b7f12be5ded
72
https://github.com/home-assistant/core.git
142
def __getitem__(self, item): if item in _COLLECTABLE_STATE_ATTRIBUTES: # _collect_state inlined here for performance if self._collect and _RENDER_INFO in self._hass.data: self._hass.data[_RENDER_INFO].entities.add(self._entity_id) return getattr(self._state, item) if item == "entity_id": return self._entity_id if item == "state_with_unit": return self.state_with_
15
118
__getitem__
220
1
1
104
tests/components/hassio/test_sensor.py
295,258
Add auto_update property to supervisor and addon update entities (#69055)
core
16
Python
117
test_sensor.py
def mock_all(aioclient_mock, request): aioclient_mock.post("http://127.0.0.1/homeassistant/options", json={"result": "ok"}) aioclient_mock.get("http://127.0.0.1/supervisor/ping", json={"result": "ok"}) aioclient_mock.post("http://127.0.0.1/supervisor/options", json={"result": "ok"}) aioclient_mock.get( "http://127.0.0.1/info", json={ "result": "ok", "data": {"supervisor": "222", "homeassistant": "0.110.0", "hassos": None}, }, ) aioclient_mock.get( "http://127.0.0.1/store", json={ "result": "ok", "data": {"addons": [], "repositories": []}, }, ) aioclient_mock.get( "http://127.0.0.1/host/info", json={ "result": "ok", "data": { "result": "ok", "data": { "chassis": "vm", "operating_system": "Debian GNU/Linux 10 (buster)", "kernel": "4.19.0-6-amd64", }, }, }, ) aioclient_mock.get( "http://127.0.0.1/core/info", json={"result": "ok", "data": {"version_latest": "1.0.0", "version": "1.0.0"}}, ) aioclient_mock.get( "http://127.0.0.1/os/info", json={"result": "ok", "data": {"version_latest": "1.0.0", "version": "1.0.0"}}, ) aioclient_mock.get( "http://127.0.0.1/supervisor/info", json={ "result": "ok", "data": { "result": "ok", "version": "1.0.0", "version_latest": "1.0.0", "addons": [ { "name": "test", "state": "started", "slug": "test", "installed": True, "update_available": False, "version": "2.0.0", "version_latest": "2.0.1", "repository": "core", "url": "https://github.com/home-assistant/addons/test", }, { "name": "test2", "state": "stopped", "slug": "test2", "installed": True, "update_available": False, "version": "3.1.0", "version_latest": "3.2.0", "repository": "core", "url": "https://github.com", }, ], }, }, ) aioclient_mock.get( "http://127.0.0.1/addons/test/stats", json={ "result": "ok", "data": { "cpu_percent": 0.99, "memory_usage": 182611968, "memory_limit": 3977146368, "memory_percent": 4.59, "network_rx": 362570232, "network_tx": 82374138, "blk_read": 46010945536, "blk_write": 15051526144, }, }, ) aioclient_mock.get("http://127.0.0.1/addons/test/changelog", text="") aioclient_mock.get( "http://127.0.0.1/addons/test/info", json={"result": "ok", "data": {"auto_update": True}}, ) aioclient_mock.get("http://127.0.0.1/addons/test2/changelog", text="") aioclient_mock.get( "http://127.0.0.1/addons/test2/info", json={"result": "ok", "data": {"auto_update": False}}, ) aioclient_mock.get( "http://127.0.0.1/ingress/panels", json={"result": "ok", "data": {"panels": {}}} ) @pytest.mark.parametrize( "entity_id,expected", [ ("sensor.home_assistant_operating_system_version", "1.0.0"), ("sensor.home_assistant_operating_system_newest_version", "1.0.0"), ("sensor.test_version", "2.0.0"), ("sensor.test_newest_version", "2.0.1"), ("sensor.test2_version", "3.1.0"), ("sensor.test2_newest_version", "3.2.0"), ("sensor.test_cpu_percent", "0.99"), ("sensor.test2_cpu_percent", "unavailable"), ("sensor.test_memory_percent", "4.59"), ("sensor.test2_memory_percent", "unavailable"), ], )
78e4d7e1ca8f49068d8f63f6c80bb3048f5ad8e8
@pytest.mark.parametrize( "entity_id,expected", [ ("sensor.home_assistant_operating_system_version", "1.0.0"), ("sensor.home_assistant_operating_system_newest_version", "1.0.0"), ("sensor.test_version", "2.0.0"), ("sensor.test_newest_version", "2.0.1"), ("sensor.test2_version", "3.1.0"), ("sensor.test2_newest_version", "3.2.0"), ("sensor.test_cpu_percent", "0.99"), ("sensor.test2_cpu_percent", "unavailable"), ("sensor.test_memory_percent", "4.59"), ("sensor.test2_memory_percent", "unavailable"), ], )
470
https://github.com/home-assistant/core.git
1,473
def mock_all(aioclient_mock, request): aioclient_mock.post("http://127.0.0.1/homeassistant/options", json={"result": "ok"}) aioclient_mock.get("http://127.0.0.1/supervisor/ping", json={"result": "ok"}) aioclient_mock.post("http://127.0.0.1/supervisor/options", json={"result": "ok"}) aioclient_mock.get( "http://127.0.0.1/info", json={ "result": "ok", "data": {"supervisor": "222", "homeassistant": "0.110.0", "hassos": None}, }, ) aioclient_mock.get( "http://127.0.0.1/store", json={ "result": "ok", "data": {"addons": [], "repositories": []}, }, ) aioclient_mock.get( "http://127.0.0.1/host/info", json={ "result": "ok", "data": { "result": "ok", "data": { "chassis": "vm", "operating_system": "Debian GNU/Linux 10 (buster)", "kernel": "4.19.0-6-amd64", }, }, }, ) aioclient_mock.get( "http://127.0.0.1/core/info", json={"result": "ok", "data": {"version_latest": "1.0.0", "version": "1.0.0"}}, ) aioclient_mock.get( "http://127.0.0.1/os/info", json={"result": "ok", "data": {"version_latest": "1.0.0", "version": "1.0.0"}}, ) aioclient_mock.get( "http://127.0.0.1/supervisor/info", json={ "result": "ok", "data": { "result": "ok", "version": "1.0.0",
10
1,034
mock_all
19
0
2
5
pandas/_testing/asserters.py
167,587
TYP: misc return type annotations (#47558)
pandas
11
Python
19
asserters.py
def assert_is_sorted(seq) -> None: if isinstance(seq, (Index, Series)): seq = seq.values # sorting does not change precisions assert_numpy_array_equal(seq, np.sort(np.array(seq)))
f538568afc2c76c2d738d32e3544cf9fe6742960
41
https://github.com/pandas-dev/pandas.git
38
def assert_is_sorted(seq) -> None: if isinstance(seq, (Index, Series)): seq = seq.values # sorting does not change precisions
10
67
assert_is_sorted
37
0
2
7
django/db/backends/postgresql/operations.py
205,149
Refs #33476 -- Reformatted code with Black.
django
10
Python
32
operations.py
def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == "DateField": lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs params = (*lhs_params, *rhs_params) return "(interval '1 day' * (%s - %s))" % (lhs_sql, rhs_sql), params return super().subtract_temporals(internal_type, lhs, rhs)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
58
https://github.com/django/django.git
94
def subtract_temporals(self, internal_type, lhs, rhs):
11
90
subtract_temporals
22
0
5
10
src/transformers/training_args.py
36,752
update smddp api to v1.4.0 (#16371) * update smddp api to v1.4.0 * Update src/transformers/trainer.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/trainer.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * address comments * fix style * remove unused import * fix indent * disable style check for import * fix space Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
transformers
10
Python
15
training_args.py
def local_process_index(self): if is_torch_tpu_available(): return xm.get_local_ordinal() elif is_sagemaker_mp_enabled(): return smp.local_rank() elif is_sagemaker_dp_enabled(): return dist.get_rank() elif self.local_rank != -1: return self.local_rank return 0
81ac45f85c35244831f11f73c09ea10eee4f953a
53
https://github.com/huggingface/transformers.git
108
def local_process_index(self): if is_torch_tpu_available(): return xm.get_local_ordinal() elif is_sagemaker_mp_enabled(): return smp.local_rank()
11
92
local_process_index
125
0
17
39
nuitka/freezer/Standalone.py
178,615
macOS: Massive improvements for dependency scans * Was not recursively scanning dependencies and therefore could miss some of them. * Made internal functions private. * Make sure to pass proper "package" value to DLL scans, so it can include the needed directories. * Do not mutate information of DLL map, it is used later for other things and we now detect errors in that.
Nuitka
15
Python
73
Standalone.py
def getScanDirectories(package_name, original_dir): # Many cases, pylint: disable=too-many-branches cache_key = package_name, original_dir if cache_key in _scan_dir_cache: return _scan_dir_cache[cache_key] scan_dirs = [sys.prefix] if package_name is not None: scan_dirs.extend(_getPackageSpecificDLLDirectories(package_name)) if original_dir is not None: scan_dirs.append(original_dir) scan_dirs.extend(getSubDirectories(original_dir)) if ( Utils.isWin32Windows() and package_name is not None and package_name.isBelowNamespace("win32com") ): pywin32_dir = getPyWin32Dir() if pywin32_dir is not None: scan_dirs.append(pywin32_dir) for path_dir in os.environ["PATH"].split(";"): if not os.path.isdir(path_dir): continue if areSamePaths(path_dir, os.path.join(os.environ["SYSTEMROOT"])): continue if areSamePaths(path_dir, os.path.join(os.environ["SYSTEMROOT"], "System32")): continue if areSamePaths(path_dir, os.path.join(os.environ["SYSTEMROOT"], "SysWOW64")): continue scan_dirs.append(path_dir) result = [] # Remove directories that hold no DLLs. for scan_dir in scan_dirs: sys.stdout.flush() # These are useless, but plenty. if os.path.basename(scan_dir) == "__pycache__": continue scan_dir = getDirectoryRealPath(scan_dir) # No DLLs, no use. if not any(entry[1].lower().endswith(".dll") for entry in listDir(scan_dir)): continue result.append(os.path.realpath(scan_dir)) _scan_dir_cache[cache_key] = result return result
a470b75c8e045312ea22dbfb6c5fc6702835b31c
286
https://github.com/Nuitka/Nuitka.git
390
def getScanDirectories(package_name, original_dir): # Many cases, pylint: disable=too-many-branches cache_key = package_name, original_dir if cache_key in _scan_dir_cache: return _scan_dir_cache[cache_key] scan_dirs = [sys.prefix] if package_name is not None: scan_dirs.extend(_getPackageSpecificDLLDirectories(package_name)) if original_dir is not None: scan_dirs.append(original_dir) scan_dirs.extend(getSubDirectories(original_dir)) if ( Utils.isWin32Windows() and package_name is not None and package_name.isBelowNamespace("win32com") ): pywin32_dir = getPyWin32Dir() if pywin32_dir is not None: scan_dirs.append(pywin32_dir) for path_dir in os.environ["PATH"].split(";"): if not os.path.isdir(path_dir): continue if areSamePaths(path_dir, os.path.join(os.environ["SYSTEMROOT"])): continue if areSamePaths(path_dir, os.path.join(os.environ["SYSTEMROOT"], "System32")): continue if areSamePaths(path_dir, os.path.join(os.environ["SYSTEMROOT"], "SysWOW64")): continue scan_dirs.append(path_dir) result = [] # Remove directories that hold no DLLs. for scan_dir in scan_dirs: sys.stdout.flush() # These are useless, but plenty. if os.path.basename(scan_dir) == "__pycache__": continue scan_dir = getDirectoryRealPath(scan_dir) # No DLLs, no use. if not any(en
37
473
getScanDirectories
26
0
3
12
pytorch_lightning/plugins/environments/lsf_environment.py
241,622
Modify LSFEnvironment to use more reliable environment variable (#10825) Co-authored-by: thomas chaton <thomas@grid.ai> Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com> Co-authored-by: Adrian Wälchli <aedu.waelchli@gmail.com> Co-authored-by: Jirka Borovec <Borda@users.noreply.github.com>
lightning
12
Python
21
lsf_environment.py
def _get_node_rank(self) -> int: hosts = self._read_hosts() count: Dict[str, int] = {} for host in hosts: if host not in count: count[host] = len(count) return count[socket.gethostname()]
dbf1acd5a553ffc1546734be164cc89cef2b741d
55
https://github.com/Lightning-AI/lightning.git
87
def _get_node_rank(self) -> int: hosts = self._read_hosts() count: Dict[str, int] = {} for host in hosts: if host not in count: count[host] = len(count)
12
88
_get_node_rank
33
0
2
13
dask/dataframe/tests/test_dataframe.py
156,987
Add `DataFrame` and `Series` `median` method (#9483)
dask
18
Python
30
test_dataframe.py
def test_median_approximate(method): df = pd.DataFrame({"x": range(100), "y": range(100, 200)}) ddf = dd.from_pandas(df, npartitions=10) if PANDAS_GT_110: assert_eq( ddf.median_approximate(method=method), df.median(), atol=1, ) else: result = ddf.median_approximate(method=method) expected = df.median() assert ((result - expected).abs() < 1).all().compute()
142de2608df2494bf11e08038aadddb544b4500c
107
https://github.com/dask/dask.git
112
def test_median_approximate(method): df = pd.DataFrame({"x": range(100), "y": range(100, 200)}) ddf = dd.from_pandas(d
20
173
test_median_approximate
51
0
1
7
pandas/tests/series/test_constructors.py
163,316
BUG: Series(floatlike, dtype=intlike) inconsistent with non-ndarray data (#45142)
pandas
10
Python
38
test_constructors.py
def test_constructor_coerce_float_fail(self, any_int_numpy_dtype): # see gh-15832 # Updated: make sure we treat this list the same as we would treat # the equivalent ndarray vals = [1, 2, 3.5] res = Series(vals, dtype=any_int_numpy_dtype) expected = Series(np.array(vals), dtype=any_int_numpy_dtype) tm.assert_series_equal(res, expected) alt = Series(np.array(vals)) # i.e. we ignore the dtype kwd tm.assert_series_equal(alt, expected)
ad9d42a4c847eb9f341dd6743466a4bed70a0a6e
70
https://github.com/pandas-dev/pandas.git
115
def test_constructor_coerce_float_fail(self, any_int_numpy_dtype): # see gh-15832 # Updated: make sure we treat this list the same as we would treat # the equivalent ndarray vals = [1, 2, 3.5] res = Series(vals, dtype=any_in
13
107
test_constructor_coerce_float_fail
47
0
1
11
seaborn/tests/_core/test_subplots.py
41,583
Begin removal of data/layers as Plotter attributes
seaborn
11
Python
34
test_subplots.py
def test_row_faceted_x_paired(self): x = ["f", "s"] key = "a" order = list("abc") facet_spec = {"variables": {"row": key}, "row_order": order} s = Subplots({}, facet_spec, {"x": x}) assert s.n_subplots == len(order) * len(x) assert s.subplot_spec["ncols"] == len(x) assert s.subplot_spec["nrows"] == len(order) assert s.subplot_spec["sharex"] == "col" assert s.subplot_spec["sharey"] is True
6b61a26a462effaea1c80518e98185abb12174ed
107
https://github.com/mwaskom/seaborn.git
116
def test_row_faceted_x_paired(self): x = ["f", "s"] key = "a" order = list("abc") facet_spec = {"variables": {"row": key}, "row_order": order} s = Subplots({}, facet_spec, {"x": x}) assert s.n_subplots == len(order) * le
12
188
test_row_faceted_x_paired
196
0
2
59
python/ccxt/async_support/bittrex.py
17,716
1.72.42 [ci skip]
ccxt
18
Python
115
bittrex.py
async def fetch_markets(self, params={}): response = await self.publicGetMarkets(params) # # [ # { # "symbol":"LTC-BTC", # "baseCurrencySymbol":"LTC", # "quoteCurrencySymbol":"BTC", # "minTradeSize":"0.01686767", # "precision":8, # "status":"ONLINE", # "OFFLINE" # "createdAt":"2014-02-13T00:00:00Z" # }, # { # "symbol":"VDX-USDT", # "baseCurrencySymbol":"VDX", # "quoteCurrencySymbol":"USDT", # "minTradeSize":"300.00000000", # "precision":8, # "status":"ONLINE", # "OFFLINE" # "createdAt":"2019-05-23T00:41:21.843Z", # "notice":"USDT has swapped to an ERC20-based token as of August 5, 2019." # } # ] # result = [] for i in range(0, len(response)): market = response[i] baseId = self.safe_string(market, 'baseCurrencySymbol') quoteId = self.safe_string(market, 'quoteCurrencySymbol') base = self.safe_currency_code(baseId) quote = self.safe_currency_code(quoteId) status = self.safe_string(market, 'status') result.append({ 'id': self.safe_string(market, 'symbol'), 'symbol': base + '/' + quote, 'base': base, 'quote': quote, 'settle': None, 'baseId': baseId, 'quoteId': quoteId, 'settleId': None, 'type': 'spot', 'spot': True, 'margin': False, 'swap': False, 'future': False, 'option': False, 'active': (status == 'ONLINE'), 'contract': False, 'linear': None, 'inverse': None, 'contractSize': None, 'expiry': None, 'expiryDatetime': None, 'strike': None, 'optionType': None, 'precision': { 'price': self.safe_integer(market, 'precision', 8), 'amount': int('8'), }, 'limits': { 'leverage': { 'min': None, 'max': None, }, 'amount': { 'min': self.safe_number(market, 'minTradeSize'), 'max': None, }, 'price': { 'min': None, 'max': None, }, 'cost': { 'min': None, 'max': None, }, }, 'info': market, }) return result
101076f6f891ddfbcc5ba99a7858decf58565537
297
https://github.com/ccxt/ccxt.git
1,488
async def fetch_markets(self, params={}): response = await self.publicGetMarkets(params) # # [ # { # "symbol":"LTC-BTC", # "baseCurrencySymbol":"LTC", # "quot
21
537
fetch_markets
77
0
5
20
django/db/models/base.py
205,432
Refs #33476 -- Reformatted code with Black.
django
17
Python
61
base.py
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): if not self.pk: raise ValueError("get_next/get_previous cannot be used on unsaved objects.") op = "gt" if is_next else "lt" order = "" if is_next else "-" param = getattr(self, field.attname) q = Q((field.name, param), (f"pk__{op}", self.pk), _connector=Q.AND) q = Q(q, (f"{field.name}__{op}", param), _connector=Q.OR) qs = ( self.__class__._default_manager.using(self._state.db) .filter(**kwargs) .filter(q) .order_by("%s%s" % (order, field.name), "%spk" % order) ) try: return qs[0] except IndexError: raise self.DoesNotExist( "%s matching query does not exist." % self.__class__._meta.object_name )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
164
https://github.com/django/django.git
249
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): if not self.pk: raise ValueError("get_next/get_previous cannot be used on unsaved objects.") op = "gt" if is_next else "lt" order = "" if is_next else "-" param = getatt
30
275
_get_next_or_previous_by_FIELD
108
0
7
21
lib/gui/analysis/event_reader.py
100,305
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
faceswap
17
Python
75
event_reader.py
def _parse_outputs(self, event): serializer = get_serializer("json") struct = event.summary.value[0].tensor.string_val[0] config = serializer.unmarshal(struct)["config"] model_outputs = self._get_outputs(config) for side_outputs, side in zip(model_outputs, ("a", "b")): logger.debug("side: '%s', outputs: '%s'", side, side_outputs) layer_name = side_outputs[0][0] output_config = next(layer for layer in config["layers"] if layer["name"] == layer_name)["config"] layer_outputs = self._get_outputs(output_config) for output in layer_outputs: # Drill into sub-model to get the actual output names loss_name = output[0][0] if loss_name[-2:] not in ("_a", "_b"): # Rename losses to reflect the side output new_name = f"{loss_name.replace('_both', '')}_{side}" logger.debug("Renaming loss output from '%s' to '%s'", loss_name, new_name) loss_name = new_name if loss_name not in self._loss_labels: logger.debug("Adding loss name: '%s'", loss_name) self._loss_labels.append(loss_name) logger.debug("Collated loss labels: %s", self._loss_labels)
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
196
https://github.com/deepfakes/faceswap.git
386
def _parse_outputs(self, event): serializer = get_serializer("json") struct = event.summary.value[0].tensor.string_val[0] config = serializer.unmarshal(struct)["config"] model_outputs = self._get_outputs(config) for side_outputs, side in zip(model_outputs, ("a", "b")): logger.debug("side: '%s', outputs: '%s'", side, side_outputs) layer_name = side_outputs[0][0] output_config = next(layer for layer in config["layers"] if layer["name"] == layer_name)["config"] layer_outputs = self._get_outputs(output_config) for output in layer_outputs: # Drill into sub-model to get the actual output names loss_name = output[0][0] if loss_name[-2:] not in ("_a", "_b"): # Rename losses to reflect the side output new_name = f"{loss_nam
30
345
_parse_outputs
38
0
2
17
airflow/models/baseoperator.py
44,093
Add `--map-index` parameter to task CLI commands (#20980)
airflow
14
Python
34
baseoperator.py
def get_serialized_fields(cls): if cls.__serialized_fields is None: fields_dict = attr.fields_dict(cls) cls.__serialized_fields = frozenset( fields_dict.keys() - { 'dag', 'deps', 'inherits_from_dummy_operator', 'is_mapped', 'operator_extra_links', 'upstream_task_ids', 'task_type', } | {'template_fields'} ) return cls.__serialized_fields # TODO: Deprecate for Airflow 3.0 Chainable = Union[DependencyMixin, Sequence[DependencyMixin]]
8dabce8887f02216c1037be35e80c214edcbadfe
57
https://github.com/apache/airflow.git
275
def get_serialized_fields(cls): if cls.__serialized_fields is None: fields_dict = attr.fields_dict(cls) cls.__serialized_fields = frozenset( fields_dict.keys() - { 'dag', 'deps', 'inherits_from_dummy_operator', 'is_ma
11
116
get_serialized_fields
92
0
1
24
test/test_components.py
181,367
Create fewer temp files and make them consistently-named (#2758) * tmp files * components * changes * temp_file_sets * TempFileManager class * added file manager * internal functions * tests * formatting * changes * video tests * added tests for File * cheetah image * formatting * tests for upload button * temp files * formatting * changelog * fixed audio * tmp files * tmp files * gallery * deprecated type=file * fixing tests * patch os.path.exists * fixed test_video_postprocess_converts_to_playable_format * fixed tests * changelog * fix tests * formatting * added a download_if_needed * formatting * fixed download * fixed gallery demo * fix tests * version * fix for mac * consolidate
gradio
13
Python
47
test_components.py
def test_video_postprocess_converts_to_playable_format(self): test_file_dir = pathlib.Path(pathlib.Path(__file__).parent, "test_files") # This file has a playable container but not playable codec with tempfile.NamedTemporaryFile( suffix="bad_video.mp4", delete=False ) as tmp_not_playable_vid: bad_vid = str(test_file_dir / "bad_video_sample.mp4") assert not processing_utils.video_is_playable(bad_vid) shutil.copy(bad_vid, tmp_not_playable_vid.name) _ = gr.Video().postprocess(tmp_not_playable_vid.name) # The original video gets converted to .mp4 format full_path_to_output = pathlib.Path(tmp_not_playable_vid.name).with_suffix( ".mp4" ) assert processing_utils.video_is_playable(str(full_path_to_output)) # This file has a playable codec but not a playable container with tempfile.NamedTemporaryFile( suffix="playable_but_bad_container.mkv", delete=False ) as tmp_not_playable_vid: bad_vid = str(test_file_dir / "playable_but_bad_container.mkv") assert not processing_utils.video_is_playable(bad_vid) shutil.copy(bad_vid, tmp_not_playable_vid.name) _ = gr.Video().postprocess(tmp_not_playable_vid.name) full_path_to_output = pathlib.Path(tmp_not_playable_vid.name).with_suffix( ".mp4" ) assert processing_utils.video_is_playable(str(full_path_to_output))
20057aa946b6711ae2928eb0a81c8f00c3b0a7a9
184
https://github.com/gradio-app/gradio.git
357
def test_video_postprocess_converts_to_playable_format(self): test_file_dir = pathlib.Path(pathlib.Path(__file__).parent, "test_files") # This file has a playable container but not playable codec with tempfile.NamedTemporaryFile( suffix="bad_video.mp4", delete=False ) as tmp_not_playable_vid: bad_vid = str(test_file_dir / "bad_video_sample.mp4") assert not processing_utils.video_is_playable(bad_vid) shutil.copy(bad_vid, tmp_not_playable_vid.name) _ = gr.Video().postprocess(tmp_not_playable_vid.name) # The original video gets converted to .mp4 format full_path_to_output = pathlib.Path(tmp_not_playable_vid.name).with_suffix( ".mp4" ) assert processing_utils.video_is_playable(str(full_path_to_output)) # This file has a playable codec but not a playable container with tempfile.NamedTemporaryFile( suffix="playable_but_bad_container.mkv", delete=False ) as tmp_not_playable_vid: bad_vid = str(test_file_dir / "playable_but_bad_container.mkv") assert not processing_utils.video_is_playable(bad_vid) shutil.copy(bad_vid, tmp_not_playable_vid.name) _ = gr.Video().postprocess(tmp_not
25
314
test_video_postprocess_converts_to_playable_format
25
0
1
9
pandas/tests/io/parser/test_python_parser_only.py
166,606
REGR: index_col False and header=None inferring index names in some cases (#47139)
pandas
11
Python
22
test_python_parser_only.py
def test_index_col_false_and_header_none(python_parser_only):
    # GH#46955
    parser = python_parser_only
    data =
    result = parser.read_csv(StringIO(data), sep=",", header=None, index_col=False)
    expected = DataFrame({0: [0.5, 0.1], 1: [0.03, 0.2]})
    tm.assert_frame_equal(result, expected)
c50b745a99e644e7c7552f196f49f6269b79e258
72
https://github.com/pandas-dev/pandas.git
43
def test_index_col_false_and_header_none(python_parser_only): # GH#46955 parser = python
14
100
test_index_col_false_and_header_none
24
0
1
12
rllib/utils/replay_buffers/tests/test_prioritized_replay_buffer_replay_buffer_api.py
138,013
[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). (#28369)
ray
13
Python
21
test_prioritized_replay_buffer_replay_buffer_api.py
def _generate_data(self):
    return SampleBatch(
        {
            SampleBatch.T: [np.random.random((4,))],
            SampleBatch.ACTIONS: [np.random.choice([0, 1])],
            SampleBatch.REWARDS: [np.random.rand()],
            SampleBatch.OBS: [np.random.random((4,))],
            SampleBatch.NEXT_OBS: [np.random.random((4,))],
            SampleBatch.TERMINATEDS: [np.random.choice([False, True])],
            SampleBatch.TRUNCATEDS: [np.random.choice([False, False])],
        }
    )
8e680c483ce326cefc62e44f68ab1a6948b1c3d2
136
https://github.com/ray-project/ray.git
164
def _generate_data(self): return SampleBatch( { SampleBatch.T: [np.random.random((4,))], SampleBatch.ACTIONS: [np.random.choice([0, 1])], SampleBatch.REWARDS: [np.random.rand()], SampleBatch.OBS: [np.random.random((4,))], SampleBatch.NEXT_OBS: [np.random.random((4,))], SampleBatch.TERMINATEDS: [np.random.choice([False, True])], SampleBatch.TRUNCATEDS: [np.random.choice([False, False])], }
14
199
_generate_data
147
0
5
33
sklearn/metrics/cluster/_unsupervised.py
258,457
FIX Support integers in silhouette_score for precomputed distances (#22108) Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
scikit-learn
16
Python
105
_unsupervised.py
def silhouette_samples(X, labels, *, metric="euclidean", **kwds):
    X, labels = check_X_y(X, labels, accept_sparse=["csc", "csr"])

    # Check for non-zero diagonal entries in precomputed distance matrix
    if metric == "precomputed":
        error_msg = ValueError(
            "The precomputed distance matrix contains non-zero "
            "elements on the diagonal. Use np.fill_diagonal(X, 0)."
        )
        if X.dtype.kind == "f":
            atol = np.finfo(X.dtype).eps * 100
            if np.any(np.abs(np.diagonal(X)) > atol):
                raise ValueError(error_msg)
        elif np.any(np.diagonal(X) != 0):  # integral dtype
            raise ValueError(error_msg)

    le = LabelEncoder()
    labels = le.fit_transform(labels)
    n_samples = len(labels)
    label_freqs = np.bincount(labels)
    check_number_of_labels(len(le.classes_), n_samples)

    kwds["metric"] = metric
    reduce_func = functools.partial(
        _silhouette_reduce, labels=labels, label_freqs=label_freqs
    )
    results = zip(*pairwise_distances_chunked(X, reduce_func=reduce_func, **kwds))
    intra_clust_dists, inter_clust_dists = results
    intra_clust_dists = np.concatenate(intra_clust_dists)
    inter_clust_dists = np.concatenate(inter_clust_dists)

    denom = (label_freqs - 1).take(labels, mode="clip")
    with np.errstate(divide="ignore", invalid="ignore"):
        intra_clust_dists /= denom

    sil_samples = inter_clust_dists - intra_clust_dists
    with np.errstate(divide="ignore", invalid="ignore"):
        sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)

    # nan values are for clusters of size 1, and should be 0
    return np.nan_to_num(sil_samples)
e4015289e0eeb390190ce0d051cee756bc5ecb33
284
https://github.com/scikit-learn/scikit-learn.git
333
def silhouette_samples(X, labels, *, metric="euclidean", **kwds): X, labels = check_X_y(X, labels, accept_sparse=["csc", "csr"]) # Check for non-zero diagonal entries in precomputed distance matrix if metric == "precomputed": error_msg = ValueError( "The precomputed distance matrix contains non-zero " "elements on the diagonal. Use np.fill_diagonal(X, 0)." ) if X.dtype.kind == "f": atol = np.finfo(X.dtype).eps * 100 if np.any(np.abs(np.diagonal(X)) > atol): raise ValueError(error_msg) elif np.any(np.diagonal(X) != 0): # integral dtype raise ValueError(error_msg) le = LabelEncoder() labels
46
473
silhouette_samples
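A minimal usage sketch of the function shown in this record, via the public scikit-learn entry points (the KMeans setup and variable names below are illustrative, not part of the record):

import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score

X = np.random.RandomState(0).rand(100, 2)
labels = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X)

per_sample = silhouette_samples(X, labels)   # one score per point, in [-1, 1]
overall = silhouette_score(X, labels)        # mean of the per-sample scores
print(per_sample.shape, overall)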
44
0
1
16
tests/speech_encoder_decoder/test_modeling_flax_speech_encoder_decoder.py
37,466
use scale=1.0 in floats_tensor called in speech model testers (#17007) Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
transformers
11
Python
36
test_modeling_flax_speech_encoder_decoder.py
def get_pretrained_model_and_inputs(self):
    model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
        "facebook/wav2vec2-large-lv60", "gpt2-medium"
    )
    batch_size = 13
    input_values = floats_tensor([batch_size, 512], scale=1.0)
    attention_mask = random_attention_mask([batch_size, 512])
    decoder_input_ids = ids_tensor([batch_size, 4], model.config.decoder.vocab_size)
    decoder_attention_mask = random_attention_mask([batch_size, 4])
    inputs = {
        "inputs": input_values,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
    }
    return model, inputs
e952e049b4fbb5d3e2ba6a140f10fb4049dd8654
96
https://github.com/huggingface/transformers.git
168
def get_pretrained_model_and_inputs(self): model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/wav2vec2-large-lv60", "gpt2-medium" ) batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0)
18
150
get_pretrained_model_and_inputs
23
1
1
5
tests/sentry/relay/test_config.py
88,244
test: Add missing tests to sentry/relay/config/__init__.py [TET-504] (#41058) This PR increase code coverage from ~82% upto 98% in sentry/relay/config/__init__.py. codecov [report](https://app.codecov.io/gh/getsentry/sentry/pull/41058): <img width="1060" alt="image" src="https://user-images.githubusercontent.com/1374633/200516881-ed23da43-37df-4fc2-b291-310fc13f0ff5.png">
sentry
10
Python
22
test_config.py
def test_project_config_setattr(default_project):
    project_cfg = ProjectConfig(default_project)
    with pytest.raises(Exception) as exc_info:
        project_cfg.foo = "bar"
    assert str(exc_info.value) == "Trying to change read only ProjectConfig object"


@pytest.mark.django_db
4821e6846b007cce0092f43141e4b436beb2bedc
@pytest.mark.django_db
35
https://github.com/getsentry/sentry.git
37
def test_project_config_setattr(default_project): project_cfg = ProjectConfig(default_project) with pytest.raises(Ex
13
74
test_project_config_setattr
51
0
6
14
keras/distribute/distribute_coordinator_utils.py
270,220
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
11
Python
36
distribute_coordinator_utils.py
def _is_chief(self):
    if not self._cluster_spec or self._task_type in [
        _TaskType.CHIEF,
        _TaskType.EVALUATOR,
        None,
    ]:
        return True

    # If not local and chief not in the cluster_spec, use the first worker as
    # chief.
    if (
        _TaskType.CHIEF not in self._cluster_spec.jobs
        and self._task_type == _TaskType.WORKER
        and self._task_id == 0
    ):
        return True
    return False
84afc5193d38057e2e2badf9c889ea87d80d8fbf
63
https://github.com/keras-team/keras.git
195
def _is_chief(self): if not self._cluster_spec or self._task_type in [
10
99
_is_chief
14
0
1
6
erpnext/patches/v14_0/remove_india_localisation.py
69,051
fix: unlink custom fields in patch
erpnext
10
Python
14
remove_india_localisation.py
def unlink_custom_fields():
    frappe.db.set_value(
        "Custom Field",
        {"dt": "Item", "fieldname": "gst_hsn_code"},
        {"fieldtype": "Data", "options": ""},
    )
70c4117c22df5ab3efda0be842452cabc2f9aab9
33
https://github.com/frappe/erpnext.git
8
def unlink_custom_fields(): frappe.db.set_value( "Custom Field", {"dt": "Item", "fieldname": "gst_hsn_code"}, {"fieldtype": "Data", "options": ""}, )
4
66
unlink_custom_fields
76
0
1
15
sklearn/mixture/tests/test_gaussian_mixture.py
258,935
MNT Update black to stable version (#22474)
scikit-learn
12
Python
58
test_gaussian_mixture.py
def test_gaussian_suffstat_sk_spherical():
    # computing spherical covariance equals to the variance of one-dimension
    # data after flattening, n_components=1
    rng = np.random.RandomState(0)
    n_samples, n_features = 500, 2

    X = rng.rand(n_samples, n_features)
    X = X - X.mean()
    resp = np.ones((n_samples, 1))
    nk = np.array([n_samples])
    xk = X.mean()
    covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X, nk, xk, 0)
    covars_pred_spherical2 = np.dot(X.flatten().T, X.flatten()) / (
        n_features * n_samples
    )
    assert_almost_equal(covars_pred_spherical, covars_pred_spherical2)

    # check the precision computation
    precs_chol_pred = _compute_precision_cholesky(covars_pred_spherical, "spherical")
    assert_almost_equal(covars_pred_spherical, 1.0 / precs_chol_pred**2)
1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe
135
https://github.com/scikit-learn/scikit-learn.git
130
def test_gaussian_suffstat_sk_spherical(): # computing spherical covariance equals to the variance of one-dimension # data after flattening, n_components=1 rng = np.random.RandomState(0) n_samples, n_features = 500, 2 X = rng.rand(n_samples, n_features) X = X - X.mean() resp = np.ones((n_samples, 1)) nk = np.array([n_samples]) xk = X.mean() covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X, nk, xk, 0) covars_pred_spherical2 = np.dot(X.flatten().T, X.flatten()) / ( n_features * n_samples ) assert_almost_equal(covars_pred_spherical, covars_pred_spherical2) # check the precisio
24
211
test_gaussian_suffstat_sk_spherical
16
0
1
5
tests/utils/test_db.py
46,172
Enhance `db upgrade` args (#22102) Make `db upgrade` args more like `db downgrade`. ``` usage: airflow db upgrade [-h] [--from-revision FROM_REVISION] [--from-version FROM_VERSION] [-r REVISION] [-s] [-n VERSION] Upgrade the schema of the metadata database. To print but not execute commands, use option ``--show-sql-only``. If using options ``--from-revision`` or ``--from-version``, you must also use ``--show-sql-only``, because if actually *running* migrations, we should only migrate from the *current* revision. optional arguments: -h, --help show this help message and exit --from-revision FROM_REVISION (Optional) If generating sql, may supply a *from* revision --from-version FROM_VERSION (Optional) If generating sql, may supply a *from* version -r REVISION, --revision REVISION (Optional) The airflow revision to upgrade to. Note: must provide either `--revision` or `--version`. -s, --show-sql-only Don't actually run migrations; just print out sql scripts for offline migration. Required if using either `--from-version` or `--from-version`. -n VERSION, --version VERSION (Optional) The airflow version to upgrade to. Note: must provide either `--revision` or `--version`. ```
airflow
13
Python
15
test_db.py
def test_offline_upgrade_revision(self, from_revision, to_revision):
    with mock.patch('airflow.utils.db.settings.engine.dialect'):
        with mock.patch('alembic.command.upgrade') as mock_alembic_upgrade:
            upgradedb(from_revision=from_revision, to_revision=to_revision, show_sql_only=True)
    mock_alembic_upgrade.assert_called_once_with(mock.ANY, f"{from_revision}:{to_revision}", sql=True)
3452f7ce45607af04bade5e5edebaa18fdc13819
56
https://github.com/apache/airflow.git
55
def test_offline_upgrade_revision(self, from_revision, to_revision): with mock.p
12
101
test_offline_upgrade_revision
62
0
6
17
python/ray/tune/trial.py
139,298
[Tune] Logging of bad results dict keys (#23954) [User complains](https://discuss.ray.io/t/which-attributes-can-be-used-in-checkpoint-score-attr-when-using-tune-run/5826) about logging on failure of locating `checkpoint_score_attr ` in results dict not being informative. I propose that we log the actual results dict keys and extended stopping criteria, which imho should not log the whole result dict as this might contain tensors. Maybe there are other similar cases in tune library, in which I don't know my way around that good.
ray
17
Python
48
trial.py
def should_stop(self, result):
    if result.get(DONE):
        return True

    for criteria, stop_value in self.stopping_criterion.items():
        if criteria not in result:
            raise TuneError(
                "Stopping criteria {} not provided in result dict. Keys "
                "are {}.".format(criteria, list(result.keys()))
            )
        elif isinstance(criteria, dict):
            raise ValueError(
                "Stopping criteria is now flattened by default. "
                "Use forward slashes to nest values `key1/key2/key3`."
            )
        elif result[criteria] >= stop_value:
            return True
    return False
bc8742792cde5be62e22add01686a9c539e0f465
83
https://github.com/ray-project/ray.git
285
def should_stop(self, result): if result.get(DONE): return True for criteria, stop_value in self.stopping_criterion.items(): if criteria not in result: raise TuneError( "Stopping criteria {} not provided in result dict. Keys " "are {}.".format(criteria, list(result.keys()))
16
140
should_stop
32
0
1
4
mindsdb/migrations/versions/2022-08-25_6a54ba55872e_view_integration.py
116,204
removed integration_id from view (is already absent in db.py)
mindsdb
13
Python
26
2022-08-25_6a54ba55872e_view_integration.py
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('view', schema=None) as batch_op:
        batch_op.add_column(sa.Column('integration_id', sa.INTEGER(), autoincrement=False, nullable=False))
        batch_op.create_foreign_key('fk_integration_id', 'integration', ['integration_id'], ['id'])

    # ### end Alembic commands ###
12c18196c71dee5b16b7c8ddcfe9a0bdffbf8440
59
https://github.com/mindsdb/mindsdb.git
54
def downgrade(): # ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table('view', schema=None) as batch_op: batch_op.add_column(sa.Column('integration_id', sa.INTEGER(), autoincrement=False, nullable=False)) batch_op.create_foreign_key('fk_integrati
12
105
downgrade
66
0
1
19
python/ray/tune/tests/test_sync.py
139,805
[tune] Fast path for `sync_dir_between_nodes` (#24958) This PR adds a fast path for `sync_dir_between_nodes` that gets triggered if both source IP and target IP are the same. It uses simple `shutil` operations instead of packing and unpacking to improve performance.
ray
13
Python
35
test_sync.py
def _prepareDirForTestSyncRemoteTask(self):
    temp_source = tempfile.mkdtemp()
    temp_up_target = tempfile.mkdtemp()
    temp_down_target = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, temp_source)
    self.addCleanup(shutil.rmtree, temp_up_target)
    self.addCleanup(shutil.rmtree, temp_down_target)

    os.makedirs(os.path.join(temp_source, "A", "a1"))
    os.makedirs(os.path.join(temp_source, "A", "a2"))
    os.makedirs(os.path.join(temp_source, "B", "b1"))
    with open(os.path.join(temp_source, "level_0.txt"), "wt") as fp:
        fp.write("Level 0\n")
    with open(os.path.join(temp_source, "A", "level_a1.txt"), "wt") as fp:
        fp.write("Level A1\n")
    with open(os.path.join(temp_source, "A", "a1", "level_a2.txt"), "wt") as fp:
        fp.write("Level A2\n")
    with open(os.path.join(temp_source, "B", "level_b1.txt"), "wt") as fp:
        fp.write("Level B1\n")
    return temp_source, temp_up_target, temp_down_target
a25235a2c2d8882b7793cb7e5010764aa4adc999
221
https://github.com/ray-project/ray.git
207
def _prepareDirForTestSyncRemoteTask(self): temp_source = tempfile.mkdtemp() temp_up_target = tempfile.mkdtemp() temp_down_target = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, temp_source) self.addCleanup(shutil.rmtree, temp_up_target) self.addCleanup(shutil.rmtree, temp_down_target) os.makedirs(os.path.join(temp_source, "A", "a1"))
17
393
_prepareDirForTestSyncRemoteTask
84
0
1
10
sympy/printing/tests/test_julia.py
198,727
Add extra spaces in julia_code() printing.
sympy
11
Python
46
test_julia.py
def test_1_over_x_and_sqrt():
    # 1.0 and 0.5 would do something different in regular StrPrinter,
    # but these are exact in IEEE floating point so no different here.
    assert julia_code(1/x) == '1 ./ x'
    assert julia_code(x**-1) == julia_code(x**-1.0) == '1 ./ x'
    assert julia_code(1/sqrt(x)) == '1 ./ sqrt(x)'
    assert julia_code(x**-S.Half) == julia_code(x**-0.5) == '1 ./ sqrt(x)'
    assert julia_code(sqrt(x)) == 'sqrt(x)'
    assert julia_code(x**S.Half) == julia_code(x**0.5) == 'sqrt(x)'
    assert julia_code(1/pi) == '1 / pi'
    assert julia_code(pi**-1) == julia_code(pi**-1.0) == '1 / pi'
    assert julia_code(pi**-0.5) == '1 / sqrt(pi)'
4c22fad1d280711c2a868cabb7d2dbd90c1ac052
138
https://github.com/sympy/sympy.git
116
def test_1_over_x_and_sqrt(): # 1.0 and 0.5 would do something different in regular StrPrinter, # but these are exact in IEEE floating point so no different here. assert julia_code(1/x) == '1 ./ x' assert julia_code(x**-1) == julia_code(x**-1.0) == '1 ./ x' assert julia_code(1/sqrt(x)) == '1 ./ sqrt(x)' assert julia_code(x**-S.Half) == julia_code(x**-0.5) == '1 ./ sqrt(x)' assert julia_code(sqrt(x)) == 'sqrt(x)' assert julia_code(x**S.Half) == julia_code(x**0.
7
221
test_1_over_x_and_sqrt
39
0
1
20
tests/components/homekit_controller/test_lock.py
311,520
Improve homekit_controller tests (#65266)
core
11
Python
23
test_lock.py
async def test_switch_change_lock_state(hass, utcnow):
    helper = await setup_test_component(hass, create_lock_service)

    await hass.services.async_call(
        "lock", "lock", {"entity_id": "lock.testdevice"}, blocking=True
    )
    helper.async_assert_service_values(
        ServicesTypes.LOCK_MECHANISM,
        {
            CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 1,
        },
    )

    await hass.services.async_call(
        "lock", "unlock", {"entity_id": "lock.testdevice"}, blocking=True
    )
    helper.async_assert_service_values(
        ServicesTypes.LOCK_MECHANISM,
        {
            CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 0,
        },
    )
58b8c30221a6f6e5acbbe98b7e3298b03fb741f5
95
https://github.com/home-assistant/core.git
147
async def test_switch_change_lock_state(hass, utcnow): helper = await setup_test_component(hass, create_lock_service) await hass.services.async_call( "lock", "lock", {"entity_id": "lock.testdevice"}, blocking=True ) helper.async_assert_service_values(
14
158
test_switch_change_lock_state
21
0
2
7
tests/openbb_terminal/stocks/dark_pool_shorts/test_finra_model.py
285,321
Here we merge all API Refactor related branches (#2236) * Update api.py * Updated forex menu * refactor ycrv command * refactor ycrv command black * refactor ecocal command * Minh changes * Adding space to test pushing * title fix ecocal df * get economic calendar annotation * fix investingcom tests * refactor index command * refactor overview command * give defaults to wsj view function args * rename date args investincom * refacto bigmac command * fix ecocal typo * refactor rtps command * alphavantage gdp * alphavantage gdp per capita * alphavantage cpi * alphavantage tyld * alphavantage inf * refactor macro command * refactor macro command w helpers * refactor treasury command * fix macro on terminal * treasury labels * refactor maturities * update treasury maturities doc strings * refactor get economic calendar finhub * refactor map command api * display map filter choices * route economy api to performance map * route economy api to performance map * display group choices on valuation command * refactor performance and valuation commands * refactor spectrum model and view * add choices to spectrum controller * delete image after view * fix model tests finviz * fix finciz view tests * refactor futures * fix some tests * fix more tests * fix controller test * refactor fred series notes * update fred notes docstring * refacto fred series ids * fix pred and qa when empty datasets * refactor fred * uncomment stuff * refacto get series data * fix some tests * set defaults on args * refactor fred yield curve * black * fix spell and remove ecocal names * fix linting * linting * pylint fix * change dangerous defaults * Working through crypto fixes (#2256) * Working through crypto fixes * Continued adding crypto stuff * Added crypto overview * Added test fixes * Added fixtures * Fixed tests * Fixed charting issue * Removed broken APIs * Final adjustments * Added test fixes * map get groups and get ycrv countries into old api * exposed econdb helper funcs * remove helpers * refactor search indices * linting * refactor arg currency * pylint from currency * Started switching crpyto ascending to ascend * Merging * Portfolio model arguements, params, and docstring * Refactored for etf commands (#2292) * Refactored for etf commands * Fixed tests * Added load command * Fixed menu * Portfolio logic fixes * Added econometrics (#2260) * Added econometrics * Fixed tests * Simplified API * Added test fixes * Added test csv * Allowed examples to be loaded * Fund refactor (#2291) * Fund refactor * Changed fund_name and fund to name * Changed ascending to ascend * Stock menu refactoring for easier API usage (#2194) * Stocks refactoring for easier API usage * Linting * Refactor newly added features * Linting * Fixing tests * Refactor common files used by stocks menu * Fixing flake8 * Fix linting and tests * Linting * Fix flake8 * refactor insider_data * refactor mentions * refactor watchlist * refactor sentiment * refactor sentiment * fix yahoofinance tests * refactor load and candle * refactor get_news and display_news * refactor stocks.ins.act * candle default matplotlib * fix yahoofinance_view tests * fix ark model tests * fix ark view tests * fix business insider model * fix business insider view * refactor csimarket model * fix tests csi market model * update dd controller * fix get suppliers tests * fix dd controller tests * fix finhub tests * fix finviz tests * fix fmp tests * fix marketwatch tests * corrected argument keywords in test_bt_model * corrected argument keywords in test_bt_view * refactor fa 
controller * refactor marketwatch view * refactor gov controller * fix tests fa av * fix tests elect * fix dcf tests * fix polygon tests * fix fmp tests * fix quiverquant tests * fix yahoofinance fa tests * fix more fa tests * fix insider tests * fix more tests * fix more tests * fix options tests * fix stock gov tests * fix tests test_ba_controller * fix tests for test_finviz_compare_model.py * fixed 2 tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fix final tests * fixed tests * fixed tests * Fix tests * black * forgot to black tests * fixed tests * fixed tests * fixed tests * fixed tests * flakefix * Tests + code : Stocks / Discovery * fix tests * added recorder * fixed tests * fixed tests * black * black * remove unused imports * refactor display raw * sia dicts fix * pylint * linting * remove dangerous default * fix tests * fix beta model test * black * skip screener qa test * change sector path to sectors * update tests readme * fix metric defaults * black * substitute lost ticker * defaults cpic * another round on sia * refactor cramer * reduce default tweets on sentiment * refactor yf hist, corr, volume * arkorders default * refactor income, balance, cashflow * refacto scorr, screener, getfinnhub * refactor stockgrid * ibkr refactor * another round on stockgrid * add dividens end point * refactor discovery endpoints * update docstrings with similar input * refactor messages * refactor ba * refactor regioons * refactor twitter sentiment * refactor hist * refactor regions * give default to timeframe * refactor bunch of defaults and arg names * remove leftover imports * refactor vwap * let tests run * fix tests * fix stock tests * fix stockanalysis tests * flake * MYPY * Made important changes * added fixes * Fixed big issue * Added fixes to tests * fix qa tests * fix tests * fix 1 more test * last stocks failing * fix crypto test Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: montezdesousa <montezdesousa@gmail.com> Co-authored-by: hjoaquim <h.joaquim@campus.fct.unl.pt> Co-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com> Co-authored-by: colin99d <colin99delahunty@gmail.com> * fix portfolio tests * change period to window * update ca docstrings * refactor get_similar_companies func * Fixed * Update CI * Update CI 2 * Update CI 3 * Update dependencies Co-authored-by: colin99d <colin99delahunty@gmail.com> Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com> Co-authored-by: montezdesousa <montezdesousa@gmail.com> Co-authored-by: James Simmons <simmonsj330@gmail.com> Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com> Co-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com> Co-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com> Co-authored-by: northern-64bit <75195383+northern-64bit@users.noreply.github.com> Co-authored-by: hjoaquim <h.joaquim@campus.fct.unl.pt>
OpenBBTerminal
10
Python
19
test_finra_model.py
def test_getATSdata(recorder):
    df_ats, d_ats_reg = finra_model.getATSdata(
        limit=2,
        tier_ats="T1",
    )
    d_ats_reg = {k: round(v, 9) for k, v in d_ats_reg.items()}
    recorder.capture_list([df_ats, d_ats_reg])
9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b
54
https://github.com/OpenBB-finance/OpenBBTerminal.git
46
def test_getATSdata(recorder): df_ats, d_ats_reg = finra_model.getATSdata( limit=2, tier_ats="T1", ) d_ats_reg = {k: round(v, 9) for k, v in d_ats_reg.items()} recorder.capture_list([df
13
84
test_getATSdata
124
0
7
20
jax/_src/lax/linalg.py
122,481
Add support for Hessenberg and tridiagonal matrix reductions on CPU. * Implement jax.scipy.linalg.hessenberg and jax.lax.linalg.hessenberg. * Export what was previously jax._src.lax.linalg.orgqr as jax.lax.linalg.householder_product, since it can be used with some minor tweaks to compute the unitary matrix of a Hessenberg reduction. * Implement jax.lax.linalg.tridiagonal, which is the symmetric (Hermitian) equivalent of Hessenberg reduction. None of these primitives are differentiable at the moment. PiperOrigin-RevId: 487224934
jax
12
Python
68
linalg.py
def _triangular_solve_shape_rule(a, b, *, left_side=False, **unused_kwargs):
    if a.ndim < 2:
        msg = "triangular_solve requires a.ndim to be at least 2, got {}."
        raise TypeError(msg.format(a.ndim))
    if b.ndim < 2:
        msg = "triangular_solve requires b.ndim to be at least 2, got {}."
        raise TypeError(msg.format(b.ndim))
    if a.shape[-1] != a.shape[-2]:
        msg = ("triangular_solve requires the last two dimensions of a to be equal "
               "in size, got a.shape of {}.")
        raise TypeError(msg.format(a.shape))
    if a.shape[:-2] != b.shape[:-2]:
        msg = ("triangular_solve requires both arguments to have the same number "
               "of dimensions and equal batch dimensions, got {} and {}.")
        raise TypeError(msg.format(a.shape, b.shape))
    common_dim = -2 if left_side else -1
    if a.shape[-1] != b.shape[common_dim]:
        msg = "Incompatible shapes for arguments to triangular_solve: {} and {}."
        raise TypeError(msg.format(a.shape, b.shape))
    return b.shape
1cead779a3abd18066195919fc5693a15cfa9070
184
https://github.com/google/jax.git
180
def _triangular_solve_shape_rule(a, b, *, left_side=False, **unused_kwargs): if a.ndim < 2: msg = "triangular_solve requires a.ndim to be at least 2, got {}." raise TypeError(msg.format(a.ndim)) if b.ndim < 2: msg = "triangular_solve requires b.ndim to be at least 2, got {}." raise TypeError(msg.format(b.ndim)) if a.shape[-1] != a.shape[-2]: msg = ("triangular_solve requires the last two dimensions of a to be equal " "in size, got a.shape of {}.") raise TypeError(msg.format(a.shape)) if a.shape[:-2] != b.shape[:-2]: msg = ("triangular_solve requires both arguments to have the same number " "of dimensions and equal batch dimensions, got {} and {}.") raise TypeError(msg.format(a.shape, b.shape)) common_dim = -2 if left_side else -1 if a.shape[-1] != b.shape[common_dim]: msg = "Incompatible shapes for arguments to triangular_solve: {} and {}." raise TypeError(msg.format(a.shape, b.shape)) return b.shape
11
299
triangular_solve_shape_rule
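A sketch of shapes that satisfy the rule above, assuming the public jax.lax.linalg.triangular_solve entry point with left_side/lower keywords (the concrete arrays below are illustrative only):

import jax.numpy as jnp
from jax.lax.linalg import triangular_solve

a = jnp.tril(jnp.eye(3) + 0.1)   # square in its last two dimensions
b = jnp.ones((3, 5))             # with left_side=True, b.shape[-2] must equal a.shape[-1]

x = triangular_solve(a, b, left_side=True, lower=True)
print(x.shape)  # (3, 5) -- the rule returns b.shape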
366
0
18
68
dask/array/percentile.py
156,051
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
dask
16
Python
221
percentile.py
def merge_percentiles(finalq, qs, vals, method="lower", Ns=None, raise_on_nan=True):
    from dask.array.utils import array_safe

    if isinstance(finalq, Iterator):
        finalq = list(finalq)
    finalq = array_safe(finalq, like=finalq)
    qs = list(map(list, qs))
    vals = list(vals)
    if Ns is None:
        vals, Ns = zip(*vals)
    Ns = list(Ns)

    L = list(zip(*[(q, val, N) for q, val, N in zip(qs, vals, Ns) if N]))
    if not L:
        if raise_on_nan:
            raise ValueError("No non-trivial arrays found")
        return np.full(len(qs[0]) - 2, np.nan)
    qs, vals, Ns = L

    # TODO: Perform this check above in percentile once dtype checking is easy
    #       Here we silently change meaning
    if vals[0].dtype.name == "category":
        result = merge_percentiles(
            finalq, qs, [v.codes for v in vals], method, Ns, raise_on_nan
        )
        import pandas as pd

        return pd.Categorical.from_codes(result, vals[0].categories, vals[0].ordered)
    if not np.issubdtype(vals[0].dtype, np.number):
        method = "nearest"

    if len(vals) != len(qs) or len(Ns) != len(qs):
        raise ValueError("qs, vals, and Ns parameters must be the same length")

    # transform qs and Ns into number of observations between percentiles
    counts = []
    for q, N in zip(qs, Ns):
        count = np.empty_like(finalq, shape=len(q))
        count[1:] = np.diff(array_safe(q, like=q[0]))
        count[0] = q[0]
        count *= N
        counts.append(count)

    # Sort by calculated percentile values, then number of observations.
    combined_vals = np.concatenate(vals)
    combined_counts = array_safe(np.concatenate(counts), like=combined_vals)
    sort_order = np.argsort(combined_vals)
    combined_vals = np.take(combined_vals, sort_order)
    combined_counts = np.take(combined_counts, sort_order)

    # percentile-like, but scaled by total number of observations
    combined_q = np.cumsum(combined_counts)

    # rescale finalq percentiles to match combined_q
    finalq = array_safe(finalq, like=combined_vals)
    desired_q = finalq * sum(Ns)

    # the behavior of different interpolation methods should be
    # investigated further.
    if method == "linear":
        rv = np.interp(desired_q, combined_q, combined_vals)
    else:
        left = np.searchsorted(combined_q, desired_q, side="left")
        right = np.searchsorted(combined_q, desired_q, side="right") - 1
        np.minimum(left, len(combined_vals) - 1, left)  # don't exceed max index
        lower = np.minimum(left, right)
        upper = np.maximum(left, right)
        if method == "lower":
            rv = combined_vals[lower]
        elif method == "higher":
            rv = combined_vals[upper]
        elif method == "midpoint":
            rv = 0.5 * (combined_vals[lower] + combined_vals[upper])
        elif method == "nearest":
            lower_residual = np.abs(combined_q[lower] - desired_q)
            upper_residual = np.abs(combined_q[upper] - desired_q)
            mask = lower_residual > upper_residual
            index = lower  # alias; we no longer need lower
            index[mask] = upper[mask]
            rv = combined_vals[index]
        else:
            raise ValueError(
                "interpolation method can only be 'linear', 'lower', "
                "'higher', 'midpoint', or 'nearest'"
            )
    return rv
cccb9d8d8e33a891396b1275c2448c352ef40c27
615
https://github.com/dask/dask.git
834
def merge_percentiles(finalq, qs, vals, method="lower", Ns=None, raise_on_nan=True): from dask.array.utils import array_safe if isinstance(finalq, Iterator): finalq = list(finalq) finalq = array_safe(finalq, like=finalq) qs = list(map(list, qs)) vals = list(vals) if Ns is None: vals, Ns = zip(*vals) Ns = list(Ns) L = list(zip(*[(q, val, N) for q, val, N in zip(qs, vals, Ns) if N])) if not L: if raise_on_nan: raise ValueError("No non-trivial arrays found") return np.full(len(qs[0]) - 2, np.nan) qs, vals, Ns = L # TODO: Perform this check above in percentile once dtype checking is easy # Here we silently change meaning if vals[0].dtype.name == "category": result = merge_percentiles( finalq, qs, [v.codes for v in vals], method, Ns, raise_on_nan
70
963
merge_percentiles
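A hedged illustration of the merge step shown above, combining two per-partition percentile summaries with plain numpy inputs; it assumes merge_percentiles is importable from dask.array.percentile, and the sample numbers are made up:

import numpy as np
from dask.array.percentile import merge_percentiles

finalq = np.array([0, 25, 50, 75, 100], dtype=float)      # percentiles wanted overall
qs = [[0, 25, 50, 75, 100], [0, 25, 50, 75, 100]]         # percentiles computed per partition
vals = [np.array([1, 2, 3, 4, 5]), np.array([6, 7, 8, 9, 10])]
Ns = [100, 100]                                           # rows per partition

print(merge_percentiles(finalq, qs, vals, method="lower", Ns=Ns))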
56
0
5
12
awx/main/tasks/receptor.py
80,321
Refactored tasks.py to a package --- Added 3 new sub-package : awx.main.tasks.system , awx.main.tasks.jobs , awx.main.tasks.receptor --- Modified the functional tests and unit tests accordingly
awx
18
Python
42
receptor.py
def get_receptor_sockfile():
    with open(__RECEPTOR_CONF, 'r') as f:
        data = yaml.safe_load(f)
    for section in data:
        for entry_name, entry_data in section.items():
            if entry_name == 'control-service':
                if 'filename' in entry_data:
                    return entry_data['filename']
                else:
                    raise RuntimeError(f'Receptor conf {__RECEPTOR_CONF} control-service entry does not have a filename parameter')
    else:
        raise RuntimeError(f'Receptor conf {__RECEPTOR_CONF} does not have control-service entry needed to get sockfile')
a4a3ba65d736045733cb49430d7076b73aec23bb
69
https://github.com/ansible/awx.git
164
def get_receptor_sockfile(): with open(__RECEPTOR_CONF, 'r') as f: data = yaml.safe_load(f) for section in data: for entry_name, entry_data in sectio
12
130
get_receptor_sockfile
49
0
1
32
psutil/tests/test_linux.py
189,034
[Linux] Speedup `Process.full_memory_info()` (#2108) `Process.memory_full_info()` (reporting proecss USS/PSS/Swap memory) now reads ``/proc/pid/smaps_rollup`` instead of ``/proc/pids/smaps`` which makes it 5 times faster. Without patch: ``` ~/svn/psutil {linux-smaps-rollup}$ python3 -m timeit -s "import psutil; p = psutil.Process()" "p.memory_full_info()" 500 loops, best of 5: 518 usec per loop ``` With patch (5 times faster): ``` ~/svn/psutil {linux-smaps-rollup}$ python3 -m timeit -s "import psutil; p = psutil.Process()" "p.memory_full_info()" 2000 loops, best of 5: 111 usec per loop ``` ---- `make test-memleaks` suite, who heavily rely on `Process.memory_full_info()`, also received a nice speedup: Before patch: ``` $ make test-memleaks ---------------------------------------------------------------------- Ran 99 tests in 1.646s OK (skipped=9) SUCCESS ``` After patch: ``` $ make test-memleaks ---------------------------------------------------------------------- Ran 99 tests in 1.195s OK (skipped=9) SUCCESS ```
psutil
15
Python
42
test_linux.py
def test_parse_smaps_mocked(self):
    # See: https://github.com/giampaolo/psutil/issues/1222
    with mock_open_content(
            "/proc/%s/smaps" % os.getpid(),
            textwrap.dedent().encode()) as m:
        p = psutil._pslinux.Process(os.getpid())
        uss, pss, swap = p._parse_smaps()
        assert m.called
        self.assertEqual(uss, (6 + 7 + 14) * 1024)
        self.assertEqual(pss, 3 * 1024)
        self.assertEqual(swap, 15 * 1024)

# On PYPY file descriptors are not closed fast enough.
f1f299527634a425cb34b621d6201fa9172d3529
95
https://github.com/giampaolo/psutil.git
153
def test_parse_smaps_mocked(self): # See: https://github.com/giampaolo/psutil/issues/1222 with mock_open_content( "/proc/%s/smaps" % os.getpid(), textwrap.dedent().encode()) as m: p = psutil._pslinux.Process(os.getpid()) uss, pss, swap = p._parse_smaps() assert m.called
19
159
test_parse_smaps_mocked
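The commit message for this record describes the speedup of the public API that sits on top of the smaps parsing exercised above. A minimal sketch of that public call, assuming a Linux host where the pss/swap fields are populated:

import psutil

p = psutil.Process()
mem = p.memory_full_info()   # backed by /proc/<pid>/smaps_rollup on recent psutil
print(mem.uss, mem.pss, mem.swap)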
52
0
1
3
rllib/algorithms/algorithm_config.py
134,380
[RLlib] AlgorithmConfig: Next steps (volume 01); Algos, RolloutWorker, PolicyMap, WorkerSet use AlgorithmConfig objects under the hood. (#29395)
ray
8
Python
44
algorithm_config.py
def __getitem__(self, item):
    # TODO: Uncomment this once all algorithms use AlgorithmConfigs under the
    #  hood (as well as Ray Tune).
    # if log_once("algo_config_getitem"):
    #     logger.warning(
    #         "AlgorithmConfig objects should NOT be used as dict! "
    #         f"Try accessing `{item}` directly as a property."
    #     )
    item = self._translate_special_keys(item)
    return getattr(self, item)
182744bbd151c166b8028355eae12a5da63fb3cc
22
https://github.com/ray-project/ray.git
135
def __getitem__(self, item): # TODO: Uncomment this once all algorithms use AlgorithmConfigs under the # hood (as well as Ray Tune). # if log_once("algo_config_getitem"): # logger.warning( # "AlgorithmConfig
5
41
__getitem__
53
0
1
17
keras/saving/experimental/serialization_lib_test.py
280,516
Support lambdas in new serialization. PiperOrigin-RevId: 491075544
keras
12
Python
37
serialization_lib_test.py
def test_custom_fn(self):
    obj = {"activation": custom_fn}
    serialized, _, reserialized = self.roundtrip(
        obj, custom_objects={"custom_fn": custom_fn}
    )
    self.assertEqual(serialized, reserialized)

    # Test inside layer
    dense = keras.layers.Dense(1, activation=custom_fn)
    dense.build((None, 2))
    _, new_dense, _ = self.roundtrip(
        dense, custom_objects={"custom_fn": custom_fn}
    )
    x = tf.random.normal((2, 2))
    y1 = dense(x)
    _ = new_dense(x)
    new_dense.set_weights(dense.get_weights())
    y2 = new_dense(x)
    self.assertAllClose(y1, y2, atol=1e-5)
b3f12f1acc0a599e9aa61349c7e1b4e3afcdd932
140
https://github.com/keras-team/keras.git
179
def test_custom_fn(self): obj = {"activation": custom_fn} serialized, _, reserialized = self.roun
27
218
test_custom_fn
42
0
3
16
lib/matplotlib/axes/_base.py
107,475
DOC: More cleanup axes -> Axes
matplotlib
10
Python
29
_base.py
def tick_params(self, axis='both', **kwargs):
    _api.check_in_list(['x', 'y', 'both'], axis=axis)
    if axis in ['x', 'both']:
        xkw = dict(kwargs)
        xkw.pop('left', None)
        xkw.pop('right', None)
        xkw.pop('labelleft', None)
        xkw.pop('labelright', None)
        self.xaxis.set_tick_params(**xkw)
    if axis in ['y', 'both']:
        ykw = dict(kwargs)
        ykw.pop('top', None)
        ykw.pop('bottom', None)
        ykw.pop('labeltop', None)
        ykw.pop('labelbottom', None)
        self.yaxis.set_tick_params(**ykw)
f156db08eee54d285ab0fb4e031e48d078ba6aa3
141
https://github.com/matplotlib/matplotlib.git
202
def tick_params(self, axis='both', **kwargs): _api.check_in_list(['x', 'y', 'both'], axis=axis) if axis in ['x', 'both']: xkw = dict(kwargs) xkw.pop('left', None) xkw.pop('right', None) xkw.pop('labelleft', None) xkw.pop('labelright', None) self.xaxis.set_tick_params(**xkw) if axis in ['y', 'both']: ykw = dict(kwargs) ykw.pop('top', None) ykw.pop('bottom', None) ykw.pop('labeltop', None) ykw.pop('labelbottom', None) self.yaxis.set_tick_params(**ykw)
13
248
tick_params
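A short usage sketch of the method in this record, on a standard pyplot Axes (the plotted data is arbitrary). The method strips y-only keys before forwarding to the x-axis and vice versa, so one call can carry mixed parameters:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([1, 2, 3], [4, 5, 6])
ax.tick_params(axis='both', direction='in', labelsize=8, top=True, right=True)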
33
1
5
9
netbox/dcim/views.py
266,163
Check that device has a platform set before rendering napalm tab
netbox
12
Python
30
views.py
def render(self, instance):
    # Display NAPALM tabs only for devices which meet certain requirements
    if not (
        instance.status == 'active' and
        instance.primary_ip and
        instance.platform and
        instance.platform.napalm_driver
    ):
        return None
    return super().render(instance)


@register_model_view(Device, 'status')
977b79ecee4d1d8054c0fd9528c563376fe3bcd9
@register_model_view(Device, 'status')
42
https://github.com/netbox-community/netbox.git
114
def render(self, instance): # Display NAPALM tabs only for devices which meet certain requirements if not ( instance.status == 'active' and instance.primary_ip and instance.platform and instance.platform.napalm_driver
10
82
render
100
0
1
48
tests/sentry/api/endpoints/test_organization_metric_details.py
97,305
ref(metrics-indexer): Change bulk_record, record signatures (#32811) * ref(metrics-indexer): Change bulk_record, record signatures
sentry
15
Python
75
test_organization_metric_details.py
def test_same_entity_multiple_metric_ids(self):
    self.store_session(
        self.build_session(
            project_id=self.project.id,
            started=(time.time() // 60) * 60,
            status="ok",
            release="foobar@2.0",
            errors=2,
        )
    )
    response = self.get_response(
        self.organization.slug,
        "derived_metric.multiple_metrics",
    )
    assert response.status_code == 404
    assert response.json()["detail"] == (
        "Not all the requested metrics or the constituent metrics in "
        "['derived_metric.multiple_metrics'] have data in the dataset"
    )
    org_id = self.organization.id
    self._send_buckets(
        [
            {
                "org_id": org_id,
                "project_id": self.project.id,
                "metric_id": indexer.record(org_id, "metric_foo_doe"),
                "timestamp": int(time.time()),
                "tags": {
                    resolve_weak("release"): indexer.record(org_id, "foo"),
                },
                "type": "c",
                "value": 1,
                "retention_days": 90,
            },
        ],
        entity="metrics_counters",
    )
    response = self.get_success_response(
        self.organization.slug,
        "derived_metric.multiple_metrics",
    )
    assert response.data == {
        "name": "derived_metric.multiple_metrics",
        "type": "numeric",
        "operations": [],
        "unit": "percentage",
        "tags": [{"key": "release"}],
    }
f9dcd325304b37e3bff3869c1589354755e9300e
216
https://github.com/getsentry/sentry.git
680
def test_same_entity_multiple_metric_ids(self): self.store_session( self.build_session( project_id=self.project.id, started=(time.time() // 60) * 60, status="ok", release="foobar@2.0", errors=2, ) ) response = self.get_response( self.organization.slug, "derived_metric.multiple_metrics", ) assert response.status_code == 404 assert response.json()["detail"] == ( "Not all the requested m
27
382
test_same_entity_multiple_metric_ids
9
0
1
17
python/ray/tune/tests/test_integration_mlflow.py
132,509
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
9
Python
9
test_integration_mlflow.py
def testMlFlowMixinConfig(self):
    clear_env_vars()
    trial_config = {"par1": 4, "par2": 9.0}
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
126
https://github.com/ray-project/ray.git
22
def testMlFlowMixinConfig(self): clear_env_vars() trial_config = {"par1": 4, "par2": 9.0}
4
35
testMlFlowMixinConfig
50
0
4
16
manim/mobject/text/code_mobject.py
189,968
Always expand user when validating file-paths (#2885) The users are expanded when providing a file-path for the following mobjects: - CodeMobjects - ImageMobjects - SVGMobjects
manim
12
Python
44
code_mobject.py
def _ensure_valid_file(self):
    if self.file_name is None:
        raise Exception("Must specify file for Code")
    possible_paths = [
        os.path.join(os.path.join("assets", "codes"), self.file_name),
        os.path.expanduser(self.file_name),
    ]
    for path in possible_paths:
        if os.path.exists(path):
            self.file_path = path
            return
    error = (
        f"From: {os.getcwd()}, could not find {self.file_name} at either "
        + f"of these locations: {possible_paths}"
    )
    raise OSError(error)
7a801707b6dc373fae21af192214620ce0a53380
90
https://github.com/ManimCommunity/manim.git
202
def _ensure_valid_file(self): if self.file_name is None: raise Exception("Must specify file for Code") possible_paths = [ os.path.join(os.path.join("assets", "codes"), self.file_name), os.path.expanduser(self.file_name), ] for path in possible_paths: if os.path.exists(path): self.file_path = path return
14
170
_ensure_valid_file
32
0
1
8
mitmproxy/tools/web/master.py
250,659
clean up initialization mess We now manage the eventloop ourselves no matter which tool.
mitmproxy
11
Python
31
master.py
async def running(self):
    # Register tornado with the current event loop
    tornado.ioloop.IOLoop.current()

    # Add our web app.
    http_server = tornado.httpserver.HTTPServer(self.app)
    http_server.listen(self.options.web_port, self.options.web_host)

    self.log.info(
        f"Web server listening at http://{self.options.web_host}:{self.options.web_port}/",
    )

    return await super().running()
bbc65e5f375693ec32d50b805cdb369a607f0b67
61
https://github.com/mitmproxy/mitmproxy.git
98
async def running(self): # Register tornado with the current event loop tornado.ioloop.IOLoop.current() # Add our web app. http_server = tornado.httpserver.HTTPServer(self.app) http_server.listen(self.options.web_port, self.options.web_host) self.log.info( f"Web server listening at http://{self.options.web_host}:{self.options.web_port}/", ) r
17
122
running
34
1
1
9
dask/dataframe/io/tests/test_parquet.py
156,375
Change `to_parquet` default to `write_metadata_file=None` (#8988) * Refactor to_parquet A bit of refactoring before changing the default of `write_metadata_file` to `None` in `to_parquet`. - Simplify implementation - Don't include file metadata in `write_partition` calls if it's not needed - Everything needed to support implementing `write_metadata_file=None` as default *except* changing the value (to ensure tests pass). * Fixup failing parquet tests Most of the failures are due to divisions not being known by default anymore, since they're only known by default if a `_metadata` file is present. * Respond to feedback
dask
11
Python
29
test_parquet.py
def test_pathlib_path(tmpdir, engine):
    import pathlib

    df = pd.DataFrame({"x": [4, 5, 6, 1, 2, 3]})
    df.index.name = "index"
    ddf = dd.from_pandas(df, npartitions=2)
    path = pathlib.Path(str(tmpdir))
    ddf.to_parquet(path, engine=engine)
    ddf2 = dd.read_parquet(path, engine=engine)
    assert_eq(ddf, ddf2, check_divisions=False)


@FASTPARQUET_MARK
00572071d15e7e8cfc20d8342b00aabadf0d2102
@FASTPARQUET_MARK
95
https://github.com/dask/dask.git
56
def test_pathlib_path(tmpdir, engine): import pathlib df = pd.DataFrame({"x": [4, 5, 6, 1, 2, 3]}) df.index.name = "index" ddf = dd.from_pan
22
149
test_pathlib_path
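The commit message for this record notes that `write_metadata_file` now defaults to None, so divisions are only known when a `_metadata` file exists. A hedged sketch of the round-trip with that behaviour made explicit (the output path is arbitrary):

import pandas as pd
import dask.dataframe as dd

df = pd.DataFrame({"x": range(6)})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet("out.parquet", write_metadata_file=True)   # force writing _metadata
ddf2 = dd.read_parquet("out.parquet")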
88
0
3
48
mindsdb/integrations/handlers/lightwood_handler/lightwood_handler/lightwood_handler.py
116,604
file storage factory
mindsdb
12
Python
68
lightwood_handler.py
def _retrain(self, statement):
    model_name = statement.name.parts[-1]

    base_predictor_record = get_model_record(
        name=model_name,
        ml_handler_name='lightwood',
        company_id=self.company_id,
        active=True
    )
    if base_predictor_record is None:
        return Response(
            RESPONSE_TYPE.ERROR,
            error_message=f"Error: model '{model_name}' does not exists!"
        )

    new_predictor_record = db.Predictor(
        company_id=self.company_id,
        name=base_predictor_record.name,
        integration_id=base_predictor_record.integration_id,
        data_integration_id=base_predictor_record.data_integration_id,
        fetch_data_query=base_predictor_record.fetch_data_query,
        mindsdb_version=mindsdb_version,
        lightwood_version=lightwood_version,
        to_predict=base_predictor_record.to_predict,
        learn_args=base_predictor_record.learn_args,
        data={'name': base_predictor_record.name},
        active=False,
        status=PREDICTOR_STATUS.GENERATING
    )
    db.session.add(new_predictor_record)
    db.session.commit()

    data_handler_meta = self.handler_controller.get_by_id(base_predictor_record.data_integration_id)
    data_handler = self.handler_controller.get_handler(data_handler_meta['name'])
    ast = self.parser(base_predictor_record.fetch_data_query, dialect=self.dialect)
    response = data_handler.query(ast)
    if response.type == RESPONSE_TYPE.ERROR:
        return response

    new_predictor_record.training_data_columns_count = len(response.data_frame.columns)
    new_predictor_record.training_data_rows_count = len(response.data_frame)
    db.session.commit()

    predictor_storage = self.storage_factory(new_predictor_record.id)

    p = HandlerProcess(
        run_update,
        new_predictor_record.id,
        response.data_frame,
        self.company_id,
        str(predictor_storage.folder_path)
    )
    p.start()

    return Response(RESPONSE_TYPE.OK)
5f7345439b9317659e36eaa296aa7f7607ef7e79
284
https://github.com/mindsdb/mindsdb.git
528
def _retrain(self, statement): model_name = statement.name.parts[-1] base_predictor_record = get_model_record( name=model_name, ml_handler_name='lightwood', company_id=self.company_id, active=True ) if base_predictor_record is None: return Response( RESPONSE_TYPE.ERROR, error_message=f"Error: model '{model_name}' does not exists!" ) new_predictor_record = db.Predictor( company_id=self.company_id, name=base_predictor_record.name, integration_id=base_predictor_record.integration_id, data_integration_id=base_predictor_record.data_integration_id, fetch_data_query=base_predictor_record.fetch_data_query, mindsdb_version=mindsdb_version, lightwood_version=lightwood_version, to_predict=base_predictor_record.to_predict, learn_args=base_predictor_record.learn_args, data={'name': base_predictor_record.name}, active=False, status=PREDICTOR_STATUS.GENERATING ) db.session.add(new_predictor_record) db.session.commit() data_handler_meta = self.handler_controller.get_by_id(base_predictor_record.data_integration_id) data_handler = self.handler_controller.get_handler(data_handler_meta['name']) ast = self.parser(base_predictor_record.fetch_data_query, dialect=self.dialect) response = data_handler.query(ast) if response.type == RESPONSE_TYPE.ERROR: return response new_predictor_record.training_data_columns_count = len(response.data_frame.columns) new_predictor_record.training_data_rows_count = len(response.data_frame) db.session.commit() predictor_storage = self.storage_factory(new_predictor_record.id) p = HandlerProcess( run_update, new_predictor_record.id, response.data_frame, self.company_id, str(predictor_storage.folder_path) ) p.start()
58
435
_retrain
20
1
1
6
tests/sentry/issues/test_issue_occurrence.py
89,481
feat(issue_platform): Create a function to support saving an issue occurrence and related data (#42120) This introduces a function for saving an `IssueOccurrence` based on `IssueOccurrenceData`, as well as an related event data. In future prs it will also handle creating/updating the related issue and passing this information to eventstream.
sentry
10
Python
18
test_issue_occurrence.py
def test(self) -> None:
    occurrence = self.build_occurrence()
    occurrence.save(self.project.id)
    fetched_occurrence = IssueOccurrence.fetch(occurrence.id, self.project.id)
    assert fetched_occurrence is not None
    self.assert_occurrences_identical(occurrence, fetched_occurrence)


@region_silo_test
0f6923d5dc11e8552e6620dd14596f1d4efe630c
@region_silo_test
53
https://github.com/getsentry/sentry.git
53
def test(self) -> None: occurrence = self.build_occurrence() occurrence.save(self.project.id) fetched_occurrence = IssueOccurrence.fetch(occurrence.id, self.project.id) assert fetched_occurrence is not None self.assert_occurrences_identical(occurrence, fetched
12
87
test
28
0
1
8
wagtail/admin/panels.py
76,820
Deprecate EditHandler.bind_to
wagtail
11
Python
27
panels.py
def bind_to(self, model=None, instance=None, request=None, form=None):
    warn(
        "The %s.bind_to() method has been replaced by bind_to_model(model) and get_bound_panel(instance=instance, request=request, form=form)"
        % type(self).__name__,
        category=RemovedInWagtail219Warning,
        stacklevel=2,
    )
    return self.get_bound_panel(instance=instance, request=request, form=form)
defa7f8ed248354e57e90f3f5d31466de43c73f9
58
https://github.com/wagtail/wagtail.git
92
def bind_to(self, model=None, instance=None, request=None, form=None): warn( "The %s.bind_to() method has been replaced by bind_to_model(model) and get_bound_panel(instance=instance, request=request, form=form)" % type(self).__name__, category=RemovedInWagtail219Warning, stacklevel=2, ) return self.get_bound_panel(instance=insta
13
84
bind_to
25
0
1
10
onnx/backend/test/case/node/det.py
254,724
Use Python type annotations rather than comments (#3962) * These have been supported since Python 3.5. ONNX doesn't support Python < 3.6, so we can use the annotations. Diffs generated by https://pypi.org/project/com2ann/. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Remove MYPY conditional logic in gen_proto.py It breaks the type annotations and shouldn't be needed. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Get rid of MYPY bool from more scripts Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * move Descriptors class above where its referenced in type annotation Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fixes Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * remove extra blank line Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotations Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotation in gen_docs Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix Operators.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix TestCoverage.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix protoc-gen-mypy.py Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
onnx
12
Python
23
det.py
def export_2d() -> None:
    node = onnx.helper.make_node(
        'Det',
        inputs=['x'],
        outputs=['y'],
    )

    x = np.arange(4).reshape(2, 2).astype(np.float32)
    y = np.linalg.det(x)  # expect -2
    expect(node, inputs=[x], outputs=[y],
           name='test_det_2d')
83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd
81
https://github.com/onnx/onnx.git
107
def export_2d() -> None: node = onnx.helper.make_node( 'Det',
18
131
export_2d
29
0
2
12
tests/acceptance/test_project_settings_sampling.py
85,320
ref(sampling): Remove skip from acceptance tests - (#38440)
sentry
13
Python
27
test_project_settings_sampling.py
def store_outcomes(self, outcome, num_times=1):
    outcomes = []
    for _ in range(num_times):
        outcome_copy = outcome.copy()
        outcome_copy["timestamp"] = outcome_copy["timestamp"].strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        outcomes.append(outcome_copy)

    assert (
        requests.post(
            settings.SENTRY_SNUBA + "/tests/outcomes/insert", data=json.dumps(outcomes)
        ).status_code
        == 200
    )
d6bc97675d194a4d336ed4444d48c4a4fb349255
76
https://github.com/getsentry/sentry.git
137
def store_outcomes(self, outcome, num_times=1): outcomes = [] for _ in range(num_times): outcome_copy = outcome.copy() outcome_copy["timestamp"] = outcome_copy["timestamp"].strftime("%Y-%m-%dT%H:%M:%S.%fZ") outcomes.append(outcome_copy) assert ( re
19
125
store_outcomes
140
0
6
34
ppocr/data/imaug/rec_img_aug.py
23,840
add rec vitstr algorithm.
PaddleOCR
16
Python
76
rec_img_aug.py
def __call__(self, data):
    img = data['image']
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    image_shape = self.image_shape
    if self.padding:
        imgC, imgH, imgW = image_shape  # todo: change to 0 and modified image shape
        h = img.shape[0]
        w = img.shape[1]
        ratio = w / float(h)
        if math.ceil(imgH * ratio) > imgW:
            resized_w = imgW
        else:
            resized_w = int(math.ceil(imgH * ratio))
        resized_image = cv2.resize(img, (resized_w, imgH))
        norm_img = np.expand_dims(resized_image, -1)
        norm_img = norm_img.transpose((2, 0, 1))
        resized_image = norm_img.astype(np.float32) / 128. - 1.
        padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
        padding_im[:, :, 0:resized_w] = resized_image
        data['image'] = padding_im
        return data
    if self.resize_type == 'PIL':
        image_pil = Image.fromarray(np.uint8(img))
        img = image_pil.resize(self.image_shape, self.inter_type)
        img = np.array(img)
    if self.resize_type == 'OpenCV':
        img = cv2.resize(img, self.image_shape)
    norm_img = np.expand_dims(img, -1)
    norm_img = norm_img.transpose((2, 0, 1))
    if self.scale:
        data['image'] = norm_img.astype(np.float32) / 128. - 1.
    else:
        data['image'] = norm_img.astype(np.float32) / 255.
    return data
9816aebdb078ec14ca3141560f2431981c8948f5
321
https://github.com/PaddlePaddle/PaddleOCR.git
477
def __call__(self, data): img = data['image'] img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) image_shape = self.image_shape if self.padding: imgC, imgH, imgW = image_shape # todo: change to 0 and modified image shape h = img.shape[0] w = img.shape[1] ratio = w / float(h) if math.ceil(imgH * ratio) > imgW: resized_w = imgW else: resized_w = int(math.ceil(imgH * ratio)) resized_image = cv2.resize(img, (resized_w, imgH)) norm_img = np.expand_dims(resized_image, -1) norm_img = norm_img.transpose((2, 0, 1)) resized_image = norm_img.astype(np.float32) / 128. - 1. padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32) padding_im[:, :, 0:resized_w] = resized_image data['image'] = padding_im return data if self.resize_type == 'PIL': image_pil = Image.fromarray(np.uint8(img)) img = image_pil.resize(self.image_shape, self.inter_type)
40
496
__call__
163
0
11
74
lib/matplotlib/offsetbox.py
110,170
Don't pass unused xdescent to _get_packed_offsets. Instead of passing a list of (widths, xdescents) where xdescent is unused, just pass a list of widths. This helper is private so we just need to adjust the call sites and tests with no deprecation. This patch is preliminary work for some further cleanup on the offsetbox module.
matplotlib
16
Python
79
offsetbox.py
def _get_packed_offsets(widths, total, sep, mode="fixed"):
    r
    _api.check_in_list(["fixed", "expand", "equal"], mode=mode)

    if mode == "fixed":
        offsets_ = np.cumsum([0] + [w + sep for w in widths])
        offsets = offsets_[:-1]
        if total is None:
            total = offsets_[-1] - sep
        return total, offsets

    elif mode == "expand":
        # This is a bit of a hack to avoid a TypeError when *total*
        # is None and used in conjugation with tight layout.
        if total is None:
            total = 1
        if len(widths) > 1:
            sep = (total - sum(widths)) / (len(widths) - 1)
        else:
            sep = 0
        offsets_ = np.cumsum([0] + [w + sep for w in widths])
        offsets = offsets_[:-1]
        return total, offsets

    elif mode == "equal":
        maxh = max(widths)
        if total is None:
            if sep is None:
                raise ValueError("total and sep cannot both be None when "
                                 "using layout mode 'equal'")
            total = (maxh + sep) * len(widths)
        else:
            sep = total / len(widths) - maxh
        offsets = (maxh + sep) * np.arange(len(widths))
        return total, offsets
8ef4e017f8a95db8704728a5fffd2c0384afc525
231
https://github.com/matplotlib/matplotlib.git
420
def _get_packed_offsets(widths, total, sep, mode="fixed"): r _api.check_in_list(["fixed", "expand", "equal"], mode=mode) if mode == "fixed": offsets_ = np.cumsum([0] + [w + sep for w in widths]) offsets = offsets_[:-1] if total is None: total = offsets_[-1] - sep return total, offsets elif mode == "expand": # This is a bit of a hack to avoid a TypeError when *total* # is None and used in conjugation with tight layout. if total is None: total = 1 if len(widths) > 1: sep = (total - sum(widths)) / (len(widths) - 1) else: sep = 0 offsets_ = np.cumsum([0] + [w + sep for w in widths]) offsets = offsets_[:-1] return total, offsets elif mode == "equal": maxh = max(widths) if total is None: if sep is None: raise ValueError("total and sep cannot both be None when " "using la
18
381
_get_packed_offsets
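The matplotlib commit above simplifies `_get_packed_offsets` to take a plain list of widths. A minimal sketch of the "fixed" packing mode that function implements, cumulative offsets separated by a constant gap; the helper name is mine and this is not matplotlib API:

import numpy as np

def fixed_offsets(widths, sep):
    # Each box starts where the previous one ended, plus a fixed separator.
    offsets_ = np.cumsum([0] + [w + sep for w in widths])
    total = offsets_[-1] - sep  # the trailing separator is not part of the total extent
    return total, offsets_[:-1]

# Example: fixed_offsets([10, 20, 30], sep=5) returns (70, array([ 0, 15, 40])).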
21
0
1
11
tests/tpot_tests.py
181,790
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
tpot
9
Python
20
tpot_tests.py
def test_fit_7(): tpot_obj = TPOTRegressor( random_state=42, population_size=1, offspring_size=2, generations=1, verbosity=0 ) tpot_obj.fit(pretest_X_reg, pretest_y_reg) assert isinstance(tpot_obj._optimized_pipeline, creator.Individual) assert not (tpot_obj._start_datetime is None)
388616b6247ca4ea8de4e2f340d6206aee523541
57
https://github.com/EpistasisLab/tpot.git
74
def test_fit_7(): tpot_obj = TPOTRegressor( random_state=42, population_size=1, offspring_size=2, generations=1, verbosity=0 ) tpot_obj.fit(pretest_X_reg, pretest_y_reg) assert isinstance(tpot_obj._optimized_pipeline, creator.Individual) assert not (tpot_obj._start_datetime is None)
16
87
test_fit_7
22
0
1
9
tests/engines/test_gif.py
190,916
Reformat to 80 chars and mypy.ini
thumbor
13
Python
20
test_gif.py
def test_errors_on_gifsicle_should_not_raises_errors_when_output(self): engine = Engine(self.context) with open( join(STORAGE_PATH, "SmallFullColourGIF.gif"), "rb" ) as image_file: buffer = image_file.read() engine.load(buffer, ".gif") result = engine.run_gifsicle("--some-invalid-opt") expect(result).Not.to_be_null()
301124c5b377fa56b940d298900dbc5816dbc24e
61
https://github.com/thumbor/thumbor.git
85
def test_errors_on_gifsicle_should_not_raises_errors_when_output(self): engine = Engine(self.context) with open( join(STORAGE_PATH, "SmallFullColourGIF.gif"), "rb" ) as image_file: buffer = image_file.read() engine.load(buffer, ".gif") result = engine.run_gifsicle("--some-invalid-o
17
109
test_errors_on_gifsicle_should_not_raises_errors_when_output
20
0
2
3
scapy/contrib/http2.py
209,506
E275 - Missing whitespace after keyword (#3711) Co-authored-by: Alexander Aring <alex.aring@gmail.com> Co-authored-by: Anmol Sarma <me@anmolsarma.in> Co-authored-by: antoine.torre <torreantoine1@gmail.com> Co-authored-by: Antoine Vacher <devel@tigre-bleu.net> Co-authored-by: Arnaud Ebalard <arno@natisbad.org> Co-authored-by: atlowl <86038305+atlowl@users.noreply.github.com> Co-authored-by: Brian Bienvenu <brian@bienvenu.id.au> Co-authored-by: Chris Packham <chris.packham@alliedtelesis.co.nz> Co-authored-by: CQ <cq674350529@163.com> Co-authored-by: Daniel Collins <kinap@users.noreply.github.com> Co-authored-by: Federico Maggi <federico.maggi@gmail.com> Co-authored-by: Florian Maury <florian.maury@ssi.gouv.fr> Co-authored-by: _Frky <3105926+Frky@users.noreply.github.com> Co-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com> Co-authored-by: gpotter2 <gabriel@potter.fr> Co-authored-by: Guillaume Valadon <guillaume@valadon.net> Co-authored-by: Hao Zheng <haozheng10@gmail.com> Co-authored-by: Haresh Khandelwal <hareshkhandelwal@gmail.com> Co-authored-by: Harri Hämäläinen <hhamalai@iki.fi> Co-authored-by: hecke <hecke@naberius.de> Co-authored-by: Jan Romann <jan.romann@gmail.com> Co-authored-by: Jan Sebechlebsky <sebechlebskyjan@gmail.com> Co-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com> Co-authored-by: jockque <38525640+jockque@users.noreply.github.com> Co-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com> Co-authored-by: Keith Scott <kscott@mitre.org> Co-authored-by: Kfir Gollan <kfir@drivenets.com> Co-authored-by: Lars Munch <lars@segv.dk> Co-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com> Co-authored-by: Leonard Crestez <cdleonard@gmail.com> Co-authored-by: Marcel Patzlaff <mpatzlaff@benocs.com> Co-authored-by: Martijn Thé <martijnthe@users.noreply.github.com> Co-authored-by: Martine Lenders <authmillenon@gmail.com> Co-authored-by: Michael Farrell <micolous+git@gmail.com> Co-authored-by: Michał Mirosław <mirq-linux@rere.qmqm.pl> Co-authored-by: mkaliszan <mkaliszan@benocs.com> Co-authored-by: mtury <maxence.tury@ssi.gouv.fr> Co-authored-by: Neale Ranns <nranns@cisco.com> Co-authored-by: Octavian Toader <Octavian.Toader@belden.com> Co-authored-by: Peter Eisenlohr <peter@eisenlohr.org> Co-authored-by: Phil <phil@secdev.org> Co-authored-by: Pierre Lalet <pierre@droids-corp.org> Co-authored-by: Pierre Lorinquer <pierre.lorinquer@ssi.gouv.fr> Co-authored-by: piersoh <42040737+piersoh@users.noreply.github.com> Co-authored-by: plorinquer <pierre.lorinquer@ssi.gouv.fr> Co-authored-by: pvinci <pvinci@users.noreply.github.com> Co-authored-by: Rahul Jadhav <nyrahul@gmail.com> Co-authored-by: Robin Jarry <robin.jarry@6wind.com> Co-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com> Co-authored-by: rperez <rperez@debian> Co-authored-by: Sabrina Dubroca <sd@queasysnail.net> Co-authored-by: Sebastian Baar <sebastian.baar@gmx.de> Co-authored-by: sebastien mainand <sebastien.mainand@ssi.gouv.fr> Co-authored-by: smehner1 <smehner1@gmail.com> Co-authored-by: speakinghedge <hecke@naberius.de> Co-authored-by: Steven Van Acker <steven@singularity.be> Co-authored-by: Thomas Faivre <thomas.faivre@6wind.com> Co-authored-by: Tran Tien Dat <peter.trantiendat@gmail.com> Co-authored-by: Wael Mahlous <wael.mahlous@gmail.com> Co-authored-by: waeva <74464394+waeva@users.noreply.github.com> Co-authored-by: Alexander Aring <alex.aring@gmail.com> Co-authored-by: Anmol Sarma <me@anmolsarma.in> Co-authored-by: antoine.torre 
<torreantoine1@gmail.com> Co-authored-by: Antoine Vacher <devel@tigre-bleu.net> Co-authored-by: Arnaud Ebalard <arno@natisbad.org> Co-authored-by: atlowl <86038305+atlowl@users.noreply.github.com> Co-authored-by: Brian Bienvenu <brian@bienvenu.id.au> Co-authored-by: Chris Packham <chris.packham@alliedtelesis.co.nz> Co-authored-by: CQ <cq674350529@163.com> Co-authored-by: Daniel Collins <kinap@users.noreply.github.com> Co-authored-by: Federico Maggi <federico.maggi@gmail.com> Co-authored-by: Florian Maury <florian.maury@ssi.gouv.fr> Co-authored-by: _Frky <3105926+Frky@users.noreply.github.com> Co-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com> Co-authored-by: gpotter2 <gabriel@potter.fr> Co-authored-by: Guillaume Valadon <guillaume@valadon.net> Co-authored-by: Hao Zheng <haozheng10@gmail.com> Co-authored-by: Haresh Khandelwal <hareshkhandelwal@gmail.com> Co-authored-by: Harri Hämäläinen <hhamalai@iki.fi> Co-authored-by: hecke <hecke@naberius.de> Co-authored-by: Jan Romann <jan.romann@gmail.com> Co-authored-by: Jan Sebechlebsky <sebechlebskyjan@gmail.com> Co-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com> Co-authored-by: jockque <38525640+jockque@users.noreply.github.com> Co-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com> Co-authored-by: Keith Scott <kscott@mitre.org> Co-authored-by: Kfir Gollan <kfir@drivenets.com> Co-authored-by: Lars Munch <lars@segv.dk> Co-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com> Co-authored-by: Leonard Crestez <cdleonard@gmail.com> Co-authored-by: Marcel Patzlaff <mpatzlaff@benocs.com> Co-authored-by: Martijn Thé <martijnthe@users.noreply.github.com> Co-authored-by: Martine Lenders <authmillenon@gmail.com> Co-authored-by: Michael Farrell <micolous+git@gmail.com> Co-authored-by: Michał Mirosław <mirq-linux@rere.qmqm.pl> Co-authored-by: mkaliszan <mkaliszan@benocs.com> Co-authored-by: mtury <maxence.tury@ssi.gouv.fr> Co-authored-by: Neale Ranns <nranns@cisco.com> Co-authored-by: Octavian Toader <Octavian.Toader@belden.com> Co-authored-by: Peter Eisenlohr <peter@eisenlohr.org> Co-authored-by: Phil <phil@secdev.org> Co-authored-by: Pierre Lalet <pierre@droids-corp.org> Co-authored-by: Pierre Lorinquer <pierre.lorinquer@ssi.gouv.fr> Co-authored-by: piersoh <42040737+piersoh@users.noreply.github.com> Co-authored-by: pvinci <pvinci@users.noreply.github.com> Co-authored-by: Rahul Jadhav <nyrahul@gmail.com> Co-authored-by: Robin Jarry <robin.jarry@6wind.com> Co-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com> Co-authored-by: rperez <rperez@debian> Co-authored-by: Sabrina Dubroca <sd@queasysnail.net> Co-authored-by: Sebastian Baar <sebastian.baar@gmx.de> Co-authored-by: sebastien mainand <sebastien.mainand@ssi.gouv.fr> Co-authored-by: smehner1 <smehner1@gmail.com> Co-authored-by: Steven Van Acker <steven@singularity.be> Co-authored-by: Thomas Faivre <thomas.faivre@6wind.com> Co-authored-by: Tran Tien Dat <peter.trantiendat@gmail.com> Co-authored-by: Wael Mahlous <wael.mahlous@gmail.com> Co-authored-by: waeva <74464394+waeva@users.noreply.github.com>
scapy
10
Python
19
http2.py
def h2i(self, pkt, x): # type: (Optional[packet.Packet], Optional[int]) -> Optional[int] assert not isinstance(x, six.integer_types) or x >= 0 return x
08b1f9d67c8e716fd44036a027bdc90dcb9fcfdf
26
https://github.com/secdev/scapy.git
48
def h2i(self, pkt, x): # type: (Optional[packet.Packet], Optional[int]) -> Optional[int] assert not isinstance(x, six.integer_types) or x >= 0 return
7
42
h2i
21
0
3
9
mindsdb/integrations/handlers/crate_handler/crate_handler.py
117,345
ML handler supbrocess (#3377) * log -> logger dividing components: app initialize parse args set env.MINDSDB_CONFIG_PATH config requiers env.MINDSDB_CONFIG_PATH sets env.MINDSDB_DB_CON Config() - makes initialization log uses config initialize_log - makes initialization database uses env.MINDSDB_DB_CON have init() method file storage uses config * partial sync for model storage interfaces * ml handler in subprocess interface * fix delete model * fix: model with error in success status * fix: remove hf predictor * fix pg handler * MLHandlerPersistWrapper keeps wrapper process opened * predictor with error keeps 'success' status #3362 * lock for sending tasks to subprocess one by one * check target of predictor before run learn in subproccess * fix check target * fix: json_ai override and problem definition generation * fix None case * folder for ml handler tests * fix: add timeseries_settings key to learn_args * fixes in lw_handler * fix: del join_learn_process * tests for LW handler * finish unit test for LW * changes in tests: - set-project -> to base class - return of ml handler is dataframe - draft for project structure test * merge from staging * create_validation method to check learn params before send to subprocess fixes of HF fixed version of transformers in HF requirements Co-authored-by: Patricio Cerda Mardini <pcerdam@live.com>
mindsdb
12
Python
20
crate_handler.py
def disconnect(self): if self.is_connected is False: return try: self.connection.close() self.is_connected=False except Exception as e: log.logger.error(f"Error while disconnecting to CrateDB, {e}") return
9ce5a21dd6359fd7e8ebf78051ce9e97bd195ec9
43
https://github.com/mindsdb/mindsdb.git
101
def disconnect(self): if self.is_connected is False: return try: self.connection.close() self.is_connected=False except Exception as e: log.logger.error(f"Error while disconnecting to CrateDB, {e}")
10
80
disconnect
290
0
1
92
saleor/graphql/product/tests/queries/test_products_query.py
29,308
Split test_product.py and test_variant.py into multiple files (#11173) * Split test_product.py into multiple files * Split test_variant.py into multiple files
saleor
13
Python
89
test_products_query.py
def test_sort_products(user_api_client, product, channel_USD): product.updated_at = datetime.utcnow() product.save() product.pk = None product.slug = "second-product" product.updated_at = datetime.utcnow() product.save() ProductChannelListing.objects.create( product=product, channel=channel_USD, is_published=True, visible_in_listings=True, ) variant = ProductVariant.objects.create(product=product, sku="1234") ProductVariantChannelListing.objects.create( variant=variant, channel=channel_USD, price_amount=Decimal(20), cost_price_amount=Decimal(2), currency=channel_USD.currency_code, ) product.pk = None product.slug = "third-product" product.updated_at = datetime.utcnow() product.save() ProductChannelListing.objects.create( product=product, channel=channel_USD, is_published=True, visible_in_listings=True, ) variant_second = ProductVariant.objects.create(product=product, sku="12345") ProductVariantChannelListing.objects.create( variant=variant_second, channel=channel_USD, currency=channel_USD.currency_code, ) variables = {"channel": channel_USD.slug} query = SORT_PRODUCTS_QUERY # Test sorting by PRICE, ascending sort_by = "{field: PRICE, direction: ASC}" asc_price_query = query % {"sort_by_product_order": sort_by} response = user_api_client.post_graphql(asc_price_query, variables) content = get_graphql_content(response) edges = content["data"]["products"]["edges"] assert len(edges) == 2 price1 = edges[0]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][ "amount" ] price2 = edges[1]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][ "amount" ] assert price1 < price2 # Test sorting by PRICE, descending sort_by = "{field: PRICE, direction:DESC}" desc_price_query = query % {"sort_by_product_order": sort_by} response = user_api_client.post_graphql(desc_price_query, variables) content = get_graphql_content(response) edges = content["data"]["products"]["edges"] price1 = edges[0]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][ "amount" ] price2 = edges[1]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][ "amount" ] assert price1 > price2 # Test sorting by MINIMAL_PRICE, ascending sort_by = "{field: MINIMAL_PRICE, direction:ASC}" asc_price_query = query % {"sort_by_product_order": sort_by} response = user_api_client.post_graphql(asc_price_query, variables) content = get_graphql_content(response) edges = content["data"]["products"]["edges"] price1 = edges[0]["node"]["pricing"]["priceRange"]["start"]["gross"]["amount"] price2 = edges[1]["node"]["pricing"]["priceRange"]["start"]["gross"]["amount"] assert price1 < price2 # Test sorting by MINIMAL_PRICE, descending sort_by = "{field: MINIMAL_PRICE, direction:DESC}" desc_price_query = query % {"sort_by_product_order": sort_by} response = user_api_client.post_graphql(desc_price_query, variables) content = get_graphql_content(response) edges = content["data"]["products"]["edges"] price1 = edges[0]["node"]["pricing"]["priceRange"]["start"]["gross"]["amount"] price2 = edges[1]["node"]["pricing"]["priceRange"]["start"]["gross"]["amount"] assert price1 > price2 # Test sorting by DATE, ascending asc_date_query = query % {"sort_by_product_order": "{field: DATE, direction:ASC}"} response = user_api_client.post_graphql(asc_date_query, variables) content = get_graphql_content(response) date_0 = content["data"]["products"]["edges"][0]["node"]["updatedAt"] date_1 = content["data"]["products"]["edges"][1]["node"]["updatedAt"] assert parse_datetime(date_0) < parse_datetime(date_1) # Test sorting by DATE, descending 
desc_date_query = query % {"sort_by_product_order": "{field: DATE, direction:DESC}"} response = user_api_client.post_graphql(desc_date_query, variables) content = get_graphql_content(response) date_0 = content["data"]["products"]["edges"][0]["node"]["updatedAt"] date_1 = content["data"]["products"]["edges"][1]["node"]["updatedAt"] assert parse_datetime(date_0) > parse_datetime(date_1)
d90be220d6b687d08153934a51354011a3cb5ca1
746
https://github.com/saleor/saleor.git
660
def test_sort_products(user_api_client, product, channel_USD): product.updated_at = datetime.utcnow() product.save() product.pk = None product.slug = "second-product" product.updated_at = datetime.utcnow() product.save() ProductChannelListing.objects.create( product=product, channel=channel_USD, is_published=True, visible_in_listings=True, ) variant = ProductVariant.objects.create(product=product, sku="1234") ProductVariantChannelListing.objects.create( variant=variant, channel=channel_USD, price_amount=Decimal(20), cost_price_amount=Decimal(2), currency=channel_USD.currency_code, ) product.pk = None product.slug = "third-product" product.updated_at = datetime.utcnow() product.save() ProductChannelListing.objects.create( product=product, channel=channel_USD, is_published=True, visible_in_listings=True, ) variant_second = ProductVariant.objects.create(product=product, sku="12345") ProductVariantChannelListing.objects.create( variant=variant_second, channel=channel_USD, currency=channel_USD.currency_code, ) variables = {"channel": channel_USD.slug} query = SORT_PRODUCTS_QUERY # Test sorting by PRICE, ascending sort_by = "{field: PRICE, direction: ASC}" asc_price_query = query % {"sort_by_product_order": sort_by} response = user_api_client.post_graphql(asc_price_query, variables) content = get_graphql_content(response) edges = content["data"]["products"]["edges"] assert len(edges) == 2 price1 = edges[0]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][ "amount" ] price2 = edges[1]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][ "amount" ] assert price1 < price2 # Test sorting by PRICE, descending sort_by = "{field: PRICE, direction:DESC}" desc_price_query = query % {"sort_by_product_order": sort_by} response = user_api_client.post_graphql(desc_price_query, variables) content = get_graphql_content(response) edges = content["data"]["products"]["edges"] price1 = edges[0]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][ "amount" ] price2 = edges[1]["node"]["pricing"]["priceRangeUndiscounted"]["start"]["gross"][ "amount" ] assert price1 > price2 # Test sorting by MINIMAL_PRICE, ascending sort_by = "{field: MINIMAL_PRICE, direction:ASC}" asc_price_query = query % {"sort_by_product_order": sort_by} response = user_api_client.post_graphql(asc_price_query, variables) content = get_graphql_content(response) edges = content["data"]["products"]["edges"] price1 = edges[0]["node"]["pricing"]["priceRange"]["start"]["gross"]["amount"] price2 = edges[1]["node"]["pricing"]["priceRange"]["start"]["gross"]["amount"] assert price1 < price2 # Test sorting by MINIMAL_PRICE, descending sort_by = "{field: MINIMAL_PRICE, direction:DESC}" desc_price_query = query % {"sort_by_product_order": sort_by} response = user_api_client.post_graphql(desc_price_query, variables) content = get_graphql_content(response) edges = content["data"]["products"]["edges"] price1 = edges[0]["node"]["pricing"]["priceRange"]["start"]["gross"]["amount"] price2 = edges[1]["node"]["pricing"]["priceRange"]["start"]["gross"]["amount"] assert price1 > price2 # Test sorting by DATE, ascending asc_date_query = query % {"sort_by_product_order": "{field: DATE, direction:ASC}"} response = user_api_client.post_graphql(asc_date_query, variables) content = get_graphql_content(response) date_0 = conten
45
1,297
test_sort_products
107
0
1
27
tests/rest/client/test_relations.py
247,699
Refactor relations tests (#12232) * Moves the relation pagination tests to a separate class. * Move the assertion of the response code into the `_send_relation` helper. * Moves some helpers into the base-class.
synapse
12
Python
74
test_relations.py
def test_edit_thread(self) -> None: # Create a thread and edit the last event. channel = self._send_relation( RelationTypes.THREAD, "m.room.message", content={"msgtype": "m.text", "body": "A threaded reply!"}, ) threaded_event_id = channel.json_body["event_id"] new_body = {"msgtype": "m.text", "body": "I've been edited!"} channel = self._send_relation( RelationTypes.REPLACE, "m.room.message", content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, parent_id=threaded_event_id, ) # Fetch the thread root, to get the bundled aggregation for the thread. channel = self.make_request( "GET", f"/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) # We expect that the edit message appears in the thread summary in the # unsigned relations section. relations_dict = channel.json_body["unsigned"].get("m.relations") self.assertIn(RelationTypes.THREAD, relations_dict) thread_summary = relations_dict[RelationTypes.THREAD] self.assertIn("latest_event", thread_summary) latest_event_in_thread = thread_summary["latest_event"] self.assertEqual(latest_event_in_thread["content"]["body"], "I've been edited!")
1da0f79d5455b594f2aa989106a672786f5b990f
176
https://github.com/matrix-org/synapse.git
357
def test_edit_thread(self) -> None: # Create a thread and edit the last event. channel = self._send_relation( RelationTypes.THREAD, "m.room.message", content={"msgtype": "m.text", "body": "A threaded reply!"}, ) threaded_event_id = channel.json_body["event_id"] new_body = {"msgtype": "m.text", "body": "I've been edited!"} channel = self._send_relation( RelationTypes.REPLACE, "m.room.message", content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, parent_id=threaded_event_id, ) # Fetch the thread root, to get the bundled aggregation for the thread. channel = self.make_request( "GET", f"/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) # We expect that the edit message appears in the thread summary in the # unsigned relations section. relations_dict = channel.json_body["unsigned"].get("m.r
23
324
test_edit_thread
9
0
1
4
keras/engine/training_distributed_v1.py
271,662
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
10
Python
8
training_distributed_v1.py
def fit(self, *args, **kwargs): return _train_with_multi_worker(self._single_worker_loop.fit)( *args, **kwargs )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
27
https://github.com/keras-team/keras.git
33
def fit(self, *args, **kwargs): return _train_with_multi_worker(self._single_worker_loop.fit)( *args, **kwarg
6
42
fit
42
1
1
14
keras/activations.py
280,055
Move serialization-related logic in utils/generic_utils.py to saving/legacy/serialization.py. PiperOrigin-RevId: 479688207
keras
9
Python
39
activations.py
def deserialize(name, custom_objects=None): activation_functions = {} current_module = sys.modules[__name__] # we put 'current_module' after 'activation_layers' to prefer the local one # if there is a collision generic_utils.populate_dict_with_module_objects( activation_functions, (activation_layers, current_module), obj_filter=callable, ) return serialization.deserialize_keras_object( name, module_objects=activation_functions, custom_objects=custom_objects, printable_module_name="activation function", ) @keras_export("keras.activations.get") @tf.__internal__.dispatch.add_dispatch_support
c269e3cd8fed713fb54d2971319df0bfe6e1bf10
@keras_export("keras.activations.get") @tf.__internal__.dispatch.add_dispatch_support
59
https://github.com/keras-team/keras.git
116
def deserialize(name, custom_objects=None): activation_functions = {} current_module = sys.modules[__name__] # we put 'current_module' after 'activation_layers' to prefer the local one # if there is a collision generic_utils.populate_dict_with_module_objects( activation_functions, (activation_layers, current_module), obj_filter=callable, ) return serialization.deserialize_keras_object( name, module_objects=activation_functions, custom_objects=custom_objects, printable_module_name="activation function", ) @keras_export("keras.activations.get") @tf.__internal__.dispatch.add_dispatch_support
22
114
deserialize
8
1
1
2
keras/backend.py
269,556
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
8
Python
8
backend.py
def cumprod(x, axis=0): return tf.math.cumprod(x, axis=axis) @keras_export("keras.backend.var") @doc_controls.do_not_generate_docs
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.backend.var") @doc_controls.do_not_generate_docs
23
https://github.com/keras-team/keras.git
12
def cumprod(x, axis=0): return tf.math.cumprod(x, axis
8
54
cumprod
60
0
2
18
python/ray/train/examples/train_fashion_mnist_example.py
131,961
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
20
Python
48
train_fashion_mnist_example.py
def validate_epoch(dataloader, model, loss_fn): size = len(dataloader.dataset) // train.world_size() num_batches = len(dataloader) model.eval() test_loss, correct = 0, 0 with torch.no_grad(): for X, y in dataloader: pred = model(X) test_loss += loss_fn(pred, y).item() correct += (pred.argmax(1) == y).type(torch.float).sum().item() test_loss /= num_batches correct /= size print( f"Test Error: \n " f"Accuracy: {(100 * correct):>0.1f}%, " f"Avg loss: {test_loss:>8f} \n" ) return test_loss
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
114
https://github.com/ray-project/ray.git
150
def validate_epoch(dataloader, model, loss_fn): size = len(dataloader.dataset) // train.world_size() num_batches = len(dataloader) model.eval() test_loss, correct = 0, 0 with torch.no_grad(): for X, y in dataloader: pred = model(X) test_loss += loss_fn(pred, y).item() correct += (pred.argmax(1) == y).type(torch.float).sum().item() test_loss /= num_batches correct /= size print( f"Test Error: \n " f"Accuracy: {(100 * correct):>0.1f}%,
24
212
validate_epoch
12
0
1
4
wagtail/search/tests/test_queries.py
75,868
Reformat with black
wagtail
9
Python
11
test_queries.py
def test_only_query(self): filters, query = separate_filters_from_query("hello world") self.assertDictEqual(filters, {}) self.assertEqual(query, "hello world")
d10f15e55806c6944827d801cd9c2d53f5da4186
30
https://github.com/wagtail/wagtail.git
32
def test_only_query(self): filters, query = separate_filters_from_query("hello world") self.assertDictEqual(filters, {}) sel
7
52
test_only_query
120
0
6
21
.venv/lib/python3.8/site-packages/pip/_vendor/html5lib/_tokenizer.py
62,444
upd; format
transferlearning
16
Python
81
_tokenizer.py
def rcdataState(self): data = self.stream.char() if data == "&": self.state = self.characterReferenceInRcdata elif data == "<": self.state = self.rcdataLessThanSignState elif data == EOF: # Tokenization ends. return False elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data in spaceCharacters: # Directly after emitting a token you switch back to the "data # state". At that point spaceCharacters are important so they are # emitted separately. self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": data + self.stream.charsUntil(spaceCharacters, True)}) # No need to update lastFourChars here, since the first space will # have already been appended to lastFourChars and will have broken # any <!-- or --> sequences else: chars = self.stream.charsUntil(("&", "<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
164
https://github.com/jindongwang/transferlearning.git
480
def rcdataState(self): data = self.stream.char() if data == "&": self.state = self.characterReferenceInRcdata elif data == "<": self.state = self.rcdataLessThanSignState elif data == EOF:
15
300
rcdataState
70
0
1
18
wagtail/images/rect.py
75,100
Reformat with black
wagtail
11
Python
43
rect.py
def transform(self, transform): # Transform each corner of the rect tl_transformed = transform.transform_vector(Vector(self.left, self.top)) tr_transformed = transform.transform_vector(Vector(self.right, self.top)) bl_transformed = transform.transform_vector(Vector(self.left, self.bottom)) br_transformed = transform.transform_vector(Vector(self.right, self.bottom)) # Find extents of the transformed corners left = min( [tl_transformed.x, tr_transformed.x, bl_transformed.x, br_transformed.x] ) right = max( [tl_transformed.x, tr_transformed.x, bl_transformed.x, br_transformed.x] ) top = min( [tl_transformed.y, tr_transformed.y, bl_transformed.y, br_transformed.y] ) bottom = max( [tl_transformed.y, tr_transformed.y, bl_transformed.y, br_transformed.y] ) return Rect(left, top, right, bottom)
d10f15e55806c6944827d801cd9c2d53f5da4186
174
https://github.com/wagtail/wagtail.git
218
def transform(self, transform): # Transform each corner of the rect tl_transformed = transform.transform_vector(Vector(self.left, self.top)) tr_transformed = transform.transform_vector(Vector(self.right, self.top)) bl_transformed = transform.transform_vector(Vector(self.left, self.bottom)) br_transformed = transform.transform_vector(Vector(self.right, self.bottom)) # Find extents of the transformed corners left = min( [tl_transformed.x, tr_transformed.x, bl_transformed.x, br_transformed.x] ) right = max( [tl_transformed.x, tr_transformed.x, bl_transformed.x, br_transformed.x] ) top = min( [tl_transformed.y, tr_transformed.y, bl_transformed.y, br_transformed.y] ) bottom = max( [tl_transformed.y, tr_transformed.y, bl_transformed.y, br_transformed.y] ) re
17
255
transform
333
0
15
87
homeassistant/components/flux/switch.py
297,834
String formatting and max line length - Part 2 (#84393)
core
17
Python
146
switch.py
async def async_flux_update(self, utcnow=None): if utcnow is None: utcnow = dt_utcnow() now = as_local(utcnow) sunset = get_astral_event_date(self.hass, SUN_EVENT_SUNSET, now.date()) start_time = self.find_start_time(now) stop_time = self.find_stop_time(now) if stop_time <= start_time: # stop_time does not happen in the same day as start_time if start_time < now: # stop time is tomorrow stop_time += datetime.timedelta(days=1) elif now < start_time: # stop_time was yesterday since the new start_time is not reached stop_time -= datetime.timedelta(days=1) if start_time < now < sunset: # Daytime time_state = "day" temp_range = abs(self._start_colortemp - self._sunset_colortemp) day_length = int(sunset.timestamp() - start_time.timestamp()) seconds_from_start = int(now.timestamp() - start_time.timestamp()) percentage_complete = seconds_from_start / day_length temp_offset = temp_range * percentage_complete if self._start_colortemp > self._sunset_colortemp: temp = self._start_colortemp - temp_offset else: temp = self._start_colortemp + temp_offset else: # Night time time_state = "night" if now < stop_time: if stop_time < start_time and stop_time.day == sunset.day: # we need to use yesterday's sunset time sunset_time = sunset - datetime.timedelta(days=1) else: sunset_time = sunset night_length = int(stop_time.timestamp() - sunset_time.timestamp()) seconds_from_sunset = int(now.timestamp() - sunset_time.timestamp()) percentage_complete = seconds_from_sunset / night_length else: percentage_complete = 1 temp_range = abs(self._sunset_colortemp - self._stop_colortemp) temp_offset = temp_range * percentage_complete if self._sunset_colortemp > self._stop_colortemp: temp = self._sunset_colortemp - temp_offset else: temp = self._sunset_colortemp + temp_offset rgb = color_temperature_to_rgb(temp) x_val, y_val, b_val = color_RGB_to_xy_brightness(*rgb) brightness = self._brightness if self._brightness else b_val if self._disable_brightness_adjust: brightness = None if self._mode == MODE_XY: await async_set_lights_xy( self.hass, self._lights, x_val, y_val, brightness, self._transition ) _LOGGER.debug( ( "Lights updated to x:%s y:%s brightness:%s, %s%% " "of %s cycle complete at %s" ), x_val, y_val, brightness, round(percentage_complete * 100), time_state, now, ) elif self._mode == MODE_RGB: await async_set_lights_rgb(self.hass, self._lights, rgb, self._transition) _LOGGER.debug( "Lights updated to rgb:%s, %s%% of %s cycle complete at %s", rgb, round(percentage_complete * 100), time_state, now, ) else: # Convert to mired and clamp to allowed values mired = color_temperature_kelvin_to_mired(temp) await async_set_lights_temp( self.hass, self._lights, mired, brightness, self._transition ) _LOGGER.debug( ( "Lights updated to mired:%s brightness:%s, %s%% " "of %s cycle complete at %s" ), mired, brightness, round(percentage_complete * 100), time_state, now, )
cb13418babd21a1e9584978b0c523f1b1e4e1cb0
493
https://github.com/home-assistant/core.git
1,491
async def async_flux_update(self, utcnow=None): if utcnow is None: utcnow = dt_utcnow() now = as_local(utcnow) sunset = get_astral_event_date(self.hass, SUN_EVENT_SUNSET, now.date()) start_time = self.find_start_time(now) stop_time = self.find_stop_time(now) if stop_time <= start_time: # stop_time does not happen in the same day as start_time if start_time < now: # stop time is tomorrow stop_time += datetime.timedelta(days=1) elif now < start_time: # stop_time was yesterday since the new start_time is not reached stop_time -= datetime.timedelta(days=1) if start_time < now < sunset: # Daytime time_state = "day" temp_range = abs(self._start_colortemp - self._sunset_colortemp) day_length = int(sunset.timestamp() - start_time.timestamp(
57
787
async_flux_update
24
0
1
8
tests/providers/slack/hooks/test_slack.py
44,273
Return slack api call response in slack_hook (#21107)
airflow
10
Python
18
test_slack.py
def test_api_call(self, slack_client_class_mock): slack_client_mock = mock.Mock() slack_client_class_mock.return_value = slack_client_mock slack_client_mock.api_call.return_value = {'ok': True} slack_hook = SlackHook(token='test_token') test_api_json = {'channel': 'test_channel'} slack_hook.call("chat.postMessage", json=test_api_json) slack_client_mock.api_call.assert_called_with("chat.postMessage", json=test_api_json)
0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e
67
https://github.com/apache/airflow.git
72
def test_api_call(self, slack_client_class_mock): slack_client_mock = mock.Mock() slack_client_class_mock.return_value = slack_client_mock slack_client_mock.api_call.return_value = {'ok': True} slack_hook = SlackHook(token='test_token') test_api_json = {'channel': 'test_channel'} slack_hook.call(
15
117
test_api_call
52
0
6
9
lib/matplotlib/figure.py
108,783
ENH: add ability to remove layout engine This also adds a "place holder" layout engine to ensure that users can not "go through zero" and change to an incompatible layout engine. Co-authored-by: Jody Klymak <jklymak@gmail.com>
matplotlib
10
Python
40
figure.py
def _check_layout_engines_compat(self, old, new): if old is None or new is None: return True if old.colorbar_gridspec == new.colorbar_gridspec: return True # colorbar layout different, so check if any colorbars are on the # figure... for ax in self.axes: if hasattr(ax, '_colorbar'): # colorbars list themselves as a colorbar. return False return True
f7f3bb6079048506613c513231e1bd2a87ebc7d3
51
https://github.com/matplotlib/matplotlib.git
164
def _check_layout_engines_compat(self, old, new): if old is None or ne
8
84
_check_layout_engines_compat
130
0
3
24
rllib/algorithms/marwil/marwil.py
124,504
[RLlib] Make Dataset reader default reader and enable CRR to use dataset (#26304) Co-authored-by: avnish <avnish@avnishs-MBP.local.meter>
ray
12
Python
90
marwil.py
def training_step(self) -> ResultDict: # Collect SampleBatches from sample workers. with self._timers[SAMPLE_TIMER]: batch = synchronous_parallel_sample(worker_set=self.workers) batch = batch.as_multi_agent() self._counters[NUM_AGENT_STEPS_SAMPLED] += batch.agent_steps() self._counters[NUM_ENV_STEPS_SAMPLED] += batch.env_steps() # Add batch to replay buffer. self.local_replay_buffer.add(batch) # Pull batch from replay buffer and train on it. train_batch = sample_min_n_steps_from_buffer( self.local_replay_buffer, self.config["train_batch_size"], count_by_agent_steps=self._by_agent_steps, ) # Train. if self.config["simple_optimizer"]: train_results = train_one_step(self, train_batch) else: train_results = multi_gpu_train_one_step(self, train_batch) # TODO: Move training steps counter update outside of `train_one_step()` method. # # Update train step counters. # self._counters[NUM_ENV_STEPS_TRAINED] += train_batch.env_steps() # self._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps() global_vars = { "timestep": self._counters[NUM_AGENT_STEPS_SAMPLED], } # Update weights - after learning on the local worker - on all remote # workers. if self.workers.remote_workers(): with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]: self.workers.sync_weights(global_vars=global_vars) # Update global vars on local worker as well. self.workers.local_worker().set_global_vars(global_vars) return train_results # Deprecated: Use ray.rllib.algorithms.marwil.MARWILConfig instead!
1243ed62bf4121c83881c3ddc095bc6a873a09f3
166
https://github.com/ray-project/ray.git
406
def training_step(self) -> ResultDict: # Collect SampleBatches from sample workers. with self._timers[SAMPLE_TIMER]: batch = synchronous_parallel_sample(worker_set=self.workers) batch = batch.as_multi_agent() self._counters[NUM_AGENT_STEPS_SAMPLED] += batch.agent_steps() self._counters[NUM_ENV_STEPS_SAMPLED] += batch.env_steps() # Add batch to replay buffer. self.local_replay_buffer.add(batch) # Pull batch from replay buffer and train on it. train_batch = sample_min_n_steps_from_buffer( self.local_replay_buffer, self.config["train_batch_size"], count_by_agent_steps=self._by_agent_steps, ) # Train. if self.config["simple_optimizer"]: train_results = train_one_step(self, train_batch) else: train_results = multi_gpu_train_one_step(self, train_batch) # TODO: Move training steps counter update outside of `train_one_step()` method. # # Update train step counters. # self._counters[NUM_ENV_STEPS_TRAINED] += train_batch.env_steps() # self._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps() global_vars = { "timestep": self._counters[NUM_AGENT_STEPS_SAMPLED], } # Update weights - after learning on the local worker - on all remote # workers. if self.workers.remote_workers(): with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]: self.workers.sync_weights(global_vars=global_vars) # Update global vars on local worker as well. self.w
31
281
training_step
124
0
2
29
tests/models/test_default_evaluator.py
19,067
Evaluation Default evaluator (#5092) * init Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * rename module Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * revert black change Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * change module path Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * refactor Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * lazy load pyspark Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * revert export Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix curcit import Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix conftest.py Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * Revert "fix conftest.py" This reverts commit 2ea29c62bfffc5461bf77f3da15b5c00f51de19b. 
* fix tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * default evaluator Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update import Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update hash algo Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update import Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comment Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix lint Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add more tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix lint Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update shap explainer Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * remove scikitplot dep Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add pr curve Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add shap.summary_plot Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * log explainer Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * improve explainer code Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update shap init Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update explainer creating Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update predict_proba Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * refactor Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add multi-class metrics artifacts Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add log_loss metric Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * lazy load pyspark Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address ben comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix 
Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * prevent show shap logo, add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * support spark model Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add shap version check Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update docs, loose classifier label limit Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * multiclass classifier merge metrics/plots Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * zfill feature name Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add config max_num_classes_threshold_logging_roc_pr_curve_for_multiclass_classifier Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * refactor Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * improve label handling Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * refactor Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * black Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * increase plot dpi Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix test fixture Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix pylint Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * use matplot rc_context Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix shap import Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * refactor EvaluationDataset Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * limit user specify shap algos Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * clean Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update evaluation dataset Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * use svg fig Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * revert svg Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * curve dashline, legend display ap/roc, legend move out Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * linewidth 1 Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * keyword arguments for evaluate, fix tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * mark abc.abstractmethod, kw args for ModelEvaluator methods Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix pylint Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix pylint Signed-off-by: Weichen Xu <weichen.xu@databricks.com>
mlflow
10
Python
82
test_default_evaluator.py
def test_gen_multiclass_roc_curve(): y = [0, 1, 2, 1, 2] y_probs = [ [0.7, 0.1, 0.2], [0.2, 0.3, 0.5], [0.25, 0.4, 0.35], [0.3, 0.4, 0.3], [0.8, 0.1, 0.1], ] results = _gen_classifier_curve( is_binomial=False, y=y, y_probs=y_probs, labels=[0, 1, 2], curve_type="roc" ) print(results) expected_x_data_list = [ [0.0, 0.25, 0.25, 1.0], [0.0, 0.33333333, 0.33333333, 1.0], [0.0, 0.33333333, 0.33333333, 1.0, 1.0], ] expected_y_data_list = [[0.0, 0.0, 1.0, 1.0], [0.0, 0.5, 1.0, 1.0], [0.0, 0.0, 0.5, 0.5, 1.0]] line_labels = ["label=0,AUC=0.750", "label=1,AUC=0.750", "label=2,AUC=0.333"] for index, (name, x_data, y_data) in enumerate(results.plot_fn_args["data_series"]): assert name == line_labels[index] assert np.allclose(x_data, expected_x_data_list[index], rtol=1e-3) assert np.allclose(y_data, expected_y_data_list[index], rtol=1e-3) assert results.plot_fn_args["xlabel"] == "False Positive Rate" assert results.plot_fn_args["ylabel"] == "True Positive Rate" assert results.plot_fn_args["line_kwargs"] == {"drawstyle": "steps-post", "linewidth": 1} expected_auc = [0.75, 0.75, 0.3333] assert np.allclose(results.auc, expected_auc, rtol=1e-3)
964f5ab75098c55f028f8acfeeae05df35ea68d5
388
https://github.com/mlflow/mlflow.git
255
def test_gen_multiclass_roc_curve(): y = [0, 1, 2, 1, 2] y_probs = [ [0.7, 0.1, 0.2], [0.2, 0.3, 0.5], [0.25, 0.4, 0.35], [0.3, 0.4, 0.3], [0.8, 0.1, 0.1], ] results = _gen_classifier_curve( is_binomial=False, y=y, y_probs=y_probs, labels=[0, 1, 2], curve_type="roc" ) print(results) expected_x_data_list = [ [0.0, 0.25, 0.25, 1.0], [0.0, 0.33333333, 0.33333333, 1.0], [0.0, 0.33333333, 0.33333333, 1.0, 1.0], ] expected_y_data_list = [[0.0, 0.0, 1.0, 1.0], [0.0, 0.5, 1.0, 1.0], [0.0, 0.0, 0.5, 0.5, 1.0]] line_labels = ["label=0,AUC=0.750", "label=1,AUC=0.750", "label=2,AUC=0.333"] for index, (name, x_data, y_data) in enumerate(results.plot_fn_args["data_series"]): assert name == line_labels[index] assert np.allclose(x_data, expected_x_data_list[index], rtol=1e-3) assert np.allclose(y_data, expected_y_data_list[index], rtol=1e-3) assert results.plot_fn_args["xlabel"] == "False Positive Rate" assert results.plot_fn_args["ylabel"] ==
23
410
test_gen_multiclass_roc_curve
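The mlflow test above asserts per-class (one-vs-rest) ROC curves with AUCs of 0.750, 0.750 and 0.333. As an independent cross-check sketch, the same per-class AUCs can be reproduced with scikit-learn; using sklearn here is my own choice for illustration, while the test itself exercises mlflow's internal _gen_classifier_curve:

import numpy as np
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize

y = [0, 1, 2, 1, 2]
y_probs = np.array([
    [0.7, 0.1, 0.2],
    [0.2, 0.3, 0.5],
    [0.25, 0.4, 0.35],
    [0.3, 0.4, 0.3],
    [0.8, 0.1, 0.1],
])
y_bin = label_binarize(y, classes=[0, 1, 2])  # one binary column per class
for k in range(3):
    fpr, tpr, _ = roc_curve(y_bin[:, k], y_probs[:, k])
    print(f"label={k}, AUC={auc(fpr, tpr):.3f}")  # 0.750, 0.750, 0.333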
110
0
1
70
tests/components/homekit_controller/specific_devices/test_eve_energy.py
288,863
Migrate HomeKit Controller to use stable identifiers (#80064)
core
18
Python
66
test_eve_energy.py
async def test_eve_energy_setup(hass): accessories = await setup_accessories_from_file(hass, "eve_energy.json") await setup_test_accessories(hass, accessories) await assert_devices_and_entities_created( hass, DeviceTestInfo( unique_id=HUB_TEST_ACCESSORY_ID, name="Eve Energy 50FF", model="Eve Energy 20EAO8601", manufacturer="Elgato", sw_version="1.2.9", hw_version="1.0.0", serial_number="AA00A0A00000", devices=[], entities=[ EntityTestInfo( entity_id="switch.eve_energy_50ff", unique_id="00:00:00:00:00:00_1_28", friendly_name="Eve Energy 50FF", state="off", ), EntityTestInfo( entity_id="sensor.eve_energy_50ff_amps", unique_id="00:00:00:00:00:00_1_28_33", friendly_name="Eve Energy 50FF Amps", unit_of_measurement=ELECTRIC_CURRENT_AMPERE, capabilities={"state_class": SensorStateClass.MEASUREMENT}, state="0", ), EntityTestInfo( entity_id="sensor.eve_energy_50ff_volts", unique_id="00:00:00:00:00:00_1_28_32", friendly_name="Eve Energy 50FF Volts", unit_of_measurement=ELECTRIC_POTENTIAL_VOLT, capabilities={"state_class": SensorStateClass.MEASUREMENT}, state="0.400000005960464", ), EntityTestInfo( entity_id="sensor.eve_energy_50ff_power", unique_id="00:00:00:00:00:00_1_28_34", friendly_name="Eve Energy 50FF Power", unit_of_measurement=POWER_WATT, capabilities={"state_class": SensorStateClass.MEASUREMENT}, state="0", ), EntityTestInfo( entity_id="sensor.eve_energy_50ff_energy_kwh", unique_id="00:00:00:00:00:00_1_28_35", friendly_name="Eve Energy 50FF Energy kWh", capabilities={"state_class": SensorStateClass.MEASUREMENT}, unit_of_measurement=ENERGY_KILO_WATT_HOUR, state="0.28999999165535", ), EntityTestInfo( entity_id="switch.eve_energy_50ff_lock_physical_controls", unique_id="00:00:00:00:00:00_1_28_36", friendly_name="Eve Energy 50FF Lock Physical Controls", entity_category=EntityCategory.CONFIG, state="off", ), EntityTestInfo( entity_id="button.eve_energy_50ff_identify", unique_id="00:00:00:00:00:00_1_1_3", friendly_name="Eve Energy 50FF Identify", entity_category=EntityCategory.DIAGNOSTIC, state="unknown", ), ], ), )
f23b1750e85f07091eb896a0b12b8f95e5646338
278
https://github.com/home-assistant/core.git
1,188
async def test_eve_energy_setup(hass): accessories = await setup_accessories_from_file(hass, "eve_energy.json") await setup_test_accessories(hass, accessories) await assert_devices_and_entities_created( hass, DeviceTestInfo( unique_id=HUB_TEST_ACCESSORY_ID, name="Eve Energy 50FF", model="Eve Energy 20EAO8601", manufacturer="Elgato", sw_version="1.2.9", hw_version="1.0.0", serial_number="AA00A0A00000", devices=[], entities=[ EntityTestInfo( entity_id="switch.eve_energy_50ff", unique_id="00:00:00:00:00:00_1_28", friendly_name="Eve Energy 50FF", state="off", ), EntityTestInfo( entity_id="sensor.eve_energy_50ff_amps", unique_id="00:00:00:00:00:00_1_28_33", friendly_name="Eve Energy 50FF Amps", unit_of_m
33
456
test_eve_energy_setup
65
0
3
22
saleor/checkout/tasks.py
29,355
Optimize memory usage of delete_expired_checkouts task (#11175)
saleor
13
Python
43
tasks.py
def delete_expired_checkouts(): now = timezone.now() expired_anonymous_checkouts = ( Q(email__isnull=True) & Q(user__isnull=True) & Q(last_change__lt=now - settings.ANONYMOUS_CHECKOUTS_TIMEDELTA) ) expired_user_checkout = (Q(email__isnull=False) | Q(user__isnull=False)) & Q( last_change__lt=now - settings.USER_CHECKOUTS_TIMEDELTA ) empty_checkouts = Q(lines__isnull=True) & Q( last_change__lt=now - settings.EMPTY_CHECKOUTS_TIMEDELTA ) qs = Checkout.objects.filter( empty_checkouts | expired_anonymous_checkouts | expired_user_checkout ) deleted_count = 0 for tokens_batch in queryset_in_batches(qs): batch_count, _ = Checkout.objects.filter(token__in=tokens_batch).delete() deleted_count += batch_count if deleted_count: task_logger.debug("Removed %s checkouts.", deleted_count)
f6edcd7b4f7ee334245733f118d3b10cd065d671
143
https://github.com/saleor/saleor.git
163
def delete_expired_checkouts(): now = timezone.now() expired_anonymous_checkouts = ( Q(email__isnull=True) & Q(user__isnull=True) & Q(last_change__lt=now - settings.ANONYMOUS_CHECKOUTS_TIMEDELTA) ) expired_user_checkout = (Q(email__isnull=False) | Q(user__isnull=False))
28
230
delete_expired_checkouts
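The saleor commit above reduces memory usage by deleting expired checkouts in batches rather than in one bulk delete. The queryset_in_batches helper it calls is not shown in this record, so the sketch below is only a plausible shape of the idea, batched deletion by primary key with the Django ORM, not saleor's actual implementation:

BATCH_SIZE = 2000  # assumed batch size, for illustration only

def delete_in_batches(queryset):
    deleted = 0
    while True:
        # Materialise only one batch of primary keys at a time.
        pks = list(queryset.values_list("pk", flat=True)[:BATCH_SIZE])
        if not pks:
            break
        count, _ = queryset.model.objects.filter(pk__in=pks).delete()
        deleted += count
    return deleted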
54
0
1
15
tests/admin_changelist/tests.py
206,968
Refs #33476 -- Reformatted code with Black.
django
10
Python
34
tests.py
def test_search_help_text(self): superuser = self._create_superuser("superuser") m = BandAdmin(Band, custom_site) # search_fields without search_help_text. m.search_fields = ["name"] request = self._mocked_authenticated_request("/band/", superuser) response = m.changelist_view(request) self.assertIsNone(response.context_data["cl"].search_help_text) self.assertNotContains(response, '<div class="help">') # search_fields with search_help_text. m.search_help_text = "Search help text" request = self._mocked_authenticated_request("/band/", superuser) response = m.changelist_view(request) self.assertEqual( response.context_data["cl"].search_help_text, "Search help text" ) self.assertContains(response, '<div class="help">Search help text</div>')
9c19aff7c7561e3a82978a272ecdaad40dda5c00
113
https://github.com/django/django.git
169
def test_search_help_text(self): superuser = self._create_superuser("superuser") m = BandAdmin(Band, custom_site) # search_fields without search_help_text. m.search_fields = ["name"] request = self._mocked_authenticated_request("/band/", superuser) response = m.changelist_view(request) self.assertIsNone(response.context_data["cl"].search_help_text) self.assertNotContains(response, '<div class="help">') #
19
197
test_search_help_text
81
0
2
19
dask/dataframe/tests/test_dataframe.py
155,960
Avoid pytest.warns(None) (#8718) Compatibility with new pytest 7.0 Co-authored-by: Luka Sturtewagen <luka.sturtewagen@sensorfact.nl> Co-authored-by: Julia Signell <jsignell@gmail.com>
dask
13
Python
58
test_dataframe.py
def test_head_npartitions_warn(): match = "5 elements requested, only 3 elements" with pytest.warns(UserWarning, match=match): d.head(5) match = "Insufficient elements" with pytest.warns(UserWarning, match=match): d.head(100) with pytest.warns(UserWarning, match=match): d.head(7) with pytest.warns(UserWarning, match=match): d.head(7, npartitions=2) # No warn if all partitions are inspected for n in [3, -1]: with warnings.catch_warnings(record=True) as record: d.head(10, npartitions=n) assert not record # With default args, this means that a 1 partition dataframe won't warn d2 = dd.from_pandas(pd.DataFrame({"x": [1, 2, 3]}), npartitions=1) with warnings.catch_warnings(record=True) as record: d2.head() assert not record
e715a4d4b5b30374d6ea625a921b5557f0ce6efa
168
https://github.com/dask/dask.git
176
def test_head_npartitions_warn(): match = "5 elements requested, only 3 elements" with pytest.warns
17
282
test_head_npartitions_warn
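The dask commit above replaces pytest.warns(None), which pytest 7.0 deprecated, with the stdlib warnings recorder to assert that no warning is emitted, exactly as the test body shows. A minimal sketch of that pattern in isolation; the test name and the trivial code under test are placeholders of my own:

import warnings

def test_no_warning_emitted():
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")  # make sure filters do not swallow anything
        result = 1 + 1  # the code under test goes here
    assert result == 2
    assert not record  # no warnings were recorded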
17
0
1
4
mitmproxy/tools/main.py
251,223
use Python 3.9+ typing
mitmproxy
9
Python
17
main.py
def mitmweb(args=None) -> Optional[int]: # pragma: no cover from mitmproxy.tools import web run(web.master.WebMaster, cmdline.mitmweb, args) return None
fdde9ba3b3caaa2654048cec0af07bfcc3a6a3f8
34
https://github.com/mitmproxy/mitmproxy.git
26
def mitmweb(args=None) -> Optional[int]: # pragma: no cover from mitmproxy.tools import web run(web.master.WebMaster, cmdline.mitmweb, args) return None
11
52
mitmweb