Dataset schema (22 columns; for string columns, min/max are string lengths):

column          dtype          min        max
n_words         int64          3          1.95k
n_ast_errors    int64          0          2
complexity      int64          1          151
nloc            int64          2          546
path            stringlengths  8          125
id              int64          280        339k
commit_message  stringlengths  3          18.1k
repo            stringlengths  3          28
ast_levels      int64          4          28
language        stringclasses  1 value ("Python")
vocab_size      int64          3          677
file_name       stringlengths  5          67
code            stringlengths  101        24k
commit_id       stringlengths  40         40
ast_errors      stringlengths  0          2.76k
token_counts    int64          7          3.77k
url             stringlengths  31         61
n_whitespaces   int64          4          13.9k
random_cut      stringlengths  21         13.9k
n_identifiers   int64          1          157
n_ast_nodes     int64          10         3.6k
fun_name        stringlengths  3          72
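
Each record below stores one Python function together with its commit metadata and simple size/complexity metrics. As a minimal, hedged sketch of how such a table could be explored — assuming the records are available locally as a pandas DataFrame with the column names above (the file name "code_functions.parquet" is hypothetical, not taken from this dataset's documentation):

import pandas as pd

# Hypothetical local copy of the dataset; columns follow the schema above.
df = pd.read_parquet("code_functions.parquet")

# Sanity checks against the ranges listed in the schema.
assert df["commit_id"].str.len().eq(40).all()   # commit_id is always a 40-character hash
print(df["language"].unique())                  # a single class: "Python"

# Example query: small, low-complexity functions from one repository.
small = df[(df["repo"] == "keras") & (df["complexity"] <= 2) & (df["nloc"] <= 20)]
print(small[["path", "fun_name", "nloc", "complexity"]].head())
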
22
0
1
11
keras/datasets/imdb.py
270,137
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
11
Python
20
imdb.py
def get_word_index(path="imdb_word_index.json"): origin_folder = ( "https://storage.googleapis.com/tensorflow/tf-keras-datasets/" ) path = get_file( path, origin=origin_folder + "imdb_word_index.json", file_hash="bfafd718b763782e994055a2d397834f", ) with open(path) as f: return json.load(f)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
45
https://github.com/keras-team/keras.git
75
def get_word_index(path="imdb_word_index.json"): origin_folder = ( "https://storage.googleapis.com/tensorflow/tf-keras-datasets/" ) path = get_file( path, origin=origin_folder + "imdb_word_index.json", file_hash="bfafd718b763782e994055a2d3978
10
83
get_word_index
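
Taking the first record above (keras get_word_index) as a reference point, the numeric columns appear to be static metrics computed over the code field. The exact definitions used to build this dataset are not stated here, so the following is only a rough sketch, under assumed definitions, of how comparable values could be derived from a code string with Python's ast module:

import ast

def rough_metrics(code: str) -> dict:
    """Approximate a few of the schema's numeric columns for one snippet (assumed definitions)."""
    tree = ast.parse(code)
    # Unique names referenced as plain identifiers in the snippet.
    identifiers = {node.id for node in ast.walk(tree) if isinstance(node, ast.Name)}

    def depth(node: ast.AST) -> int:
        children = list(ast.iter_child_nodes(node))
        return 1 + (max(depth(child) for child in children) if children else 0)

    return {
        "nloc": sum(1 for line in code.splitlines() if line.strip()),  # non-blank source lines
        "n_identifiers": len(identifiers),
        "n_ast_nodes": sum(1 for _ in ast.walk(tree)),
        "ast_levels": depth(tree),
    }

print(rough_metrics("def add(a, b):\n    return a + b\n"))
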
214
0
7
95
mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
113,995
DESCRIBE to accept [predictor_name].[features, model, etc] syntax (#1938) * DESCRIBE to accept [predictor_name].[features, model, etc] syntax
mindsdb
19
Python
91
mysql_proxy.py
def answer_describe_predictor(self, predictor_value): predictor_attr = None if isinstance(predictor_value, (list, tuple)): predictor_name = predictor_value[0] predictor_attr = predictor_value[1] else: predictor_name = predictor_value model_interface = self.session.model_interface models = model_interface.get_models() if predictor_name not in [x['name'] for x in models]: raise ErBadTableError(f"Can't describe predictor. There is no predictor with name '{predictor_name}'") description = model_interface.get_model_description(predictor_name) if predictor_attr is None: description = [ description['accuracies'], description['column_importances'], description['outputs'], description['inputs'], description['datasource'], description['model'] ] packages = self.get_tabel_packets( columns=[{ 'table_name': '', 'name': 'accuracies', 'type': TYPES.MYSQL_TYPE_VAR_STRING }, { 'table_name': '', 'name': 'column_importances', 'type': TYPES.MYSQL_TYPE_VAR_STRING }, { 'table_name': '', 'name': "outputs", 'type': TYPES.MYSQL_TYPE_VAR_STRING }, { 'table_name': '', 'name': 'inputs', 'type': TYPES.MYSQL_TYPE_VAR_STRING }, { 'table_name': '', 'name': 'datasource', 'type': TYPES.MYSQL_TYPE_VAR_STRING }, { 'table_name': '', 'name': 'model', 'type': TYPES.MYSQL_TYPE_VAR_STRING }], data=[description] ) else: data = model_interface.get_model_data(predictor_name) if predictor_attr == "features": data = self._get_features_info(data) packages = self.get_tabel_packets( columns=[{ 'table_name': '', 'name': 'column', 'type': TYPES.MYSQL_TYPE_VAR_STRING }, { 'table_name': '', 'name': 'type', 'type': TYPES.MYSQL_TYPE_VAR_STRING }, { 'table_name': '', 'name': "encoder", 'type': TYPES.MYSQL_TYPE_VAR_STRING }, { 'table_name': '', 'name': 'role', 'type': TYPES.MYSQL_TYPE_VAR_STRING }], data=data ) elif predictor_attr == "model": data = self._get_model_info(data) packages = self.get_tabel_packets( columns=[{ 'table_name': '', 'name': 'name', 'type': TYPES.MYSQL_TYPE_VAR_STRING }, { 'table_name': '', 'name': 'performance', 'type': TYPES.MYSQL_TYPE_VAR_STRING }, { 'table_name': '', 'name': "selected", 'type': TYPES.MYSQL_TYPE_VAR_STRING }], data=data ) else: raise ErNotSupportedYet("DESCRIBE '%s' predictor attribute is not supported yet" % predictor_attr) packages.append(self.last_packet()) self.send_package_group(packages)
1552c3b72ed13e12e86be90506fa34504298695c
433
https://github.com/mindsdb/mindsdb.git
1,771
def answer_describe_predictor(self, predictor_value): predictor_attr
29
774
answer_describe_predictor
12
0
2
5
src/accelerate/test_utils/testing.py
337,423
Create Cross-Validation example (#317)
accelerate
11
Python
11
testing.py
def slow(test_case): if not _run_slow_tests: return unittest.skip("test is slow")(test_case) else: return test_case
2d7fbbdc73670b96dbc8b3f875cfe147db4d9241
24
https://github.com/huggingface/accelerate.git
35
def slow(test_case): if not _run_slow_tests: return unittest.skip("test is slow")(test_case) else: return test_case
5
45
slow
78
0
3
25
python/ray/util/sgd/tf/examples/tensorflow_train_example.py
133,273
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
11
Python
59
tensorflow_train_example.py
def train_example(num_replicas=1, batch_size=128, use_gpu=False): trainer = TFTrainer( model_creator=simple_model, data_creator=simple_dataset, num_replicas=num_replicas, use_gpu=use_gpu, verbose=True, config=create_config(batch_size), ) # model baseline performance start_stats = trainer.validate() print(start_stats) # train for 2 epochs trainer.train() trainer.train() # model performance after training (should improve) end_stats = trainer.validate() print(end_stats) # sanity check that training worked dloss = end_stats["validation_loss"] - start_stats["validation_loss"] dmse = ( end_stats["validation_mean_squared_error"] - start_stats["validation_mean_squared_error"] ) print(f"dLoss: {dloss}, dMSE: {dmse}") if dloss > 0 or dmse > 0: print("training sanity check failed. loss increased!") else: print("success!")
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
127
https://github.com/ray-project/ray.git
201
def train_example(num_replicas=1, batch_size=128, use_gpu=False): trainer = TFTrainer( model_creator=simple_model, data_creator=simple_dataset, num_replicas=num_replicas, use_gpu=use_gpu, verbose=True, config=create_config(batch_size), ) # model baseline performance start_stats = trainer.validate() print(start_stats) # train for 2 epochs trainer.train() trainer.train() # model performance after training (should improve) end_stats = trainer.validate() print(end_stats) # sanity check that training worked dloss = end_stats["validation_loss"] - start_stats["validation_loss"] dmse = ( end_stats["validation_mean_squared_error"] - start_stats["validation_
20
222
train_example
30
0
2
7
pandas/core/indexes/base.py
171,063
CLN: assorted (#49590)
pandas
12
Python
24
base.py
def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]: if not isinstance(other, Index): other = Index(other, name=self.name) result_name = self.name else: result_name = get_op_result_name(self, other) return other, result_name # -------------------------------------------------------------------- # Indexing Methods
ceebce6f4f074887ce2c27f2342d8d618b4037e0
54
https://github.com/pandas-dev/pandas.git
89
def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]: if not isinstance(other, Index): other = Index(other, name=self.name) result_name = self.name else: result_name = get_op_result_name(self, other) return other, result_name # -------------------------------------------------------------------- # Indexing Methods
10
84
_convert_can_do_setop
26
0
1
22
modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/beam.py
50,182
add disco_diffusion_ernievil_base
PaddleHub
10
Python
21
beam.py
def _expand_to_beam_size(self, x): r check_type(x, 'x', (Variable), 'BeamSearchDecoder._expand_to_beam_size') x = nn.unsqueeze(x, [1]) expand_times = [1] * len(x.shape) expand_times[1] = self.beam_size x = paddle.tile(x, expand_times) return x
ffcde21305c61d950a9f93e57e6180c9a9665b87
65
https://github.com/PaddlePaddle/PaddleHub.git
74
def _expand_to_beam_size(self, x): r check_type(x, 'x', (Variable), 'BeamSearchDecoder._expand_to_be
13
102
_expand_to_beam_size
29
0
1
15
tests/nightly/gpu/test_style_gen.py
194,793
[Style-Controlled Generation] Open-source a second style classifier (#4380) * Add model to model list * Curr only classifier download page * Add test case * Update version * Update with some results * Wording
ParlAI
12
Python
29
test_style_gen.py
def test_curr_only_accuracy(self): _, test = testing_utils.eval_model( opt={ 'batchsize': 4, 'fp16': True, 'num_examples': 16, 'model_file': 'zoo:style_gen/curr_only_classifier/model', 'model': 'projects.style_gen.classifier:ClassifierAgent', 'classes_from_file': 'image_chat_personalities_file', 'task': 'style_gen:CurrUttOnlyStyle', 'wrapper_task': 'style_gen:LabeledBlendedSkillTalk', }, skip_valid=True, ) self.assertAlmostEqual(test['accuracy'], 0.4375, delta=0.0)
82df52b4431f3573ca2c93dd4bb3098992968acc
75
https://github.com/facebookresearch/ParlAI.git
210
def test_curr_only_accuracy(self): _, test = testing_utils.eval_model( opt={ 'batchsize': 4, 'fp16': True, 'num_examples': 16, 'model_file': 'zoo:style_gen/cu
10
129
test_curr_only_accuracy
96
0
5
28
homeassistant/components/matter/config_flow.py
291,864
Add matter integration BETA (#83064) * Add matter base (#79372) Co-authored-by: Marcel van der Veldt <m.vanderveldt@outlook.com> * Add matter server add-on flow (#82698) * Add matter server add-on flow * Fix stale error argument * Clean docstrings * Use localhost as default address * Add matter websocket api foundation (#82848) * Add matter config entry add-on management (#82865) * Use matter refactored server/client library (#83003) Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Bump python-matter-server to 1.0.6 (#83059) * Extend matter websocket api (#82948) * Extend matter websocket api * Finish docstring * Fix pin type * Adjust api after new client * Adjust api to frontend for now Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
core
19
Python
80
config_flow.py
async def _async_start_addon(self) -> None: addon_manager: AddonManager = get_addon_manager(self.hass) try: await addon_manager.async_schedule_start_addon() # Sleep some seconds to let the add-on start properly before connecting. for _ in range(ADDON_SETUP_TIMEOUT_ROUNDS): await asyncio.sleep(ADDON_SETUP_TIMEOUT) try: if not (ws_address := self.ws_address): discovery_info = await self._async_get_addon_discovery_info() ws_address = self.ws_address = build_ws_address( discovery_info["host"], discovery_info["port"] ) await validate_input(self.hass, {CONF_URL: ws_address}) except (AbortFlow, CannotConnect) as err: LOGGER.debug( "Add-on not ready yet, waiting %s seconds: %s", ADDON_SETUP_TIMEOUT, err, ) else: break else: raise CannotConnect("Failed to start Matter Server add-on: timeout") finally: # Continue the flow after show progress when the task is done. self.hass.async_create_task( self.hass.config_entries.flow.async_configure(flow_id=self.flow_id) )
e2308fd15cec4dfdd25d843b72cd3071657fd5b8
147
https://github.com/home-assistant/core.git
551
async def _async_start_addon(self) -> None: addon_manager: AddonManager = get_addon_manager(self.hass) try: await addon_manager.async_schedule_start_addon() # Sleep some seconds to let the add-on start properly before connecting. for _ in range(ADDON_SETUP_TIMEOUT_ROUN
29
249
_async_start_addon
7
0
1
2
python3.10.4/Lib/hashlib.py
217,637
add python 3.10.4 for windows
XX-Net
8
Python
7
hashlib.py
def __py_new(name, data=b'', **kwargs): return __get_builtin_constructor(name)(data, **kwargs)
8198943edd73a363c266633e1aa5b2a9e9c9f526
25
https://github.com/XX-net/XX-Net.git
13
def __py_new(name, data=b'', **kwargs): return __get_builtin_constructor(name)(data, **kwa
5
41
__py_new
36
0
3
14
homeassistant/components/jellyfin/media_player.py
289,777
Add media_player platform to Jellyfin (#76801)
core
12
Python
22
media_player.py
def _handle_coordinator_update(self) -> None: self.session_data = ( self.coordinator.data.get(self.session_id) if self.coordinator.data is not None else None ) if self.session_data is not None: self.now_playing = self.session_data.get("NowPlayingItem") self.play_state = self.session_data.get("PlayState") else: self.now_playing = None self.play_state = None self._update_from_session_data() super()._handle_coordinator_update()
3759be09df09be61a4b880eaa58c7d9d8a099080
92
https://github.com/home-assistant/core.git
154
def _handle_coordinator_update(self) -> None: self.session_data = ( self.coordinator.data.get(se
11
151
_handle_coordinator_update
24
1
1
7
tests/gamestonk_terminal/stocks/options/test_yfinance_model.py
280,980
Tests : Stocks > Options (#1125) * Update tests : conftest * Updating tests : stocks/options * Updating tests : fix typing * Updating tests : black * Updating tests : pyupgrade * Updating tests : black * Updating tests : mock dates in cassettes * Updating tests : conftest * Updating tests : black * Updating tests : force single threading * Updating tests : skip * Updating tests : black * Updating tests : conftest * Update tests : skip stocks/options/controller * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : fixing issue * Updating tests : add init * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : conftest * Updating tests : skip * Updating tests : skip * Updating tests : skip * Updating tests : skip
OpenBBTerminal
10
Python
21
test_yfinance_model.py
def test_get_option_chain(recorder): result_tuple = yfinance_model.get_option_chain( ticker="PM", expiration="2022-01-07", ) result_tuple = (result_tuple.calls, result_tuple.puts) recorder.capture_list(result_tuple) @pytest.mark.vcr @pytest.mark.parametrize( "func", [ "option_expirations", "get_dividend", "get_price", "get_info", "get_closing", ], )
000d1e93d7187299dce5653f781345031a9ad96f
@pytest.mark.vcr @pytest.mark.parametrize( "func", [ "option_expirations", "get_dividend", "get_price", "get_info", "get_closing", ], )
37
https://github.com/OpenBB-finance/OpenBBTerminal.git
90
def test_get_option_chain(recorder): result_tuple = yfinance_model.get_option_chain( ticker="PM", expiration="2022-01-07", ) result_tuple = (result_tuple.calls, result_tuple.puts) recorder.capture_list(result_tuple) @pytest.mark.vcr @pytest.mark.parametri
14
112
test_get_option_chain
126
0
8
66
wagtail/admin/views/pages/create.py
72,469
Reformat with black
wagtail
21
Python
90
create.py
def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update( { "content_type": self.page_content_type, "page_class": self.page_class, "parent_page": self.parent_page, "edit_handler": self.edit_handler, "action_menu": PageActionMenu( self.request, view="create", parent_page=self.parent_page ), "preview_modes": self.page.preview_modes, "form": self.form, "next": self.next_url, "has_unsaved_changes": self.has_unsaved_changes, "locale": None, "translations": [], } ) if getattr(settings, "WAGTAIL_I18N_ENABLED", False): # Pages can be created in any language at the root level if self.parent_page.is_root(): translations = [ { "locale": locale, "url": reverse( "wagtailadmin_pages:add", args=[ self.page_content_type.app_label, self.page_content_type.model, self.parent_page.id, ], ) + "?" + urlencode({"locale": locale.language_code}), } for locale in Locale.objects.all() ] else: user_perms = UserPagePermissionsProxy(self.request.user) translations = [ { "locale": translation.locale, "url": reverse( "wagtailadmin_pages:add", args=[ self.page_content_type.app_label, self.page_content_type.model, translation.id, ], ), } for translation in self.parent_page.get_translations() .only("id", "locale") .select_related("locale") if user_perms.for_page(translation).can_add_subpage() and self.page_class in translation.specific_class.creatable_subpage_models() and self.page_class.can_create_at(translation) ] context.update( { "locale": self.locale, "translations": translations, } ) return context
d10f15e55806c6944827d801cd9c2d53f5da4186
313
https://github.com/wagtail/wagtail.git
1,339
def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update( { "content_type": self.page_content_type, "page_class": self.page_class, "parent_page": self.parent_page, "edit_handler": self.edit_handler, "action_menu": PageActionMenu( self.request, view="create", parent_page=self.parent_page ), "preview_modes": self.page.preview_modes, "form": self.form, "next": self.next_url, "has_unsaved_changes": self.has_unsaved_changes, "locale": None, "translations": [], } ) if getattr(settings, "WAGTAIL_I18N_ENABLED", False): # Pages can be created in any language at the root level if self.parent_page.is_root(): translations = [ { "locale": locale, "url": reverse( "wagtailadmin_pages:add", args=[ self.page_content_type.app_label, self.page_content_type.model, self.parent_page.id, ],
45
512
get_context_data
30
0
1
14
tests/checkpointing/test_model_checkpoint.py
241,760
Update `tests/checkpointing/*.py` to use `devices` instead of `gpus` or `ipus` (#11408) Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com>
lightning
10
Python
27
test_model_checkpoint.py
def test_model_checkpoint_no_extraneous_invocations(tmpdir): model = LogInTwoMethods() num_epochs = 4 model_checkpoint = ModelCheckpointTestInvocations(monitor="early_stop_on", expected_count=num_epochs, save_top_k=-1) trainer = Trainer( strategy="ddp_spawn", accelerator="cpu", devices=2, default_root_dir=tmpdir, callbacks=[model_checkpoint], max_epochs=num_epochs, ) trainer.fit(model) assert trainer.state.finished, f"Training failed with {trainer.state}"
d2d284fd6e3e8f53e9a44ab233771850af1e4dab
77
https://github.com/Lightning-AI/lightning.git
96
def test_model_checkpoint_no_extraneous_invocations(tmpdir): model = LogInTwoMethods() num_epochs = 4 model_checkpoint = ModelCheckpointTestInvocations(monitor="early_stop_on", expected_count=num_epochs, sa
21
130
test_model_checkpoint_no_extraneous_invocations
8
0
1
3
homeassistant/components/humidifier/__init__.py
290,833
Adjust HumidifierEntity type hints (#82248)
core
6
Python
8
__init__.py
def supported_features(self) -> HumidifierEntityFeature | int: return self._attr_supported_features
2453f95b2442036200a07d862d98bfd3a401e726
14
https://github.com/home-assistant/core.git
22
def supported_features(self) -> HumidifierEntityFeature | int: return self._attr_supported_features
5
25
supported_features
46
0
1
31
saleor/graphql/product/tests/queries/test_product_type_query.py
29,297
Split test_product.py and test_variant.py into multiple files (#11173) * Split test_product.py into multiple files * Split test_variant.py into multiple files
saleor
12
Python
33
test_product_type_query.py
def test_query_product_type_for_federation(api_client, product, channel_USD): product_type = product.product_type product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk) variables = { "representations": [ { "__typename": "ProductType", "id": product_type_id, }, ], } query = response = api_client.post_graphql(query, variables) content = get_graphql_content(response) assert content["data"]["_entities"] == [ { "__typename": "ProductType", "id": product_type_id, "name": product_type.name, } ]
d90be220d6b687d08153934a51354011a3cb5ca1
94
https://github.com/saleor/saleor.git
186
def test_query_product_type_for_federation(api_client, product, channel_USD): product_type = product.product_type product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk) variables = { "representations": [ { "__typename": "ProductType", "id": product_type_id, }, ], } query = response = api_client.post_graphql(query, variables) content = get_graphql_content(response) assert content["data"]["_entities"] == [ { "__typename":
17
161
test_query_product_type_for_federation
22
0
2
4
src/transformers/utils/fx.py
37,533
Fx with meta (#16836) * Add meta proxy * Uses meta data to trace data dependent control-flow * Remove commented class * Handles torch creating functions * Added type annotation to fix tracing * Tracing works for everything but T5 and GPT-J * Almost all previously supported models pass * All architectures can be traced except T5 * Intermediate commit to have a trace of the comparison operators for HFProxy * Everything works, except loss computation * Everything works * Removed unused import * Overriden methods do not use underlying ops (linear and torch.matmul), and model attributes are copied to the traced version * Fix torch_matmul_override * Change attributes reference to deepcopy * Remove breakpoint and add torch_index_override * Small fix * Fix typo * Replace asserts by explicit exceptions
transformers
9
Python
20
fx.py
def __contains__(self, key): # To handle cases such as : # `"some_key" in kwargs` if self.node.op == "placeholder": return False return super().__contains__(key)
2c2a2169b6524f18b37d7b4b64c64fb6a29a35a2
27
https://github.com/huggingface/transformers.git
60
def __contains__(self, key): # To handle cases such as : # `"some_key" in kwargs` if self.node.op == "placeholder":
6
47
__contains__
10
0
1
3
airflow/models/taskinstance.py
44,419
Make `airflow dags test` be able to execute Mapped Tasks (#21210) * Make `airflow dags test` be able to execute Mapped Tasks In order to do this there were two steps required: - The BackfillJob needs to know about mapped tasks, both to expand them, and in order to update it's TI tracking - The DebugExecutor needed to "unmap" the mapped task to get the real operator back I was testing this with the following dag: ``` from airflow import DAG from airflow.decorators import task from airflow.operators.python import PythonOperator import pendulum @task def make_list(): return list(map(lambda a: f'echo "{a!r}"', [1, 2, {'a': 'b'}])) def consumer(*args): print(repr(args)) with DAG(dag_id='maptest', start_date=pendulum.DateTime(2022, 1, 18)) as dag: PythonOperator(task_id='consumer', python_callable=consumer).map(op_args=make_list()) ``` It can't "unmap" decorated operators successfully yet, so we're using old-school PythonOperator We also just pass the whole value to the operator, not just the current mapping value(s) * Always have a `task_group` property on DAGNodes And since TaskGroup is a DAGNode, we don't need to store parent group directly anymore -- it'll already be stored * Add "integation" tests for running mapped tasks via BackfillJob * Only show "Map Index" in Backfill report when relevant Co-authored-by: Tzu-ping Chung <uranusjr@gmail.com>
airflow
8
Python
10
taskinstance.py
def key(self) -> TaskInstanceKey: return TaskInstanceKey(self.dag_id, self.task_id, self.run_id, self.try_number, self.map_index)
6fc6edf6af7f676bfa54ff3a2e6e6d2edb938f2e
31
https://github.com/apache/airflow.git
24
def key(self) -> TaskInstanceKey: return TaskInstanceKey(self.dag_id, self.task_id, self.run_id, self.try_number, self.m
8
47
key
15
0
1
5
airbyte-integrations/connectors/source-zendesk-support/unit_tests/unit_test.py
4,476
🐛 Source Zendesk-Support: fixed bug when `Tickets` stream didn't return removed records (#11349)
airbyte
12
Python
11
unit_test.py
def test_check_start_time_param(): expected = 1626936955 start_time = calendar.timegm(pendulum.parse(DATETIME_STR).utctimetuple()) output = SourceZendeskTicketExportStream.check_start_time_param(start_time) assert output == expected
a305e4913060b919f02f3db57b9e17f82f48c425
36
https://github.com/airbytehq/airbyte.git
26
def test_check_start_time_param():
12
60
test_check_start_time_param
76
1
2
17
sklearn/cluster/tests/test_k_means.py
258,814
MNT Update black to stable version (#22474)
scikit-learn
12
Python
58
test_k_means.py
def test_euclidean_distance(dtype, squared): # Check that the _euclidean_(dense/sparse)_dense helpers produce correct # results rng = np.random.RandomState(0) a_sparse = sp.random( 1, 100, density=0.5, format="csr", random_state=rng, dtype=dtype ) a_dense = a_sparse.toarray().reshape(-1) b = rng.randn(100).astype(dtype, copy=False) b_squared_norm = (b**2).sum() expected = ((a_dense - b) ** 2).sum() expected = expected if squared else np.sqrt(expected) distance_dense_dense = _euclidean_dense_dense_wrapper(a_dense, b, squared) distance_sparse_dense = _euclidean_sparse_dense_wrapper( a_sparse.data, a_sparse.indices, b, b_squared_norm, squared ) assert_allclose(distance_dense_dense, distance_sparse_dense, rtol=1e-6) assert_allclose(distance_dense_dense, expected, rtol=1e-6) assert_allclose(distance_sparse_dense, expected, rtol=1e-6) @pytest.mark.parametrize("dtype", [np.float32, np.float64])
1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
177
https://github.com/scikit-learn/scikit-learn.git
136
def test_euclidean_distance(dtype, squared): # Check that the _euclidean_(dense/sparse)_dense helpers produce correct # results rng = np.random.RandomState(0) a_sparse = sp.random( 1, 100, density=0.5, format="csr", random_state=rng, dtype=dtype ) a_dense = a_sparse.toarray().reshape(-1) b = rng.randn(100).astype(dtype, copy=False) b_squared_norm = (b**2).sum() expected = ((a_dense - b) ** 2).sum() expected = expected if squared else np.sqrt(expected) distance_dense_dense = _euclidean_dense_dense_wrapper(a_dense, b, squared) distance_sparse_dense = _euclidean_sparse_dense_wrapper( a_sparse.data, a_sparse.indices, b, b_squared_norm, squared ) assert_allclose(distance_dense_dense, distance_sparse_dense, rtol
36
284
test_euclidean_distance
81
0
9
16
netbox/extras/scripts.py
264,990
Iterate base classes when searching for ScriptVariables
netbox
13
Python
53
scripts.py
def _get_vars(cls): vars = {} # Iterate all base classes looking for ScriptVariables for base_class in inspect.getmro(cls): # When object is reached there's no reason to continue if base_class is object: break for name, attr in base_class.__dict__.items(): if name not in vars and issubclass(attr.__class__, ScriptVariable): vars[name] = attr # Order variables according to field_order field_order = getattr(cls.Meta, 'field_order', None) if not field_order: return vars ordered_vars = { field: vars.pop(field) for field in field_order if field in vars } ordered_vars.update(vars) return ordered_vars
fe899d9d7cdb458298b92c2f46792adaf211851d
105
https://github.com/netbox-community/netbox.git
254
def _get_vars(cls): vars = {} # Iterate all base classes looking for ScriptVariables for base_class in inspect.getmro(cls): # When object is reached there's no reason to continue if base_class is object: break for name, attr in base_class.__dict__.items(): if name not in vars and issubclass(attr.__class__, ScriptVariable): vars[name] = attr # Order variables according to field_order field_order = getattr(cls.Meta, 'field_order', None) if not field_order: return vars ordered_vars = { field: vars.pop(field) for field in field_order if field in vars } ordered_vars.update(vars) r
21
167
_get_vars
36
0
3
9
tests/test_unpaper.py
30,491
unpaper: issue warning if image too large to clean
OCRmyPDF
13
Python
31
test_unpaper.py
def test_unpaper_image_too_big(resources, outdir, caplog): with patch('ocrmypdf._exec.unpaper.UNPAPER_IMAGE_PIXEL_LIMIT', 42): infile = resources / 'crom.png' unpaper.clean(infile, outdir / 'out.png', dpi=300) == infile assert any( 'too large for cleaning' in rec.message for rec in caplog.get_records('call') if rec.levelno == logging.WARNING )
ea69e868ed95a335b362a3708628c0372cb7abb8
64
https://github.com/ocrmypdf/OCRmyPDF.git
99
def test_unpaper_image_too_big(resources, outdir, caplog): with patch('ocrmypdf._exec.unpaper.UNPAPER_IMAGE_PIXEL_LIMIT', 42): infile = resources / 'crom.png' u
16
107
test_unpaper_image_too_big
17
0
1
8
tests/maskformer/test_feature_extraction_maskformer.py
35,843
Maskformer (#15682) * maskformer * conflicts * conflicts * minor fixes * feature extractor test fix refactor MaskFormerLoss following conversation MaskFormer related types should not trigger a module time import error missed one removed all the types that are not used update config mapping minor updates in the doc resolved conversation that doesn't need a discussion minor changes resolved conversations fixed DetrDecoder * minor changes minor changes fixed mdx file test feature_extractor return types functional losses -> classes removed the return type test for the feature extractor minor changes + style + quality * conflicts? * rebase master * readme * added missing files * deleded poolformers test that where in the wrong palce * CI * minor changes * Apply suggestions from code review Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * resolved conversations * minor changes * conversations [Unispeech] Fix slow tests (#15818) * remove soundfile old way of loading audio * Adapt slow test [Barthez Tokenizer] Fix saving (#15815) [TFXLNet] Correct tf xlnet generate (#15822) * [TFXLNet] Correct tf xlnet * adapt test comment Fix the push run (#15807) Fix semantic segmentation pipeline test (#15826) Fix dummy_inputs() to dummy_inputs in symbolic_trace doc (#15776) Add model specific output classes to PoolFormer model docs (#15746) * Added model specific output classes to poolformer docs * Fixed Segformer typo in Poolformer docs Adding the option to return_timestamps on pure CTC ASR models. (#15792) * Adding the option to return_timestamps on pure CTC ASR models. * Remove `math.prod` which was introduced in Python 3.8 * int are not floats. * Reworking the PR to support "char" vs "word" output. * Fixup! * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Quality. 
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> HFTracer.trace should use/return self.graph to be compatible with torch.fx.Tracer (#15824) Fix tf.concatenate + test past_key_values for TF models (#15774) * fix wrong method name tf.concatenate * add tests related to causal LM / decoder * make style and quality * clean-up * Fix TFBertModel's extended_attention_mask when past_key_values is provided * Fix tests * fix copies * More tf.int8 -> tf.int32 in TF test template * clean-up * Update TF test template * revert the previous commit + update the TF test template * Fix TF template extended_attention_mask when past_key_values is provided * Fix some styles manually * clean-up * Fix ValueError: too many values to unpack in the test * Fix more: too many values to unpack in the test * Add a comment for extended_attention_mask when there is past_key_values * Fix TFElectra extended_attention_mask when past_key_values is provided * Add tests to other TF models * Fix for TF Electra test: add prepare_config_and_inputs_for_decoder * Fix not passing training arg to lm_head in TFRobertaForCausalLM * Fix tests (with past) for TF Roberta * add testing for pask_key_values for TFElectra model Co-authored-by: ydshieh <ydshieh@users.noreply.github.com> [examples/summarization and translation] fix readme (#15833) Add ONNX Runtime quantization for text classification notebook (#15817) Re-enable doctests for the quicktour (#15828) * Re-enable doctests for the quicktour * Re-enable doctests for task_summary (#15830) * Remove & Framework split model report (#15825) Add TFConvNextModel (#15750) * feat: initial implementation of convnext in tensorflow. * fix: sample code for the classification model. * chore: added checked for from the classification model. * chore: set bias initializer in the classification head. * chore: updated license terms. * chore: removed ununsed imports * feat: enabled argument during using drop_path. * chore: replaced tf.identity with layers.Activation(linear). * chore: edited default checkpoint. * fix: minor bugs in the initializations. * partial-fix: tf model errors for loading pretrained pt weights. * partial-fix: call method updated * partial-fix: cross loading of weights (4x3 variables to be matched) * chore: removed unneeded comment. * removed playground.py * rebasing * rebasing and removing playground.py. * fix: renaming TFConvNextStage conv and layer norm layers * chore: added initializers and other minor additions. * chore: added initializers and other minor additions. * add: tests for convnext. * fix: integration tester class. * fix: issues mentioned in pr feedback (round 1). * fix: how output_hidden_states arg is propoagated inside the network. * feat: handling of arg for pure cnn models. * chore: added a note on equal contribution in model docs. * rebasing * rebasing and removing playground.py. * feat: encapsulation for the convnext trunk. * Fix variable naming; Test-related corrections; Run make fixup * chore: added Joao as a contributor to convnext. * rebasing * rebasing and removing playground.py. * rebasing * rebasing and removing playground.py. * chore: corrected copyright year and added comment on NHWC. * chore: fixed the black version and ran formatting. * chore: ran make style. * chore: removed from_pt argument from test, ran make style. * rebasing * rebasing and removing playground.py. * rebasing * rebasing and removing playground.py. * fix: tests in the convnext subclass, ran make style. * rebasing * rebasing and removing playground.py. 
* rebasing * rebasing and removing playground.py. * chore: moved convnext test to the correct location * fix: locations for the test file of convnext. * fix: convnext tests. * chore: applied sgugger's suggestion for dealing w/ output_attentions. * chore: added comments. * chore: applied updated quality enviornment style. * chore: applied formatting with quality enviornment. * chore: revert to the previous tests/test_modeling_common.py. * chore: revert to the original test_modeling_common.py * chore: revert to previous states for test_modeling_tf_common.py and modeling_tf_utils.py * fix: tests for convnext. * chore: removed output_attentions argument from convnext config. * chore: revert to the earlier tf utils. * fix: output shapes of the hidden states * chore: removed unnecessary comment * chore: reverting to the right test_modeling_tf_common.py. * Styling nits Co-authored-by: ariG23498 <aritra.born2fly@gmail.com> Co-authored-by: Joao Gante <joao@huggingface.co> Co-authored-by: Sylvain Gugger <Sylvain.gugger@gmail.com> * minor changes * doc fix in feature extractor * doc * typose * removed detr logic from config * removed detr logic from config * removed num_labels * small fix in the config * auxilary -> auxiliary * make style * some test is failing * fix a weird char in config prevending doc-builder * retry to fix the doc-builder issue * make style * new try to fix the doc builder * CI * change weights to facebook Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: ariG23498 <aritra.born2fly@gmail.com> Co-authored-by: Joao Gante <joao@huggingface.co> Co-authored-by: Sylvain Gugger <Sylvain.gugger@gmail.com>
transformers
10
Python
12
test_feature_extraction_maskformer.py
def test_feat_extract_properties(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(feature_extractor, "image_mean")) self.assertTrue(hasattr(feature_extractor, "image_std")) self.assertTrue(hasattr(feature_extractor, "do_normalize")) self.assertTrue(hasattr(feature_extractor, "do_resize")) self.assertTrue(hasattr(feature_extractor, "size")) self.assertTrue(hasattr(feature_extractor, "max_size"))
d83d22f578276e9f201b0b3b0f8f9bd68e86c133
82
https://github.com/huggingface/transformers.git
65
def test_feat_extract_properties(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(feature_extractor, "image_mean")) self.assertTrue(hasattr(feature_extractor, "image_std")) self.assertTrue(hasattr(feature_extractor, "do_normalize")) self.assertTrue(hasattr(feature_extractor, "do_resize")) self.assertTrue(hasattr(feature_extractor, "size")) self.assertTru
7
141
test_feat_extract_properties
13
0
1
8
openbb_terminal/settings_controller.py
284,285
Default env for packaged apps (#1693) * Remove defaults json in favor of a .env in a cross platform specfile * Use ENV_FILE from obff across the app * Add venv packaging support to the specfile * Make silencing explicit * Fix bug in integration tests report printout Co-authored-by: piiq <piiq@tinag.ru>
OpenBBTerminal
10
Python
12
settings_controller.py
def call_cls(self, _): obbff.USE_CLEAR_AFTER_CMD = not obbff.USE_CLEAR_AFTER_CMD set_key( obbff.ENV_FILE, "OPENBB_USE_CLEAR_AFTER_CMD", str(obbff.USE_CLEAR_AFTER_CMD), ) console.print("")
a5b414bf1a91f05f370886748845077d4cec03e7
38
https://github.com/OpenBB-finance/OpenBBTerminal.git
81
def call_cls(self, _): obbff.USE_CLEAR_AFTER_CMD = not obbff.USE_CLEAR_AFTER_CMD set_key(
10
65
call_cls
141
0
6
19
plugins/extract/detect/s3fd.py
100,446
Update all Keras Imports to be conditional (#1214) * Remove custom keras importer * first round keras imports fix * launcher.py: Remove KerasFinder references * 2nd round keras imports update (lib and extract) * 3rd round keras imports update (train) * remove KerasFinder from tests * 4th round keras imports update (tests)
faceswap
18
Python
79
s3fd.py
def _post_process(self, bboxlist): retval = [] for i in range(len(bboxlist) // 2): bboxlist[i * 2] = self.softmax(bboxlist[i * 2], axis=3) for i in range(len(bboxlist) // 2): ocls, oreg = bboxlist[i * 2], bboxlist[i * 2 + 1] stride = 2 ** (i + 2) # 4,8,16,32,64,128 poss = zip(*np.where(ocls[:, :, :, 1] > 0.05)) for _, hindex, windex in poss: axc, ayc = stride / 2 + windex * stride, stride / 2 + hindex * stride score = ocls[0, hindex, windex, 1] if score >= self.confidence: loc = np.ascontiguousarray(oreg[0, hindex, windex, :]).reshape((1, 4)) priors = np.array([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]]) box = self.decode(loc, priors) x_1, y_1, x_2, y_2 = box[0] * 1.0 retval.append([x_1, y_1, x_2, y_2, score]) return_numpy = np.array(retval) if len(retval) != 0 else np.zeros((1, 5)) return return_numpy
aa39234538a8f83e6aa2b60b8275a570e8876ac2
288
https://github.com/deepfakes/faceswap.git
381
def _post_process(self, bboxlist):
37
417
_post_process
3
0
1
2
lib/streamlit/app_session.py
118,533
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
streamlit
8
Python
3
app_session.py
def handle_stop_script_request(self): self._enqueue_script_request(ScriptRequest.STOP)
704eab3478cf69847825b23dabf15813a8ac9fa2
14
https://github.com/streamlit/streamlit.git
17
def handle_stop_script_request(self): self._enqueue_script_request(ScriptRequest.STOP)
5
26
handle_stop_script_request
16
0
1
4
dask/array/tests/test_overlap.py
155,881
Finish making ``map_overlap`` default boundary ``kwarg`` ``'none'`` (#8743) Followup to PR https://github.com/dask/dask/pull/8397 We've had a FutureWarning up for a few months about an upcoming change to the default 'boundary' kwarg value in `map_overlap`, so now is the time to change it. Previous default was `"reflect"`, new default will be "None". The reason for this change is that it makes the code run a lot faster, and for most people the overlap depth is sufficient and they should not require additional boundary handling. See https://github.com/dask/dask/issues/8391 for a full discussion.
dask
10
Python
15
test_overlap.py
def test_map_overlap_no_depth(boundary): x = da.arange(10, chunks=5) y = x.map_overlap(lambda i: i, depth=0, boundary=boundary, dtype=x.dtype) assert_eq(y, x)
c7e069947b9b720df03aca2e4f7682faa2d9876f
48
https://github.com/dask/dask.git
24
def test_map_overlap_no_depth(boundary): x = da.arange(10, chunks=5) y = x.map_overlap(lambda i: i, depth=0, boundary=bound
12
72
test_map_overlap_no_depth
26
0
3
7
homeassistant/components/media_player/browse_media.py
292,993
Restore children media class (#67409)
core
9
Python
21
browse_media.py
def calculate_children_class(self) -> None: self.children_media_class = MEDIA_CLASS_DIRECTORY assert self.children is not None proposed_class = self.children[0].media_class if all(child.media_class == proposed_class for child in self.children): self.children_media_class = proposed_class
d68ada74ccebaa0c1b6986b3be9cf4d73eca7cae
51
https://github.com/home-assistant/core.git
72
def calculate_children_class(self) -> None: self.children_media_class = MEDIA_CLASS
9
81
calculate_children_class
28
1
1
13
tests/gamestonk_terminal/cryptocurrency/defi/test_coindix_model.py
282,174
Tests + Fix : Cryptocurrency > Defi (#1284) * Updating tests : crypto/defi * Updating tests : stocks/prediction_techniques * Updating tests : economy * Updating tests : conftest * Updating tests : economy/wsj * Updating crypto : fixing defi/coindix_view * Updating crypto : fix crypto/defi/defipulse_model * Updating tests : crypto/defi * Updating tests : crypto/defi * Updating crypto : crypto/defi/graph_model * Updating tests : crypto/defi * Updating tests : crypto/defi * Updating tests : crypto/defi * Updating tests : black * Updating tests : economy/fred/prediction * Updating tests : crypto/defi/graph * Updating tests : linting
OpenBBTerminal
11
Python
27
test_coindix_model.py
def test_get_defi_vaults_value_error(mocker): # MOCK GET attrs = { "status_code": 200, "json.side_effect": UnicodeDecodeError, } mock_response = mocker.Mock(**attrs) mocker.patch(target="requests.get", new=mocker.Mock(return_value=mock_response)) with pytest.raises(ValueError) as _: coindix_model.get_defi_vaults( chain=None, protocol=None, kind=None, ) @pytest.mark.vcr(record_mode="none")
ccfe98e19dd36702047fd8130e9b299e8f7cadcc
@pytest.mark.vcr(record_mode="none")
72
https://github.com/OpenBB-finance/OpenBBTerminal.git
105
def test_get_defi_vaults_value_error(mocker): # MOCK GET attrs = { "status_code": 200, "json.side_effect": UnicodeDecodeError, } mock_response = mocker.Mock(**attrs) mocker.patch(target="requests.get", new=mocker.
22
140
test_get_defi_vaults_value_error
28
0
2
6
homeassistant/components/powerwall/__init__.py
292,632
Enable strict typing for powerwall (#65577)
core
10
Python
26
__init__.py
async def async_update_data(self) -> PowerwallData: # Check if we had an error before _LOGGER.debug("Checking if update failed") if self.api_changed: raise UpdateFailed("The powerwall api has changed") return await self.hass.async_add_executor_job(self._update_data)
e1989e285896e07fb6f4a5f09dcf5039c722a16e
36
https://github.com/home-assistant/core.git
74
async def async_update_data(self) -> PowerwallData: # Check if we had an error before _LO
10
67
async_update_data
32
0
6
10
homeassistant/components/itunes/media_player.py
306,911
Use new media player enums [i-l] (#78054)
core
8
Python
17
media_player.py
def state(self): if self.player_state == "offline" or self.player_state is None: return "offline" if self.player_state == "error": return "error" if self.player_state == "stopped": return MediaPlayerState.IDLE if self.player_state == "paused": return MediaPlayerState.PAUSED return MediaPlayerState.PLAYING
823e7e8830118a8c500a0492c9cc8905bf5effb4
56
https://github.com/home-assistant/core.git
118
def state(self): if self.player_state == "offline" or self.player_state is None: return "offline" if self.player_state == "error": return "error" if self.player_state == "stopped": return MediaPlayerState.IDLE if self.player_s
7
102
state
85
0
3
25
pandas/tests/plotting/frame/test_frame.py
164,937
TST: Clean tests/plotting (#45992)
pandas
11
Python
62
test_frame.py
def test_boxplot_vertical(self, hist_df): df = hist_df numeric_cols = df._get_numeric_data().columns labels = [pprint_thing(c) for c in numeric_cols] # if horizontal, yticklabels are rotated ax = df.plot.box(rot=50, fontsize=8, vert=False) self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8) self._check_text_labels(ax.get_yticklabels(), labels) assert len(ax.lines) == 7 * len(numeric_cols) axes = _check_plot_works( df.plot.box, default_axes=True, subplots=True, vert=False, logx=True, ) self._check_axes_shape(axes, axes_num=3, layout=(1, 3)) self._check_ax_scales(axes, xaxis="log") for ax, label in zip(axes, labels): self._check_text_labels(ax.get_yticklabels(), [label]) assert len(ax.lines) == 7 positions = np.array([3, 2, 8]) ax = df.plot.box(positions=positions, vert=False) self._check_text_labels(ax.get_yticklabels(), labels) tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions) assert len(ax.lines) == 7 * len(numeric_cols)
03fef5f0e35200aa5828975b62782bcf11faa0d2
255
https://github.com/pandas-dev/pandas.git
287
def test_boxplot_vertical(self, hist_df): df = hist_df numeric_cols = df._get_numeric_data().columns labels = [pprint_thing(c) for c in numeric_cols] # if horizontal, yticklabels are rotated ax = df.plot.box(rot=50, fontsize=8, vert=False) self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8) self._check_text_labels(ax.get_yticklabels(), labels) assert len(ax.lines) == 7 * len(numeric_cols) axes = _check_p
43
384
test_boxplot_vertical
26
0
2
12
homeassistant/components/esphome/media_player.py
301,002
Initial implementation of ESPHome media players (#72047) Co-authored-by: Paulus Schoutsen <balloob@gmail.com> Co-authored-by: Franck Nijhof <git@frenck.dev>
core
12
Python
20
media_player.py
def supported_features(self) -> int: flags = ( MediaPlayerEntityFeature.PLAY_MEDIA | MediaPlayerEntityFeature.BROWSE_MEDIA | MediaPlayerEntityFeature.STOP | MediaPlayerEntityFeature.VOLUME_SET | MediaPlayerEntityFeature.VOLUME_MUTE ) if self._static_info.supports_pause: flags |= MediaPlayerEntityFeature.PAUSE | MediaPlayerEntityFeature.PLAY return flags
8ff0ced846e505a0c33a848e21b19820861e6884
49
https://github.com/home-assistant/core.git
127
def supported_features(self) -> int: flags = ( MediaPlayerEntityFeature.PLAY_MEDIA | MediaPlayerEn
14
79
supported_features
80
0
3
8
awx/main/scheduler/task_manager.py
80,601
Consume control capacity (#11665) * Select control node before start task Consume capacity on control nodes for controlling tasks and consider remainging capacity on control nodes before selecting them. This depends on the requirement that control and hybrid nodes should all be in the instance group named 'controlplane'. Many tests do not satisfy that requirement. I'll update the tests in another commit. * update tests to use controlplane We don't start any tasks if we don't have a controlplane instance group Due to updates to fixtures, update tests to set node type and capacity explicitly so they get expected result. * Fixes for accounting of control capacity consumed Update method is used to account for currently consumed capacity for instance groups in the in-memory capacity tracking data structure we initialize in after_lock_init and then update via calculate_capacity_consumed (both in task_manager.py) Also update fit_task_to_instance to consider control impact on instances Trust that these functions do the right thing looking for a node with capacity, and cut out redundant check for the whole group's capacity per Alan's reccomendation. * Refactor now redundant code Deal with control type tasks before we loop over the preferred instance groups, which cuts out the need for some redundant logic. Also, fix a bug where I was missing assigning the execution node in one case! * set job explanation on tasks that need capacity move the job explanation for jobs that need capacity to a function so we can re-use it in the three places we need it. * project updates always run on the controlplane Instance group ordering makes no sense on project updates because they always need to run on the control plane. Also, since hybrid nodes should always run the control processes for the jobs running on them as execution nodes, account for this when looking for a execution node. * fix misleading message the variables and wording were both misleading, fix to be more accurate description in the two different cases where this log may be emitted. * use settings correctly use settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME instead of a hardcoded name cache the controlplane_ig object during the after lock init to avoid an uneccesary query eliminate mistakenly duplicated AWX_CONTROL_PLANE_TASK_IMPACT and use only AWX_CONTROL_NODE_TASK_IMPACT * add test for control capacity consumption add test to verify that when there are 2 jobs and only capacity for one that one will move into waiting and the other stays in pending * add test for hybrid node capacity consumption assert that the hybrid node is used for both control and execution and capacity is deducted correctly * add test for task.capacity_type = control Test that control type tasks have the right capacity consumed and get assigned to the right instance group Also fix lint in the tests * jobs_running not accurate for control nodes We can either NOT use "idle instances" for control nodes, or we need to update the jobs_running property on the Instance model to count jobs where the node is the controller_node. I didn't do that because it may be an expensive query, and it would be hard to make it match with jobs_running on the InstanceGroup which filters on tasks assigned to the instance group. This change chooses to stop considering "idle" control nodes an option, since we can't acurrately identify them. 
The way things are without any change, is we are continuing to over consume capacity on control nodes because this method sees all control nodes as "idle" at the beginning of the task manager run, and then only counts jobs started in that run in the in-memory tracking. So jobs which last over a number of task manager runs build up consuming capacity, which is accurately reported via Instance.consumed_capacity * Reduce default task impact for control nodes This is something we can experiment with as far as what users want at install time, but start with just 1 for now. * update capacity docs Describe usage of the new setting and the concept of control impact. Co-authored-by: Alan Rominger <arominge@redhat.com> Co-authored-by: Rebeccah <rhunter@redhat.com>
awx
12
Python
70
task_manager.py
def task_needs_capacity(self, task, tasks_to_update_job_explanation): task.log_lifecycle("needs_capacity") job_explanation = gettext_noop("This job is not ready to start because there is not enough available capacity.") if task.job_explanation != job_explanation: if task.created < (tz_now() - self.time_delta_job_explanation): # Many launched jobs are immediately blocked, but most blocks will resolve in a few seconds. # Therefore we should only update the job_explanation after some time has elapsed to # prevent excessive task saves. task.job_explanation = job_explanation tasks_to_update_job_explanation.append(task) logger.debug("{} couldn't be scheduled on graph, waiting for next cycle".format(task.log_format))
604cbc17376620dc67df35386421835d43732a4e
67
https://github.com/ansible/awx.git
193
def task_needs_capacity(self, task, tasks_to_update_job_explanation): task.log_lifecycle("needs_capacity") job_explanation = gettext_noop("This job is not ready to start because there is not enough available capacity.") if task.job_explanation != job_explanation: if task.created < (tz_now() - self.time_delta_job_explanation): # Many launched jobs are immediately blocked, but most blocks will resolve in a few seconds. # Therefore we should only update the job_explanation after some time has elapsed to # prev
15
116
task_needs_capacity
36
0
1
11
tests/sentry/notifications/utils/test_participants.py
88,115
ref(hybrid-cloud): Add user services. Start tagging some model tests as stable (#40614) Notifications uses new hybrid cloud APIUser Co-authored-by: Mike Ihbe <mike.ihbe@sentry.io> Co-authored-by: Zachary Collins <zachary.collins@sentry.io> Co-authored-by: Zach Collins <recursive.cookie.jar@gmail.com>
sentry
11
Python
26
test_participants.py
def test_other_org_user(self): org_2 = self.create_organization() user_2 = self.create_user() team_2 = self.create_team(org_2, members=[user_2]) team_3 = self.create_team(org_2, members=[user_2]) project_2 = self.create_project(organization=org_2, teams=[team_2, team_3]) assert self.get_send_to_member(project_2, user_2.id) == { ExternalProviders.EMAIL: {user_service.serialize_user(user_2)}, ExternalProviders.SLACK: {user_service.serialize_user(user_2)}, } assert self.get_send_to_member(self.project, user_2.id) == {}
b38f59d9f6d9eedd7ce0606805df7c072addb000
121
https://github.com/getsentry/sentry.git
113
def test_other_org_user(self): org_2 = self.create_organizatio
22
184
test_other_org_user
16
0
1
11
tests/rest/admin/test_device.py
249,234
Use literals in place of `HTTPStatus` constants in tests (#13479) Replace - `HTTPStatus.NOT_FOUND` - `HTTPStatus.FORBIDDEN` - `HTTPStatus.UNAUTHORIZED` - `HTTPStatus.CONFLICT` - `HTTPStatus.CREATED` Signed-off-by: Dirk Klimpel <dirk@klimpel.org>
synapse
9
Python
16
test_device.py
def test_no_auth(self) -> None: channel = self.make_request("GET", self.url, b"{}") self.assertEqual( 401, channel.code, msg=channel.json_body, ) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
1595052b2681fb86c1c1b9a6028c1bc0d38a2e4b
55
https://github.com/matrix-org/synapse.git
84
def test_no_auth(self) -> None: channel = self.make_request("GET", self.url, b"{}") self.assertEqual( 401, channel.code, msg=channel.json_body, ) s
11
88
test_no_auth
22
0
1
10
tests/unit/serve/gateway/test_gateway.py
13,189
feat: allow passing custom gateway in Flow (#5189)
jina
10
Python
19
test_gateway.py
def _start_gateway_runtime(uses, uses_with, worker_port): port = random_port() p = multiprocessing.Process( target=_create_gateway_runtime, args=(port, uses, uses_with, worker_port), daemon=True, ) p.start() time.sleep(1) return port, p
cdaf7f87ececf9e13b517379ca183b17f0d7b007
56
https://github.com/jina-ai/jina.git
60
def _start_gateway_runtime(uses, uses_with, worker_port): port = random_port() p = multiprocessing.Process(
16
83
_start_gateway_runtime
41
0
1
8
pandas/tests/plotting/frame/test_frame_subplots.py
165,192
TST: Don't mark all plotting tests as slow (#46003)
pandas
11
Python
34
test_frame_subplots.py
def test_bar_barwidth_position_int(self, w): # GH 12979 df = DataFrame(np.random.randn(5, 5)) ax = df.plot.bar(stacked=True, width=w) ticks = ax.xaxis.get_ticklocs() tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4])) assert ax.get_xlim() == (-0.75, 4.75) # check left-edge of bars assert ax.patches[0].get_x() == -0.5 assert ax.patches[-1].get_x() == 3.5
63616a622186068e487b3fd5304022c27f6aa6db
119
https://github.com/pandas-dev/pandas.git
103
def test_bar_barwidth_position_int(self, w): # GH 12979 df = DataFrame(np.random.randn(5, 5)) ax = df.plot.bar(stacked=True, width=w) ticks = ax.xaxis.get_ticklocs() tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4])) assert ax.get_xlim() == (-0.75, 4.75) # check left-edge of bars assert ax.patches[0].get_x() == -0.5 assert ax.patches[-1].get_x() == 3.5
22
170
test_bar_barwidth_position_int
43
0
2
9
pandas/tests/indexes/test_base.py
163,593
BUG: do not replace all nulls with "NaN"-string in Series index (#45283)
pandas
10
Python
35
test_base.py
def test_format_missing(self, vals, nulls_fixture): # 2845 vals = list(vals) # Copy for each iteration vals.append(nulls_fixture) index = Index(vals) formatted = index.format() null_repr = "NaN" if isinstance(nulls_fixture, float) else str(nulls_fixture) expected = [str(index[0]), str(index[1]), str(index[2]), null_repr] assert formatted == expected assert index[3] is nulls_fixture
b8cce91ee7bcc86877d4679cd8a9454b5995c2c6
89
https://github.com/pandas-dev/pandas.git
106
def test_format_missing(self, vals, nulls_fixture): # 2845 vals = list(vals) # Copy for each iteration vals.append(nulls_fixture)
15
138
test_format_missing
129
0
8
43
src/PIL/Jpeg2KImagePlugin.py
243,790
Improve exception traceback readability
Pillow
15
Python
74
Jpeg2KImagePlugin.py
def _open(self):
    sig = self.fp.read(4)
    if sig == b"\xff\x4f\xff\x51":
        self.codec = "j2k"
        self._size, self.mode = _parse_codestream(self.fp)
    else:
        sig = sig + self.fp.read(8)

        if sig == b"\x00\x00\x00\x0cjP  \x0d\x0a\x87\x0a":
            self.codec = "jp2"
            header = _parse_jp2_header(self.fp)
            self._size, self.mode, self.custom_mimetype, dpi = header
            if dpi is not None:
                self.info["dpi"] = dpi
        else:
            msg = "not a JPEG 2000 file"
            raise SyntaxError(msg)

    if self.size is None or self.mode is None:
        msg = "unable to determine size/mode"
        raise SyntaxError(msg)

    self._reduce = 0
    self.layers = 0

    fd = -1
    length = -1

    try:
        fd = self.fp.fileno()
        length = os.fstat(fd).st_size
    except Exception:
        fd = -1
        try:
            pos = self.fp.tell()
            self.fp.seek(0, io.SEEK_END)
            length = self.fp.tell()
            self.fp.seek(pos)
        except Exception:
            length = -1

    self.tile = [
        (
            "jpeg2k",
            (0, 0) + self.size,
            0,
            (self.codec, self._reduce, self.layers, fd, length),
        )
    ]
2ae55ccbdad9c842929fb238ea1eb81d1f999024
266
https://github.com/python-pillow/Pillow.git
611
def _open(self): sig = self.fp.read(4) if sig == b"\xff\x4f\xff\x51": self.codec = "j2k" self._size, self.mode = _parse_codestream(self.fp) else: sig = sig + self.fp.read(8) if sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a": self.codec = "jp2" header = _parse_jp2_header(self.fp) self._size, self.mode, self.custom_mimetype, dpi = header if dpi is not None: self.info["dpi"] = dpi else: msg = "not a JPEG 2000 file" raise SyntaxError(msg) if self.size is None or self.mode is None: msg = "unable to determine size/mode" raise S
32
441
_open
43
0
3
17
zerver/tests/test_message_topics.py
84,089
tests: Refactor away result.json() calls with helpers. Signed-off-by: Zixuan James Li <p359101898@gmail.com>
zulip
13
Python
37
test_message_topics.py
def test_get_topics_web_public_stream_web_public_request(self) -> None:
    iago = self.example_user("iago")
    stream = self.make_stream("web-public-stream", is_web_public=True)
    self.subscribe(iago, stream.name)
    for i in range(3):
        self.send_stream_message(iago, stream.name, topic_name="topic" + str(i))

    endpoint = f"/json/users/me/{stream.id}/topics"
    result = self.client_get(endpoint)
    history = self.assert_json_success(result)["topics"]
    self.assertEqual(
        [topic["name"] for topic in history],
        [
            "topic2",
            "topic1",
            "topic0",
        ],
    )
a142fbff85302c5e3acb2e204eca2e9c75dbc74b
112
https://github.com/zulip/zulip.git
194
def test_get_topics_web_public_stream_web_public_request(self) -> None: iago = self.example_user("iago") stream = self.make_stream("web-public-stream", is_web_public=True) self.subscribe(iago, stream.name) for i in range(3): self.send_stream_message(iago,
22
191
test_get_topics_web_public_stream_web_public_request
21
1
1
3
ludwig/datasets/kaggle.py
8,067
Config-first Datasets API (ludwig.datasets refactor) (#2479) * Adds README and stub for reading dataset configs. * Adds __init__.py for configs, moves circular import into function scope in ludwig/datasets/__init__.py * Print config files in datasets folder. * First pass at automatic archive extraction. * Implemented downloading and extract. * Refactor DatasetConfig into its own file. * Fixed bugs downloading kaggle dataset. * Makes registry store dataset instances, not classes. Also comments out import_submodules for testing. * Typo fix. * Only pass data files on to load_unprocessed_dataframe, symlink directories. * Downloading dataset files into existing directory if exists. * Refactor: make datasets fully config-first, lazy load dataset loaders. * Implemented agnews custom loader. * Implements train/validation/test split by files, and globbing support * Adds _glob_multiple * Adds adult_census_income, agnews, allstate_claims_severity. * Implements sha256 verification, adds more datasets up to creditcard_fraud. * Adds checksums, dbpedia, electricity * Fixes gzip file name returned as string not list, adds up to forest_cover dataset. * Adds datasets up to reuters_r8 * Adds all datasets which don't require a custom class. * Restore dataset import behavior by implementing module __getattr__ * Adds KDD datasets. * Adds ieee_fraud. * Adds imbalanced_insurance, insurance_lite. * Adds mnist. * Completes implementation of all of the built-in datasets. * Made cache_dir optional, read from environment variable if set. * Upgrades datasets tests. * Adds test for new dataset config API. Also adds scripts for dataset link checking. * Fixes loading allstate claims severity dataset. * Use @lru_cache(1), @cache not supported in python < 3.9 * Deletes dataset registry, updates automl test utils * Fix imports of datasets API. * Adds more detail to sha256: docstring and basic README * Copy-paste link oops. * Fixes handling of nested archive types like .tar.bz Also adds a LUDWIG_CACHE and export to the README * Adds link for twitter bots. * Fix order of splits in README.md * typo * Adds verify as a phase in doc string. * Support .pqt, .pq extensions for parquet. * Handle nested archives with longer file extensions like .csv.zip * Handle nested .gz types properly too. Check all extensions with .endswith * Handle all archive types with .endswith * Update ludwig/datasets/loaders/split_loaders.py Co-authored-by: Joppe Geluykens <joppe@rvrie.com> * Adds explanation for export, fixes preserve_paths (should be relative to processed_dataset_dir) * Resolve preserved paths relative to raw dataset dir before move. * Catch runtime exception from extracting sub-archives. Co-authored-by: Daniel Treiman <daniel@predibase.com> Co-authored-by: Joppe Geluykens <joppe@rvrie.com>
ludwig
6
Python
16
kaggle.py
def create_kaggle_client():
    # Need to import here to prevent Kaggle from authenticating on import
    from kaggle import api

    return api


@contextmanager
e4fc06f986e03919d9aef3ab55c05fee5a6b9d3a
@contextmanager
10
https://github.com/ludwig-ai/ludwig.git
28
def create_kaggle_client(): # Need
4
23
create_kaggle_client
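The record above defers the Kaggle import to call time and is followed by a @contextmanager definition. Below is a generic sketch of that decorator pattern, not taken from Ludwig: a context manager that temporarily sets environment variables (for example API credentials) and restores them on exit; the variable names are illustrative only.

import os
from contextlib import contextmanager

@contextmanager
def temp_environ(**overrides):
    # remember the previous values so they can be restored afterwards
    saved = {key: os.environ.get(key) for key in overrides}
    os.environ.update({key: str(value) for key, value in overrides.items()})
    try:
        yield
    finally:
        for key, value in saved.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value

with temp_environ(MY_API_USER="alice", MY_API_KEY="secret"):
    pass  # code that needs the temporary credentials runs here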
295
0
6
49
python/ccxt/async_support/huobi.py
15,000
1.66.21 [ci skip]
ccxt
15
Python
174
huobi.py
def parse_trade(self, trade, market=None): # # spot fetchTrades(public) # # { # "amount": 0.010411000000000000, # "trade-id": 102090736910, # "ts": 1583497692182, # "id": 10500517034273194594947, # "price": 9096.050000000000000000, # "direction": "sell" # } # # spot fetchMyTrades(private) # # { # 'symbol': 'swftcbtc', # 'fee-currency': 'swftc', # 'filled-fees': '0', # 'source': 'spot-api', # 'id': 83789509854000, # 'type': 'buy-limit', # 'order-id': 83711103204909, # 'filled-points': '0.005826843283532154', # 'fee-deduct-currency': 'ht', # 'filled-amount': '45941.53', # 'price': '0.0000001401', # 'created-at': 1597933260729, # 'match-id': 100087455560, # 'role': 'maker', # 'trade-id': 100050305348 # } # # linear swap isolated margin fetchOrder details # # { # "trade_id": 131560927, # "trade_price": 13059.800000000000000000, # "trade_volume": 1.000000000000000000, # "trade_turnover": 13.059800000000000000, # "trade_fee": -0.005223920000000000, # "created_at": 1603703614715, # "role": "taker", # "fee_asset": "USDT", # "profit": 0, # "real_profit": 0, # "id": "131560927-770334322963152896-1" # } # marketId = self.safe_string(trade, 'symbol') market = self.safe_market(marketId, market) symbol = market['symbol'] timestamp = self.safe_integer_2(trade, 'ts', 'created-at') timestamp = self.safe_integer(trade, 'created_at', timestamp) order = self.safe_string(trade, 'order-id') side = self.safe_string(trade, 'direction') type = self.safe_string(trade, 'type') if type is not None: typeParts = type.split('-') side = typeParts[0] type = typeParts[1] takerOrMaker = self.safe_string(trade, 'role') priceString = self.safe_string_2(trade, 'price', 'trade_price') amountString = self.safe_string_2(trade, 'filled-amount', 'amount') amountString = self.safe_string(trade, 'trade_volume', amountString) costString = self.safe_string(trade, 'trade_turnover') fee = None feeCost = self.safe_string_2(trade, 'filled-fees', 'trade_fee') feeCurrencyId = self.safe_string_2(trade, 'fee-currency', 'fee_asset') feeCurrency = self.safe_currency_code(feeCurrencyId) filledPoints = self.safe_string(trade, 'filled-points') if filledPoints is not None: if (feeCost is None) or Precise.string_equals(feeCost, '0'): feeCost = filledPoints feeCurrency = self.safe_currency_code(self.safe_string(trade, 'fee-deduct-currency')) if feeCost is not None: fee = { 'cost': feeCost, 'currency': feeCurrency, } tradeId = self.safe_string_2(trade, 'trade-id', 'tradeId') id = self.safe_string_2(trade, 'trade_id', 'id', tradeId) return self.safe_trade({ 'id': id, 'info': trade, 'order': order, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'symbol': symbol, 'type': type, 'side': side, 'takerOrMaker': takerOrMaker, 'price': priceString, 'amount': amountString, 'cost': costString, 'fee': fee, }, market)
dbd6d1c306421a24581647dd50f82f3e11dadf4e
369
https://github.com/ccxt/ccxt.git
1,369
def parse_trade(self, trade, market=None): # # spot fetchTrades(public) # # { # "amount": 0.010411000000000000, # "trade-id": 102090736910, # "ts": 1583497692182, # "id": 10500517034273194594947, # "price": 9096.050000000000000000, # "direction": "sell" # } # # spot fetchMyTrades(private) # # { # 'symbol': 'swftcbtc', # 'fee-currency': 'swftc', # 'filled-fees': '0', # 'source': 'spot-api', # 'id': 83789509854000, # 'type': 'buy-limit', # 'order-id': 83711103204909, # 'filled-points': '0.005826843283532154', # 'fee-deduct-currency': 'ht', # 'filled-amount': '45941.53',
33
665
parse_trade
200
0
11
46
pandas/tests/base/test_value_counts.py
168,733
TST: Filter/test pyarrow PerformanceWarnings (#48093)
pandas
16
Python
103
test_value_counts.py
def test_value_counts_null(null_obj, index_or_series_obj):
    orig = index_or_series_obj
    obj = orig.copy()

    if not allow_na_ops(obj):
        pytest.skip("type doesn't allow for NA operations")
    elif len(obj) < 1:
        pytest.skip("Test doesn't make sense on empty data")
    elif isinstance(orig, pd.MultiIndex):
        pytest.skip(f"MultiIndex can't hold '{null_obj}'")

    values = obj._values
    values[0:2] = null_obj

    klass = type(obj)
    repeated_values = np.repeat(values, range(1, len(values) + 1))
    obj = klass(repeated_values, dtype=obj.dtype)

    # because np.nan == np.nan is False, but None == None is True
    # np.nan would be duplicated, whereas None wouldn't
    counter = collections.Counter(obj.dropna())
    expected = Series(dict(counter.most_common()), dtype=np.int64)
    expected.index = expected.index.astype(obj.dtype)

    result = obj.value_counts()
    if obj.duplicated().any():
        # TODO(GH#32514):
        # Order of entries with the same count is inconsistent on CI (gh-32449)
        with tm.maybe_produces_warning(
            PerformanceWarning,
            pa_version_under7p0 and getattr(obj.dtype, "storage", "") == "pyarrow",
        ):
            expected = expected.sort_index()
        with tm.maybe_produces_warning(
            PerformanceWarning,
            pa_version_under7p0 and getattr(obj.dtype, "storage", "") == "pyarrow",
        ):
            result = result.sort_index()

    if not isinstance(result.dtype, np.dtype):
        # i.e IntegerDtype
        expected = expected.astype("Int64")
    tm.assert_series_equal(result, expected)

    expected[null_obj] = 3

    result = obj.value_counts(dropna=False)
    if obj.duplicated().any():
        # TODO(GH#32514):
        # Order of entries with the same count is inconsistent on CI (gh-32449)
        with tm.maybe_produces_warning(
            PerformanceWarning,
            pa_version_under7p0 and getattr(obj.dtype, "storage", "") == "pyarrow",
        ):
            expected = expected.sort_index()
        with tm.maybe_produces_warning(
            PerformanceWarning,
            pa_version_under7p0 and getattr(obj.dtype, "storage", "") == "pyarrow",
        ):
            result = result.sort_index()
    tm.assert_series_equal(result, expected)
786c28fe929ed65298bfc723aa1cdbe49a68ae0c
363
https://github.com/pandas-dev/pandas.git
521
def test_value_counts_null(null_obj, index_or_series_obj): orig = index_or_series_obj obj = orig.copy() if not allow_na_ops(obj): pytest.skip("type doesn't allow for NA operations") elif len(obj) < 1: pytest.skip("Test doesn't make sense on empty data") elif isinstance(orig, pd.MultiIndex): pytest.skip(f"MultiIndex can't hold '{null_obj}'") values = obj._values values[0:2] = null_obj klass = type(obj) repeated_values = np.repeat(values, range(1, len(values) + 1)) obj = klass(repeated_values, dtype=obj.dtype) # because np.nan == np.nan is False, but None == None is True # np.nan would be duplicated, whereas None wouldn't counter = collections.Counter(obj.dropna()) expected = Series(dict(counter.most_common()), dtype=np.int64) expected.index = expected.index.astype(obj.dtype) result = obj.value_counts() if obj.duplicated().any(): # TODO(GH#32514): # Order of entries with the same count is inconsistent on CI (gh-32449) with tm.maybe_produces_warning( PerformanceWarning, pa_version_under7p0 and getattr(obj.dtype, "storage", "") == "pyarrow", ): expected = expected.sort_index() with tm.maybe_produces_warning( PerformanceWarning, pa_version_under7p0 and getattr(obj.dtype, "storage", "") == "pyarrow",
44
615
test_value_counts_null
7
0
1
4
wagtail/admin/tests/test_page_chooser.py
72,067
Reformat with black
wagtail
12
Python
7
test_page_chooser.py
def test_type_missing(self):
    self.assertEqual(
        self.get_best_root({"page_type": "tests.BusinessIndex"}), self.tree_root
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
25
https://github.com/wagtail/wagtail.git
39
def test_type_missing(self): self.assertEqual( self.get_best_root({"page_type": "tests.B
5
46
test_type_missing
58
0
5
17
erpnext/manufacturing/report/work_order_consumed_materials/work_order_consumed_materials.py
69,348
feat: provision to return non consumed components against the work order
erpnext
13
Python
41
work_order_consumed_materials.py
def get_returned_materials(work_orders):
    raw_materials_qty = defaultdict(float)

    raw_materials = frappe.get_all(
        "Stock Entry",
        fields=["`tabStock Entry Detail`.`item_code`", "`tabStock Entry Detail`.`qty`"],
        filters=[
            ["Stock Entry", "is_return", "=", 1],
            ["Stock Entry Detail", "docstatus", "=", 1],
            ["Stock Entry", "work_order", "in", [d.name for d in work_orders]],
        ],
    )

    for d in raw_materials:
        raw_materials_qty[d.item_code] += d.qty

    for row in work_orders:
        row.returned_qty = 0.0
        if raw_materials_qty.get(row.raw_material_item_code):
            row.returned_qty = raw_materials_qty.get(row.raw_material_item_code)
d59ed24e6ca2a1ff62963c282882a2d52691b7c6
120
https://github.com/frappe/erpnext.git
41
def get_returned_materials(work_orders): raw_materials_qty = defaultdict(float) raw_materials = frappe.get_all( "Stock Entry", fields=["`tabStock Entry Detail`.`item_code`", "`tabStock Entry Detail`.`qty`"], filters=[ ["Stock Entry", "is_return", "=", 1], ["Stock Entry Detail", "docstatus", "=", 1], ["Stock Entry", "work_order", "in", [d.name for d in work_orders]], ], ) for d in raw_materials: raw_materials_qty[d.item_code] += d.qty for row in work_orders: row.returned_qty = 0.0 if raw_materials_qty.get(row.raw_material_item_code): row.returned_qty = raw_materials_qty.get(row.raw_material_item_code)
18
190
get_returned_materials
61
0
1
26
tests/integration_tests/utils.py
6,229
Removes/renames some references to legacy config keys (#1775) * regularizer settings no longer supported for modules. * s/fc_size/output_size * Removes regularize parameter. Co-authored-by: Daniel Treiman <daniel@predibase.com>
ludwig
13
Python
50
utils.py
def audio_feature(folder, **kwargs):
    feature = {
        "name": "audio_" + random_string(),
        "type": "audio",
        "preprocessing": {
            "audio_feature": {
                "type": "fbank",
                "window_length_in_s": 0.04,
                "window_shift_in_s": 0.02,
                "num_filter_bands": 80,
            },
            "audio_file_length_limit_in_s": 3.0,
        },
        "encoder": "stacked_cnn",
        "should_embed": False,
        "conv_layers": [
            {"filter_size": 400, "pool_size": 16, "num_filters": 32},
            {"filter_size": 40, "pool_size": 10, "num_filters": 64},
        ],
        "output_size": 16,
        "destination_folder": folder,
    }
    feature.update(kwargs)
    feature[COLUMN] = feature[NAME]
    feature[PROC_COLUMN] = compute_feature_hash(feature)
    return feature
b77b6ca0afa3439103ab164d80be61652bee21dc
135
https://github.com/ludwig-ai/ludwig.git
263
def audio_feature(folder, **kwargs): feature = { "name": "audio_" + random_string(), "type": "audio", "preprocessing": { "audio_feature": { "type": "fbank", "window_length_in_s": 0.04, "window_shift_in_s": 0.02, "num_filter_bands": 80, }, "audio_file_length_limit_in_s": 3.0, }, "encoder": "s
10
227
audio_feature
36
0
1
5
reconstruction/ostec/external/stylegan2/training/misc.py
9,518
initialize ostec
insightface
13
Python
33
misc.py
def parse_config_for_previous_run(run_dir):
    with open(os.path.join(run_dir, 'submit_config.pkl'), 'rb') as f:
        data = pickle.load(f)
    data = data.get('run_func_kwargs', {})
    return dict(train=data, dataset=data.get('dataset_args', {}))

#----------------------------------------------------------------------------
# Size and contents of the image snapshot grids that are exported
# periodically during training.
7375ee364e0df2a417f92593e09557f1b2a3575a
62
https://github.com/deepinsight/insightface.git
48
def parse_config_for_previous_run(run_dir): with open(os.path.join(run_dir, 'submit_config.pkl'),
14
109
parse_config_for_previous_run
50
0
1
16
tests/core/training/test_story_conflict.py
159,133
Update dependencies in 3.0 to align with rasa-sdk (#10667) * align dependencies * use black 21.7b0 * apply black and docstring reformatting * add changelog
rasa
14
Python
31
test_story_conflict.py
async def test_get_previous_event():
    assert _get_previous_event(
        {PREVIOUS_ACTION: {"action_name": "utter_greet"}, USER: {"intent": "greet"}}
    ) == ("action", "utter_greet")
    assert _get_previous_event(
        {PREVIOUS_ACTION: {"action_text": "this is a test"}, USER: {"intent": "greet"}}
    ) == ("bot utterance", "this is a test")
    assert (
        _get_previous_event(
            {
                PREVIOUS_ACTION: {"action_name": ACTION_LISTEN_NAME},
                USER: {"intent": "greet"},
            }
        )
        == ("intent", "greet")
    )
36eb9c9a5fcca2160e54a6cde5076c93db5bd70b
88
https://github.com/RasaHQ/rasa.git
154
async def test_get_previous_event(): assert _get_previous_event( {PREVIOUS_ACTION: {"action_name": "utter_greet"}, USER: {"intent": "greet"}} ) == ("action", "utter_greet") assert _get_previous_event( {PREVIOUS_ACTION: {"action_text": "this is a test"}, USER: {"intent": "greet"}} ) == ("bot utterance", "this is a test"
5
164
test_get_previous_event
82
1
1
11
tests/ludwig/backend/test_ray.py
8,296
Allow explicitly plumbing through nics (#2605)
ludwig
13
Python
52
test_ray.py
def test_get_trainer_kwargs(trainer_config, cluster_resources, num_nodes, expected_kwargs):
    with patch("ludwig.backend.ray.ray.cluster_resources", return_value=cluster_resources):
        with patch("ludwig.backend.ray._num_nodes", return_value=num_nodes):
            trainer_config_copy = copy.deepcopy(trainer_config)
            actual_kwargs = get_trainer_kwargs(**trainer_config_copy)

            # Function should not modify the original input
            assert trainer_config_copy == trainer_config

            actual_backend = actual_kwargs.pop("backend")
            expected_backend = expected_kwargs.pop("backend")

            assert type(actual_backend) == type(expected_backend)
            assert actual_backend.nics == expected_backend.nics
            assert actual_kwargs == expected_kwargs


@pytest.mark.parametrize(
    "trainer_kwargs,current_env_value,expected_env_value",
    [
        ({"use_gpu": False, "num_workers": 2}, None, "1"),
        ({"use_gpu": False, "num_workers": 1}, None, None),
        ({"use_gpu": True, "num_workers": 2}, None, None),
        ({"use_gpu": True, "num_workers": 2}, "1", "1"),
        ({"use_gpu": True, "num_workers": 2}, "", ""),
    ],
)
c99cab3a674e31885e5608a4aed73a64b1901c55
@pytest.mark.parametrize( "trainer_kwargs,current_env_value,expected_env_value", [ ({"use_gpu": False, "num_workers": 2}, None, "1"), ({"use_gpu": False, "num_workers": 1}, None, None), ({"use_gpu": True, "num_workers": 2}, None, None), ({"use_gpu": True, "num_workers": 2}, "1", "1"), ({"use_gpu": True, "num_workers": 2}, "", ""), ], )
88
https://github.com/ludwig-ai/ludwig.git
232
def test_get_trainer_kwargs(trainer_config, cluster_resources, num_nodes, expected_kwargs): with patch("ludwig.backend.ray.ray.cluster_resources", return_value=cluster_resources): with patch("ludwig.backend.ray._num_nodes", return_value=num_nodes): trainer_config_copy = copy.deepcopy(trainer_config) actual_kwargs = get_trainer_kwargs(**trainer_config_copy) # Function should not modify the original input assert trainer_config_copy == trainer_config actual_backend = actual_kwargs.pop("backend") expected_backend = expected_kwargs.pop("backend") assert type(actual_backend) == type(expected_backend) assert actual_backend.nics == expected_backend.nics assert
20
301
test_get_trainer_kwargs
28
0
1
9
test/pipelines/test_pipeline.py
257,547
Fix YAML validation for `ElasticsearchDocumentStore.custom_query` (#2789) * Add exception for in the validation code * Update Documentation & Code Style * Add tests * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
haystack
17
Python
26
test_pipeline.py
def test_validate_pipeline_config_component_with_json_input_invalid_value():
    with pytest.raises(PipelineConfigError, match="does not contain valid JSON"):
        validate_config_strings(
            {
                "components": [
                    {"name": "test", "type": "test", "params": {"custom_query": "this is surely not JSON! :)"}}
                ]
            }
        )
4d2a06989db0b8bff5570624b13c734dfc1e3d68
42
https://github.com/deepset-ai/haystack.git
115
def test_validate_pipeline_config_component_with_json_input_invalid_value(): with pytest.raises(PipelineConfigError, match="does not contain valid JSON"): validate_config_strings( { "components": [ {"name": "test", "type": "test", "params": {"custom_query": "this is surely not JSON! :)"}} ] }
6
84
test_validate_pipeline_config_component_with_json_input_invalid_value
26
0
2
7
lib/gui/display_command.py
101,907
Typing - lib.gui.display_command
faceswap
14
Python
25
display_command.py
def display_item_set(self) -> None:
    logger.trace("Loading latest preview")  # type:ignore
    size = 256 if self.command == "convert" else 128
    get_images().load_latest_preview(thumbnail_size=int(size * get_config().scaling_factor),
                                     frame_dims=(self.winfo_width(), self.winfo_height()))
    self.display_item = get_images().previewoutput
dab823a3eb7a5257cb1e0818ee10ed234d3de97f
69
https://github.com/deepfakes/faceswap.git
102
def display_item_set(self) -> None: logger.trace("Loadin
17
118
display_item_set
19
1
1
3
python/ray/data/tests/conftest.py
135,515
[AIR] Add `batch_size` arg for `BatchMapper`. (#29193) The default batch_size of 4096 at the Datasets level doesn't suffice for all use cases: it can be too large for wide tables and large images, leading to DRAM/GRAM OOms; it can be too small for narrow tables, leading to unnecessary batch slicing overhead and suboptimal vectorized operations in their UDFs. We should allow users to configure the batch_size at the AIR level. Closes #29168 Signed-off-by: Amog Kamsetty <amogkamsetty@yahoo.com> Signed-off-by: Amog Kamsetty <amogkam@users.noreply.github.com> Co-authored-by: Amog Kamsetty <amogkamsetty@yahoo.com> Co-authored-by: Amog Kamsetty <amogkam@users.noreply.github.com>
ray
11
Python
18
conftest.py
def ds_pandas_list_multi_column_format():
    in_df = pd.DataFrame({"column_1": [1], "column_2": [1]})
    yield ray.data.from_pandas([in_df] * 4)


# ===== Arrow dataset formats =====


@pytest.fixture(scope="function")
28a295968b445635efd1105b900cc624312fc49e
@pytest.fixture(scope="function")
37
https://github.com/ray-project/ray.git
22
def ds_pandas_list_multi_column_format(): in_df = pd.DataFrame
10
81
ds_pandas_list_multi_column_format
107
0
6
25
mmdet/models/detectors/base.py
244,474
Modify RetinaNet model interface
mmdetection
17
Python
73
base.py
def preprocss_aug_testing_data(self, data):
    num_augs = len(data[0]['img'])
    batch_size = len(data)
    aug_batch_imgs = []
    aug_batch_data_samples = []

    # adjust `images` and `data_samples` to a list of list
    # outer list is test-time augmentation and inter list
    # is batch dimension
    for aug_index in range(num_augs):
        batch_imgs = []
        batch_data_samples = []
        for batch_index in range(batch_size):
            single_img = data[batch_index]['img'][aug_index]

            # to gpu and normalize
            single_img = single_img.to(self.device)

            if self.preprocess_cfg is None:
                # YOLOX does not need preprocess_cfg
                single_img = single_img.float()
            else:
                if self.to_rgb and single_img[0].size(0) == 3:
                    single_img = single_img[[2, 1, 0], ...]
                single_img = (single_img - self.pixel_mean) / self.pixel_std

            batch_imgs.append(single_img)
            batch_data_samples.append(
                data[batch_index]['data_sample'][aug_index])
        aug_batch_imgs.append(
            stack_batch(batch_imgs, self.pad_size_divisor, self.pad_value))
        aug_batch_data_samples.append(batch_data_samples)

    return aug_batch_imgs, aug_batch_data_samples
924c381a78eb70cede198e042ef34e038e05c15a
188
https://github.com/open-mmlab/mmdetection.git
503
def preprocss_aug_testing_data(self, data): num_augs = len(data[0]['img']) batch_size = len(data) aug_batch_imgs = [] aug_batch_data_samples = [] # adjust `images` and `data_samples` to a list of list # outer list is test-time augmentation and inter list # is batch dimension for aug_index in range(num_augs): batch_imgs = [] batch_data_samples = [] for batch_index in range(batch_size): single_img = data[batch_index]['img'][aug_index] # to gpu and normalize single_img = single_img.to(self.device) if self.preprocess_cfg is None: # YOLOX does not need preprocess_cfg single_img = single_img.float() else: if self.to_rgb and single_img[0].size(0) == 3: single_img = single_img[[2, 1, 0], ..
26
303
preprocss_aug_testing_data
205
0
14
24
sympy/polys/numberfields/minpoly.py
196,836
Moved definition of illegal
sympy
18
Python
144
minpoly.py
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
    if isinstance(factors[0], tuple):
        factors = [f[0] for f in factors]
    if len(factors) == 1:
        return factors[0]

    prec1 = 10
    points = {}
    symbols = dom.symbols if hasattr(dom, 'symbols') else []
    while prec1 <= prec:
        # when dealing with non-Rational numbers we usually evaluate
        # with `subs` argument but we only need a ballpark evaluation
        xv = {x: v if not v.is_number else v.n(prec1)}
        fe = [f.as_expr().xreplace(xv) for f in factors]

        # assign integers [0, n) to symbols (if any)
        for n in subsets(range(bound), k=len(symbols), repetition=True):
            for s, i in zip(symbols, n):
                points[s] = i

            # evaluate the expression at these points
            candidates = [(abs(f.subs(points).n(prec1)), i)
                          for i, f in enumerate(fe)]

            # if we get invalid numbers (e.g. from division by zero)
            # we try again
            if any(i in _illegal for i, _ in candidates):
                continue

            # find the smallest two -- if they differ significantly
            # then we assume we have found the factor that becomes
            # 0 when v is substituted into it
            can = sorted(candidates)
            (a, ix), (b, _) = can[:2]
            if b > a * 10**6:  # XXX what to use?
                return factors[ix]

        prec1 *= 2

    raise NotImplementedError("multiple candidates for the minimal polynomial of %s" % v)
117f9554466e08aa4178137ad65fae1f2d49b340
256
https://github.com/sympy/sympy.git
485
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5): if isinstance(factors[0], tuple): factors = [f[0] for f in factors] if len(factors) == 1: return factors[0] prec1 = 10 points = {} symbols = dom.symbols if hasattr(dom, 'symbols') else [] while prec1 <= prec: # when dealing with non-Rational numbers we usually evaluate # with `subs` argument but we only need a ballpark evaluation xv = {x:v if not v.is_number else v.n(prec1)} fe = [f.as_expr().xreplace(xv) for f in factors] # assign integers [0, n) to symbols (if any) for n in subsets(range(bound), k=len(symbols), repetition=True): for s, i in zip(symbols, n): points[s] = i # evaluate the expression at these points candidates = [
42
397
_choose_factor
29
0
4
13
python/ray/serve/controller.py
128,244
[Serve] add alpha gRPC support (#28175)
ray
15
Python
23
controller.py
def get_root_url(self):
    if self.http_state is None:
        return None
    http_config = self.get_http_config()
    if http_config.root_url == "":
        if SERVE_ROOT_URL_ENV_KEY in os.environ:
            return os.environ[SERVE_ROOT_URL_ENV_KEY]
        else:
            return (
                f"http://{http_config.host}:{http_config.port}"
                f"{http_config.root_path}"
            )
    return http_config.root_url
65d0c0aa48be8f9f7faae857d3ab71444997755a
56
https://github.com/ray-project/ray.git
180
def get_root_url(self): if self.http_state is None: return None http_config
12
116
get_root_url
10
0
2
3
timm/models/convnext.py
331,845
Significant model refactor and additions: * All models updated with revised foward_features / forward_head interface * Vision transformer and MLP based models consistently output sequence from forward_features (pooling or token selection considered part of 'head') * WIP param grouping interface to allow consistent grouping of parameters for layer-wise decay across all model types * Add gradient checkpointing support to a significant % of models, especially popular architectures * Formatting and interface consistency improvements across models * layer-wise LR decay impl part of optimizer factory w/ scale support in scheduler * Poolformer and Volo architectures added
pytorch-image-models
9
Python
10
convnext.py
def set_grad_checkpointing(self, enable=True):
    for s in self.stages:
        s.grad_checkpointing = enable
372ad5fa0dbeb74dcec81db06e9ff69b3d5a2eb6
21
https://github.com/huggingface/pytorch-image-models.git
27
def set_grad_checkpointing(self, enable=True): for s in s
6
32
set_grad_checkpointing
26
0
1
8
pytorch_lightning/loops/fit_loop.py
241,718
Add DETAIL logs for batch use cases (#11008)
lightning
11
Python
24
fit_loop.py
def advance(self) -> None:  # type: ignore[override]
    log.detail(f"{self.__class__.__name__}: advancing loop")
    assert self.trainer.train_dataloader is not None
    dataloader = self.trainer.strategy.process_dataloader(self.trainer.train_dataloader)
    data_fetcher = self.trainer._data_connector.get_profiled_dataloader(dataloader)

    with self.trainer.profiler.profile("run_training_epoch"):
        self._outputs = self.epoch_loop.run(data_fetcher)
6107ce8e0d2feaed0263c0a60fc6c031603fd9ea
76
https://github.com/Lightning-AI/lightning.git
80
def advance(self) -> None: # type: ignore[override] log.detail(f"{self.__class__.__name__}: advancing loop") assert self.trainer.train_dataloader is not None dataloader = self.trainer.strategy.process_dataloader(self.trainer.train_dataloader) data_fetcher = self.trainer._data_connector.get_profiled_dataloader(dataloader) with self.train
19
138
advance
85
0
11
21
python3.10.4/Lib/inspect.py
218,368
add python 3.10.4 for windows
XX-Net
16
Python
61
inspect.py
def cleandoc(doc):
    try:
        lines = doc.expandtabs().split('\n')
    except UnicodeError:
        return None
    else:
        # Find minimum indentation of any non-blank lines after first line.
        margin = sys.maxsize
        for line in lines[1:]:
            content = len(line.lstrip())
            if content:
                indent = len(line) - content
                margin = min(margin, indent)
        # Remove indentation.
        if lines:
            lines[0] = lines[0].lstrip()
        if margin < sys.maxsize:
            for i in range(1, len(lines)):
                lines[i] = lines[i][margin:]
        # Remove any trailing or leading blank lines.
        while lines and not lines[-1]:
            lines.pop()
        while lines and not lines[0]:
            lines.pop(0)
        return '\n'.join(lines)
8198943edd73a363c266633e1aa5b2a9e9c9f526
156
https://github.com/XX-net/XX-Net.git
277
def cleandoc(doc): try: lines = doc.expandtabs().split('\n') except UnicodeError: return None else: # Find minimum indentation of any non-blank lines after first line. margin = sys.maxsize for line in lines[1:]: content = len(line.lstrip()) if content: indent = len(line) - content margin = min(margin, indent) # Remove indentation. if lines: lines[0] = lines[0].lstrip() if margin < sys.maxsize: for i in range(1, len(lines)): lines[i] = lines[i][margin:] # Remove any trailing or leading blank lines. while lines and not lines[-1]: lines.pop() while lines and not lines[0]: lines.pop(0)
19
260
cleandoc
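The function above is the stdlib inspect.cleandoc helper: it strips the common indentation a docstring picks up from the surrounding code and drops leading and trailing blank lines. A small usage sketch against the public stdlib function, with a made-up input string:

import inspect

raw = """
    First line.
        Indented detail line.

    Last line.
"""

print(inspect.cleandoc(raw))
# First line.
#     Indented detail line.
#
# Last line.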
27
0
1
9
test/test_utils.py
179,381
Format The Codebase - black formatting - isort formatting
gradio
14
Python
27
test_utils.py
def test_should_fail_with_distribution_not_found(self, mock_require):
    mock_require.side_effect = pkg_resources.DistributionNotFound()

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        version_check()
        self.assertEqual(
            str(w[-1].message),
            "gradio is not setup or installed properly. Unable to get version info.",
        )
cc0cff893f9d7d472788adc2510c123967b384fe
55
https://github.com/gradio-app/gradio.git
114
def test_should_fail_with_distribution_not_found(self, mock_require): mock_require.side_effect = pkg_resources.DistributionNotFound() with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") version_check() self.assertEqual( str(w[-1].message), "gradio is not setup or installed properly. Unable to get
15
95
test_should_fail_with_distribution_not_found
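The test above relies on the standard warnings.catch_warnings(record=True) idiom to capture and inspect an emitted warning. A self-contained sketch of that idiom outside gradio — deprecated_helper and its message are invented for the example:

import warnings

def deprecated_helper():
    warnings.warn("use new_helper() instead", DeprecationWarning)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")  # make sure the warning is not suppressed
    deprecated_helper()

assert len(caught) == 1
assert "new_helper" in str(caught[-1].message)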
38
0
1
9
test/test_components.py
180,849
Support for iterative outputs (#2189) * Support for iterative outputs (#2162) (#2188) * added generator demo * fixed demo structure * fixes * fix failing tests due to refactor * test components * adding generators * fixes * iterative * formatting * add all * added demo * demo * formatting * fixed frontend * 3.2.1b release * removed test queue * iterative * formatting * formatting * Support for iterative outputs (#2149) * added generator demo * fixed demo structure * fixes * fix failing tests due to refactor * test components * adding generators * fixes * iterative * formatting * add all * added demo * demo * formatting * fixed frontend * 3.2.1b release * iterative * formatting * formatting * reverted queue everywhere * added queue to demos * added fake diffusion with gif * add to demos * more complex counter * fixes * image gif * fixes * version * merged * added support for state * formatting * generating animation * fix * tests, iterator * tests * formatting * tests for queuing * version * generating orange border animation * testings * added to documentation Co-authored-by: Ali Abid <aabid94@gmail.com>
gradio
12
Python
29
test_components.py
async def test_in_blocks(self):
    with gr.Blocks() as demo:
        score = gr.State()
        btn = gr.Button()
        btn.click(lambda x: x + 1, score, score)

    result = await demo.call_function(0, [0])
    assert result["prediction"] == 1
    result = await demo.call_function(0, [result["prediction"]])
    assert result["prediction"] == 2
bf1510165ddd8c0d5b29adf67dfed967995e8a5b
86
https://github.com/gradio-app/gradio.git
105
async def test_in_blocks(self): with gr.Blocks() as demo: score = gr.State()
13
144
test_in_blocks
251
0
2
77
python/ccxt/stex.py
16,445
1.70.39 [ci skip]
ccxt
18
Python
145
stex.py
def fetch_markets(self, params={}): request = { 'code': 'ALL', } response = self.publicGetCurrencyPairsListCode(self.extend(request, params)) # # { # "success":true, # "data":[ # { # "id":935, # "currency_id":662, # "currency_code":"ABET", # "currency_name":"Altbet", # "market_currency_id":1, # "market_code":"BTC", # "market_name":"Bitcoin", # "min_order_amount":"0.00000010", # "min_buy_price":"0.00000001", # "min_sell_price":"0.00000001", # "buy_fee_percent":"0.20000000", # "sell_fee_percent":"0.20000000", # "active":true, # "delisted":false, # "pair_message":"", # "currency_precision":8, # "market_precision":8, # "symbol":"ABET_BTC", # "group_name":"BTC", # "group_id":1 # } # ] # } # result = [] markets = self.safe_value(response, 'data', []) for i in range(0, len(markets)): market = markets[i] id = self.safe_string(market, 'id') numericId = self.safe_integer(market, 'id') baseId = self.safe_string(market, 'currency_id') quoteId = self.safe_string(market, 'market_currency_id') baseNumericId = self.safe_integer(market, 'currency_id') quoteNumericId = self.safe_integer(market, 'market_currency_id') base = self.safe_currency_code(self.safe_string(market, 'currency_code')) quote = self.safe_currency_code(self.safe_string(market, 'market_code')) minBuyPrice = self.safe_string(market, 'min_buy_price') minSellPrice = self.safe_string(market, 'min_sell_price') minPrice = Precise.string_max(minBuyPrice, minSellPrice) buyFee = Precise.string_div(self.safe_string(market, 'buy_fee_percent'), '100') sellFee = Precise.string_div(self.safe_string(market, 'sell_fee_percent'), '100') fee = Precise.string_max(buyFee, sellFee) result.append({ 'id': id, 'numericId': numericId, 'symbol': base + '/' + quote, 'base': base, 'quote': quote, 'settle': None, 'baseId': baseId, 'quoteId': quoteId, 'settleId': None, 'baseNumericId': baseNumericId, 'quoteNumericId': quoteNumericId, 'type': 'spot', 'spot': True, 'margin': False, 'swap': False, 'future': False, 'option': False, 'active': self.safe_value(market, 'active'), 'contract': False, 'linear': None, 'inverse': None, 'taker': fee, 'maker': fee, 'contractSize': None, 'expiry': None, 'expiryDatetime': None, 'strike': None, 'optionType': None, 'precision': { 'price': self.safe_integer(market, 'market_precision'), 'amount': self.safe_integer(market, 'currency_precision'), }, 'limits': { 'leverage': { 'min': None, 'max': None, }, 'amount': { 'min': self.safe_number(market, 'min_order_amount'), 'max': None, }, 'price': { 'min': minPrice, 'max': None, }, 'cost': { 'min': None, 'max': None, }, }, 'info': market, }) return result
599367bddf0348d9491990623efcf32c1158d48f
460
https://github.com/ccxt/ccxt.git
1,945
def fetch_markets(self, params={}): request = { 'code': 'ALL', } response = self.publicGetCurrencyPairsListCode(self.extend(request, params)) # # { # "success":true, # "data":[ # { # "id":935, # "currency_id":662, # "currency_code":"ABET", # "currency_name":"Altbet", # "market_currency_id":1, # "market_code":"BTC", # "market_name":"Bitcoin", # "min_order_amount":"0.00000010", # "min_buy_price":"0.00000001", # "min_sell_price":"0.00000001", # "buy_fee_percent":"0.20000000", # "sell_fee_percent":"0.20000000", # "active":true, # "delisted":false, # "pair_message":"", # "currency_precision":8, # "market_precision":8, # "symbol":"ABET_BTC", # "group_name":"BTC", # "group_id":1 # } # ] # } # result = [] markets = self.safe_value(response, 'data', []) for i in range(0, len(markets)): market = markets[i] id = self.safe_string(market, 'id') numericId = self.safe_integer(market, 'id') baseId = self.safe_string(market, 'currency_id') quoteId = self.safe_string(market, 'market_currency_id') baseNumericId = self.safe_integer(market, 'currency_id') quoteNumericId = self.safe_integer(market, 'market_currency_id') base = self.safe_currency_code(self.safe_string(market, 'currency_code')) quote = self.safe_currency_code(self.safe_string(market, 'market_code')) minBuyPrice = self.safe_string(market, 'min_buy_price') minSellPrice = self.safe_string(market, 'min_sell_price') minPrice = Precise.string_max(minBuyPrice, minSellPrice) buyFee = Precise.string_div(self.safe_string(market, 'buy_fee_percent'), '100') sellFee = Precise.string_div(self.safe_string(market, 'sell_fee_percent'), '100') fee = Precise.string_max(buyFee, sellFee) result.append({ 'id': id, 'numericId': numericId, 'symbol': base + '/' + quote, 'base': base, 'quote': quote, 'settle': None, 'baseId': baseId, 'quoteId': quoteId, 'settleId': None, 'baseNumericId': baseNumericId, 'quoteNumericId': quoteNumericId, 'type': 'spot', 'spot': True, 'margin': False, 'swap': False, 'future': False, 'option': False, 'active': self.safe_value(market, 'active'), 'contract': False, 'linear': None, 'inverse': None, 'taker': fee, 'maker': fee, 'contractSize': None, 'expiry': None, 'expiryDatetime': None, 'strike': None, 'optionType': None, 'precision': { 'price': self.safe_integer(market, 'market_precision'), 'amount': self.safe_integer(market, 'currency_precision'), }, 'limits': { 'leverage': {
36
814
fetch_markets
19
0
1
8
pandas/core/indexes/interval.py
168,510
Revert Interval/IntervalIndex/interval_range.inclusive deprecation (#48116) * Revert "Cln tests interval wrt inclusive (#47775)" This reverts commit 2d6e0b251955d3a2c0c88f7e6ddb57b335ed09b7. * Revert "CLN: Rename private variables to inclusive (#47655)" This reverts commit 102b3ca2119df822e2b0f346fa936d0fe9f17501. * Revert "TYP: Improve typing interval inclusive (#47646)" This reverts commit 55064763e8ba55f6ff5370a8dd083767a189d7a4. * Revert "DEPR: Deprecate set_closed and add set_incluive (#47636)" This reverts commit bd4ff395cbbf4cbde1fc8f1f746cae064a401638. * Revert "DEPR: Remove deprecation from private class IntervalTree (#47637)" This reverts commit f6658ef9fdef5972214fdc338e2c6b5ee308dbf4. * Revert "Revert inclusive default change of IntervalDtype (#47367)" This reverts commit d9dd1289e07d86928d144e53beb3d5b8ab3c2215. * Revert "ENH: consistency of input args for boundaries - Interval (#46522)" This reverts commit 7e23a37e1c5bda81234801a6584563e2880769eb. * Revert "ENH: consistency of input args for boundaries - pd.interval_range (#46355)" This reverts commit 073b3535d7a5171102e5915c38b57c21d13795ae. * Fix ArrowIntervalType manually * Remove unused import * Fix doctest and leftover usage * Fix remaining tests * Fix wording in doctoring Co-authored-by: Patrick Hoefler <61934744+phofl@users.noreply.github.com>
pandas
9
Python
19
interval.py
def __reduce__(self):
    d = {
        "left": self.left,
        "right": self.right,
        "closed": self.closed,
        "name": self.name,
    }
    return _new_IntervalIndex, (type(self), d), None
252ae0555abf488522f947107dcdee684be6ac8a
46
https://github.com/pandas-dev/pandas.git
83
def __reduce__(self): d = { "left": self.left, "right": self
9
74
__reduce__
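The __reduce__ above hands pickle a reconstructor plus its arguments (here _new_IntervalIndex with the class and a state dict). A minimal, generic sketch of the same protocol with an invented Point class and _new_point reconstructor, unrelated to pandas:

import pickle

def _new_point(cls, state):
    # reconstructor: rebuild the object from the pickled class and state dict
    return cls(state["x"], state["y"])

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __reduce__(self):
        # (callable, args) pair that pickle will call to recreate the object
        return _new_point, (type(self), {"x": self.x, "y": self.y})

restored = pickle.loads(pickle.dumps(Point(1, 2)))
assert (restored.x, restored.y) == (1, 2)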
21
0
3
11
ludwig/serve.py
7,439
Serve json numpy encoding (#2316)
ludwig
11
Python
19
serve.py
def server(model, allowed_origins=None):
    middleware = [Middleware(CORSMiddleware, allow_origins=allowed_origins)] if allowed_origins else None
    app = FastAPI(middleware=middleware)

    input_features = {f[COLUMN] for f in model.config["input_features"]}
5069f19bc289592c3d57969531e56271cb0bc538
81
https://github.com/ludwig-ai/ludwig.git
29
def server(model, allowed_origins=None): middleware = [Middleware(CORSMiddleware, allow_o
13
76
server
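The snippet above wires CORS into FastAPI through a middleware list. A hedged, standalone sketch of that wiring without the Ludwig model plumbing — the allowed origin and the /health route are placeholders:

from fastapi import FastAPI
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware

allowed_origins = ["http://localhost:3000"]  # assumption: origins come from configuration
middleware = [Middleware(CORSMiddleware, allow_origins=allowed_origins)]
app = FastAPI(middleware=middleware)

@app.get("/health")
def health():
    return {"status": "ok"}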
9
0
1
2
wagtail/search/tests/test_backends.py
71,025
Fix warnings from flake8-comprehensions.
wagtail
9
Python
9
test_backends.py
def assertUnsortedListEqual(self, a, b):
    self.assertListEqual(sorted(a), sorted(b))


# SEARCH TESTS
de3fcba9e95818e9634ab7de6bfcb1f4221f2775
24
https://github.com/wagtail/wagtail.git
26
def assertUnsortedListEqual(self, a, b):
6
40
assertUnsortedListEqual
54
0
1
12
wagtail/admin/tests/api/test_images.py
71,301
Reformat with black
wagtail
12
Python
46
test_images.py
def test_thumbnail(self):
    # Add a new image with source file
    image = get_image_model().objects.create(
        title="Test image",
        file=get_test_image_file(),
    )

    response = self.get_response(image.id)
    content = json.loads(response.content.decode("UTF-8"))

    self.assertIn("thumbnail", content)
    self.assertEqual(content["thumbnail"]["width"], 165)
    self.assertEqual(content["thumbnail"]["height"], 123)
    self.assertTrue(content["thumbnail"]["url"].startswith("/media/images/test"))

    # Check that source_image_error didn't appear
    self.assertNotIn("source_image_error", content["meta"])


# Overwrite imported test cases do Django doesn't run them
TestImageDetail = None
TestImageListing = None
d10f15e55806c6944827d801cd9c2d53f5da4186
115
https://github.com/wagtail/wagtail.git
149
def test_thumbnail(self): # Add a new image with source file image = get_image_model().objects.create(
23
213
test_thumbnail
71
0
5
19
python3.10.4/Lib/distutils/_msvccompiler.py
222,539
add python 3.10.4 for windows
XX-Net
16
Python
55
_msvccompiler.py
def _find_vc2017():
    root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles")
    if not root:
        return None, None

    try:
        path = subprocess.check_output([
            os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
            "-latest",
            "-prerelease",
            "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
            "-property", "installationPath",
            "-products", "*",
        ], encoding="mbcs", errors="strict").strip()
    except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
        return None, None

    path = os.path.join(path, "VC", "Auxiliary", "Build")
    if os.path.isdir(path):
        return 15, path

    return None, None

PLAT_SPEC_TO_RUNTIME = {
    'x86' : 'x86',
    'x86_amd64' : 'x64',
    'x86_arm' : 'arm',
    'x86_arm64' : 'arm64'
}
8198943edd73a363c266633e1aa5b2a9e9c9f526
135
https://github.com/XX-net/XX-Net.git
206
def _find_vc2017(): root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles") if not root: return None, None try: path = subprocess.check_output([ os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"), "-latest", "-prerelease", "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", "-property", "installationPath", "-products", "*", ], encoding="m
17
275
_find_vc2017
19
0
1
6
seaborn/tests/_core/test_mappings.py
40,840
Thoroughly update scaling logic and internal API
seaborn
12
Python
16
test_mappings.py
def test_categorical_multi_lookup_categorical(self):
    x = pd.Series(["a", "b", "c"]).astype("category")
    colors = color_palette(n_colors=len(x))
    scale = get_default_scale(x)
    m = ColorSemantic().setup(x, scale)
    assert_series_equal(m(x), pd.Series(colors))
6f3077f12b7837106ba0a79740fbfd547628291b
67
https://github.com/mwaskom/seaborn.git
53
def test_categorical_multi_lookup_categorical(self): x = pd.Series(["a", "b", "c"]).astype("category") colors = color_palette(n_colors=len(x)) scale = get_default_scale(x) m = ColorSemantic().setup(x, scale) assert_series_equal(m
16
114
test_categorical_multi_lookup_categorical
40
0
1
12
mkdocs/tests/config/config_options_tests.py
224,742
Refactor tests for ConfigOption errors
mkdocs
10
Python
38
config_options_tests.py
def test_deprecated_option_move(self):
    option = config_options.Deprecated(moved_to='new')
    config = {'old': 'value'}
    option.pre_validation(config, 'old')
    self.assertEqual(
        option.warnings,
        [
            "The configuration option 'old' has been deprecated and will be removed in a "
            "future release of MkDocs. Use 'new' instead."
        ],
    )
    self.assertEqual(config, {'new': 'value'})
13b9c0dbd18d5a1b9705ca171a9e3b383a8e7d97
56
https://github.com/mkdocs/mkdocs.git
144
def test_deprecated_option_move(self): option = config_options.Deprecated(moved_to='new') config = {'old': 'value'} option.pre_validation(config, 'old') self.assertEqual( option.warnings, [ "The configuration option 'old' has been deprecated and will be removed in a " "future release of MkD
10
103
test_deprecated_option_move
46
0
4
20
homeassistant/components/forked_daapd/media_player.py
287,963
Add browse media to forked-daapd (#79009) * Add browse media to forked-daapd * Use elif in async_browse_image * Add tests * Add tests * Add test * Fix test
core
18
Python
36
media_player.py
async def async_turn_on(self) -> None:
    # restore state
    await self.api.set_volume(volume=self._last_volume * 100)
    if self._last_outputs:
        futures: list[asyncio.Task[int]] = []
        for output in self._last_outputs:
            futures.append(
                asyncio.create_task(
                    self.api.change_output(
                        output["id"],
                        selected=output["selected"],
                        volume=output["volume"],
                    )
                )
            )
        await asyncio.wait(futures)
    else:  # enable all outputs
        await self.api.set_enabled_outputs(
            [output["id"] for output in self._outputs]
        )
499c3410d1177eeec478af366e275a41b3e6ea60
114
https://github.com/home-assistant/core.git
347
async def async_turn_on(self) -> None: # restore state await self.api.set_volume(volume=self._last_volume * 100) if self._last_outputs: futures: list[asyncio.Task[int]] = [] for output in self._last_outputs: futures.append( asyncio.create_task( self.api.change_output( output["id"], selected=output["selected"], volume=output["volume"], ) ) ) await asyncio.w
20
188
async_turn_on
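The coroutine above fans out one task per output and waits for all of them to complete. A generic asyncio sketch of that fan-out/await pattern — change_output below is a stand-in for the real API call, not the forked-daapd client:

import asyncio

async def change_output(output_id: int) -> int:
    await asyncio.sleep(0)  # placeholder for an API round-trip
    return output_id

async def main() -> None:
    tasks = [asyncio.create_task(change_output(i)) for i in range(3)]
    done, pending = await asyncio.wait(tasks)
    assert not pending and len(done) == 3

asyncio.run(main())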
17
0
7
25
nuitka/tools/quality/Git.py
178,970
Quality: Fix formatting when adding files on Windows * These have the wrong newlines potentially, so try again in case of failure after cleaning the newlines in checkout.
Nuitka
12
Python
15
Git.py
def updateWorkingFile(path, orig_object_hash, new_object_hash):
    patch = check_output(
        ["git", "diff", "--no-color", orig_object_hash, new_object_hash]
    )

    git_path = path.replace(os.path.sep, "/").encode("utf8")
033d29fee17fbf13a53bf89f89ca5c444ff3dd0b
166
https://github.com/Nuitka/Nuitka.git
32
def updateWorkingFile(path, orig_object_hash, new_object_hash): patch = check_output( ["git", "diff", "--no-color", orig_object_hash, new_object_hash] ) git_path = path.replace(os.path.sep, "/").encode("utf8")
11
73
updateWorkingFile
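The helper above shells out to git to obtain a patch between two object hashes. A hedged sketch of that call in isolation — the hashes in the usage note are placeholders, and the command is assumed to run inside a repository that contains both objects:

import subprocess

def blob_diff(orig_object_hash: str, new_object_hash: str) -> bytes:
    # emits a plain-text patch between the two objects, without color codes
    return subprocess.check_output(
        ["git", "diff", "--no-color", orig_object_hash, new_object_hash]
    )

# usage sketch (hashes shortened/invented):
# patch = blob_diff("0123abc", "4567def")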
13
0
2
5
pyxel/editor/music_editor.py
111,049
Renamed the sounds property of Music
pyxel
9
Python
12
music_editor.py
def get_field(self, index):
    if index >= pyxel.NUM_CHANNELS:
        return
    music = pyxel.music(self.music_no_var)
    return music.snds_list[index]
4f4459f6f8d37d3b687f7844e63abb0f672d8a98
32
https://github.com/kitao/pyxel.git
44
def get_field(self, index): if index >= pyxel.NUM_CHANNELS: return music = pyxel.music(self.music_no_var) return music.snds_list[index]
8
50
get_field
267
0
23
70
gradio/documentation.py
180,603
Fix default value in docs for objects (#1900)
gradio
17
Python
138
documentation.py
def document_fn(fn):
    doc_str = inspect.getdoc(fn)
    doc_lines = doc_str.split("\n")
    signature = inspect.signature(fn)
    description, parameters, returns, examples = [], {}, [], []
    mode = "description"
    for line in doc_lines:
        line = line.rstrip()
        if line == "Parameters:":
            mode = "parameter"
        elif line == "Example:":
            mode = "example"
        elif line == "Returns:":
            mode = "return"
        else:
            if mode == "description":
                description.append(line if line.strip() else "<br>")
                continue
            assert line.startswith(
                "    "
            ), f"Documentation format for {fn.__name__} has format error in line: {line}"
            line = line[4:]
            if mode == "parameter":
                colon_index = line.index(": ")
                assert (
                    colon_index > -1
                ), f"Documentation format for {fn.__name__} has format error in line: {line}"
                parameter = line[:colon_index]
                parameter_doc = line[colon_index + 2 :]
                parameters[parameter] = parameter_doc
            elif mode == "return":
                returns.append(line)
            elif mode == "example":
                examples.append(line)
    description_doc = " ".join(description)
    parameter_docs = []
    for param_name, param in signature.parameters.items():
        if param_name.startswith("_"):
            continue
        if param_name == "kwargs" and param_name not in parameters:
            continue
        parameter_doc = {
            "name": param_name,
            "annotation": param.annotation,
            "kind": param.kind.description,
            "doc": parameters.get(param_name),
        }
        if param_name in parameters:
            del parameters[param_name]
        if param.default != inspect.Parameter.empty:
            default = param.default
            if type(default) == str:
                default = '"' + default + '"'
            if default.__class__.__module__ != "builtins":
                default = f"{default.__class__.__name__}()"
            parameter_doc["default"] = default
        elif parameter_doc["doc"] is not None and "kwargs" in parameter_doc["doc"]:
            parameter_doc["kwargs"] = True
        parameter_docs.append(parameter_doc)
    assert (
        len(parameters) == 0
    ), f"Documentation format for {fn.__name__} documents nonexistent parameters: {''.join(parameters.keys())}"
    if len(returns) == 0:
        return_docs = {}
    elif len(returns) == 1:
        return_docs = {"annotation": signature.return_annotation, "doc": returns[0]}
    else:
        return_docs = {}
        # raise ValueError("Does not support multiple returns yet.")
    examples_doc = "\n".join(examples) if len(examples) > 0 else None
    return description_doc, parameter_docs, return_docs, examples_doc
3ef4d4da4c1d39818d8bde82701f5e75b4b2cbe8
436
https://github.com/gradio-app/gradio.git
899
def document_fn(fn): doc_str = inspect.getdoc(fn) doc_lines = doc_str.split("\n") signature = inspect.signature(fn) description, parameters, returns, examples = [], {}, [], [] mode = "description" for line in doc_lines: line = line.rstrip() if line == "Parameters:": mode = "parameter" elif line == "Example:": mode = "example" elif line == "Returns:": mode = "return" else: if mode == "description": description.append(line if line.strip() else "<br>") continue assert line.startswith( " " ), f"Documentation format for {fn.__name__} has format error in line: {line}" line = line[4:] if mode == "parameter": colon_index = line.index(": ") assert ( colon_index > -1 ), f"Documentation format for {fn.__name__} has format error in line: {line}" parameter = line[:colon_index] parameter_doc = line[colon_index + 2 :] parameters[parameter] = parameter_doc elif mode == "return": returns.append(line) elif mode == "example": examples.append(line) description_doc = " ".join(description) parameter_docs = [] for param_name, param in signature.parameters.items(): if param_name.startswith("_"): continue if param_name == "kwargs" and param_name not in parameters: continue parameter_doc = { "name": param_name, "annotation": param.annotation, "kind": param.kind.description, "doc": parameters.get(param_name), } if param_name in parameters:
44
808
document_fn
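The parser above expects a plain description followed by "Parameters:", "Returns:" and "Example:" sections whose entries are indented by four spaces and use "name: description" lines. Below is a hypothetical function (not part of gradio) whose docstring follows that layout; running the record's document_fn on it should yield the description, both parameter docs, and the single return doc.

def greet(name, excited=False):
    """
    Builds a short greeting string.
    Parameters:
        name: who to greet
        excited: whether to append an exclamation mark
    Returns:
        the greeting text
    Example:
        greet("Ada", excited=True)
    """
    return f"Hello, {name}" + ("!" if excited else "")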
133
0
1
33
pandas/tests/indexing/test_indexing.py
166,554
DEPR: df.iloc[:, foo] = bar attempt to set inplace (#45333)
pandas
13
Python
50
test_indexing.py
def test_astype_assignment(self):
    # GH4312 (iloc)
    df_orig = DataFrame(
        [["1", "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
    )

    df = df_orig.copy()
    msg = "will attempt to set the values inplace instead"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
    expected = DataFrame(
        [[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
    )
    tm.assert_frame_equal(df, expected)

    df = df_orig.copy()
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
    expected = DataFrame(
        [[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
    )
    tm.assert_frame_equal(df, expected)

    # GH5702 (loc)
    df = df_orig.copy()
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df.loc[:, "A"] = df.loc[:, "A"].astype(np.int64)
    expected = DataFrame(
        [[1, "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
    )
    tm.assert_frame_equal(df, expected)

    df = df_orig.copy()
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df.loc[:, ["B", "C"]] = df.loc[:, ["B", "C"]].astype(np.int64)
    expected = DataFrame(
        [["1", 2, 3, ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
    )
    tm.assert_frame_equal(df, expected)
46bcf3740b38339f62b94e66ec29537a28a17140
387
https://github.com/pandas-dev/pandas.git
406
def test_astype_assignment(self): # GH4312 (iloc) df_orig = DataFrame( [["1", "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG") ) df = df_orig.copy() msg = "will attempt to set the values inplace instead" with tm.assert_produces_warning(Futu
23
613
test_astype_assignment
40
0
3
9
wagtail/images/tests/test_jinja2.py
75,262
Reformat with black
wagtail
12
Python
30
test_jinja2.py
def render(self, string, context=None, request_context=True):
    if context is None:
        context = {}

    # Add a request to the template, to simulate a RequestContext
    if request_context:
        site = Site.objects.get(is_default_site=True)
        request = self.client.get("/test/", HTTP_HOST=site.hostname)
        context["request"] = request

    template = self.engine.from_string(string)
    return template.render(context)
d10f15e55806c6944827d801cd9c2d53f5da4186
78
https://github.com/wagtail/wagtail.git
118
def render(self, string, context=None, request_context=True): if context is None: context = {} # Add a request to the template, to simulate a RequestContext if request_context:
17
125
render
54
0
1
22
tests/snuba/api/endpoints/test_organization_events.py
86,354
test(perf-issues): Improve how performance issues are created in tests (#39293) This PR updates `Factories.store_event` and `load_data` to support creation of performance groups for transactions. Once this PR is merged, I will update all instances of `hack_pull_out_data`. Resolved ISP-16
sentry
14
Python
30
test_organization_events.py
def test_has_performance_issue_ids(self):
    data = load_data(
        platform="transaction",
        fingerprint=[f"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1"],
    )
    self.store_event(data=data, project_id=self.project.id)

    query = {
        "field": ["count()"],
        "statsPeriod": "1h",
        "query": "has:performance.issue_ids",
    }
    response = self.do_request(query)
    assert response.status_code == 200, response.content
    assert response.data["data"][0]["count()"] == 1

    query = {
        "field": ["count()"],
        "statsPeriod": "1h",
        "query": "!has:performance.issue_ids",
    }
    response = self.do_request(query)
    assert response.status_code == 200, response.content
    assert response.data["data"][0]["count()"] == 0
5d8a666bebd4d4b0b0200af5ed37ba504e0895be
139
https://github.com/getsentry/sentry.git
232
def test_has_performance_issue_ids(self): data = load_data( platform="transaction", fingerprint=[f"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1"], ) self.store_event(data=data, project_id=self.project.id) query = { "field": ["c
18
247
test_has_performance_issue_ids
39
0
4
12
recommenders/models/ncf/dataset.py
39,206
fix docstrings
recommenders
15
Python
27
dataset.py
def __next__(self):
    if self.next_row:
        self.row = self.next_row
    elif self.line_num == 0:
        self.row = self._extract_row_data(next(self.reader, None))
        if self.row is None:
            raise EmptyFileException("{} is empty.".format(self.filename))
    else:
        raise StopIteration  # end of file
    self.next_row = self._extract_row_data(next(self.reader, None))
    self.line_num += 1
    return self.row
87970de68431d511a1ea28f838be1f9eba9b4c02
90
https://github.com/microsoft/recommenders.git
140
def __next__(self):
    if self.next_row:
        self.row = self.next_row
    elif self.line_num == 0:
        self.row = self._extract_row_data(next(self.reader, None))
        if self.row is None:
            raise EmptyFileException("{} is empty.".format(self.filename))
    else:
        raise StopIteration  # end of file

    self.next_row = self._extract_
12
145
__next__
45
0
2
10
lib/matplotlib/backends/_backend_tk.py
108,893
Make it easier to improve UI event metadata. Currently, UI events (MouseEvent, KeyEvent, etc.) are generated by letting the GUI-specific backends massage the native event objects into a list of args/kwargs and then call `FigureCanvasBase.motion_notify_event`/`.key_press_event`/etc. This makes it a bit tricky to improve the metadata on the events, because one needs to change the signature on both the `FigureCanvasBase` method and the event class. Moreover, the `motion_notify_event`/etc. methods are directly bound as event handlers in the gtk3 and tk backends, and thus have incompatible signatures there. Instead, the native GUI handlers can directly construct the relevant event objects and trigger the events themselves; a new `Event._process` helper method makes this even shorter (and allows to keep factoring some common functionality e.g. for tracking the last pressed button or key). As an example, this PR also updates figure_leave_event to always correctly set the event location based on the *current* cursor position, instead of the last triggered location event (which may be outdated); this can now easily be done on a backend-by-backend basis, instead of coordinating the change with FigureCanvasBase.figure_leave_event. This also exposed another (minor) issue, in that resize events often trigger *two* calls to draw_idle -- one in the GUI-specific handler, and one in FigureCanvasBase.draw_idle (now moved to ResizeEvent._process, but should perhaps instead be a callback autoconnected to "resize_event") -- could probably be fixed later.
matplotlib
13
Python
38
_backend_tk.py
def scroll_event_windows(self, event):
    # need to find the window that contains the mouse
    w = event.widget.winfo_containing(event.x_root, event.y_root)
    if w != self._tkcanvas:
        return
    x = self._tkcanvas.canvasx(event.x_root - w.winfo_rootx())
    y = (self.figure.bbox.height
         - self._tkcanvas.canvasy(event.y_root - w.winfo_rooty()))
    step = event.delta / 120
    MouseEvent("scroll_event", self, x, y, step=step, guiEvent=event)._process()
4e21912d2938b0e8812c4d1f7cd902c080062ff2
107
https://github.com/matplotlib/matplotlib.git
142
def scroll_event_windows(self, event):
    # need to find the window that co
23
169
scroll_event_windows
96
0
7
27
dashboard/datacenter.py
129,834
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
15
Python
60
datacenter.py
async def _get_actor(actor):
    actor = dict(actor)
    worker_id = actor["address"]["workerId"]
    core_worker_stats = DataSource.core_worker_stats.get(worker_id, {})
    actor_constructor = core_worker_stats.get(
        "actorTitle", "Unknown actor constructor"
    )
    actor["actorConstructor"] = actor_constructor
    actor.update(core_worker_stats)

    # TODO(fyrestone): remove this, give a link from actor
    # info to worker info in front-end.
    node_id = actor["address"]["rayletId"]
    pid = core_worker_stats.get("pid")
    node_physical_stats = DataSource.node_physical_stats.get(node_id, {})
    actor_process_stats = None
    actor_process_gpu_stats = []
    if pid:
        for process_stats in node_physical_stats.get("workers", []):
            if process_stats["pid"] == pid:
                actor_process_stats = process_stats
                break

        for gpu_stats in node_physical_stats.get("gpus", []):
            for process in gpu_stats.get("processes", []):
                if process["pid"] == pid:
                    actor_process_gpu_stats.append(gpu_stats)
                    break

    actor["gpus"] = actor_process_gpu_stats
    actor["processStats"] = actor_process_stats
    return actor
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
175
https://github.com/ray-project/ray.git
387
async def _get_actor(actor):
    actor = dict(actor)
    worker_id = actor["address"]["workerId"]
    core_w
18
303
_get_actor
91
0
6
32
jina/orchestrate/deployments/config/docker_compose.py
12,054
feat: add default volume to dockererized executors (#4554)
jina
16
Python
72
docker_compose.py
def get_runtime_config(self) -> List[Dict]:
    # One Dict for replica
    replica_configs = []
    for i_rep in range(self.service_args.replicas):
        cargs = copy.copy(self.service_args)
        cargs.name = (
            f'{cargs.name}/rep-{i_rep}'
            if self.service_args.replicas > 1
            else cargs.name
        )
        env = cargs.env
        image_name = self._get_image_name(cargs.uses)
        container_args = self._get_container_args(cargs)
        config = {
            'image': image_name,
            'entrypoint': ['jina'],
            'command': container_args,
            'healthcheck': {
                'test': f'python -m jina.resources.health_check.pod localhost:{cargs.port}',
                'interval': '2s',
            },
            'environment': [
                f'JINA_LOG_LEVEL={os.getenv("JINA_LOG_LEVEL", "INFO")}'
            ],
        }
        if env is not None:
            config['environment'] = [f'{k}={v}' for k, v in env.items()]

        if self.service_args.pod_role == PodRoleType.WORKER:
            config = self._update_config_with_volumes(
                config, auto_volume=not self.common_args.disable_auto_volume
            )

        replica_configs.append(config)
    return replica_configs
984e743734b18c1117bbbc2eda49d7eceaa9343f
179
https://github.com/jina-ai/jina.git
638
def get_runtime_config(self) -> List[Dict]:
    # One Dict for replica
    replica_configs = []
    for i_rep in range(self.service_args.replicas):
        cargs = copy.copy(self.service_args)
        cargs.name = (
            f'{cargs.name}/rep-{i_rep
33
337
get_runtime_config
13
0
1
6
keras/legacy_tf_layers/core_test.py
274,372
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
10
Python
13
core_test.py
def testDropoutProperties(self):
    dp = core_layers.Dropout(0.5, name="dropout")
    self.assertEqual(dp.rate, 0.5)
    self.assertEqual(dp.noise_shape, None)
    dp(tf.ones(()))
    self.assertEqual(dp.name, "dropout")
84afc5193d38057e2e2badf9c889ea87d80d8fbf
61
https://github.com/keras-team/keras.git
47
def testDropoutProperties(self):
    dp = core_layers
11
93
testDropoutProperties
42
0
2
10
dashboard/modules/healthz/tests/test_healthz.py
124,558
[dashboard][2/2] Add endpoints to dashboard and dashboard_agent for liveness check of raylet and gcs (#26408) ## Why are these changes needed? As in this https://github.com/ray-project/ray/pull/26405 we added the health check for gcs and raylets. This PR expose them in the endpoint in dashboard and dashboard agent. For dashboard, we added `http://host:port/api/gcs_healthz` and it'll send RPC to GCS directly to see whether the GCS is alive or not. For agent, we added `http://host:port/api/local_raylet_healthz` and it'll send RPC to GCS to check whether raylet is alive or not. We think raylet is live if - GCS is dead - GCS is alive but GCS think the raylet is dead If GCS is dead for more than X seconds (60 by default), raylet will just crash itself, so KubeRay can still catch it.
ray
14
Python
39
test_healthz.py
def test_healthz_head(ray_start_cluster):
    dashboard_port = find_free_port()
    h = ray_start_cluster.add_node(dashboard_port=dashboard_port)
    uri = f"http://localhost:{dashboard_port}/api/gcs_healthz"
    wait_for_condition(lambda: requests.get(uri).status_code == 200)

    h.all_processes[ray_constants.PROCESS_TYPE_GCS_SERVER][0].process.kill()
    # It'll either timeout or just return an error
    try:
        wait_for_condition(lambda: requests.get(uri, timeout=1) != 200, timeout=4)
    except RuntimeError as e:
        assert "Read timed out" in str(e)
a68c02a15d041f987359c73781fb38202041a16f
91
https://github.com/ray-project/ray.git
79
def test_healthz_head(ray_start_cluster):
    dashboard_port = find_free_port()
    h = ray_start_cluster.add_node(dashboard_port=dashboard_port)
    uri = f"http://localhost:{dashboard_port}/api/gcs_healthz"
    wait_for_condition(lambda: requests.get(uri).status_code == 200)

    h.all_processes[ray_constants.PROCESS_TYPE_GCS_SERVER][0].process.kill()
    # It'll either timeout or just return an error
    try:
        wait_for_condition(lambda: requests.get(uri, timeout=1) != 200, timeout=4)
    except RuntimeError as e:
        assert "Read timed out" in str(e)
20
153
test_healthz_head
48
0
1
10
tests/sentry/api_gateway/test_proxy.py
86,558
feat(api-gateway): Proxy GET requests (#39595) This change introduces a proxy manager that proxies requests to a region silo given an org slug Currently, this only handles GETs and JSON response bodies. Later PRs will handle other methods and body types.
sentry
11
Python
39
test_proxy.py
def test_query_params(self, region_fnc_patch):
    query_param_dict = dict(foo="bar", numlist=["1", "2", "3"])
    query_param_str = urlencode(query_param_dict, doseq=True)
    request = RequestFactory().get(f"http://sentry.io/echo?{query_param_str}")
    region_fnc_patch.return_value = SENTRY_REGION_CONFIG[0]

    resp = proxy_request(request, self.organization.slug)
    resp_json = json.loads(b"".join(resp.streaming_content))

    assert resp.status_code == 200
    # parse_qs returns everything in a list, including single arguments
    assert query_param_dict["foo"] == resp_json["foo"][0]
    assert query_param_dict["numlist"] == resp_json["numlist"]
ff8ef470d2fdb80df0a57890ead1e4a792ac99a2
111
https://github.com/getsentry/sentry.git
117
def test_query_params(self, region_fnc_patch):
    query_param_dict = dict(foo="bar", numlist=["1", "2", "3"])
    query_param_str = url
25
188
test_query_params
125
0
21
24
modules/safe.py
152,809
added guard for torch.load to prevent loading pickles with unknown content
stable-diffusion-webui
12
Python
57
safe.py
def find_class(self, module, name):
    if module == 'collections' and name == 'OrderedDict':
        return getattr(collections, name)
    if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter']:
        return getattr(torch._utils, name)
    if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage']:
        return getattr(torch, name)
    if module == 'torch.nn.modules.container' and name in ['ParameterDict']:
        return getattr(torch.nn.modules.container, name)
    if module == 'numpy.core.multiarray' and name == 'scalar':
        return numpy.core.multiarray.scalar
    if module == 'numpy' and name == 'dtype':
        return numpy.dtype
    if module == '_codecs' and name == 'encode':
        return encode
    if module == "pytorch_lightning.callbacks" and name == 'model_checkpoint':
        import pytorch_lightning.callbacks
        return pytorch_lightning.callbacks.model_checkpoint
    if module == "pytorch_lightning.callbacks.model_checkpoint" and name == 'ModelCheckpoint':
        import pytorch_lightning.callbacks.model_checkpoint
        return pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint
    if module == "__builtin__" and name == 'set':
        return set

    # Forbid everything else.
    raise pickle.UnpicklingError(f"global '{module}/{name}' is forbidden")
875ddfeecfaffad9eee24813301637cba310337d
197
https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
340
def find_class(self, module, name):
    if module == 'collections' and name == 'OrderedDict':
        return getattr(collections, name)
    if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter']:
        return getattr(torch._utils, name)
    if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage']:
        return getattr(torch, name)
    if module == 'torch.nn.modules.container' and name in ['ParameterDict']:
        return getattr(torch.nn.modules.container, name)
    if module == 'numpy.core.multiarray' and name == 'scalar':
        return numpy.core.multiarray.scalar
    if module == 'numpy' and name == 'dtype':
        return numpy.dtype
    if module == '_codecs' and name == 'encode':
        return encode
    if module == "pytorch_lightning.callbacks" and name == 'model_checkpoint':
        import pytorch_lightning.callbacks
        return pytorch_lightning.callbacks.model_checkpoint
    if module == "pytorch_lightning.callbacks.model_checkpoint" and name == 'ModelCheckpoint':
        import pytorch_lightning.callbacks.model_checkpoint
        return pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint
24
351
find_class
48
0
1
46
tests/utils/test_metadata.py
30,338
Create test_metadata.py
spotify-downloader
12
Python
42
test_metadata.py
def test_embed_metadata(tmpdir, monkeypatch, output_format):
    monkeypatch.chdir(tmpdir)
    monkeypatch.setattr(spotdl.utils.ffmpeg, "get_spotdl_path", lambda *_: tmpdir)

    yt = YoutubeDL(
        {
            "format": "bestaudio",
            "encoding": "UTF-8",
        }
    )

    download_info = yt.extract_info(
        "https://www.youtube.com/watch?v=h-nHdqC3pPs", download=False
    )

    song = Song.from_data_dump(
    )

    output_file = Path(tmpdir / f"test.{output_format}")

    assert convert(
        input_file=(download_info["url"], download_info["ext"]),
        output_file=output_file,
        output_format=output_format,
    ) == (True, None)

    embed_metadata(output_file, song, output_format)
a96db2512e1533287684d5563d0a6b7dd065a8b7
118
https://github.com/spotDL/spotify-downloader.git
159
def test_embed_metadata(tmpdir, monkeypatch, output_format):
    monkeypatch.chdir(tmpdir)
    monkeypatch.setattr(spotdl.utils.ffmpeg, "get_spotdl_path", lambda *_: tmpdir)

    yt = YoutubeDL(
        {
            "format": "bestaudio",
            "encoding": "UTF-8",
        }
    )

    download_info = yt.extract_info(
        "https://www.youtube.com/watch?v=h-nHdqC3pPs", download=False
    )

    song = Song.from_data_dump(
    )

    output_file = Path(tmpdir / f"test.{output_format}")

    assert convert(
        input_file=(download_info["url"], download_info["ext"]),
        output_file=output_file,
        output_format=output_format,
    ) == (True, None)
23
198
test_embed_metadata
15
0
1
2
sympy/assumptions/assume.py
200,373
Fix various typos Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`
sympy
7
Python
15
assume.py
def function(self):
    # Will be changed to self.args[0] after args overriding is removed
    return self._args[0]
24f1e7730119fe958cc8e28411f790c9a5ec04eb
13
https://github.com/sympy/sympy.git
36
def function(self):
    # Will be changed to self.args[0
3
24
function
39
0
2
11
src/prefect/utilities/importtools.py
57,466
Fix attribute getter support
prefect
20
Python
36
importtools.py
def __getattr__(self, attr):
    if attr in ("__class__", "__file__", "__frame_data", "__help_message"):
        super().__getattr__(attr)
    else:
        fd = self.__frame_data
        raise ModuleNotFoundError(
            f"No module named '{fd['spec']}'\n\n"
            "This module was originally imported at:\n"
            f' File "{fd["filename"]}", line {fd["lineno"]}, in {fd["function"]}\n\n'
            f' {"".join(fd["code_context"]).strip()}\n'
            + self.__help_message
        )
d238c7b16097895006eff9e3f081958af15cd3e5
50
https://github.com/PrefectHQ/prefect.git
160
def __getattr__(self, attr):
    if attr in ("__class__", "__file_
10
161
__getattr__
26
0
2
7
src/diffusers/models/resnet.py
335,804
Simplify FirUp/down, unet sde (#71) * refactor fir up/down sample * remove variance scaling * remove variance scaling from unet sde * refactor Linear * style * actually remove variance scaling * add back upsample_2d, downsample_2d * style * fix FirUpsample2D
diffusers
13
Python
19
resnet.py
def forward(self, x):
    if self.use_conv:
        h = self._upsample_2d(x, self.Conv2d_0.weight, k=self.fir_kernel)
        h = h + self.Conv2d_0.bias.reshape(1, -1, 1, 1)
    else:
        h = self._upsample_2d(x, k=self.fir_kernel, factor=2)

    return h
53a42d0a0cab99e9a905b117b9893052c6849e10
75
https://github.com/huggingface/diffusers.git
79
def forward(self, x):
    if self.use_conv:
        h = self._upsample_2d(x, self.Conv2d_0.weight, k=self.fir_kernel)
        h = h + self.Conv2d_0.bias.reshape(1, -1, 1, 1)
    else:
        h = self._upsample_2d(x, k=self.fir_kernel, factor=2)
13
111
forward
13
0
1
5
python/ray/ml/preprocessors/scaler.py
138,558
[ml] add more preprocessors (#23904) Adding some more common preprocessors: * MaxAbsScaler * RobustScaler * PowerTransformer * Normalizer * FeatureHasher * Tokenizer * HashingVectorizer * CountVectorizer API docs: https://ray--23904.org.readthedocs.build/en/23904/ray-air/getting-started.html Co-authored-by: Kai Fricke <krfricke@users.noreply.github.com>
ray
9
Python
13
scaler.py
def __repr__(self):
    stats = getattr(self, "stats_", None)
    return (
        f"StandardScaler(columns={self.columns}, ddof={self.ddof}, stats={stats})"
    )
cc08c01adedad6cd89f3ab310ed58100ed6dbc26
20
https://github.com/ray-project/ray.git
44
def __repr__(self):
6
51
__repr__
20
0
3
6
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
63,420
upd; format
transferlearning
15
Python
19
pyparsing.py
def addCondition(self, *fns, **kwargs):
    for fn in fns:
        self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'), fatal=kwargs.get('fatal', False)))

    self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
    return self
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
66
https://github.com/jindongwang/transferlearning.git
117
def addCondition(self, *fns, **kwargs):
    for fn in fns:
        self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'), fa
12
107
addCondition
219
0
1
49
tests/test_categorical.py
41,855
Revert unnecessary (and broken) backwards compat in catplot (#2839)
seaborn
13
Python
34
test_categorical.py
def test_plot_elements(self):

    g = cat.catplot(x="g", y="y", data=self.df, kind="point")
    assert len(g.ax.collections) == 1
    want_lines = self.g.unique().size + 1
    assert len(g.ax.lines) == want_lines

    g = cat.catplot(x="g", y="y", hue="h", data=self.df, kind="point")
    want_collections = self.h.unique().size
    assert len(g.ax.collections) == want_collections
    want_lines = (self.g.unique().size + 1) * self.h.unique().size
    assert len(g.ax.lines) == want_lines

    g = cat.catplot(x="g", y="y", data=self.df, kind="bar")
    want_elements = self.g.unique().size
    assert len(g.ax.patches) == want_elements
    assert len(g.ax.lines) == want_elements

    g = cat.catplot(x="g", y="y", hue="h", data=self.df, kind="bar")
    want_elements = self.g.unique().size * self.h.unique().size
    assert len(g.ax.patches) == want_elements
    assert len(g.ax.lines) == want_elements

    g = cat.catplot(x="g", data=self.df, kind="count")
    want_elements = self.g.unique().size
    assert len(g.ax.patches) == want_elements
    assert len(g.ax.lines) == 0

    g = cat.catplot(x="g", hue="h", data=self.df, kind="count")
    want_elements = self.g.unique().size * self.h.unique().size
    assert len(g.ax.patches) == want_elements
    assert len(g.ax.lines) == 0

    g = cat.catplot(y="y", data=self.df, kind="box")
    want_artists = 1
    assert len(self.get_box_artists(g.ax)) == want_artists

    g = cat.catplot(x="g", y="y", data=self.df, kind="box")
    want_artists = self.g.unique().size
    assert len(self.get_box_artists(g.ax)) == want_artists

    g = cat.catplot(x="g", y="y", hue="h", data=self.df, kind="box")
    want_artists = self.g.unique().size * self.h.unique().size
    assert len(self.get_box_artists(g.ax)) == want_artists

    g = cat.catplot(x="g", y="y", data=self.df, kind="violin", inner=None)
    want_elements = self.g.unique().size
    assert len(g.ax.collections) == want_elements

    g = cat.catplot(x="g", y="y", hue="h", data=self.df, kind="violin", inner=None)
    want_elements = self.g.unique().size * self.h.unique().size
    assert len(g.ax.collections) == want_elements

    g = cat.catplot(x="g", y="y", data=self.df, kind="strip")
    want_elements = self.g.unique().size
    assert len(g.ax.collections) == want_elements

    g = cat.catplot(x="g", y="y", hue="h", data=self.df, kind="strip")
    want_elements = self.g.unique().size + self.h.unique().size
    assert len(g.ax.collections) == want_elements
de1ecf0e0d0064982ebf4f13e1b1afddd27c80ff
767
https://github.com/mwaskom/seaborn.git
586
def test_plot_elements(self):

    g = cat.catplot(x="g", y="y", data=self.df, kind="point")
    assert len(g.ax.collections) == 1
    want_lines = self.g.unique().size + 1
    assert len(g.ax.lines) == want_lines

    g = cat.catplot(x="g", y="y", hue="h", data=self.df, kind="point")
    want_collections = self.h.unique().size
    assert len(g.ax.collections) == want_collections
    want_lines = (self.g.unique().size + 1) * self.h.unique().size
    assert len(g.ax.lines) == want_lines

    g = cat.catplot(x="g", y="y", data=self.df, kind="bar")
    want_elements = self.g.unique().size
    assert len(g.ax.patches) == want_elements
    assert len(g.ax.lines) == want_elements

    g = cat.catplot(x="g", y="y", hue="h", data=self.df, kind="bar")
    want_elements = self.g.unique().size * self.h.unique().size
    assert
25
1,242
test_plot_elements
22
0
3
4
dashboard/modules/job/common.py
144,365
[jobs] Monitor jobs in the background to avoid requiring clients to poll (#22180)
ray
9
Python
18
common.py
def get_all_jobs(self) -> Dict[str, JobStatusInfo]:
    raw_job_ids = _internal_kv_list(self.JOB_STATUS_KEY_PREFIX)
    job_ids = [job_id.decode() for job_id in raw_job_ids]
    return {job_id: self.get_status(job_id) for job_id in job_ids}
8806b2d5c43f256188632c245dd741774776dad0
48
https://github.com/ray-project/ray.git
42
def get_all_jobs(self) -> Dict[str, JobStatusInfo]:
    raw_job_ids = _internal_kv_list(self.JOB_STATUS_KEY_PREFIX)
    job_ids = [job_id.decode() for job_id in raw_job_ids]
    return {job_id: self.get_status(job_id) for job_id in job_ids}
12
73
get_all_jobs
20
0
4
6
tests/integration_tests/flows/test_mysql_api_pytest_based.py
117,106
It mysql api test pytest (#3694) * migration to pytest * Tests start passing * Fully working tests * Increase timeout for mindsdb start * reduce amount of logs * show logs only for failed tests
mindsdb
11
Python
15
test_mysql_api_pytest_based.py
def get_record(self, key, value):
    if key in self:
        for x in self:
            if x[key] == value:
                return x
    return None
ae4fa77a2c0a9fa57cc9c8bc7e8961dd01e4067e
31
https://github.com/mindsdb/mindsdb.git
78
def get_record(self, key, value):
    if key in self:
        for x in self:
            if x[key] == value:
                return x
    r
5
46
get_record
61
0
1
28
tests/sentry/api/endpoints/test_organization_sdk_updates.py
97,911
fix(sdk): Do not error if a project is using an unknown version (#32206)
sentry
13
Python
42
test_organization_sdk_updates.py
def test_unknown_version(self, mock_index_state):
    min_ago = iso_format(before_now(minutes=1))
    self.store_event(
        data={
            "event_id": "a" * 32,
            "message": "oh no",
            "timestamp": min_ago,
            "fingerprint": ["group-1"],
            "sdk": {"name": "example.sdk", "version": "dev-master@32e5415"},
        },
        project_id=self.project.id,
        assert_no_errors=False,
    )
    self.store_event(
        data={
            "event_id": "b" * 32,
            "message": "b",
            "timestamp": min_ago,
            "fingerprint": ["group-2"],
            "sdk": {"name": "example.sdk", "version": "2.0.0"},
        },
        project_id=self.project.id,
        assert_no_errors=False,
    )

    with self.feature(self.features):
        response = self.client.get(self.url)

    update_suggestions = response.data
    assert len(update_suggestions) == 0
3ba27f5b5845de0a5a89d5cbf2e5df752915d9d7
160
https://github.com/getsentry/sentry.git
365
def test_unknown_version(self, mock_index_state):
    min_ago = iso_format(before_now(minutes=1))
    self.store_event(
        data={
21
281
test_unknown_version
133
0
7
24
numpy/lib/shape_base.py
160,198
ENH: Maintain subclass info for `np.kron` * Replace `*` call with `multiply` * Handle `mat` cases to perform reshape * Remove use result wrapping to maintain consistency with ufuncs
numpy
13
Python
89
shape_base.py
def kron(a, b):
    b = asanyarray(b)
    a = array(a, copy=False, subok=True, ndmin=b.ndim)
    ndb, nda = b.ndim, a.ndim
    nd = max(ndb, nda)

    if (nda == 0 or ndb == 0):
        return _nx.multiply(a, b)

    as_ = a.shape
    bs = b.shape
    if not a.flags.contiguous:
        a = reshape(a, as_)
    if not b.flags.contiguous:
        b = reshape(b, bs)

    # Equalise the shapes by prepending smaller one with 1s
    as_ = (1,)*max(0, ndb-nda) + as_
    bs = (1,)*max(0, nda-ndb) + bs

    # Compute the product
    a_arr = a.reshape(a.size, 1)
    b_arr = b.reshape(1, b.size)
    is_any_mat = isinstance(a_arr, matrix) or isinstance(b_arr, matrix)
    # In case of `mat`, convert result to `array`
    result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat))

    # Reshape back
    result = result.reshape(as_+bs)
    transposer = _nx.arange(nd*2).reshape([2, nd]).ravel(order='f')
    result = result.transpose(transposer)
    result = result.reshape(_nx.multiply(as_, bs))

    return result if not is_any_mat else matrix(result, copy=False)
730f3154f48e33f22b2ea8814eb10a45aa273e17
278
https://github.com/numpy/numpy.git
229
def kron(a, b):
    b = asanyarray(b)
    a = array(a, copy=False, subok=True, ndmin=b.ndim)
    ndb, nda = b.ndim, a.ndim
    nd = max(ndb, nda)

    if (nda == 0 or ndb == 0):
        return _nx.multiply(a, b)

    as_ = a.shape
    bs = b.shape
    if not a.flags.contiguous:
        a = reshape(a, as_)
    if not b.flags.contiguous:
        b = reshape(b, bs)

    # Equalise the shapes by prepending smaller one with 1s
    as_ = (1,)*max(0, ndb-nda) + as_
    bs = (1,)*max(0, nda-ndb) + bs

    # Compute the product
    a_arr = a.reshape(a.size, 1)
    b_arr = b.reshape(1, b.s
33
429
kron
11
0
1
2
.venv/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/base.py
61,081
upd; format
transferlearning
8
Python
11
base.py
def get_candidate_lookup(self):
    # type: () -> CandidateLookup
    raise NotImplementedError("Subclass should override")
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
10
https://github.com/jindongwang/transferlearning.git
24
def get_candidate_lookup(self):
    # type: () -> CandidateLookup
    raise NotImplementedError("Subclass should override")
3
20
get_candidate_lookup
9
0
1
4
tests/components/utility_meter/test_sensor.py
296,070
Remove EVENT_TIME_CHANGED and EVENT_TIMER_OUT_OF_SYNC (#69643) Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
core
11
Python
9
test_sensor.py
async def test_self_reset_hourly(hass):
    await _test_self_reset(
        hass, gen_config("hourly"), "2017-12-31T23:59:00.000000+00:00"
    )
fe6a4bfb1dbb37dd16a0d73d776ad5f604154670
18
https://github.com/home-assistant/core.git
25
async def test_self_reset_hourly(hass):
    await _test_self_reset(
        hass, gen_config("hourly"), "2017-12-31T23
4
36
test_self_reset_hourly
39
0
4
12
rllib/agents/a3c/tests/test_a2c.py
139,039
[RLlib] A2/3C Config objects (A2CConfig and A3CConfig). (#24332)
ray
14
Python
32
test_a2c.py
def test_a2c_compilation(self):
    config = a3c.A2CConfig().rollouts(num_rollout_workers=2, num_envs_per_worker=2)

    num_iterations = 1

    # Test against all frameworks.
    for _ in framework_iterator(config, with_eager_tracing=True):
        for env in ["CartPole-v0", "Pendulum-v1", "PongDeterministic-v0"]:
            trainer = config.build(env=env)
            for i in range(num_iterations):
                results = trainer.train()
                check_train_results(results)
                print(results)
            check_compute_single_action(trainer)
            trainer.stop()
b2b1c95aa5f94c74d192caca0d86945f2b4ce986
92
https://github.com/ray-project/ray.git
202
def test_a2c_compilation(self):
    config = a3c.A2CConfig().rollouts(num_rollout_workers=2, num_envs_per_worker=2)

    num_iterations = 1

    # Test against all frameworks.
    for _ in framework_iterator(config, with_eager_tracing=True):
        for env in ["CartPole-v0", "Pendulum-v1", "PongDeterministic-v0"]:
            trainer = config.build(env=env)
            for i in range(num_iterations):
                resu
23
154
test_a2c_compilation
15
0
2
4
django/db/models/sql/where.py
205,901
Refs #33476 -- Reformatted code with Black.
django
11
Python
13
where.py
def _resolve_leaf(expr, query, *args, **kwargs):
    if hasattr(expr, "resolve_expression"):
        expr = expr.resolve_expression(query, *args, **kwargs)
    return expr
9c19aff7c7561e3a82978a272ecdaad40dda5c00
37
https://github.com/django/django.git
39
def _resolve_leaf(expr, query, *args, **kwargs):
    if hasattr(expr,
7
57
_resolve_leaf
39
0
1
12
tools/preview/preview.py
101,422
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
faceswap
10
Python
27
preview.py
def update_tk_image(self) -> None:
    logger.trace("Updating tk image")  # type: ignore
    self._build_faces_image()
    img = np.vstack((self._faces_source, self._faces_dest))
    size = self._get_scale_size(img)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    pilimg = Image.fromarray(img)
    pilimg = pilimg.resize(size, Image.ANTIALIAS)
    self._tk_image = ImageTk.PhotoImage(pilimg)
    self._tk_vars["refresh"].set(False)
    logger.trace("Updated tk image")  # type: ignore
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
102
https://github.com/deepfakes/faceswap.git
118
def update_tk_image(self) -> None:
    logger.trace("Updating tk image")  # type: ignore
    self._build_faces_image()
    img = np.vstack((self._
25
172
update_tk_image
14
1
1
2
tests/sentry/middleware/test_ratelimit_middleware.py
95,301
feat(ratelimits): Add headers with rate limit details (#30951) The headers allow API users to know where they are in terms of their rate limits. It'll be returned for every API that can be rate limited except when there is an internal exception. At the time of this commit, rate limits are not enforced except for some specific endpoints. * Several improvements to rate limit headers Headers now track how many requests are left in the current window, and when the next window starts Also, rate limit metadata is in a dataclass
sentry
10
Python
14
test_ratelimit_middleware.py
def get(self, request):
    return Response({"ok": True})


urlpatterns = [
    url(r"^/ratelimit$", RateLimitHeaderTestEndpoint.as_view(), name="ratelimit-header-endpoint")
]


@override_settings(ROOT_URLCONF="tests.sentry.middleware.test_ratelimit_middleware")
68b1cdf3b1bcb7990834a890b8a32a021bc75666
@override_settings(ROOT_URLCONF="tests.sentry.middleware.test_ratelimit_middleware")
16
https://github.com/getsentry/sentry.git
20
def get(self, request):
    return Response({"ok": True})


urlpatt
11
72
get