Dataset columns (dtype and observed min/max):

column          dtype           min      max
n_words         int64           3        1.95k
n_ast_errors    int64           0        2
complexity      int64           1        151
nloc            int64           2        546
path            stringlengths   8        125
id              int64           280      339k
commit_message  stringlengths   3        18.1k
repo            stringlengths   3        28
ast_levels      int64           4        28
language        stringclasses   1 value
vocab_size      int64           3        677
file_name       stringlengths   5        67
code            stringlengths   101      24k
commit_id       stringlengths   40       40
ast_errors      stringlengths   0        2.76k
token_counts    int64           7        3.77k
url             stringlengths   31       61
n_whitespaces   int64           4        13.9k
random_cut      stringlengths   21       13.9k
n_identifiers   int64           1        157
n_ast_nodes     int64           10       3.6k
fun_name        stringlengths   3        72
36
0
1
17
wagtail/admin/tests/pages/test_edit_page.py
71,603
Reformat with black
wagtail
13
Python
32
test_edit_page.py
def _create_page(self, parent):
    response = self.client.post(
        reverse("wagtailadmin_pages:add", args=("tests", "simplepage", parent.pk)),
        {
            "title": "Hello, world!",
            "content": "Some content",
            "slug": "hello-world",
            "action-publish": "publish",
        },
        follow=True,
    )
    self.assertRedirects(
        response, reverse("wagtailadmin_explore", args=(parent.pk,))
    )
    page = SimplePage.objects.get()
    self.assertTrue(page.live)
    return response, page
d10f15e55806c6944827d801cd9c2d53f5da4186
97
https://github.com/wagtail/wagtail.git
199
def _create_page(self, parent): response = self.client.post( reverse("wagtailadmin_pages:add", args=("tests", "simplepage", parent.pk)), { "title": "Hello, world!", "content": "Some content", "slug
17
164
_create_page
8
0
1
2
saleor/graphql/plugins/schema.py
29,853
Add plugin manager promise (#11414)
saleor
10
Python
8
schema.py
def resolve_plugin(_root, info, manager, **data):
    return resolve_plugin(data.get("id"), manager)
decd505f55d02c616ce5b804c06a71e120d15c15
24
https://github.com/saleor/saleor.git
14
def resolve_plugin(_root, info, manager, **data):
6
37
resolve_plugin
72
0
1
27
test/document_stores/test_opensearch.py
257,779
feat: FAISS in OpenSearch: Support HNSW for dot product and l2 (#3029) * support faiss hnsw * blacken * update docs * improve similarity check * add tests * update schema * set ef_search param correctly * Apply suggestions from code review Co-authored-by: Agnieszka Marzec <97166305+agnieszka-m@users.noreply.github.com> * regenerate docs Co-authored-by: Massimiliano Pippi <mpippi@gmail.com> Co-authored-by: Agnieszka Marzec <97166305+agnieszka-m@users.noreply.github.com>
haystack
19
Python
58
test_opensearch.py
def test__create_document_index_no_index_no_mapping_faiss(self, mocked_document_store):
    mocked_document_store.client.indices.exists.return_value = False
    mocked_document_store.knn_engine = "faiss"
    mocked_document_store._create_document_index(self.index_name)
    _, kwargs = mocked_document_store.client.indices.create.call_args
    assert kwargs["body"] == {
        "mappings": {
            "dynamic_templates": [
                {"strings": {"mapping": {"type": "keyword"}, "match_mapping_type": "string", "path_match": "*"}}
            ],
            "properties": {
                "content": {"type": "text"},
                "embedding": {
                    "dimension": 768,
                    "method": {
                        "engine": "faiss",
                        "name": "hnsw",
                        "parameters": {"ef_construction": 512, "m": 16},
                        "space_type": "innerproduct",
                    },
                    "type": "knn_vector",
                },
                "name": {"type": "keyword"},
            },
        },
        "settings": {"analysis": {"analyzer": {"default": {"type": "standard"}}}, "index": {"knn": True}},
    }
92046ce5b54ddd0cc21ee98bff30ba507ec9d054
174
https://github.com/deepset-ai/haystack.git
501
def test__create_document_index_no_index_no_mapping_faiss(self, mocked_document_store): mocked_document_store.client.indices.exists.return_value = False mocked_document_store.knn_engine = "faiss" mocked_document_store._create_document_index(self.index_name) _, kwargs = mocked_document_store.client.indices.create.call_args assert kwargs["body"] == { "mappings": { "dynamic_templates": [ {"strings": {"mapping": {"type": "keyword"}, "match_mapping_type": "string", "path_match": "*"}} ], "properties": { "content": {"type": "text"}, "embedding": { "dimension": 768, "method": { "engine": "faiss", "name": "hnsw", "parameters": {"ef_construction": 512, "m": 16}, "space
14
330
test__create_document_index_no_index_no_mapping_faiss
27
0
1
17
tests/aggregation_regress/tests.py
200,959
Refs #33476 -- Reformatted code with Black.
django
18
Python
23
tests.py
def test_annotation_with_value(self):
    values = (
        Book.objects.filter(
            name="Practical Django Projects",
        )
        .annotate(
            discount_price=F("price") * 2,
        )
        .values(
            "discount_price",
        )
        .annotate(sum_discount=Sum("discount_price"))
    )
    self.assertSequenceEqual(
        values,
        [{"discount_price": Decimal("59.38"), "sum_discount": Decimal("59.38")}],
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
74
https://github.com/django/django.git
198
def test_annotation_with_value(self): values = ( Book.objects.filter( name="Practical Django Projects", ) .annotate( discount_price=F("price") * 2, ) .values( "discount_price", ) .annotate(sum_discount=Sum("discount_price")) ) self.assertSequenceEqual( values, [{"discount_pric
14
130
test_annotation_with_value
26
0
1
6
test/document_stores/test_base.py
258,135
feat: add SQLDocumentStore tests (#3517) * port SQL tests * cleanup document_store_tests.py from sql tests * leftover * Update .github/workflows/tests.yml Co-authored-by: Sara Zan <sara.zanzottera@deepset.ai> * review comments * Update test/document_stores/test_base.py Co-authored-by: bogdankostic <bogdankostic@web.de> Co-authored-by: Sara Zan <sara.zanzottera@deepset.ai> Co-authored-by: bogdankostic <bogdankostic@web.de>
haystack
15
Python
17
test_base.py
def test_in_filters(self, ds, documents):
    ds.write_documents(documents)
    result = ds.get_all_documents(filters={"year": {"$in": ["2020", "2021", "n.a."]}})
    assert len(result) == 6
    result = ds.get_all_documents(filters={"year": ["2020", "2021", "n.a."]})
    assert len(result) == 6
2bb81331b75aec68de0d45c4cb116170d265f1fe
73
https://github.com/deepset-ai/haystack.git
60
def test_in_filters(self, ds, documents): ds.write_documents(documents) result = ds.get_all_documents(filters={"year": {"$in": ["2020", "2021", "n.a."]}}) assert len(result) == 6 result = ds.get_all_documents(filters={"year": ["2020", "2021"
9
127
test_in_filters
18
0
1
7
wagtail/contrib/table_block/tests.py
73,588
Reformat with black
wagtail
8
Python
15
tests.py
def test_empty_table_block_is_not_rendered(self):
    value = None
    block = TableBlock()
    result = block.render(value)
    expected = ""
    self.assertHTMLEqual(result, expected)
    self.assertNotIn("None", result)
d10f15e55806c6944827d801cd9c2d53f5da4186
41
https://github.com/wagtail/wagtail.git
67
def test_empty_table_block_is_not_rendered(self): value = None block = TableBlock() result = block.render(value) expected = "" self.assertHTMLEqual(result, expected) self.assertNotIn("None", result)
10
73
test_empty_table_block_is_not_rendered
58
0
7
15
mindsdb/integrations/handlers/mongodb_handler/mongodb_handler.py
115,336
fixes
mindsdb
15
Python
45
mongodb_handler.py
def flatten(self, row, level=0):
    # move sub-keys to upper level
    # TODO is disabled now
    if level <= 0:
        return row
    add = {}
    del_keys = []
    for k, v in row.items():
        if isinstance(v, dict):
            for k2, v2 in self.flatten(v, level=level - 1).items():
                add[f'{k}.{k2}'] = v2
            del_keys.append(k)
    if add:
        row.update(add)
        for key in del_keys:
            del row[key]
    return row
acc5b7c65d4f0356f7c68e343ef43735f2f33593
101
https://github.com/mindsdb/mindsdb.git
213
def flatten(self, row, level=0): # move sub-keys to upper level # TODO is disabled now if level <= 0: return row add = {} del_keys = [] for k, v in row.items(): if isinstance(v, dict): for k2, v2 in self.flatten(v, level=level - 1).items(): add[f'{k}.{k2}'] = v2 del_keys.ap
16
166
flatten
13
0
1
4
Lib/test/test_enum.py
175,535
Revert "bpo-40066: [Enum] update str() and format() output (GH-30582)" (GH-30632) This reverts commit acf7403f9baea3ae1119fc6b4a3298522188bf96.
cpython
11
Python
12
test_enum.py
def test_format(self):
    Perm = self.Perm
    self.assertEqual(format(Perm.R, ''), 'R')
    self.assertEqual(format(Perm.R | Perm.X, ''), 'R|X')
42a64c03ec5c443f2a5c2ee4284622f5d1f5326c
44
https://github.com/python/cpython.git
33
def test_format(self): Perm = self.Perm self.assertE
7
75
test_format
16
0
2
5
python/ray/util/client/server/server_stubs.py
132,962
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
10
Python
14
server_stubs.py
def __reduce__(self):
    remote_obj = self.get_remote_obj()
    if remote_obj is None:
        return (self.__class__, (self.client_id, self.id))
    return (identity, (remote_obj,))
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
42
https://github.com/ray-project/ray.git
47
def __reduce__(self): remote_obj = self.get_remote_obj() if remote_obj is None: return (self.__class__, (self.client_id, self.id)) return (identi
8
63
__reduce__
63
0
5
36
homeassistant/components/google_travel_time/config_flow.py
289,594
Move default option handling to config_flow for google_travel_time (#80607) Move default option handling to config_flow
core
17
Python
50
config_flow.py
async def async_step_user(self, user_input=None) -> FlowResult:
    errors = {}
    user_input = user_input or {}
    if user_input:
        try:
            await self.hass.async_add_executor_job(
                validate_config_entry,
                self.hass,
                user_input[CONF_API_KEY],
                user_input[CONF_ORIGIN],
                user_input[CONF_DESTINATION],
            )
            return self.async_create_entry(
                title=user_input.get(CONF_NAME, DEFAULT_NAME),
                data=user_input,
                options=default_options(self.hass),
            )
        except InvalidApiKeyException:
            errors["base"] = "invalid_auth"
        except UnknownException:
            errors["base"] = "cannot_connect"

    return self.async_show_form(
        step_id="user",
        data_schema=vol.Schema(
            {
                vol.Required(
                    CONF_NAME, default=user_input.get(CONF_NAME, DEFAULT_NAME)
                ): cv.string,
                vol.Required(CONF_API_KEY): cv.string,
                vol.Required(CONF_DESTINATION): cv.string,
                vol.Required(CONF_ORIGIN): cv.string,
            }
        ),
        errors=errors,
    )
b35cfe711a7032bc1e41b685ea180277abc99edb
183
https://github.com/home-assistant/core.git
572
async def async_step_user(self, user_input=None) -> FlowResult: errors = {} user_input = user_input or {} if user_input: try: await self.hass.async_add_executor_job( validate_config_entry, self.hass, user_input[CONF_API_KEY], user_input[CONF_ORIGIN], user_input[CONF_DESTINATION], ) return self.async_create_entry( title=user_input.get(CONF_NAME, DEFAULT_NAME), data=user_input, options=default_options(self.hass), ) except InvalidApiKeyException: errors["base"] = "invalid_auth" except UnknownException: errors["base"] = "cannot_connect" return self.async_show_form( step_id="user", data_schema=vol.Schema( { vol.Required( CONF_NAME, default=user_input.get(CONF_NAME, DEFAULT_NAME) ): cv.string, vol.Required(CONF_API_KEY): cv.string, vol.Required(CONF_DESTINATION): cv.string, vol.Required(CONF_ORIGIN): cv.string, } )
30
282
async_step_user
53
0
1
22
saleor/checkout/tests/test_checkout.py
25,845
Fix incorrect handling of unavailable products in checkout (#9058) * Fix incorrect handling of unavailable products in CheckoutComplete * Fix incorrect handling of unavailable products in CheckoutPaymentCreate * Refactor fetch_checkout_lines methods - return list of correct lines and invalid variant ids * Raise validation error when completing checkout with empty lines * Raise ValidationError when creating payment for checkout with empty lines
saleor
11
Python
35
test_checkout.py
def test_change_address_in_checkout_to_same(checkout, address):
    checkout.shipping_address = address
    checkout.billing_address = address.get_copy()
    checkout.save(update_fields=["shipping_address", "billing_address"])
    shipping_address_id = checkout.shipping_address.id
    billing_address_id = checkout.billing_address.id

    manager = get_plugins_manager()
    lines, _ = fetch_checkout_lines(checkout)
    checkout_info = fetch_checkout_info(checkout, lines, [], manager)
    change_shipping_address_in_checkout(
        checkout_info,
        address,
        lines,
        [],
        manager,
        checkout.channel.shipping_method_listings.all(),
    )
    change_billing_address_in_checkout(checkout, address)

    checkout.refresh_from_db()
    assert checkout.shipping_address.id == shipping_address_id
    assert checkout.billing_address.id == billing_address_id
    assert checkout_info.shipping_address == address
a4f2c7976dae1f9608b1bc130e497d558169848f
130
https://github.com/saleor/saleor.git
139
def test_change_address_in_checkout_to_same(checkout, address): checkout.shipping_address = address checkout.billing_address = address.get_copy() checkout.save(update_fields=["shipping_address", "billing_addres
24
202
test_change_address_in_checkout_to_same
58
0
7
15
src/accelerate/accelerator.py
337,220
make deepspeed optimizer match parameters of passed optimizer (#246) * make deepspeed optimizer match parameters of passed optimizer, instead of all model parameters * style Co-authored-by: Jack Hessel <jackh@allenai.org>
accelerate
13
Python
34
accelerator.py
def prepare_model(self, model):
    if self.device_placement:
        model = model.to(self.device)
    if self.distributed_type == DistributedType.MULTI_GPU:
        kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[self.local_process_index], output_device=self.local_process_index, **kwargs
        )
    elif self.distributed_type == DistributedType.MULTI_CPU:
        kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}
        model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)
    if self.native_amp:
        model.forward = torch.cuda.amp.autocast()(model.forward)
        model.forward = convert_outputs_to_fp32(model.forward)
    return model
a0995e1ccb81cea86a065d05c520112b156079d8
157
https://github.com/huggingface/accelerate.git
195
def prepare_model(self, model): if self.device_placement: model = mo
26
242
prepare_model
22
0
1
12
tests/orion/models/test_flow_run_alert_policies.py
55,646
Running on postgres
prefect
15
Python
20
test_flow_run_alert_policies.py
async def failed_policy(session, notifier_block):
    policy = await models.flow_run_alert_policies.create_flow_run_alert_policy(
        session=session,
        flow_run_alert_policy=schemas.core.FlowRunAlertPolicy(
            name="My Success Policy",
            state_names=["Failed"],
            tags=[],
            block_document_id=notifier_block.id,
        ),
    )
    await session.commit()
    return policy
c0a02e64ffd641513a757a6676b6ecdabba91158
60
https://github.com/PrefectHQ/prefect.git
98
async def failed_policy(session, notifier_block): policy = await models.flow
17
95
failed_policy
14
0
1
9
sympy/functions/combinatorial/numbers.py
197,219
Deprecate redundant static methods
sympy
9
Python
14
numbers.py
def divides(p, n):
    sympy_deprecation_warning(
        ,
        deprecated_since_version="1.11",
        active_deprecations_target='deprecated-carmichael-static-methods',
    )
    return n % p == 0
b27e2b44626d138bd6ea235fbf114644baa5b144
26
https://github.com/sympy/sympy.git
55
def divides(p, n): sympy_deprecation_warning( ,
6
44
divides
6
0
1
3
python3.10.4/Lib/bz2.py
221,182
add python 3.10.4 for windows
XX-Net
8
Python
6
bz2.py
def readinto(self, b):
    self._check_can_read()
    return self._buffer.readinto(b)
8198943edd73a363c266633e1aa5b2a9e9c9f526
22
https://github.com/XX-net/XX-Net.git
27
def readinto(self, b): self._check_can_read() return self._buffer.r
5
38
readinto
210
0
14
44
keras/utils/dataset_utils.py
269,177
update dataset_utils.py
keras
17
Python
119
dataset_utils.py
def _convert_dataset_to_list(dataset,data_size_warning_flag = True):
    # TODO prakashsellathurai: add failing test cases for list of tuples,tuples of nd array
    # TODO prakashsellathurai: add support for Batched and unbatched tf datasets
    if isinstance(dataset,tuple):
        if len(dataset) == 0:
            raise ValueError('`dataset` must be a non-empty list/tuple of'
                             ' numpy.ndarrays or tf.data.Dataset objects.')
        dataset_iterator = list(zip(*dataset))
    elif isinstance(dataset,list):
        if len(dataset) == 0:
            raise ValueError('`dataset` must be a non-empty list/tuple of'
                             ' numpy.ndarrays or tf.data.Dataset objects.')
        if isinstance(dataset[0],np.ndarray):
            dataset_iterator = list(zip(*dataset))
        else:
            dataset_iterator = list(dataset)
    elif isinstance(dataset,np.ndarray):
        dataset_iterator = list(dataset)
    elif isinstance(dataset,tf.data.Dataset):
        dataset_iterator = list(dataset)
    else:
        raise TypeError('`dataset` must be either a tf.data.Dataset object'
                        f' or a list/tuple of arrays. Received : {type(dataset)}'
                        )

    dataset_as_list = []
    try:
        dataset_iterator = iter(dataset_iterator)
        first_datum = next(dataset_iterator)
        dataset_as_list.append(first_datum)
    except ValueError:
        raise ValueError('Received an empty Dataset i.e dataset with no elements. '
                         '`dataset` must be a non-empty list/tuple of'
                         ' numpy.ndarrays or tf.data.Dataset objects.')

    start_time = time.time()
    for i,datum in enumerate(dataset_iterator):
        if data_size_warning_flag:
            if i % 10 == 0:
                cur_time = time.time()
                # warns user if the dataset is too large to iterate within 10s
                if int(cur_time - start_time) > 10 and data_size_warning_flag:
                    warnings.warn('Takes too long time to process the `dataset`,'
                                  'this function is only for small datasets '
                                  '(e.g. < 10,000 samples).'
                                  )
                    data_size_warning_flag = False
        dataset_as_list.append(datum)

    return dataset_as_list
f6a1bda81886a721413eb21a12fcbd69b3f14dfa
237
https://github.com/keras-team/keras.git
531
def _convert_dataset_to_list(dataset,data_size_warning_flag = True): # TODO prakashsellathurai: add failing test cases for list of tuples,tuples of nd array # TODO prakashsellathurai: add support for Batched and unbatched tf datasets if isinstance(dataset,tuple): if len(dataset) == 0: raise ValueError('`dataset` must be a non-empty list/tuple of' ' numpy.ndarrays or tf.data.Dataset objects.') dataset_iterator = list(zip(*dataset)) elif isinstance(dataset,list): if len(dataset) == 0: raise ValueError('`dataset` must be a non-empty list/tuple of' ' numpy.ndarrays or tf.data.Dataset objects.') if isinstance(dataset[0],np.ndarray): dataset_iterator = list(zip(*dataset)) else: dataset_iterator = list(dataset) elif isinstance(dataset,np.ndarray): dataset_iterator = list(dataset) elif isinstance(dataset,tf.data.Dataset): dataset_iterator = list(dataset) else: raise TypeError('`dataset` must be either a tf.data.Dataset object' f' or a list/tuple of arrays. Received : {type(dataset)}' ) dataset_as_list = [] try: dataset_iterator = iter(dataset_iterator) first_datum = next(dataset_iterator) dataset_as_list.append(first_datum) except ValueError: raise ValueError('Received an empty Dataset i.e dataset with no elements. ' '`dataset` must be a
31
420
_convert_dataset_to_list
68
0
4
8
nni/retiarii/nn/pytorch/api.py
112,699
Add license header and typehints for NAS (#4774)
nni
15
Python
57
api.py
def __repr__(self) -> str:
    reprs = []
    for arg in self.arguments:
        if isinstance(arg, ValueChoiceX) and not isinstance(arg, ValueChoice):
            reprs.append('(' + repr(arg) + ')')  # add parenthesis for operator priority
        else:
            reprs.append(repr(arg))
    return self.repr_template.format(*reprs)

# the following are a series of methods to create "ValueChoiceX"
# which is a transformed version of value choice
# https://docs.python.org/3/reference/datamodel.html#special-method-names
# Special operators that can be useful in place of built-in conditional operators.
1896212902bd8d1fa11c5df6c8e1fe38ae4b5392
68
https://github.com/microsoft/nni.git
153
def __repr__(self) -> str: reprs = [] for arg in self.arguments: if isinstance(arg, ValueChoiceX) and not isinstance(arg, ValueChoice): reprs.append('(' + repr(arg) + ')') # add parenthesis for operator priority else: reprs.append(repr(arg)) return self.repr_template.format(*reprs) # the following are a series of methods to create "ValueChoiceX" # which is a transformed version of value choice # https://docs.python.org/3/reference/datamodel.html#special-method-names # Spe
13
118
__repr__
12
0
3
4
youtube_dl/jsinterp.py
106,220
Back-port JS interpreter upgrade from yt-dlp PR #1437
youtube-dl
10
Python
9
jsinterp.py
def __iter__(self):
    for scope in self.stack:
        for scope_item in iter(scope):
            yield scope_item
96f87aaa3b34d80bc72097a7475d8093849091fc
22
https://github.com/ytdl-org/youtube-dl.git
44
def __iter__(self): for scope in self.stack: for scope_item in iter(scope): yield scope_item
6
35
__iter__
113
0
7
24
yt_dlp/extractor/tiktok.py
162,324
[extractor,cleanup] Use `_search_nextjs_data`
yt-dlp
14
Python
82
tiktok.py
def _real_extract(self, url):
    video_id = self._match_id(url)

    try:
        return self._extract_aweme_app(video_id)
    except ExtractorError as e:
        self.report_warning(f'{e}; Retrying with webpage')

    # If we only call once, we get a 403 when downlaoding the video.
    self._download_webpage(url, video_id)
    webpage = self._download_webpage(url, video_id, note='Downloading video webpage')
    next_data = self._search_nextjs_data(webpage, video_id, default='{}')

    if next_data:
        status = traverse_obj(next_data, ('props', 'pageProps', 'statusCode'), expected_type=int) or 0
        video_data = traverse_obj(next_data, ('props', 'pageProps', 'itemInfo', 'itemStruct'), expected_type=dict)
    else:
        sigi_json = self._search_regex(
            r'>\s*window\[[\'"]SIGI_STATE[\'"]\]\s*=\s*(?P<sigi_state>{.+});',
            webpage, 'sigi data', group='sigi_state')
        sigi_data = self._parse_json(sigi_json, video_id)
        status = traverse_obj(sigi_data, ('VideoPage', 'statusCode'), expected_type=int) or 0
        video_data = traverse_obj(sigi_data, ('ItemModule', video_id), expected_type=dict)

    if status == 0:
        return self._parse_aweme_video_web(video_data, url)
    elif status == 10216:
        raise ExtractorError('This video is private', expected=True)
    raise ExtractorError('Video not available', video_id=video_id)
135dfa2c7ebc9284db940713c0dc6cbc19ca5fa4
215
https://github.com/yt-dlp/yt-dlp.git
336
def _real_extract(self, url): video_id = self._match_id(url) try: return self._extract_aweme_app(video_id) except ExtractorError as e: self.report_warning(f'{e}; Retrying with webpage') # If we only call once, we get a 403 when downlaoding the video. self._download_webpage(url, video_id) webpage = self._download_webpage(url, video_id, note='Downloading video webpage') next_data = self._search_nextjs_data(webpage, video_id, default='{}') if next_data: status = traverse_obj(next_data, ('props', 'pageProps', 'statusCode'), expected_type=int) or 0 video_data = traverse_obj(next_data, ('props', 'pageProps', 'itemInfo', 'itemStruct'), expected_type=dict) else: sigi_json = self._search_regex( r'>\s*window\[[\'"]SIGI_STATE[\'"]\]\s*=\s*(?P<sigi_state>{.+});', webpage, 'sigi data', group='sigi_state') sigi_data = self._parse_json(sigi_json, video_id) status = traverse_obj(sigi_data, ('VideoPage', 'statusCode'), expected_type=int) or 0 video_data = traverse_obj(sigi_data, ('ItemModule', video_id), expected_type=dict) if status == 0:
28
352
_real_extract
111
0
8
32
python/ray/_private/function_manager.py
147,136
[Python Worker] add feature flag to support forking from workers (#23260) Make sure Python dependencies can be imported on demand, without the background importer thread. Use cases are: If the pubsub notification for a new export is lost, importing can still be done. Allow not running the background importer thread, without affecting Ray's functionalities. Add a feature flag to support forking from Python workers, by Enable fork support in gRPC. Disable importer thread and only leave the main thread in the Python worker. The importer thread will not run after forking anyway.
ray
18
Python
80
function_manager.py
def _wait_for_function(self, function_descriptor, job_id, timeout=10):
    start_time = time.time()
    # Only send the warning once.
    warning_sent = False
    while True:
        with self.lock:
            if self._worker.actor_id.is_nil() and (
                function_descriptor.function_id in self._function_execution_info
            ):
                break
            elif not self._worker.actor_id.is_nil() and (
                self._worker.actor_id in self._worker.actors
            ):
                break
        if time.time() - start_time > timeout:
            warning_message = (
                "This worker was asked to execute a function "
                f"that has not been registered ({function_descriptor}, "
                f"node={self._worker.node_ip_address}, "
                f"worker_id={self._worker.worker_id.hex()}, "
                f"pid={os.getpid()}). You may have to restart Ray."
            )
            if not warning_sent:
                logger.error(warning_message)
                ray._private.utils.push_error_to_driver(
                    self._worker,
                    ray_constants.WAIT_FOR_FUNCTION_PUSH_ERROR,
                    warning_message,
                    job_id=job_id,
                )
            warning_sent = True
        # Try importing in case the worker did not get notified, or the
        # importer thread did not run.
        self._worker.import_thread._do_importing()
        time.sleep(0.001)
909cdea3cdbebb11ea2e62355b99f8bc3008c3ac
158
https://github.com/ray-project/ray.git
652
def _wait_for_function(self, function_descriptor, job_id, timeout=10): start_time = time.time() # Only send the warning once. warning_sent = False while True: with self.lock: if self._worker.actor_id.is_nil() and ( function_descriptor.function_id in self._function_execution_info ): break elif not self._worker.actor_id.is_nil() and ( self._worker.actor_id in self._worker.actors ): break if time.time() - start_time > timeout: warning_message = ( "This worker was asked to execute a function " f"that has not been registered ({function_descriptor}, " f"node={self._worker.node_ip_address}, " f"worker_id={self._worker.worker_id.hex()}, " f"pid={os.getpid()}). You may have to restart Ray." ) if not warning
32
297
_wait_for_function
125
0
1
17
tests/sentry/eventstore/test_models.py
90,825
ref: fix sentry.models test (#35382) * ref: fix sentry.models test * ref: split and rename sentry.models.tests so it actually runs
sentry
12
Python
75
test_models.py
def test_event_node_id(self):
    # Create an event without specifying node_id. A node_id should be generated
    e1 = Event(project_id=1, event_id="abc", data={"foo": "bar"})
    assert e1.data.id is not None, "We should have generated a node_id for this event"
    e1_node_id = e1.data.id

    e1.data.save()
    e1_body = nodestore.get(e1_node_id)
    assert e1_body == {"foo": "bar"}, "The event body should be in nodestore"

    e1 = Event(project_id=1, event_id="abc")
    assert e1.data.data == {"foo": "bar"}, "The event body should be loaded from nodestore"
    assert e1.data.id == e1_node_id, "The event's node_id should be the same after load"

    # Event with no data should not be saved to nodestore
    e2 = Event(project_id=1, event_id="mno", data=None)
    e2_node_id = e2.data.id
    assert e2.data.data == {}  # NodeData returns {} by default

    eventstore.bind_nodes([e2], "data")
    assert e2.data.data == {}
    e2_body = nodestore.get(e2_node_id)
    assert e2_body is None
b75841f096c4c8ebda0dffa159f16dbddcf2fc69
167
https://github.com/getsentry/sentry.git
251
def test_event_node_id(self): # Create an event without specifying node_id. A node_id should be generated e1 = Event(project_id=1, event_id="abc", data={"foo": "bar"}) assert e1.data.id is not None, "We should have generated a node_id for this event" e1_node_id = e1.data.id e1.data.save() e1_body = nodestore.get(e1_node_id) assert e1_body == {"foo": "bar"}, "The event body should be in nodestore" e1 = Event(project_id=1, event_id="abc") assert e1.data.data == {"foo": "bar"}, "The event body should be loaded from nodestore" assert e1.data.id == e1_node_id, "The event's node_id should be the same after load" # Event with no data should not be saved to nodestore e2 = Event(project_id=1, event_id
18
282
test_event_node_id
37
0
7
12
.venv/lib/python3.8/site-packages/pip/_vendor/html5lib/filters/optionaltags.py
62,530
upd; format
transferlearning
16
Python
26
optionaltags.py
def __iter__(self):
    for previous, token, next in self.slider():
        type = token["type"]
        if type == "StartTag":
            if (token["data"] or
                    not self.is_optional_start(token["name"], previous, next)):
                yield token
        elif type == "EndTag":
            if not self.is_optional_end(token["name"], next):
                yield token
        else:
            yield token
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
79
https://github.com/jindongwang/transferlearning.git
193
def __iter__(self): for previous, token, next in self.slider(): type = token["type"] if type == "StartTag": if (token["data"] or not self.is_optional_start(token["name"], previous, next)): yield token elif type == "EndTag": if not s
9
135
__iter__
37
0
3
12
airflow/providers/sftp/hooks/sftp.py
43,253
Convert sftp hook to use paramiko instead of pysftp (#24512)
airflow
17
Python
28
sftp.py
def get_conn(self) -> paramiko.SFTPClient:  # type: ignore[override]
    if self.conn is None:
        # TODO: remove support for ssh_hook when it is removed from SFTPOperator
        if self.ssh_hook is not None:
            self.conn = self.ssh_hook.get_conn().open_sftp()
        else:
            self.conn = super().get_conn().open_sftp()
    return self.conn
f3aacebe502c4ea5dc2b7d29373539296fa037eb
61
https://github.com/apache/airflow.git
122
def get_conn(self) -> paramiko.SFTPClient: # type: ignore[override] if self.conn is None: # TODO: remove support for ssh_hook when it is removed from SFTPO
8
105
get_conn
75
0
3
16
tests/models/deformable_detr/test_modeling_deformable_detr.py
33,685
Add Deformable DETR (#17281) * First draft * More improvements * Improve model, add custom CUDA code * Import torch before * Add script that imports custom layer * Add everything in new ops directory * Import custom layer in modeling file * Fix ARCHIVE_MAP typo * Creating the custom kernel on the fly. * Import custom layer in modeling file * More improvements * Fix CUDA loading * More improvements * Improve conversion script * Improve conversion script * Make it work until encoder_outputs * Make forward pass work * More improvements * Make logits match original implementation * Make implementation also support single_scale model * Add support for single_scale and dilation checkpoint * Add support for with_box_refine model * Support also two stage model * Improve tests * Fix more tests * Make more tests pass * Upload all models to the hub * Clean up some code * Improve decoder outputs * Rename intermediate hidden states and reference points * Improve model outputs * Move tests to dedicated folder * Improve model outputs * Fix retain_grad test * Improve docs * Clean up and make test_initialization pass * Improve variable names * Add copied from statements * Improve docs * Fix style * Improve docs * Improve docs, move tests to model folder * Fix rebase * Remove DetrForSegmentation from auto mapping * Apply suggestions from code review * Improve variable names and docstrings * Apply some more suggestions from code review * Apply suggestion from code review * better docs and variables names * hint to num_queries and two_stage confusion * remove asserts and code refactor * add exception if two_stage is True and with_box_refine is False * use f-strings * Improve docs and variable names * Fix code quality * Fix rebase * Add require_torch_gpu decorator * Add pip install ninja to CI jobs * Apply suggestion of @sgugger * Remove DeformableDetrForObjectDetection from auto mapping * Remove DeformableDetrModel from auto mapping * Add model to toctree * Add model back to mappings, skip model in pipeline tests * Apply @sgugger's suggestion * Fix imports in the init * Fix copies * Add CPU implementation * Comment out GPU function * Undo previous change * Apply more suggestions * Remove require_torch_gpu annotator * Fix quality * Add logger.info * Fix logger * Fix variable names * Fix initializaztion * Add missing initialization * Update checkpoint name * Add model to doc tests * Add CPU/GPU equivalence test * Add Deformable DETR to pipeline tests * Skip model for object detection pipeline Co-authored-by: Nicolas Patry <patry.nicolas@protonmail.com> Co-authored-by: Nouamane Tazi <nouamane98@gmail.com> Co-authored-by: Sylvain Gugger <Sylvain.gugger@gmail.com>
transformers
15
Python
52
test_modeling_deformable_detr.py
def prepare_config_and_inputs(self):
    pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

    pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device)

    labels = None
    if self.use_labels:
        # labels is a list of Dict (each Dict being the labels for a given example in the batch)
        labels = []
        for i in range(self.batch_size):
            target = {}
            target["class_labels"] = torch.randint(
                high=self.num_labels, size=(self.n_targets,), device=torch_device
            )
            target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
            target["masks"] = torch.rand(self.n_targets, self.image_size, self.image_size, device=torch_device)
            labels.append(target)

    config = self.get_config()

    return config, pixel_values, pixel_mask, labels
59407bbeb31fff8340938768051c9daabd38d7a7
170
https://github.com/huggingface/transformers.git
258
def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torc
26
257
prepare_config_and_inputs
18
0
1
4
pandas/tests/reshape/test_from_dummies.py
167,518
Initial draft: from_dummies (#41902)
pandas
12
Python
17
test_from_dummies.py
def test_with_prefix_basic(dummies_basic):
    expected = DataFrame({"col1": ["a", "b", "a"], "col2": ["b", "a", "c"]})
    result = from_dummies(dummies_basic, sep="_")
    tm.assert_frame_equal(result, expected)
ed55bdf198590dd572f2e546c7b2afe7ae98ba74
49
https://github.com/pandas-dev/pandas.git
26
def test_with_prefix_basic(dummies_basic): expected = DataFrame({"col1": ["a", "b", "a"], "col2": ["b", "a", "c"]}) result = fr
9
89
test_with_prefix_basic
14
0
2
6
homeassistant/components/rainmachine/binary_sensor.py
306,924
Fix bug with 1st gen RainMachine controllers and unknown API calls (#78070) Co-authored-by: epenet <6771947+epenet@users.noreply.github.com>
core
14
Python
14
binary_sensor.py
def update_from_latest_data(self) -> None:
    if self.entity_description.key == TYPE_FLOW_SENSOR:
        self._attr_is_on = self.coordinator.data.get("system", {}).get(
            "useFlowSensor"
        )
9fc9d50e077d17cd35822701a8c7b85efa80e49d
39
https://github.com/home-assistant/core.git
65
def update_from_latest_data(self) -> None: if self.ent
9
68
update_from_latest_data
135
0
7
41
freqtrade/freqai/prediction_models/RL/RLPrediction_env.py
150,590
add reward function
freqtrade
13
Python
85
RLPrediction_env.py
def report(self):
    # get total trade
    long_trade = 0
    short_trade = 0
    neutral_trade = 0
    for trade in self.trade_history:
        if trade['type'] == 'long':
            long_trade += 1
        elif trade['type'] == 'short':
            short_trade += 1
        else:
            neutral_trade += 1

    negative_trade = 0
    positive_trade = 0
    for tr in self.close_trade_profit:
        if tr < 0.:
            negative_trade += 1
        if tr > 0.:
            positive_trade += 1

    total_trade_lr = negative_trade+positive_trade
    total_trade = long_trade + short_trade
    sharp_ratio = self.sharpe_ratio()
    sharp_log = self.get_sharpe_ratio()

    from tabulate import tabulate

    headers = ["Performance", ""]
    performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)],
                        ["Total reward", "{0:.3f}".format(self.total_reward)],
                        ["Start profit(unit)", "{0:.2f}".format(1.)],
                        ["End profit(unit)", "{0:.3f}".format(self._total_profit)],
                        ["Sharp ratio", "{0:.3f}".format(sharp_ratio)],
                        ["Sharp log", "{0:.3f}".format(sharp_log)],
                        # ["Sortino ratio", "{0:.2f}".format(0) + '%'],
                        ["winrate", "{0:.2f}".format(positive_trade*100/total_trade_lr) + '%']
                        ]
    tabulation = tabulate(performanceTable, headers, tablefmt="fancy_grid", stralign="center")
    print(tabulation)

    result = {
        "Start": "{0:.2f}".format(1.),
        "End": "{0:.2f}".format(self._total_profit),
        "Sharp": "{0:.3f}".format(sharp_ratio),
        "Winrate": "{0:.2f}".format(positive_trade*100/total_trade_lr)
    }
    return result
8eeaab27467fa2e0bdc7314bdb888998bbb20af8
273
https://github.com/freqtrade/freqtrade.git
640
def report(self): # get total trade long_trade = 0 short_trade = 0 neutral_trade = 0 for trade in self.trade_history: if trade['type'] == 'long': long_trade += 1 elif trade['type'] == 'short': short_trade += 1 else: neutral_trade += 1 negative_trade = 0 positive_trade = 0 for tr in self.close_trade_profit: if tr < 0.: negative_trade += 1 if tr > 0.: positive_trade += 1 total_trade_lr = negative_trade+positive_trade total_trade = long_trade + short_trade sharp_ratio = self.sharpe_ratio() sharp_log = self.get_sharpe_ratio() from tabulate import tabulate headers = ["Performance", ""] performanceTable = [["Total Trade", "{0:.2f}".format(total_trade)], ["Total reward", "{0:.3f}".format(self.total_reward)], ["Start profit(unit)", "{0:.2f}".format(1.)], ["End profit(unit)", "{0:.3f}".format(self._to
28
469
report
86
0
4
18
tests/models/layoutlmv3/test_tokenization_layoutlmv3.py
38,888
Add LayoutLMv3 (#17060) * Make forward pass work * More improvements * Remove unused imports * Remove timm dependency * Improve loss calculation of token classifier * Fix most tests * Add docs * Add model integration test * Make all tests pass * Add LayoutLMv3FeatureExtractor * Improve integration test + make fixup * Add example script * Fix style * Add LayoutLMv3Processor * Fix style * Add option to add visual labels * Make more tokenizer tests pass * Fix more tests * Make more tests pass * Fix bug and improve docs * Fix import of processors * Improve docstrings * Fix toctree and improve docs * Fix auto tokenizer * Move tests to model folder * Move tests to model folder * change default behavior add_prefix_space * add prefix space for fast * add_prefix_spcae set to True for Fast * no space before `unique_no_split` token * add test to hightligh special treatment of added tokens * fix `test_batch_encode_dynamic_overflowing` by building a long enough example * fix `test_full_tokenizer` with add_prefix_token * Fix tokenizer integration test * Make the code more readable * Add tests for LayoutLMv3Processor * Fix style * Add model to README and update init * Apply suggestions from code review * Replace asserts by value errors * Add suggestion by @ducviet00 * Add model to doc tests * Simplify script * Improve README * a step ahead to fix * Update pair_input_test * Make all tokenizer tests pass - phew * Make style * Add LayoutLMv3 to CI job * Fix auto mapping * Fix CI job name * Make all processor tests pass * Make tests of LayoutLMv2 and LayoutXLM consistent * Add copied from statements to fast tokenizer * Add copied from statements to slow tokenizer * Remove add_visual_labels attribute * Fix tests * Add link to notebooks * Improve docs of LayoutLMv3Processor * Fix reference to section Co-authored-by: SaulLu <lucilesaul.com@gmail.com> Co-authored-by: Niels Rogge <nielsrogge@Nielss-MacBook-Pro.local>
transformers
16
Python
45
test_tokenization_layoutlmv3.py
def test_number_of_added_tokens(self):
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            # test 1: single sequence
            words, boxes = self.get_words_and_boxes()

            sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
            attached_sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)

            # Method is implemented (e.g. not GPT-2)
            if len(attached_sequences) != 2:
                self.assertEqual(
                    tokenizer.num_special_tokens_to_add(pair=False), len(attached_sequences) - len(sequences)
                )

            # test 2: two sequences
            question, words, boxes = self.get_question_words_and_boxes()

            sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=False)
            attached_sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=True)

            # Method is implemented (e.g. not GPT-2)
            if len(attached_sequences) != 2:
                self.assertEqual(
                    tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)
                )
31ee80d55673f32c0f5d50936f371e661b74b21a
179
https://github.com/huggingface/transformers.git
412
def test_number_of_added_tokens(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # test 1: single sequence words, boxes = self.get_words_and_boxes() sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) attached_sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=True) # Method is implemented (e.g. not GPT-2) if len(attached_sequences) != 2: self.assertEqual( tokenizer.num_special_tokens_to_add(pair=False), len(attached_sequences) - len(sequences) ) # test 2: two sequences question, words, boxes = self.get_question_words_and_boxes() sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=False) attached_seque
22
289
test_number_of_added_tokens
9
0
1
2
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
63,279
upd; format
transferlearning
10
Python
9
pyparsing.py
def _defaultExceptionDebugAction(instring, loc, expr, exc):
    print("Exception raised:" + _ustr(exc))
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
20
https://github.com/jindongwang/transferlearning.git
11
def _defaultExceptionDebugAction(instring, loc, expr, exc): print("Exception raised:"
7
32
_defaultExceptionDebugAction
179
0
21
53
ppdet/optimizer/optimizer.py
211,007
fix params groups.trainable (#6354) * add FGD distill code * add configs * add doc * fix pretrain * pre-commit * fix ci * fix readme * fix readme * fix ci * fix param groups * fix * fix doc * fix doc,test=document_fix * fix params groups
PaddleDetection
19
Python
105
optimizer.py
def __call__(self, learning_rate, model=None):
    if self.clip_grad_by_norm is not None:
        grad_clip = nn.ClipGradByGlobalNorm(
            clip_norm=self.clip_grad_by_norm)
    else:
        grad_clip = None
    if self.regularizer and self.regularizer != 'None':
        reg_type = self.regularizer['type'] + 'Decay'
        reg_factor = self.regularizer['factor']
        regularization = getattr(regularizer, reg_type)(reg_factor)
    else:
        regularization = None

    optim_args = self.optimizer.copy()
    optim_type = optim_args['type']
    del optim_args['type']

    if optim_type == 'AdamWDL':
        return build_adamwdl(model, lr=learning_rate, **optim_args)

    if optim_type != 'AdamW':
        optim_args['weight_decay'] = regularization

    op = getattr(optimizer, optim_type)

    if 'param_groups' in optim_args:
        assert isinstance(optim_args['param_groups'], list), ''
        param_groups = optim_args.pop('param_groups')

        params, visited = [], []
        for group in param_groups:
            assert isinstance(group, dict) and 'params' in group and isinstance(
                group['params'], list), ''
            _params = {
                n: p
                for n, p in model.named_parameters()
                if any([k in n for k in group['params']]) and p.trainable is True
            }
            _group = group.copy()
            _group.update({'params': list(_params.values())})
            params.append(_group)
            visited.extend(list(_params.keys()))

        ext_params = [
            p for n, p in model.named_parameters()
            if n not in visited and p.trainable is True
        ]
        if len(ext_params) < len(model.parameters()):
            params.append({'params': ext_params})
        elif len(ext_params) > len(model.parameters()):
            raise RuntimeError
    else:
        _params = model.parameters()
        params = [param for param in _params if param.trainable is True]

    return op(learning_rate=learning_rate,
              parameters=params,
              grad_clip=grad_clip,
              **optim_args)
35b1c4a4e52bd0416725984a1c360d20456f2c29
391
https://github.com/PaddlePaddle/PaddleDetection.git
852
def __call__(self, learning_rate, model=None): if self.clip_grad_by_norm is not None: grad_clip = nn.ClipGradByGlobalNorm( clip_norm=self.clip_grad_by_norm) else: grad_clip = None if self.regularizer and self.regularizer != 'None': reg_type = self.regularizer['type'] + 'Decay' reg_factor = self.regularizer['factor'] regularization = getattr(regularizer, reg_type)(reg_factor) else: regularization = None optim_args = self.optimizer.copy() optim_type = optim_args['type'] del optim_args['type'] if optim_type == 'AdamWDL': return build_adamwdl(model, lr=learning_rate, **optim_args) if optim_type != 'AdamW':
47
641
__call__
73
0
1
36
tests/sentry/api/endpoints/test_organization_dashboard_widget_details.py
98,790
fix(widget-builder): Do widget validation with columns and aggregates as fields (#33986) There was code to construct columns and aggregates from fields to account for stale frontend during the transition to split up fields, but enough time has passed that we can use the columns and aggregates as the source of truth for validation
sentry
16
Python
64
test_organization_dashboard_widget_details.py
def test_issue_search_condition(self):
    self.user = self.create_user(is_superuser=False)
    self.create_member(
        user=self.user, organization=self.organization, role="member", teams=[self.team]
    )
    self.login_as(self.user)

    event = self.store_event(
        data={
            "event_id": "a" * 32,
            "transaction": "/example",
            "message": "how to make fast",
            "timestamp": iso_format(before_now(minutes=2)),
            "fingerprint": ["group_1"],
        },
        project_id=self.project.id,
    )

    data = {
        "title": "EPM Big Number",
        "displayType": "big_number",
        "queries": [
            {
                "name": "",
                "fields": ["epm()"],
                "columns": [],
                "aggregates": ["epm()"],
                "conditions": f"issue:{event.group.qualified_short_id}",
                "orderby": "",
            }
        ],
    }
    response = self.do_request(
        "post",
        self.url(),
        data=data,
    )
    assert response.status_code == 200, response.data
a88dd006ae647debe4a9d17ad3908d6fdcc576ce
183
https://github.com/getsentry/sentry.git
489
def test_issue_search_condition(self): self.user = self.create_user(is_superuser=False) self.create_member( user=self.user, organization=self.organization, role="member", teams=[self.team] ) self.login_as(self.user) event = self.store_event( data={ "event_id": "a" * 32, "transaction": "/example", "message": "how to make fast", "timestamp": iso_format(before_now(minutes
26
326
test_issue_search_condition
77
0
9
15
src/accelerate/commands/config/config_args.py
338,317
Add support for torch dynamo (#829) * Add torch dynamo optimizations * More work * Fix enum values * Add to basic config * fix more tests * Apply suggestions from code review Co-authored-by: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com> Co-authored-by: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com>
accelerate
13
Python
48
config_args.py
def from_yaml_file(cls, yaml_file=None):
    yaml_file = default_yaml_config_file if yaml_file is None else yaml_file
    with open(yaml_file, "r", encoding="utf-8") as f:
        config_dict = yaml.safe_load(f)
    if "compute_environment" not in config_dict:
        config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE

    if "mixed_precision" not in config_dict:
        config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else "no"
    if "fp16" in config_dict:  # Convert the config to the new format.
        del config_dict["fp16"]
    if "use_cpu" not in config_dict:
        config_dict["use_cpu"] = False
    if "dynamo_backend" not in config_dict:
        config_dict["dynamo_backend"] = DynamoBackend.NO

    return cls(**config_dict)
74642aac95a261148d32324688c1d6391775aded
121
https://github.com/huggingface/accelerate.git
199
def from_yaml_file(cls, yaml_file=None): yaml_file = default_yaml_config_file if yaml_file is None else yaml_file with open(yaml_file, "r", encoding="utf-8") as f: config_dict = yaml.safe_load(f) if "compute_environment" not in config_dict: config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE if "mixed_precision" not in config_dict: config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else "no" if "fp16" in config_dict: # Convert th
14
217
from_yaml_file
21
0
1
9
wagtail/images/views/chooser.py
78,707
Use ChosenResponseMixin for returning 'image chosen' responses
wagtail
10
Python
18
chooser.py
def get_chosen_response_data(self, image):
    response_data = super().get_chosen_response_data(image)
    preview_image = image.get_rendition("max-165x165")
    response_data["preview"] = {
        "url": preview_image.url,
        "width": preview_image.width,
        "height": preview_image.height,
    }
    return response_data
dd892a650827770318d21e5984ca5d984a510655
53
https://github.com/wagtail/wagtail.git
96
def get_chosen_response_data(self, image): response_data = super().get_chosen_response_data(image) preview_image = image.get_rendition("max-165x165") response_data["preview"] = { "url":
10
93
get_chosen_response_data
13
0
1
5
tests/renderables/test_underline_bar.py
182,064
Underline bar renderable
textual
10
Python
13
test_underline_bar.py
def test_highlight_out_of_bounds_end():
    bar = UnderlineBar(highlight_range=(3, 9), width=6)
    assert render(bar) == (
        f"{GREY}━━{STOP}{GREY}╸{STOP}{MAGENTA}━━━{STOP}"
    )
5651e97a64b850b80f42799e7f7d868f1f11ab7b
30
https://github.com/Textualize/textual.git
28
def test_highlight_out_of_bounds_end(): bar = UnderlineBar(highlight_range=(3, 9), wi
9
71
test_highlight_out_of_bounds_end
15
0
1
6
Tests/test_file_tga.py
242,971
When reading past the end of a scan line, reduce bytes left
Pillow
11
Python
11
test_file_tga.py
def test_cross_scan_line():
    with Image.open("Tests/images/cross_scan_line.tga") as im:
        assert_image_equal_tofile(im, "Tests/images/cross_scan_line.png")

    with Image.open("Tests/images/cross_scan_line_truncated.tga") as im:
        with pytest.raises(OSError):
            im.load()
f0353c599676d694692174e32dc3acee2912b4a0
43
https://github.com/python-pillow/Pillow.git
45
def test_cross_scan_line(): with Image.open("Tests/images/cross_scan_line.tga") as im: assert_image_equal_tofile(im, "Tests/images/cross_scan_line.png") with Image.open("Tests/im
9
85
test_cross_scan_line
15
0
1
3
src/diffusers/models/resnet.py
335,658
save intermediate grad tts
diffusers
9
Python
13
resnet.py
def forward(self, x, mask):
    output = self.block(x * mask)
    return output * mask

# unet_score_estimation.py
0926dc24180a8931de6081f6de7bc44c1366678c
23
https://github.com/huggingface/diffusers.git
27
def forward(self, x, mask): output = self.block(x * mask) return output * m
6
36
forward
13
0
1
3
tests/orion/models/test_agents.py
53,909
Add models layer for agents
prefect
10
Python
13
test_agents.py
async def test_read_agent(self, agents, session):
    read_agent = await models.agents.read_agents(session=session)
    assert len(read_agent) == len(agents)
bcee6a35d831f5dfd55d77c0ad3eee3ea7b41e57
32
https://github.com/PrefectHQ/prefect.git
26
async def test_read_agent(self, agents, session): read_agent = await models.agents.read_agents(session=session)
8
51
test_read_agent
75
1
1
16
tests/rest/client/test_login.py
246,605
Add type hints to `tests/rest/client` (#12066)
synapse
12
Python
60
test_login.py
def test_deactivated_user(self) -> None:
    redirect_url = "https://legit-site.com/"

    # First login (to create the user).
    self._test_redirect(redirect_url)

    # Deactivate the account.
    self.get_success(
        self.deactivate_account_handler.deactivate_account(
            self.user_id, False, create_requester(self.user_id)
        )
    )

    # Request the CAS ticket.
    cas_ticket_url = (
        "/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket"
        % (urllib.parse.quote(redirect_url))
    )

    # Get Synapse to call the fake CAS and serve the template.
    channel = self.make_request("GET", cas_ticket_url)

    # Because the user is deactivated they are served an error template.
    self.assertEqual(channel.code, 403)
    self.assertIn(b"SSO account deactivated", channel.result["body"])

@skip_unless(HAS_JWT, "requires jwt")
64c73c6ac88a740ee480a0ad1f9afc8596bccfa4
@skip_unless(HAS_JWT, "requires jwt")
91
https://github.com/matrix-org/synapse.git
238
def test_deactivated_user(self) -> None: redirect_url = "https://legit-site.com/" # First login (to create the user). self._test_redirect(redirect_url) # Deactivate the account. self.get_success( self.deactivate_account_handler.deactivate_account( self.user_id, False, create_requester(self.user_id) ) ) # Request the CAS ticket. cas_ticket_url = ( "/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket" % (urllib.parse.quote(redirect_url)) ) # Get Synapse to call the fake CAS and serve the template. channel = self.make_request("GET", cas_ticket_url) # Because the user is deactivated they are served an error template. self.assertEqual(channel.code, 403) self.assertIn(b"SSO account
21
169
test_deactivated_user
87
0
4
17
src/transformers/models/flaubert/modeling_tf_flaubert.py
33,795
TF: tf.debugging assertions without tf.running_eagerly() protection (#19030)
transformers
15
Python
58
modeling_tf_flaubert.py
def get_masks(slen, lengths, causal, padding_mask=None):
    bs = shape_list(lengths)[0]
    if padding_mask is not None:
        mask = padding_mask
    else:
        # assert lengths.max().item() <= slen
        alen = tf.range(slen, dtype=lengths.dtype)
        mask = alen < tf.expand_dims(lengths, axis=1)

    # attention mask is the same as mask, or triangular inferior attention (causal)
    if causal:
        attn_mask = tf.less_equal(
            tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1))
        )
    else:
        attn_mask = mask

    # sanity check
    # assert shape_list(mask) == [bs, slen]
    tf.debugging.assert_equal(shape_list(mask), [bs, slen])
    if causal:
        tf.debugging.assert_equal(shape_list(attn_mask), [bs, slen, slen])

    return mask, attn_mask
31be02f14b1724c677bb2e32a5101c7cb6448556
162
https://github.com/huggingface/transformers.git
190
def get_masks(slen, lengths, causal, padding_mask=None): bs = shape_list(lengths)[0] if padding_mask is not None: mask = padding_mask else: # assert lengths.max().item() <= slen alen = tf.range(slen, dtype=lengths.dtype) mask = alen < tf.expand_dims(lengths, axis=1) # attention mask is the same as mask, or triangular inferior attention (causal) if causal: attn_mask = tf.less_equal( tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1)) ) else: at
20
243
get_masks
7
0
1
3
mitmproxy/tools/console/master.py
252,600
Replace blinker with custom implementation (#5528) * replace blinker with custom implementation The major benefit here is type checking, followed by proper support for async receivers. * fix compatibility with Python 3.9 * fix nits * try harder to force gc * try harderer * coverage++ * coverage++ * nits
mitmproxy
9
Python
7
master.py
def __setattr__(self, name, value):
    super().__setattr__(name, value)
    signals.update_settings.send()
f4dc2f2cfdb40e04022e4deb4aa67578deff5d23
26
https://github.com/mitmproxy/mitmproxy.git
20
def __setattr__(self, name, value): super().__setattr__(name, value) signals.update_set
8
41
__setattr__
25
0
1
9
tests/utilities/test_pydantic.py
56,766
Add support for literal "type" fields to pydantic utility
prefect
11
Python
24
test_pydantic.py
def test_both_type_field_and_dispatch_key_cannot_be_set(self):
    with pytest.raises(
        ValueError,
        match="Model class 'Base' defines a `__dispatch_key__` and a type field. Only one of these may be defined for dispatch",
    ):
f5cc99efbbe532e89b123c9afc9179ac36d3e2da
34
https://github.com/PrefectHQ/prefect.git
60
def test_both_type_field_and_dispatch_key_cannot_be_set(self): with pytest.raises( ValueError, match="Mod
6
32
test_both_type_field_and_dispatch_key_cannot_be_set
28
0
1
8
pandas/tests/arrays/test_datetimes.py
169,414
BUG: DatetimeArray-datetimelike mixed resos (#48894)
pandas
12
Python
21
test_datetimes.py
def test_sub_datetimelike_scalar_mismatch(self):
    dti = pd.date_range("2016-01-01", periods=3)
    dta = dti._data._as_unit("us")

    ts = dta[0]._as_unit("s")

    result = dta - ts
    expected = (dti - dti[0])._data._as_unit("us")
    assert result.dtype == "m8[us]"
    tm.assert_extension_array_equal(result, expected)
8c3c9e3bdc6e6870036428bd192c8fa92b93c295
74
https://github.com/pandas-dev/pandas.git
76
def test_sub_datetimelike_scalar_mismatch(self): dti = pd.date_range("2016-01-01", periods=3) dta = dti._data._as_unit("us") ts = dta[0]._as_unit("s") result = dta - ts
15
125
test_sub_datetimelike_scalar_mismatch
19
0
1
2
.venv/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/requirements.py
61,136
upd; format
transferlearning
7
Python
18
requirements.py
def project_name(self):
    # type: () -> NormalizedName
    # No need to canonicalise - the candidate did this
    return self.candidate.project_name
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
11
https://github.com/jindongwang/transferlearning.git
39
def project_name(self): # type: () -> NormalizedName # No nee
3
20
project_name
16
0
1
6
tests/snuba/api/endpoints/test_organization_events_v2.py
92,725
tests(discover): Improve stability of eventsv2 tests (#36641) Same motivation as #36619, this aims to improve the stability of the eventsv2 tests by moving the event timestamps further in the past.
sentry
12
Python
13
test_organization_events_v2.py
def setUp(self):
    super().setUp()
    self.ten_mins_ago = iso_format(before_now(minutes=10))
    self.eleven_mins_ago = iso_format(before_now(minutes=11))
    self.transaction_data = load_data("transaction", timestamp=before_now(minutes=10))
    self.features = {}
ef5a739249de199b25d2cba7a2ee52820d9f34de
61
https://github.com/getsentry/sentry.git
50
def setUp(self): super().
12
102
setUp
147
0
1
53
netbox/dcim/tests/test_cablepaths.py
264,821
Update tests
netbox
14
Python
86
test_cablepaths.py
def test_206_unidirectional_split_paths(self):
    interface1 = Interface.objects.create(device=self.device, name='Interface 1')
    interface2 = Interface.objects.create(device=self.device, name='Interface 2')
    interface3 = Interface.objects.create(device=self.device, name='Interface 3')
    rearport1 = RearPort.objects.create(device=self.device, name='Rear Port 1', positions=4)
    frontport1_1 = FrontPort.objects.create(
        device=self.device, name='Front Port 1:1', rear_port=rearport1, rear_port_position=1
    )
    frontport1_2 = FrontPort.objects.create(
        device=self.device, name='Front Port 1:2', rear_port=rearport1, rear_port_position=2
    )

    # Create cables 1
    cable1 = Cable(terminations=[
        CableTermination(cable_end='A', termination=interface1),
        CableTermination(cable_end='B', termination=rearport1),
    ])
    cable1.save()
    self.assertPathExists(
        (interface1, cable1, rearport1),
        is_complete=False, is_split=True
    )
    self.assertEqual(CablePath.objects.count(), 1)

    # Create cables 2-3
    cable2 = Cable(terminations=[
        CableTermination(cable_end='A', termination=interface2),
        CableTermination(cable_end='B', termination=frontport1_1),
    ])
    cable2.save()
    cable3 = Cable(terminations=[
        CableTermination(cable_end='A', termination=interface3),
        CableTermination(cable_end='B', termination=frontport1_2),
    ])
    cable3.save()
    self.assertPathExists(
        (interface2, cable2, frontport1_1, rearport1, cable1, interface1),
        is_complete=True, is_active=True
    )
    self.assertPathExists(
        (interface3, cable3, frontport1_2, rearport1, cable1, interface1),
        is_complete=True, is_active=True
    )
    self.assertEqual(CablePath.objects.count(), 3)

    # Delete cable 1
    cable1.delete()

    # Check that the partial path was deleted and the two complete paths are now partial
    self.assertPathExists(
        (interface2, cable2, frontport1_1, rearport1),
        is_complete=False
    )
    self.assertPathExists(
        (interface3, cable3, frontport1_2, rearport1),
        is_complete=False
    )
    self.assertEqual(CablePath.objects.count(), 2)
304282bd4f20aa80b4826b47777b87972ac11832
397
https://github.com/netbox-community/netbox.git
630
def test_206_unidirectional_split_paths(self): interface1 = Interface.objects.create(device=self.device, name='Interface 1') interface2 = Interface.objects.create(device=self.device, name='Interface 2') interface3 = Interface.objects.create(device=self.device, name='Interface 3') rearport1 = RearPort.objects.create(device=self.device, name='Rear Port 1', positions=4) frontport1_1 = FrontPort.objects.create( device=self.device, name='Front Port 1:1', rear_port=rearport1, rear_port_position=1 ) frontport1_2 = FrontPort.objects.create( device=self.device, name='Front Port 1:2', rear_port=rearport1, rear_port_position=2 ) # Create cables 1 cable1 = Cable(terminations=[ CableTermination(cable_end='A', termination=interface1), CableTermination(cable_end='B', termination=rearport1), ]) cable1.save() self.assertPathExists( (interface1, cable1, rearport1), is_complete=False, is_split=True ) self.assertEqual(CablePath.objects.count(), 1) # Create cables 2-3 cable2 = Cable(terminations=[ CableTermi
35
606
test_206_unidirectional_split_paths
22
0
3
6
lib/matplotlib/pyplot.py
109,811
Generalize validation that pyplot commands are documented Until now, the test made some exclusions (_NON_PLOT_COMMANDS) and reqired all functions to be documented in a single autosummary block. This change ensures the documentation of the _NON_PLOT_COMMANDS and it allows the commands to be spread across arbitrary many autosummary sections. This is in preparation of regrouping the pyplot commands similar to the Axes documentation. This also pending deprecates `pyplot.get_plot_commands`, which should not be a public function. I'm defensive by using pending, because if `get_plot_commands` is used somewhere, that's most likely some downstream lib and we want to give them time to adapt. Co-authored-by: hannah <story645@gmail.com>
matplotlib
9
Python
20
pyplot.py
def get_plot_commands(): NON_PLOT_COMMANDS = { 'connect', 'disconnect', 'get_current_fig_manager', 'ginput', 'new_figure_manager', 'waitforbuttonpress'} return (name for name in _get_pyplot_commands() if name not in NON_PLOT_COMMANDS)
352bb1fb5f30bfdda8c0240b463afef952944efd
35
https://github.com/matplotlib/matplotlib.git
56
def get_plot_commands(): NON_
4
64
get_plot_commands
68
0
3
18
pandas/tests/frame/test_reductions.py
171,100
DEPR: Enforce deprecation of numeric_only=None in DataFrame aggregations (#49551) * WIP * DEPR: Enforce deprecation of numeric_only=None in DataFrame aggregations * Partial reverts * numeric_only in generic/series, fixup * cleanup * Remove docs warning * fixups * Fixups
pandas
14
Python
40
test_reductions.py
def test_any_all_np_func(self, func, data, expected): # GH 19976 data = DataFrame(data) if any(is_categorical_dtype(x) for x in data.dtypes): with pytest.raises( TypeError, match="dtype category does not support reduction" ): func(data) # method version with pytest.raises( TypeError, match="dtype category does not support reduction" ): getattr(DataFrame(data), func.__name__)(axis=None) else: result = func(data) assert isinstance(result, np.bool_) assert result.item() is expected # method version result = getattr(DataFrame(data), func.__name__)(axis=None) assert isinstance(result, np.bool_) assert result.item() is expected
b7ea7c6dfd100c40b0bc45aacf6d92c5c22f2e63
136
https://github.com/pandas-dev/pandas.git
287
def test_any_all_np_func(self, func, data, expected): # GH 19976 data = DataFrame(data) if any(is_categorical_dtype(x) for x in data.dtypes): with pytest.raises(
22
221
test_any_all_np_func
34
0
3
6
tests/test_segment_tools.py
186,287
Add test for line crop issue
textual
12
Python
22
test_segment_tools.py
def test_line_crop_highlight_reverse_bug(): segments_joined = [Segment('a1あ11bcdaef123a1a')] segments_split = [Segment('a1あ11bcdaef'), Segment('1'), Segment('23a1a')] joined1 = "".join(seg.text for seg in line_crop(segments_split, start=9, end=16, total=23)) joined2 = "".join(seg.text for seg in line_crop(segments_joined, start=9, end=16, total=23)) assert joined1 == joined2
d3c91075c658d1d366824c862f05449ad3f5016d
93
https://github.com/Textualize/textual.git
52
def test_line_crop_highlight_reverse_bug(): segments_joined = [Segment('a1あ11bcdaef123a1a')] segments_split = [Segment('a1あ11bcdaef'), Segment('1'), Segment('23a1a')] joined1 = "".join(seg.text for seg in line_crop(segments_split, start=9, end=16, total=23)) joined2 = "".join(seg.text for seg in line_crop(segments_joined, start=9, end=16, total=23)) assert joined1 == joined2
13
152
test_line_crop_highlight_reverse_bug
148
0
1
78
zerver/tests/test_message_send.py
84,320
realm: Removed WILDCARD_MENTION_POLICY_STREAM_ADMINS option. This commit removes WILDCARD_MENTION_POLICY_STREAM_ADMINS option of wildcard_mention_policy since we are not moving forward with stream administrator concept and instead working on new permssions model as per #19525. We also add a migration to change wildcard_mention_policy of existing realms to WILDCARD_MENTION_POLICY_ADMINS. This change is fine since we were already treating both the setting values as same as stream admin concept was not implemented completely.
zulip
10
Python
77
test_message_send.py
def test_wildcard_mention_restrictions(self) -> None: cordelia = self.example_user("cordelia") iago = self.example_user("iago") polonius = self.example_user("polonius") shiva = self.example_user("shiva") realm = cordelia.realm stream_name = "test_stream" self.subscribe(cordelia, stream_name) self.subscribe(iago, stream_name) self.subscribe(polonius, stream_name) self.subscribe(shiva, stream_name) do_set_realm_property( realm, "wildcard_mention_policy", Realm.WILDCARD_MENTION_POLICY_EVERYONE, acting_user=None, ) self.send_and_verify_wildcard_mention_message("polonius") do_set_realm_property( realm, "wildcard_mention_policy", Realm.WILDCARD_MENTION_POLICY_MEMBERS, acting_user=None, ) self.send_and_verify_wildcard_mention_message("polonius", test_fails=True) # There is no restriction on small streams. self.send_and_verify_wildcard_mention_message("polonius", sub_count=10) self.send_and_verify_wildcard_mention_message("cordelia") do_set_realm_property( realm, "wildcard_mention_policy", Realm.WILDCARD_MENTION_POLICY_FULL_MEMBERS, acting_user=None, ) do_set_realm_property(realm, "waiting_period_threshold", 10, acting_user=None) iago.date_joined = timezone_now() iago.save() shiva.date_joined = timezone_now() shiva.save() cordelia.date_joined = timezone_now() cordelia.save() self.send_and_verify_wildcard_mention_message("cordelia", test_fails=True) self.send_and_verify_wildcard_mention_message("cordelia", sub_count=10) # Administrators and moderators can use wildcard mentions even if they are new. self.send_and_verify_wildcard_mention_message("iago") self.send_and_verify_wildcard_mention_message("shiva") cordelia.date_joined = timezone_now() - datetime.timedelta(days=11) cordelia.save() self.send_and_verify_wildcard_mention_message("cordelia") do_set_realm_property( realm, "wildcard_mention_policy", Realm.WILDCARD_MENTION_POLICY_MODERATORS, acting_user=None, ) self.send_and_verify_wildcard_mention_message("cordelia", test_fails=True) self.send_and_verify_wildcard_mention_message("cordelia", sub_count=10) self.send_and_verify_wildcard_mention_message("shiva") cordelia.date_joined = timezone_now() cordelia.save() do_set_realm_property( realm, "wildcard_mention_policy", Realm.WILDCARD_MENTION_POLICY_ADMINS, acting_user=None ) self.send_and_verify_wildcard_mention_message("shiva", test_fails=True) # There is no restriction on small streams. self.send_and_verify_wildcard_mention_message("shiva", sub_count=10) self.send_and_verify_wildcard_mention_message("iago") do_set_realm_property( realm, "wildcard_mention_policy", Realm.WILDCARD_MENTION_POLICY_NOBODY, acting_user=None ) self.send_and_verify_wildcard_mention_message("iago", test_fails=True) self.send_and_verify_wildcard_mention_message("iago", sub_count=10)
83383090f9461b81bf718afc449bc0b2196db0cd
431
https://github.com/zulip/zulip.git
716
def test_wildcard_mention_restrictions(self) -> None: cordelia = self.example_user("cordelia") iago = self.example_user("iago") polonius = self.example_user("polonius") shiva = self.example_user("shiva") realm = cordelia.realm stream_name = "test_stream" self.subscribe(cordelia, stream_name) self.subscribe(iago, stream_name) self.subscribe(polonius, stream_name) self.subscribe(shiva, stream_name) do_set_realm_property( realm, "wildcard_mention_policy", Realm.WILDCARD_MENTION_POLICY_EVERYONE, acting_user=None, ) self.send_and_verify_wildcard_mention_message("polonius") do_set_realm_property( realm, "wildcard_mention_policy", Realm.WILDCARD_MENTION_POLICY_MEMBERS, acting_user=None, ) self.send_and_verify_wildcard_mention_message("polonius", test_fails=True) # There is no restrict
28
659
test_wildcard_mention_restrictions
46
0
3
8
jax/experimental/sparse/bcoo.py
122,348
[sparse] Make BCSR vmappable. PiperOrigin-RevId: 481257762
jax
13
Python
40
bcoo.py
def _bcoo_to_elt(cont, _, val, axis): if axis is None: return val if axis >= val.n_batch: raise ValueError(f"Cannot map in_axis={axis} for BCOO array with n_batch={val.n_batch}. " "in_axes for batched BCOO operations must correspond to a batch dimension.") return BCOO((cont(val.data, axis), cont(val.indices, axis)), shape=val.shape[:axis] + val.shape[axis + 1:])
69525cd96dc3a55258aeabcd6624ddf909595198
75
https://github.com/google/jax.git
87
def _bcoo_to_elt(cont, _, val, axis): if axis is None: return val if axis >= val.n_batch: raise ValueError(f"Cannot map in_axis={axis} for BCOO array with n_batch={
11
124
_bcoo_to_elt
9
0
1
3
tests/nn_tests.py
181,650
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
tpot
9
Python
9
nn_tests.py
def test_nn_conf_dict(): clf = TPOTClassifier(config_dict=classifier_config_nn) assert clf.config_dict == classifier_config_nn
388616b6247ca4ea8de4e2f340d6206aee523541
19
https://github.com/EpistasisLab/tpot.git
18
def test_nn_conf_dict(): clf = TPOTClassifier(config_dict=classifier_config_nn) assert clf.config
5
34
test_nn_conf_dict
43
0
1
9
dashboard/modules/serve/tests/test_schema.py
145,458
[serve] Expose deployment statuses in REST API (#22611)
ray
11
Python
38
test_schema.py
def test_valid_serve_application_status_schema(self): # Ensure a valid ServeApplicationStatusSchema can be generated serve_application_status_schema = { "deployment_1": {"status": "HEALTHY", "message": ""}, "deployment_2": { "status": "UNHEALTHY", "message": "this deployment is deeply unhealthy", }, } serve_application_status_to_schema(serve_application_status_schema) # This function is defined globally to be accessible via import path
e85540a1a2fb2b5a121dfe54f45342a7046bc3d7
38
https://github.com/ray-project/ray.git
132
def test_valid_serve_application_status_schema(self): # Ensure a valid ServeApplicationStatusSchema can be generated serve_application_status_schema = { "deployment_1": {"status": "HEALTHY", "message": ""}, "deployment_2": { "status": "UNHEALTHY",
4
78
test_valid_serve_application_status_schema
22
0
1
7
python3.10.4/Lib/configparser.py
221,672
add python 3.10.4 for windows
XX-Net
9
Python
22
configparser.py
def readfp(self, fp, filename=None): warnings.warn( "This method will be removed in Python 3.12. " "Use 'parser.read_file()' instead.", DeprecationWarning, stacklevel=2 ) self.read_file(fp, source=filename)
8198943edd73a363c266633e1aa5b2a9e9c9f526
35
https://github.com/XX-net/XX-Net.git
83
def readfp(self, fp, filename=None): warnings.warn( "This method will be removed in Python 3.12. " "Use 'parser.read_file()' instead.", DeprecationWarning, stacklevel=2 ) self.read_file(fp, source=filename)
10
58
readfp
127
0
13
26
body/human_pose/ambiguity_aware/lib/utils/misc.py
8,959
update
insightface
21
Python
64
misc.py
def process_dataset_for_video(path, is_mpi=False): # add some content for specified dataset(h5) f = h5py.File(path, "a") imagenames = [name.decode() for name in f['imagename'][:]] seqnames = ['/'.join(name.split('/')[:-1]) for name in imagenames] if is_mpi: indices_in_seq_ref = [int(name.split('/')[-1].split('.')[0].split('_')[1]) for name in imagenames] # reset indices indices_in_seq = [] i = 0 last_seqname = None for index, seqname in zip(indices_in_seq_ref, seqnames): if last_seqname is not None and seqname != last_seqname: i = 0 last_seqname = seqname indices_in_seq.append(i) i += 1 # indices_in_seq = [i for i, index in enumerate(indices_in_seq)] else: indices_in_seq = [int(name.split('/')[-1]) for name in imagenames] f['index_in_seq'] = indices_in_seq f['seqname'] = [name.encode() for name in seqnames] seq_lens = {} for seqname in seqnames: if seqname not in seq_lens: seq_lens[seqname] = 0 seq_lens[seqname] += 1 f['seqlen'] = [seq_lens[seqname] for seqname in seqnames] f.close()
4b3c8211b3e3eca5f9fdf6553bbd45c9c7587b0d
241
https://github.com/deepinsight/insightface.git
313
def process_dataset_for_video(path, is_mpi=False): # add some content for specified dataset(h5) f = h5py.File(path, "a") imagenames = [name.decode() for name in f['imagename'][:]] seqnames = ['/'.join(name.split('/')[:-1]) for name in imagenames] if is_mpi: indices_in_seq_ref = [int(name.split('/')[-1].split('.')[0].split('_')[1]) for name in imagenames] # reset indices indices_in_seq = [] i = 0 last_seqname = None for index, seqname in zip(indices_in_seq_ref, seqnames): if last_seqname is not None and seqname != last_seqname: i = 0 last_seqname = seqname indices_in_seq.append(i) i += 1 # indices_in_seq = [i for i, index in enumerate(indices_in_seq)] else: indices_in_seq = [int(name.split('/')[-1]) for name in imagenames] f['index_in_seq'] = indi
24
395
process_dataset_for_video
12
0
1
3
tests/orion/api/test_flows.py
54,948
Use status constants instead of hardcoded values Closes: PrefectHQ/orion#1673
prefect
13
Python
12
test_flows.py
async def test_read_flow_by_name_returns_404_if_does_not_exist(self, client): response = await client.get(f"/flows/{uuid4()}") assert response.status_code == status.HTTP_404_NOT_FOUND
37549d157007f6eef07ed8b1e2e14efb73134840
25
https://github.com/PrefectHQ/prefect.git
25
async def test_read_flow_by_name_returns_404_if_does_not_exist(self, client): response = await client.get(f"/flows/{uuid4()}") assert response.status_code == status.HTTP_404_NOT_FOUND
9
49
test_read_flow_by_name_returns_404_if_does_not_exist
534
1
46
117
dask/array/einsumfuncs.py
156,932
Removed unused loop control variables (`B007`) (#9458) Co-authored-by: James Bourbeau <jrbourbeau@gmail.com>
dask
19
Python
224
einsumfuncs.py
def parse_einsum_input(operands): if len(operands) == 0: raise ValueError("No input operands") if isinstance(operands[0], basestring): subscripts = operands[0].replace(" ", "") operands = [asarray(o) for o in operands[1:]] # Ensure all characters are valid for s in subscripts: if s in ".,->": continue if s not in einsum_symbols_set: raise ValueError("Character %s is not a valid symbol." % s) else: tmp_operands = list(operands) operand_list = [] subscript_list = [] for _ in range(len(operands) // 2): operand_list.append(tmp_operands.pop(0)) subscript_list.append(tmp_operands.pop(0)) output_list = tmp_operands[-1] if len(tmp_operands) else None operands = [asarray(v) for v in operand_list] subscripts = "" last = len(subscript_list) - 1 for num, sub in enumerate(subscript_list): for s in sub: if s is Ellipsis: subscripts += "..." elif isinstance(s, int): subscripts += einsum_symbols[s] else: raise TypeError( "For this input type lists must contain " "either int or Ellipsis" ) if num != last: subscripts += "," if output_list is not None: subscripts += "->" for s in output_list: if s is Ellipsis: subscripts += "..." elif isinstance(s, int): subscripts += einsum_symbols[s] else: raise TypeError( "For this input type lists must contain " "either int or Ellipsis" ) # Check for proper "->" if ("-" in subscripts) or (">" in subscripts): invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1) if invalid or (subscripts.count("->") != 1): raise ValueError("Subscripts can only contain one '->'.") # Parse ellipses if "." in subscripts: used = subscripts.replace(".", "").replace(",", "").replace("->", "") unused = list(einsum_symbols_set - set(used)) ellipse_inds = "".join(unused) longest = 0 if "->" in subscripts: input_tmp, output_sub = subscripts.split("->") split_subscripts = input_tmp.split(",") out_sub = True else: split_subscripts = subscripts.split(",") out_sub = False for num, sub in enumerate(split_subscripts): if "." in sub: if (sub.count(".") != 3) or (sub.count("...") != 1): raise ValueError("Invalid Ellipses.") # Take into account numerical values if operands[num].shape == (): ellipse_count = 0 else: ellipse_count = max(operands[num].ndim, 1) ellipse_count -= len(sub) - 3 if ellipse_count > longest: longest = ellipse_count if ellipse_count < 0: raise ValueError("Ellipses lengths do not match.") elif ellipse_count == 0: split_subscripts[num] = sub.replace("...", "") else: rep_inds = ellipse_inds[-ellipse_count:] split_subscripts[num] = sub.replace("...", rep_inds) subscripts = ",".join(split_subscripts) if longest == 0: out_ellipse = "" else: out_ellipse = ellipse_inds[-longest:] if out_sub: subscripts += "->" + output_sub.replace("...", out_ellipse) else: # Special care for outputless ellipses output_subscript = "" tmp_subscripts = subscripts.replace(",", "") for s in sorted(set(tmp_subscripts)): if s not in einsum_symbols_set: raise ValueError("Character %s is not a valid symbol." % s) if tmp_subscripts.count(s) == 1: output_subscript += s normal_inds = "".join(sorted(set(output_subscript) - set(out_ellipse))) subscripts += "->" + out_ellipse + normal_inds # Build output string if does not exist if "->" in subscripts: input_subscripts, output_subscript = subscripts.split("->") else: input_subscripts = subscripts # Build output subscripts tmp_subscripts = subscripts.replace(",", "") output_subscript = "" for s in sorted(set(tmp_subscripts)): if s not in einsum_symbols_set: raise ValueError("Character %s is not a valid symbol." % s) if tmp_subscripts.count(s) == 1: output_subscript += s # Make sure output subscripts are in the input for char in output_subscript: if char not in input_subscripts: raise ValueError("Output character %s did not appear in the input" % char) # Make sure number operands is equivalent to the number of terms if len(input_subscripts.split(",")) != len(operands): raise ValueError( "Number of einsum subscripts must be equal to the number of operands." ) return (input_subscripts, output_subscript, operands) @derived_from(np)
b016998fa931f644df4d266a3ed5e7604c20d2a9
@derived_from(np)
785
https://github.com/dask/dask.git
1,923
def parse_einsum_input(operands): if len(operands) == 0: raise ValueError("No input operands") if isinstance(operands[0], basestring): subscripts = operands[0].replace(" ", "") operands = [asarray(o) for o in operands[1:]] # Ensure all characters are valid for s in subscripts: if s in ".,->": continue if s not in einsum_symbols_set: raise ValueError("Character %s is not a valid symbol." % s) else: tmp_operands = list(operands) operand_list = [] subscript_list = [] for _ in range(len(operands) // 2): operand_list.append(tmp_operands.pop(0)) subscript_list.append(tmp_operands.pop(0)) output_list = tmp_operands[-1] if len(tmp_operands) else None operands = [asarray(v) for v in operand_list] subscripts = "" last = len(subscript_list) - 1 for num, sub in enumerate(subscript_list): for s in sub: if s is Ellipsis: subscripts += "..." elif isinstance(s, int): subscripts += einsum_symbols[s] else: raise TypeError( "For this input type lists must contain " "either int or Ellipsis" ) if num != last: subscripts += "," if output_list is not None: subscripts += "->" for s in output_list: if s is Ellipsis: subscripts += "..." elif isinstance(s, int): subscripts += einsum_symbols[s] else: raise TypeError( "For this input type lists must contain " "either int or Ellipsis" ) # Check for proper "->" if ("-" in subscripts) or (">" in subscripts): invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1) if invalid or (subscripts.count("->") != 1): raise ValueError("Subscripts can only contain one '->'.") # Parse ellipses if "." in subscripts: used = subscripts.replace(".", "").replace(",", "").replace("->", "") unused = list(einsum_symbols_set - set(used)) ellipse_inds = "".join(unused) longest = 0 if "->" in subscripts: input_tmp, output_sub = subscripts.split("->") split_subscripts = input_tmp.split(",") out_sub = True else: split_subscripts = subscripts.split(",") out_sub = False for num, sub in enumerate(split_subscripts): if "." in sub: if (sub.count(".") != 3) or (sub.count("...") != 1): raise ValueError("Invalid Ellipses.") # Take into account numerical values if operands[num].shape == (): ellipse_count = 0 else: ellipse_count = max(operands[num].ndim, 1) ellipse_count -= len(sub) - 3 if ellipse_count > longest: longest = ellipse_count if ellipse_count < 0: raise ValueError("Ellipses lengths do not match.") elif ellipse_count == 0: split_subscripts[num] = sub.replace("...", "") else: rep_inds = ellipse_inds[-ellipse_count:] split_subscripts[num] = sub.replace("...", rep_inds) subscripts = ",".join(split_subscripts) if longest == 0: out_ellipse = "" else: out_ellipse = ellipse_inds[-longest:] if out_sub: subscripts += "->" + output_sub.replace("...", out_ellipse) else: # Special care for outputless ellipses output_subscript = "" tmp_subscripts = subscripts.replace(",", "") for s in sorted(set(tmp_subscripts)): if s not in einsum_symbols_set: raise ValueError("Character %s is not a valid symbol." % s) if tmp_subscripts.count(s) == 1:
57
1,377
parse_einsum_input
21
0
1
4
python/ray/serve/tests/test_application.py
147,078
[serve] Implement `serve.run()` and `Application` (#23157) These changes expose `Application` as a public API. They also introduce a new public method, `serve.run()`, which allows users to deploy their `Applications` or `DeploymentNodes`. Additionally, the Serve CLI's `run` command and Serve's REST API are updated to use `Applications` and `serve.run()`. Co-authored-by: Edward Oakes <ed.nmi.oakes@gmail.com>
ray
8
Python
18
test_application.py
def test_basic_run(self, serve_instance): deployments = [self.f, self.g, self.C, self.D] responses = ["f reached", "g reached", "C reached", "D reached"] self.deploy_and_check_responses(deployments, responses)
aaf47b2493beb985bfbc52dbdf1f52fc48377d74
46
https://github.com/ray-project/ray.git
49
def test_basic_run(self, serve_instance): deployments = [self.f, self.g, self.C, self.D] responses = ["f reached", "g reached", "C reached", "D reached"]
10
75
test_basic_run
24
0
1
8
keras/saving/experimental/saving_lib_test.py
280,445
Keras Model saving - Use GFile handle for python zipfile when loading and saving model. PiperOrigin-RevId: 486753122
keras
12
Python
19
saving_lib_test.py
def test_load_model_api_endpoint(self): temp_filepath = Path(os.path.join(self.get_temp_dir(), "mymodel.keras")) model = self._get_functional_model() ref_input = np.random.random((10, 32)) ref_output = model.predict(ref_input) model.save(temp_filepath, save_format="keras_v3") model = keras.models.load_model(temp_filepath) self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)
0d37837d448a3b9202d5c4c9928ef40940578719
92
https://github.com/keras-team/keras.git
72
def test_load_model_api_endpoint(self): temp_filepath = Path(os.path.join(self.get_temp_dir(), "mymodel.keras")) model = self._get_functiona
22
145
test_load_model_api_endpoint
141
0
6
40
homeassistant/components/minecraft_server/__init__.py
294,148
Add World Message/MOTD support for MinecraftServer Integration (#66297)
core
14
Python
79
__init__.py
async def _async_status_request(self) -> None: try: status_response = await self._hass.async_add_executor_job( self._mc_status.status, self._MAX_RETRIES_STATUS ) # Got answer to request, update properties. self.version = status_response.version.name self.protocol_version = status_response.version.protocol self.players_online = status_response.players.online self.players_max = status_response.players.max self.latency_time = status_response.latency self.motd = (status_response.description).get("text") self.players_list = [] if status_response.players.sample is not None: for player in status_response.players.sample: self.players_list.append(player.name) self.players_list.sort() # Inform user once about successful update if necessary. if self._last_status_request_failed: _LOGGER.info( "Updating the properties of '%s:%s' succeeded again", self.host, self.port, ) self._last_status_request_failed = False except OSError as error: # No answer to request, set all properties to unknown. self.version = None self.protocol_version = None self.players_online = None self.players_max = None self.latency_time = None self.players_list = None self.motd = None # Inform user once about failed update if necessary. if not self._last_status_request_failed: _LOGGER.warning( "Updating the properties of '%s:%s' failed - OSError: %s", self.host, self.port, error, ) self._last_status_request_failed = True
43772b3fa9db00d146292854ee3b52392a29dd37
221
https://github.com/home-assistant/core.git
694
async def _async_status_request(self) -> None: try: status_response = await self._hass.async_add_executor_job( self._mc_status.status, self._MAX_RETRIES_STATUS ) # Got answer to request, update properties. self.version = status_response.version.name self.protocol_version = status_response.version.protocol self.players_online = status_response.players.online self.players_max = status_response.players.max self.latency_time = status_response.latency self.motd = (status_response.description).get("text") self.players_list = [] if status_response.players.sample is not None: for player in status_response.players.sample: self.players_list.append(player.name) self.players_list.sort() # Inform user once about successful update if necessary. if self._last_status_request_failed: _LOGGER.info( "Updating the properties of '%s:%s' succeeded again", self.host, self.port, ) self._last_status_request_failed = False except OSError as error: # No answer to request, set all properties to unknown. self.version = None self.protocol_version = None self.players_online = None self.players_max = None self.latency_time = None self.players_list = None self.motd = None # Inform user once about failed update if necessary. if not self._last_status_request_failed: _LOGGER.warning( "Updating the properties of '%s:%s' failed - OSError: %s", self.host, self.port, error, )
35
358
_async_status_request
30
1
1
10
tests/admin_views/tests.py
207,846
Refs #33476 -- Reformatted code with Black.
django
11
Python
28
tests.py
def test_redirect_on_add_view_continue_button(self): response = self.client.post( reverse("admin:admin_views_modelwithstringprimarykey_add"), { "string_pk": "123/history", "_continue": "1", # Save and continue editing }, ) self.assertEqual(response.status_code, 302) # temporary redirect self.assertIn("/123_2Fhistory/", response.headers["location"]) # PK is quoted @override_settings(ROOT_URLCONF="admin_views.urls")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
@override_settings(ROOT_URLCONF="admin_views.urls")
54
https://github.com/django/django.git
130
def test_redirect_on_add_view_continue_button(self): response = self.client.post( reverse("admin:admin_views_modelwithstringprimarykey_add"), { "string_pk": "123/history", "_continue": "1", # Save and continue editing }, ) self.assertEqual(response.status_code, 302) # temporary redirect self.assertIn("/123_2Fhistory/", response.headers["locatio
12
114
test_redirect_on_add_view_continue_button
16
0
2
4
albumentations/augmentations/transforms.py
225,721
Move common functions to utils.py (#1260) * Move common functions into util.py * Fix mypy errors
albumentations
10
Python
16
transforms.py
def apply(self, image, **params): if not is_rgb_image(image): raise TypeError("ToSepia transformation expects 3-channel images.") return F.linear_transformation_rgb(image, self.sepia_transformation_matrix)
c3cb70a39473ed8a8601758f0cd3e67c6e1d076c
33
https://github.com/albumentations-team/albumentations.git
40
def apply(self, image, **params): if not is_rgb_image(image): raise TypeError("ToSepia t
9
53
apply
36
0
2
24
rllib/agents/dqn/tests/test_apex_dqn.py
140,038
[RLlib] APEX-DQN and R2D2 config objects. (#25067)
ray
17
Python
31
test_apex_dqn.py
def test_apex_zero_workers(self): config = ( apex.ApexConfig() .rollouts(num_rollout_workers=0) .resources(num_gpus=0) .training( replay_buffer_config={ "learning_starts": 1000, }, optimizer={ "num_replay_buffer_shards": 1, }, ) .reporting( min_sample_timesteps_per_reporting=100, min_time_s_per_reporting=1, ) ) for _ in framework_iterator(config): trainer = config.build(env="CartPole-v0") results = trainer.train() check_train_results(results) print(results) trainer.stop()
ec89fe52033e8087d0ca4e5bb9331863d0bb3a52
100
https://github.com/ray-project/ray.git
316
def test_apex_zero_workers(self): config = ( apex.ApexConfig() .rollouts(num_rollout_workers=0) .resources(num_gpus=0) .training( replay_buffer_config={ "learning_starts": 1000, }, optimizer={ "num_replay_buffer_shards": 1, }, ) .reporting( min_sample_timesteps_per_reporting=100, min_time_s_per_reporting=1, )
25
163
test_apex_zero_workers
44
0
3
17
homeassistant/components/onkyo/media_player.py
291,473
Use _attr in onkyo media player (#82832)
core
12
Python
33
media_player.py
def _parse_audio_information(self, audio_information_raw): values = _parse_onkyo_payload(audio_information_raw) if values is False: self._audio_info_supported = False return if values: info = { "format": _tuple_get(values, 1), "input_frequency": _tuple_get(values, 2), "input_channels": _tuple_get(values, 3), "listening_mode": _tuple_get(values, 4), "output_channels": _tuple_get(values, 5), "output_frequency": _tuple_get(values, 6), } self._attr_extra_state_attributes[ATTR_AUDIO_INFORMATION] = info else: self._attr_extra_state_attributes.pop(ATTR_AUDIO_INFORMATION, None)
19abba7f6ba24fe746889e33c5364702a62946bf
105
https://github.com/home-assistant/core.git
227
def _parse_audio_information(self, audio_information_raw): values = _parse_onkyo_payload(audio_information_raw) if values is False: self._audio_info_supported = False return if values: info = { "format": _tuple_get(values, 1), "input_frequency": _tuple_get(values, 2), "i
11
166
_parse_audio_information
85
0
1
38
tests/jobs/test_scheduler_job.py
47,808
Fix TI failure handling when task cannot be unmapped. (#23119) At first glance this looks like a lot of un-related changed, but it is all related to handling errors in unmapping: - Ensure that SimpleTaskInstance (and thus the Zombie callback) knows about map_index, and simplify the code for SimpleTaskInstance -- no need for properties, just attributes works. - Be able to create a TaskFail from a TI, not a Task. This is so that we can create the TaskFail with the mapped task so we can delay unmapping the task in TI.handle_failure as long as possible. - Change email_alert and get_email_subject_content to take the task so we can pass the unmapped Task around.
airflow
11
Python
57
test_scheduler_job.py
def test_process_executor_events(self, mock_stats_incr, mock_task_callback, dag_maker): dag_id = "test_process_executor_events" task_id_1 = 'dummy_task' session = settings.Session() with dag_maker(dag_id=dag_id, fileloc='/test_path1/'): task1 = EmptyOperator(task_id=task_id_1) ti1 = dag_maker.create_dagrun().get_task_instance(task1.task_id) mock_stats_incr.reset_mock() executor = MockExecutor(do_update=False) task_callback = mock.MagicMock() mock_task_callback.return_value = task_callback self.scheduler_job = SchedulerJob(executor=executor) self.scheduler_job.processor_agent = mock.MagicMock() ti1.state = State.QUEUED session.merge(ti1) session.commit() executor.event_buffer[ti1.key] = State.FAILED, None self.scheduler_job._process_executor_events(session=session) ti1.refresh_from_db(session=session) assert ti1.state == State.FAILED self.scheduler_job.executor.callback_sink.send.assert_not_called() self.scheduler_job.processor_agent.reset_mock() # ti in success state ti1.state = State.SUCCESS session.merge(ti1) session.commit() executor.event_buffer[ti1.key] = State.SUCCESS, None self.scheduler_job._process_executor_events(session=session) ti1.refresh_from_db(session=session) assert ti1.state == State.SUCCESS self.scheduler_job.executor.callback_sink.send.assert_not_called() mock_stats_incr.assert_has_calls( [ mock.call('scheduler.tasks.killed_externally'), mock.call('operator_failures_EmptyOperator'), mock.call('ti_failures'), ], any_order=True, )
91b82763c5c17e8ab021f2d4f2a5681ea90adf6b
288
https://github.com/apache/airflow.git
390
def test_process_executor_events(self, mock_stats_incr, mock_task_callback, dag_maker): dag_id = "test_process_executor_events" task_id_1 = 'dummy_task' session = settings.Session() with dag_maker(dag_id=dag_id, fileloc='/test_path1/'): task1 = EmptyOperator(task_id=task_id_1) ti1 = dag_maker.create_dagrun().get_task_instance(task1.task_id) mock_stats_incr.reset_mock() executor = MockExecutor(do_update=False) task_callback = mock.MagicMock() mock_task_callback.return_value = task_callback self.scheduler_job = SchedulerJob(executor=executor) self.scheduler_job.processor_agent = mock.MagicMock() ti1.state = State.QUEUED session.merge(ti1) session.commit() executor.event_buffer[ti1.key] = State.FAILED, None self.scheduler_job._process_executor_events(session=session) ti1.refresh_from_db(session=session) assert ti1.state == State.FAILED self.scheduler_job.executor.callback_sink.send.assert_not_called() self.scheduler_job.processor_agent.reset_mock() # ti in success state ti1.state = State.SUCCESS session.merge(ti1) session.commit() executor.event_
45
471
test_process_executor_events
101
0
1
47
tests/snuba/api/endpoints/test_organization_events_mep.py
94,529
feat(mep): For the transaction column treat unparam/null the same (#37678) * feat(mep): For the transaction column treat unparam/null the same - This causes the query builder to transform the transaction tag so that the values `<< unparameterized >>` and null (or empty value) both become `<< unparameterized >>` so that null never shows up - This causes `!has:transaction` to raise a 400, since there won't ever be a result - TODO: probably should move this logic to transactions later - This causes `has:transaction` to be ignored since "all transactions" will have a transaction name now * fix: Check type before accessing properties * ref: Move txn specific stuff out of the builder
sentry
11
Python
56
test_organization_events_mep.py
def test_has_transaction(self): self.store_transaction_metric( 1, tags={}, timestamp=self.min_ago, ) self.store_transaction_metric( 100, tags={"transaction": "foo_transaction"}, timestamp=self.min_ago, ) query = { "project": [self.project.id], "orderby": "p50(transaction.duration)", "field": [ "transaction", "p50(transaction.duration)", ], "query": "has:transaction", "statsPeriod": "24h", "dataset": "metricsEnhanced", "per_page": 50, } response = self.do_request(query) assert response.status_code == 200, response.content assert len(response.data["data"]) == 2 data = response.data["data"] meta = response.data["meta"] assert data[0]["transaction"] == "<< unparameterized >>" assert data[0]["p50(transaction.duration)"] == 1 assert data[1]["transaction"] == "foo_transaction" assert data[1]["p50(transaction.duration)"] == 100 assert meta["isMetricsData"] query = { "project": [self.project.id], "orderby": "p50(transaction.duration)", "field": [ "transaction", "p50(transaction.duration)", ], "query": "!has:transaction", "statsPeriod": "24h", "dataset": "metricsEnhanced", "per_page": 50, } response = self.do_request(query) assert response.status_code == 400, response.content
23d8888328564e6a86d1bfe0c36aea6f6f084f6a
239
https://github.com/getsentry/sentry.git
542
def test_has_transaction(self): self.store_transaction_metric( 1, tags={}, timestamp=self.min_ago, ) self.store_transaction_metric( 100, tags={"transaction": "foo_transaction"}, timestamp=self.min_ago, ) query = { "project": [self.project.id], "orderby": "p50(transaction.duration)", "field": [ "transaction", "p50(transaction.duration)", ], "query": "has:transaction", "statsPeriod": "24h", "dataset": "metricsEnhanced", "per_page": 50, } response = self.do_request(query) assert response.status_code == 200, response.content assert len(response.data["data"]) == 2 data = response.data["data"] meta = response.data["meta"] assert data[0]["transaction"] == "<< unparameterized >>" assert data[0]["p50(transaction.duration)"] == 1 assert data[1]["transaction"] == "foo_transaction" assert data[1]["p50(transaction.duration)"] == 100 assert meta["isMetricsData"] query = { "project": [self.project.id], "orderby": "p50(transaction.duration)", "field": [ "transaction", "p50(transaction.duration)", ], "query": "!has:transaction", "statsPeriod": "24h", "dataset": "metricsEnhanced", "per_page": 50, } response = self.do_request(query) assert response.status_code == 4
16
414
test_has_transaction
12
1
1
2
test/lib/ansible_test/_internal/completion.py
266,478
ansible-test - Defer loading of completion entries. (#76852) * ansible-test - Defer loading of completion entries. This avoids a traceback when running ansible-test outside of a supported directory.
ansible
8
Python
12
completion.py
def windows_completion(): # type: () -> t.Dict[str, WindowsRemoteCompletionConfig] return load_completion('windows', WindowsRemoteCompletionConfig) @cache
e9ffcf3c85f2fa40a20ee03bd9c1ce7296574cd1
@cache
12
https://github.com/ansible/ansible.git
18
def windows_completion(): # type: () -> t.Dict[str, WindowsRemoteCompletionConfig] return load_completion('windows'
4
29
windows_completion
89
0
1
2
celery/concurrency/asynpool.py
208,404
[pre-commit.ci] pre-commit autoupdate (#7927) * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/asottile/pyupgrade: v3.2.2 → v3.3.1](https://github.com/asottile/pyupgrade/compare/v3.2.2...v3.3.1) - [github.com/PyCQA/flake8: 5.0.4 → 6.0.0](https://github.com/PyCQA/flake8/compare/5.0.4...6.0.0) - [github.com/pre-commit/pre-commit-hooks: v4.3.0 → v4.4.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.3.0...v4.4.0) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
celery
9
Python
73
asynpool.py
def unpack_from(fmt, iobuf, unpack=unpack): return unpack(fmt, iobuf.getvalue()) # <-- BytesIO __all__ = ('AsynPool',) logger = get_logger(__name__) error, debug = logger.error, logger.debug UNAVAIL = frozenset({errno.EAGAIN, errno.EINTR}) #: Constant sent by child process when started (ready to accept work) WORKER_UP = 15 #: A process must've started before this timeout (in secs.) expires. PROC_ALIVE_TIMEOUT = 4.0 SCHED_STRATEGY_FCFS = 1 SCHED_STRATEGY_FAIR = 4 SCHED_STRATEGIES = { None: SCHED_STRATEGY_FAIR, 'default': SCHED_STRATEGY_FAIR, 'fast': SCHED_STRATEGY_FCFS, 'fcfs': SCHED_STRATEGY_FCFS, 'fair': SCHED_STRATEGY_FAIR, } SCHED_STRATEGY_TO_NAME = {v: k for k, v in SCHED_STRATEGIES.items()} Ack = namedtuple('Ack', ('id', 'fd', 'payload'))
ae73d5d777feefb4044bc37bbe618cad242202f8
22
https://github.com/celery/celery.git
97
def unpack_from(fmt, iobuf, unpack=unpack): return unpack(fmt, iobuf.getvalue()) # <-- BytesIO __all__ = ('AsynPool',) logger = get_logger(__name__) error, debug = logger.error, logger.debug UNAVAIL = frozenset({errno.EAGAIN, errno.EINTR}) #: Constant sent by child process when started (ready to accept work) WORKER_UP = 15 #: A process must've started before this timeout (in secs.) expires. PROC_ALIVE_TIMEOUT = 4.0 SCHED_STRATEGY_FCFS = 1 SCHED_STRATEGY_FAIR = 4 SCHED_STRATEGIES = { None: SCHED_STRATEGY_FAIR, 'default': SCHED_STRATEGY_FAIR, 'fast': SCHED_STRATEGY_FCFS, 'fcfs': SCHED_STRATEGY_FCFS, 'fair
27
208
unpack_from
13
0
1
3
lib/ansible/cli/doc.py
267,129
expand ansible-doc coverage (#74963) * Expand ansible-doc to tests/filters and fix existing issues enable filter/test docs if in single file or companion yaml add docs for several filters/tests plugins allow .yml companion for docs for other plugins, must be colocated verify plugins are valid (not modules, cannot) fix 'per collection' filtering limit old style deprecation (_ prefix) to builtin/legacy start move to pathlib for saner path handling moved some funcitons, kept backwards compat shims with deprecation notice Co-authored-by: Abhijeet Kasurde <akasurde@redhat.com> Co-authored-by: Felix Fontein <felix@fontein.de> Co-authored-by: Sandra McCann <samccann@redhat.com>
ansible
9
Python
13
doc.py
def add_collection_plugins(plugin_list, plugin_type, coll_filter=None): display.deprecated("add_collection_plugins method, use ansible.plugins.list functions instead.", version='2.17') plugin_list.update(list_plugins(plugin_type, coll_filter))
b439e41a915ccec0ccbabecc966919ea406db74e
32
https://github.com/ansible/ansible.git
18
def add_collection_plugins(plugin_list, plugin_type, coll_filter=None): display.deprecated("add_collection_plugins method, use ansible.plugins.list functions ins
9
52
add_collection_plugins
172
0
1
114
zerver/tests/test_realm.py
84,512
bulk_create: Add users to system user groups in bulk_create_users. This commit modifies bulk_create_users to add the users to the respective system groups. And due to this change, now bots in development environment are also added to system groups. Tests are changed accordingly as more UserGroupMembeship objects are created.
zulip
12
Python
53
test_realm.py
def test_changing_waiting_period_updates_system_groups(self) -> None: realm = get_realm("zulip") members_system_group = UserGroup.objects.get( realm=realm, name="@role:members", is_system_group=True ) full_members_system_group = UserGroup.objects.get( realm=realm, name="@role:fullmembers", is_system_group=True ) self.assert_length(UserGroupMembership.objects.filter(user_group=members_system_group), 10) self.assert_length( UserGroupMembership.objects.filter(user_group=full_members_system_group), 10 ) self.assertEqual(realm.waiting_period_threshold, 0) hamlet = self.example_user("hamlet") othello = self.example_user("othello") prospero = self.example_user("prospero") self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=hamlet ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=othello ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=prospero ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=full_members_system_group, user_profile=hamlet ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=full_members_system_group, user_profile=othello ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=full_members_system_group, user_profile=prospero ).exists() ) hamlet.date_joined = timezone_now() - timedelta(days=50) hamlet.save() othello.date_joined = timezone_now() - timedelta(days=75) othello.save() prospero.date_joined = timezone_now() - timedelta(days=150) prospero.save() do_set_realm_property(realm, "waiting_period_threshold", 100, acting_user=None) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=hamlet ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=othello ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=prospero ).exists() ) self.assertFalse( UserGroupMembership.objects.filter( user_group=full_members_system_group, user_profile=hamlet ).exists() ) self.assertFalse( UserGroupMembership.objects.filter( user_group=full_members_system_group, user_profile=othello ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=full_members_system_group, user_profile=prospero ).exists() ) do_set_realm_property(realm, "waiting_period_threshold", 70, acting_user=None) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=hamlet ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=othello ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=prospero ).exists() ) self.assertFalse( UserGroupMembership.objects.filter( user_group=full_members_system_group, user_profile=hamlet ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=full_members_system_group, user_profile=othello ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=full_members_system_group, user_profile=prospero ).exists() )
150f77aea2002aa4f9c174a8e73c9a3b83b71e34
616
https://github.com/zulip/zulip.git
1,262
def test_changing_waiting_period_updates_system_groups(self) -> None: realm = get_realm("zulip") members_system_group = UserGroup.objects.get( realm=realm, name="@role:members", is_system_group=True ) full_members_system_group = UserGroup.objects.get( realm=realm, name="@role:fullmembers", is_system_group=True ) self.assert_length(UserGroupMembership.objects.filter(user_group=members_system_group), 10) self.assert_length( UserGroupMembership.objects.filter(user_group=full_members_system_group), 10 ) self.assertEqual(realm.waiting_period_threshold, 0) hamlet = self.example_user("hamlet") othello = self.example_user("othello") prospero = self.example_user("prospero") self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=hamlet ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=othello ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=prospero ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=full_members_system_group, user_profile=hamlet ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=full_members_system_group, user_profile=othello ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=full_members_system_group, user_profile=prospero ).exists() ) hamlet.date_joined = timezone_now() - timedelta(days=50) hamlet.save() othello.date_joined = timezone_now() - timedelta(days=75) othello.save() prospero.date_joined = timezone_now() - timedelta(days=150) prospero.save() do_set_realm_property(realm, "waiting_period_threshold", 100, acting_user=None) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=hamlet ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=othello ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=prospero ).exists() ) self.assertFalse( UserGroupMembership.objects.filter( user_group=full_members_system_group, user_profile=hamlet ).exists() ) self.assertFalse( UserGroupMembership.objects.filter( user_group=full_members_system_group, user_profile=othello ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=full_members_system_group, user_profile=prospero ).exists() ) do_set_realm_property(realm, "waiting_period_threshold", 70, acting_user=None) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=hamlet ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_profile=othello ).exists() ) self.assertTrue( UserGroupMembership.objects.filter( user_group=members_system_group, user_pr
32
978
test_changing_waiting_period_updates_system_groups
113
0
4
29
tools/analysis_tools/analyze_results.py
245,750
[Refactor]: update analyze_results.py (#8430) * update analyze_results.py * working in progress * fix panoptic config bug * update * update * Support panoptic_seg visualization * fix base config * recover config * update misc.py * fix bug * update * update * update * support new dataflow * update * update * update doc str * update
mmdetection
17
Python
84
analyze_results.py
def detection_evaluate(self, dataset, results, topk=20, eval_fn=None): if eval_fn is None: eval_fn = bbox_map_eval else: assert callable(eval_fn) prog_bar = ProgressBar(len(results)) _mAPs = {} data_info = {} for i, (result, ) in enumerate(zip(results)): # self.dataset[i] should not call directly # because there is a risk of mismatch data_info = dataset.prepare_data(i) data_info['bboxes'] = data_info['gt_bboxes'].tensor data_info['labels'] = data_info['gt_bboxes_labels'] pred = result['pred_instances'] pred_bboxes = pred['bboxes'].cpu().numpy() pred_scores = pred['scores'].cpu().numpy() pred_labels = pred['labels'].cpu().numpy() dets = [] for label in range(len(dataset.metainfo['CLASSES'])): index = np.where(pred_labels == label)[0] pred_bbox_scores = np.hstack( [pred_bboxes[index], pred_scores[index].reshape((-1, 1))]) dets.append(pred_bbox_scores) mAP = eval_fn(dets, data_info) _mAPs[i] = mAP prog_bar.update() # descending select topk image _mAPs = list(sorted(_mAPs.items(), key=lambda kv: kv[1])) good_mAPs = _mAPs[-topk:] bad_mAPs = _mAPs[:topk] return good_mAPs, bad_mAPs
d18ec258093098f92f9ef30266f9ddd2acebf864
270
https://github.com/open-mmlab/mmdetection.git
437
def detection_evaluate(self, dataset, results, topk=20, eval_fn=None): if eval_fn is None: eval_fn = bbox_map_eval else: assert callable(eval_fn) prog_bar = ProgressBar(len(results)) _mAPs = {} data_info = {} for i, (result, ) in enumerate(zip(results)): # self.dataset[i] should not call directly # because there is a risk of mismatch data_info = dataset.prepare_data(i) data_info['bboxes'] = data_info['gt_bboxes'].tensor data_info['labels'] = data_info['gt_bboxes_labels'] pred = result['pred_instances'] pred_bboxes = pred['bboxes'].cpu().numpy
45
444
detection_evaluate
34
1
2
9
bootloader/waflib/Tools/qt5.py
263,609
Bootloader: Building: Unpack waf's lib archive. Doing so makes it easier to modify. This is a temporary measure until the next waf version is released (although I'm tempted to keep it since it's much more IDE completion friendly).
pyinstaller
13
Python
29
qt5.py
def process_mocs(self): lst = self.to_nodes(getattr(self, 'moc', [])) self.source = self.to_list(getattr(self, 'source', [])) for x in lst: prefix = x.name[:x.name.rfind('.')] moc_target = 'moc_%s.%d.cpp' % (prefix, self.idx) moc_node = x.parent.find_or_declare(moc_target) self.source.append(moc_node) self.create_task('moc', x, moc_node) @feature('qt5') @after_method('apply_link')
64ccb7aea824fbec57f7ed1bbe483ec486183c13
@feature('qt5') @after_method('apply_link')
99
https://github.com/pyinstaller/pyinstaller.git
75
def process_mocs(self): lst = self.to_nodes(getattr(self, 'moc', [])) self.source = self.to_list(getattr(self, 'source', [])) for x in lst: prefix = x.name[:x.name.rfind('.')] moc_target = 'moc_%s.%d.cpp
20
182
process_mocs
17
0
2
4
python3.10.4/Lib/bdb.py
221,112
add python 3.10.4 for windows
XX-Net
9
Python
15
bdb.py
def user_call(self, frame, args): name = frame.f_code.co_name if not name: name = '???' print('+++ call', name, args)
8198943edd73a363c266633e1aa5b2a9e9c9f526
31
https://github.com/XX-net/XX-Net.git
37
def user_call(self, frame, args): name = frame.f_code.co_name if not nam
8
51
user_call
21
0
1
2
python3.10.4/Lib/glob.py
217,585
add python 3.10.4 for windows
XX-Net
7
Python
20
glob.py
def glob1(dirname, pattern): return _glob1(dirname, pattern, None, False) # This helper function recursively yields relative pathnames inside a literal # directory.
8198943edd73a363c266633e1aa5b2a9e9c9f526
18
https://github.com/XX-net/XX-Net.git
21
def glob1(dirname, pattern): return _glob1(dirname, pattern, None, False) # This helper function recursively yields rel
4
27
glob1
21
0
2
7
python/ray/serve/controller.py
126,563
[Serve] Enable lightweight config update (#27000)
ray
11
Python
18
controller.py
def get_app_config(self) -> Dict: checkpoint = self.kv_store.get(CONFIG_CHECKPOINT_KEY) if checkpoint is None: return ServeApplicationSchema.get_empty_schema_dict() else: _, config, _ = pickle.loads(checkpoint) return config
286343601aa60c8a4222e954388a9055fbe59e90
44
https://github.com/ray-project/ray.git
74
def get_app_config(self) -> Dict: checkpoint = self.kv_store.get(CONFIG_CHECKPOINT_KEY) if checkpoint is None:
13
71
get_app_config
25
0
13
60
yt_dlp/extractor/pladform.py
162,475
[Pladform] Fix redirection to external player (#2550) Authored by: KiberInfinity
yt-dlp
12
Python
21
pladform.py
def _real_extract(self, url): video_id = self._match_id(url) qs = parse_qs(url) pl = qs.get('pl', ['1'])[0] video = self._download_xml( 'http://out.pladform.ru/getVideo', video_id, query={ 'pl': pl, 'videoid': video_id, }, fatal=False)
f7d48541312f1dafbac4fae639cf3a06df776abc
398
https://github.com/yt-dlp/yt-dlp.git
104
def _real_extract(self, url): video_id = self._match_id(url) qs = parse_qs(url) pl = qs.get('pl', ['1'])[0] video = self._download_xml( 'http://out.pladform.ru/getVideo', video_id, query={ 'pl': pl,
13
103
_real_extract
77
0
6
26
pipenv/patched/pip/_vendor/requests/sessions.py
22,113
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
pipenv
12
Python
57
sessions.py
def prepare_request(self, request): cookies = request.cookies or {} # Bootstrap CookieJar. if not isinstance(cookies, cookielib.CookieJar): cookies = cookiejar_from_dict(cookies) # Merge with session cookies merged_cookies = merge_cookies( merge_cookies(RequestsCookieJar(), self.cookies), cookies ) # Set environment's basic authentication if not explicitly set. auth = request.auth if self.trust_env and not auth and not self.auth: auth = get_netrc_auth(request.url) p = PreparedRequest() p.prepare( method=request.method.upper(), url=request.url, files=request.files, data=request.data, json=request.json, headers=merge_setting( request.headers, self.headers, dict_class=CaseInsensitiveDict ), params=merge_setting(request.params, self.params), auth=merge_setting(auth, self.auth), cookies=merged_cookies, hooks=merge_hooks(request.hooks, self.hooks), ) return p
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
180
https://github.com/pypa/pipenv.git
344
def prepare_request(self, request): cookies = request.cookies or {} # Bootstrap CookieJar. if not isinstance(cookies, cookielib.CookieJar): cookies = cookiejar_from_dict(cookies) # Merge with session cookies merge
30
273
prepare_request
22
0
1
10
pandas/tests/reshape/merge/test_join.py
169,601
CLN/TST: Use fixture instead of setup_method (#49004)
pandas
11
Python
17
test_join.py
def test_handle_overlap_arbitrary_key(self, df, df2): joined = merge( df, df2, left_on="key2", right_on="key1", suffixes=(".foo", ".bar"), ) assert "key1.foo" in joined assert "key2.bar" in joined
fd8e3e773887c0ba9721406b3034494fff2c2567
42
https://github.com/pandas-dev/pandas.git
104
def test_handle_overlap_arbitrary_key(self, df, df2): joined = merge(
9
70
test_handle_overlap_arbitrary_key
113
0
2
10
homeassistant/components/homekit_controller/config_flow.py
311,432
Remove deprecated helper functions from homekit_controller pairing flow (#65270)
core
11
Python
75
config_flow.py
async def _entry_from_accessory(self, pairing): # The bulk of the pairing record is stored on the config entry. # A specific exception is the 'accessories' key. This is more # volatile. We do cache it, but not against the config entry. # So copy the pairing data and mutate the copy. pairing_data = pairing.pairing_data.copy() # Use the accessories data from the pairing operation if it is # available. Otherwise request a fresh copy from the API. # This removes the 'accessories' key from pairing_data at # the same time. if not (accessories := pairing_data.pop("accessories", None)): accessories = await pairing.list_accessories_and_characteristics() parsed = Accessories.from_list(accessories) accessory_info = parsed.aid(1).services.first( service_type=ServicesTypes.ACCESSORY_INFORMATION ) name = accessory_info.value(CharacteristicsTypes.NAME, "") return self.async_create_entry(title=name, data=pairing_data)
cc94af2872945667d80f8f76512260ae6205d739
92
https://github.com/home-assistant/core.git
247
async def _entry_from_accessory(self, pairing): # The bulk of the pairing record is stored on the config entry. # A specific exception is the 'accessories' key. This is more # volatile. We do cache it, but not against the config entry. # So copy the pairing data and mutate the copy. pairing_data = pairing.pairing_data.copy() # Use the accessories data from the pairing operation if it is # available. Otherwise request a fresh copy from the API. # This removes the 'accessories' key from pairing_data at # the same time. if not (accessories := pairing_data.pop("accessories", None)): accessories = await pairing.list_accessories_and_characteristics() parsed = Accessories.from_list(accessories) accessory_info = parsed.aid(1).services.first( service_type=ServicesTypes.ACCESSORY_INFORMATION ) name = accessory_info.value(CharacteristicsTypes.NAME, "") return self.async_create_entry(title=name, data=pairing_data)
25
160
_entry_from_accessory
173
0
21
42
airflow/cli/commands/db_command.py
46,681
Consistent DB upgrade/downgrade arguments (#22537)

This is a follow up to #22102, and be forewarned, this might be a bikeshed. If this gets contentious at all, I'll just close it and move on.

I think it's a little bit easier for users to have consistent flags/arguments for the `airflow db upgrade` and `airflow db downgrade` commands. This PR just tweaks the argument processing to expect `--to-revision` and `--to-version` instead of `--revision` and `--version`, respectively. That change makes the arguments to those commands more consistent with the `--from-revision` and `--from-version` arguments. Doing so also avoids overloading the `--version` flag, which is usually a flag that prints out the version information of the command itself (eg: Airflow's version, which is available via `airflow version`).

An argument against this change is that the `--to-...` arguments can be understood to be implied, like this:

```bash
airflow db upgrade --from-version 10.15.8  # Upgrade from 10.15.8 to the current Airflow version
```

and this means that you do not necessarily need to always specify the `--to-...` arguments. By having both `--to-` and `--from-` arguments, users might think that they always need to specify both a `--to-` and `--from-` argument.

I also fixed an unrelated grammar typo, which corrects the grammar used to log the operation.
airflow
15
Python
114
db_command.py
def downgrade(args):
    if args.to_revision and args.to_version:
        raise SystemExit("Cannot supply both `--to-revision` and `--to-version`.")
    if args.from_version and args.from_revision:
        raise SystemExit("`--from-revision` may not be combined with `--from-version`")
    if (args.from_revision or args.from_version) and not args.show_sql_only:
        raise SystemExit(
            "Args `--from-revision` and `--from-version` may only be used with `--show-sql-only`"
        )
    if not (args.to_version or args.to_revision):
        raise SystemExit("Must provide either --to-revision or --to-version.")
    from_revision = None
    if args.from_revision:
        from_revision = args.from_revision
    elif args.from_version:
        from_revision = REVISION_HEADS_MAP.get(args.from_version)
        if not from_revision:
            raise SystemExit(f"Unknown version {args.from_version!r} supplied as `--from-version`.")
    if args.to_version:
        to_revision = REVISION_HEADS_MAP.get(args.to_version)
        if not to_revision:
            raise SystemExit(f"Downgrading to version {args.to_version} is not supported.")
    elif args.to_revision:
        to_revision = args.to_revision
    if not args.show_sql_only:
        print("Performing downgrade with database " + repr(settings.engine.url))
    else:
        print("Generating sql for downgrade -- downgrade commands will *not* be submitted.")

    if args.show_sql_only or (
        args.yes
        or input(
            "\nWarning: About to reverse schema migrations for the airflow metastore. "
            "Please ensure you have backed up your database before any upgrade or "
            "downgrade operation. Proceed? (y/n)\n"
        ).upper()
        == "Y"
    ):
        db.downgrade(to_revision=to_revision, from_revision=from_revision, show_sql_only=args.show_sql_only)
        if not args.show_sql_only:
            print("Downgrade complete")
    else:
        raise SystemExit("Cancelled")
60d90896486cc3d9f1fc0029ca9833c7d561caa4
229
https://github.com/apache/airflow.git
435
def downgrade(args): if args.to_revision and args.to_version: raise SystemExit("Cannot supply both `--to-revision` and `--to-version`.") if args.from_version and args.from_revision: raise SystemExit("`--from-revision` may not be combined with `--from-version`") if (args.from_revision or args.from_version) and not args.show_sql_only: raise SystemExit( "Args `--from-revision` and `--from-version` may only be used with `--show-sql-only`" ) if not (args.to_v
19
412
downgrade
24
1
1
10
tests/components/subaru/test_config_flow.py
294,967
Add 2FA support for Subaru integration setup (#68753) * Add 2FA support for Subaru integration setup * Update config flow to abort with 2FA request fail
core
14
Python
24
test_config_flow.py
async def two_factor_verify_form(hass, two_factor_start_form):
    with patch(
        MOCK_API_2FA_REQUEST,
        return_value=True,
    ), patch(MOCK_API_2FA_CONTACTS, new_callable=PropertyMock) as mock_contacts:
        mock_contacts.return_value = MOCK_2FA_CONTACTS
        return await hass.config_entries.flow.async_configure(
            two_factor_start_form["flow_id"],
            user_input={config_flow.CONF_CONTACT_METHOD: "email@addr.com"},
        )


@pytest.fixture
ab0abdc988ac101217ba043909c4be8b33101ab3
@pytest.fixture
61
https://github.com/home-assistant/core.git
89
async def two_factor_verify_form(hass, two_factor_start_form): with patch( MOCK_API_2FA_REQUEST,
19
108
two_factor_verify_form
12
1
1
3
label_studio/tasks/models.py
177,839
fix: DEV-2372: Delete action doesn't decrease total annotations counter (#2354) * fix: DEV-2372: Delete action doesn't decrease total annotations counter * Update test_api_tasks.py * Fix negative total annotations * Update models.py
label-studio
13
Python
12
models.py
def remove_predictions_from_project(sender, instance, **kwargs):
    instance.task.total_predictions = instance.task.predictions.all().count() - 1
    instance.task.save(update_fields=['total_predictions'])


@receiver(post_save, sender=Prediction)
323578fd2e49def3df2b3d7b7a9fc9af0132d592
@receiver(post_save, sender=Prediction)
44
https://github.com/heartexlabs/label-studio.git
20
def remove_predictions_from_project(sender, instance, **kwargs): instance.task.total_predic
14
88
remove_predictions_from_project
14
0
1
8
tests/providers/amazon/aws/operators/test_ecs.py
43,626
Standardize AWS ECS naming (#20332) * Rename ECS Hook and Operator
airflow
12
Python
13
test_ecs.py
def set_up_log_fetcher(self, logger_mock):
    self.logger_mock = logger_mock

    self.log_fetcher = EcsTaskLogFetcher(
        log_group="test_log_group",
        log_stream_name="test_log_stream_name",
        fetch_interval=timedelta(milliseconds=1),
        logger=logger_mock,
    )
9c0ba1b6abc593bad6fe51ed52d9c0963cd09b7c
40
https://github.com/apache/airflow.git
78
def set_up_log_fetcher(self, logger_mock): self.logger_mock = logger_mock self.log_fetcher = EcsTaskLogFetcher( log_group="test_log_group", log_stream_name="test_log_stream_name", fetch_interval=timedelta(milliseconds=1), logger=logger_mock, )
11
63
set_up_log_fetcher
163
0
14
58
src/textual/app.py
183,139
fix broken align and error logic
textual
18
Python
108
app.py
async def process_messages(self) -> None:
    active_app.set(self)

    log("---")
    log(f"driver={self.driver_class}")

    if os.getenv("TEXTUAL_DEVTOOLS") == "1":
        try:
            await self.devtools.connect()
            self.log(f"Connected to devtools ({self.devtools.url})")
        except DevtoolsConnectionError:
            self.log(f"Couldn't connect to devtools ({self.devtools.url})")

    try:
        if self.css_file is not None:
            self.stylesheet.read(self.css_file)
            self.stylesheet.parse()
        if self.css is not None:
            self.stylesheet.add_source(
                self.css, path=f"<{self.__class__.__name__}>"
            )
    except Exception as error:
        self.on_exception(error)
        self._print_error_renderables()
        return

    if self.css_monitor:
        self.set_interval(0.5, self.css_monitor, name="css monitor")
        self.log("started", self.css_monitor)

    self._running = True
    try:
        load_event = events.Load(sender=self)
        await self.dispatch_message(load_event)

        # Wait for the load event to be processed, so we don't go in to application mode beforehand
        # await load_event.wait()

        driver = self._driver = self.driver_class(self.console, self)
        driver.start_application_mode()
        try:
            mount_event = events.Mount(sender=self)
            await self.dispatch_message(mount_event)

            # TODO: don't override `self.console` here
            self.console = Console(file=sys.__stdout__)
            self.title = self._title
            self.refresh()
            await self.animator.start()

            with redirect_stdout(StdoutRedirector(self.devtools, self._log_file)):  # type: ignore
                await super().process_messages()
            await self.animator.stop()
            await self.close_all()
        finally:
            driver.stop_application_mode()
    except Exception as error:
        self.on_exception(error)
    finally:
        self._running = False
        if self._exit_renderables:
            self._print_error_renderables()
        if self.devtools.is_connected:
            await self._disconnect_devtools()
            if self._log_console is not None:
                self._log_console.print(
                    f"Disconnected from devtools ({self.devtools.url})"
                )
        if self._log_file is not None:
            self._log_file.close()
191a6b7775a7ca3cde794eef96ebd86fac4fb455
370
https://github.com/Textualize/textual.git
919
async def process_messages(self) -> None: active_app.set(self) log("---") log(f"driver={self.driver_class}") if os.getenv("TEXTUAL_DEVTOOLS") == "1": try: await self.devtools.connect() self.log(f"Connected to devtools ({self.devtools.url})") except DevtoolsConnectionError: self.log(f"Couldn't connect to devtools ({self.devtools.url})") try: if self.css_file is not None: self.stylesheet.read(self.css_file) self.stylesheet.parse() if self.css is not None: self.stylesheet.add_source( self.css, path=f"<{self.__class__.__name__}>" ) except Exception as error: self.on_exception(error) self._print_error_renderables() return if self.css_monitor: self.set_interval(0.5, self.css_monitor, name="css monitor") self.log("started", self.css_monitor) self._running = True try: load_event = events.Load(sender=self)
62
672
process_messages
19
0
1
7
pandas/tests/io/pytables/test_read.py
164,085
TST: Remove unused fixtures (#45692) * TST: Remove unused fixtures * Undo a removed fixture * Add back other fixtures * Undo a file * Try undoing this? * Revert "Try undoing this?" This reverts commit 0e56cb04f5e8cb1f7b2ac4c5e6191485bb2fe1ab.
pandas
13
Python
19
test_read.py
def test_pytables_native2_read(datapath):
    with ensure_clean_store(
        datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r"
    ) as store:
        str(store)
        d1 = store["detector"]
        assert isinstance(d1, DataFrame)
f46df091df3afea25a273f491d1f6b2c7d20b32c
43
https://github.com/pandas-dev/pandas.git
52
def test_pytables_native2_read(datapath): with ensure
9
79
test_pytables_native2_read
93
0
7
47
wagtail/admin/views/pages/edit.py
72,484
Reformat with black
wagtail
20
Python
75
edit.py
def get_context_data(self, **kwargs):
    context = super().get_context_data(**kwargs)
    context.update(
        {
            "page": self.page,
            "page_for_status": self.page_for_status,
            "content_type": self.page_content_type,
            "edit_handler": self.edit_handler,
            "errors_debug": self.errors_debug,
            "action_menu": PageActionMenu(
                self.request, view="edit", page=self.page
            ),
            "preview_modes": self.page.preview_modes,
            "form": self.form,
            "next": self.next_url,
            "has_unsaved_changes": self.has_unsaved_changes,
            "page_locked": self.page_perms.page_locked(),
            "workflow_state": self.workflow_state
            if self.workflow_state and self.workflow_state.is_active
            else None,
            "current_task_state": self.page.current_workflow_task_state,
            "publishing_will_cancel_workflow": self.workflow_tasks
            and getattr(settings, "WAGTAIL_WORKFLOW_CANCEL_ON_PUBLISH", True),
            "locale": None,
            "translations": [],
        }
    )

    if getattr(settings, "WAGTAIL_I18N_ENABLED", False):
        user_perms = UserPagePermissionsProxy(self.request.user)

        context.update(
            {
                "locale": self.page.locale,
                "translations": [
                    {
                        "locale": translation.locale,
                        "url": reverse(
                            "wagtailadmin_pages:edit", args=[translation.id]
                        ),
                    }
                    for translation in self.page.get_translations()
                    .only("id", "locale", "depth")
                    .select_related("locale")
                    if user_perms.for_page(translation).can_edit()
                ],
            }
        )

    return context
d10f15e55806c6944827d801cd9c2d53f5da4186
261
https://github.com/wagtail/wagtail.git
838
def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update( { "page": self.page, "page_for_status": self.page_for_status, "content_type": self.page_content_type, "edit_handler": self.edit_handler, "errors_debug": self.errors_debug, "action_menu": PageActionMenu( self.request, view="edit", page=self.page ), "preview_modes": self.page.preview_modes, "form": self.form, "next": self.next_url, "has_unsaved_changes": self.has_unsaved_changes, "page_locked": self.page_perms.page_locked(), "workflow_state": self.workflow_state if self.workflow_state and self.workflow_state.is_active else None, "current_task_state": self.page.current_workflow_task_state, "publishing_will_cancel_workflow": self.workflow_tasks and getattr(settings, "WAGTAIL_WORKFLOW_CANCEL_ON_PUBLISH", True), "locale": None, "translations": [], } ) if getattr(settings, "WAGTAIL_I18N_ENABLED", False): user_perms = UserPagePermissionsProxy(self.request.user) context.update( { "locale": self.page.locale, "translations": [ { "locale": translation.locale, "url": reverse( "wagtailadmin_pages:edit", args=[translation.id] ), } for translation in self.page.get_translations() .only("id", "locale", "depth") .select_related("locale") if user_perms.for_page(translation).can_edit()
39
432
get_context_data
11
0
1
4
tests/pytests/functional/modules/test_vault.py
216,021
Add tests and changelog
salt
10
Python
9
test_vault.py
def test_vault_read_secret_issue_61084(sys_mod):
    result = sys_mod.argspec("vault.read_secret")
    assert isinstance(result, dict)
    assert isinstance(result.get("vault.read_secret"), dict)
121c61c832a58874acf5ad55c7eb20c598995dff
33
https://github.com/saltstack/salt.git
23
def test_vault_read_secret_issue_61084(sys_mod): res
7
58
test_vault_read_secret_issue_61084
217
0
6
43
rllib/examples/simulators/sumo/marlenvironment.py
137,963
[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). (#28369)
ray
15
Python
136
marlenvironment.py
def step(self, action_dict):
    self.resetted = False
    self.steps += 1
    logger.debug(
        "====> [SUMOTestMultiAgentEnv:step] Episode: %d - Step: %d <====",
        self.episodes,
        self.steps,
    )
    dones = {}
    dones["__all__"] = False

    shuffled_agents = sorted(
        action_dict.keys()
    )  # it may seem not smar to sort something that
    # may need to be shuffled afterwards, but it
    # is a matter of consistency instead of using
    # whatever insertion order was used in the dict
    if self._config["scenario_config"]["agent_rnd_order"]:
        # randomize the agent order to minimize SUMO's
        # insertion queues impact
        logger.debug("Shuffling the order of the agents.")
        self.rndgen.shuffle(shuffled_agents)  # in-place shuffle

    # Take action
    for agent in shuffled_agents:
        self.agents[agent].step(action_dict[agent], self.simulation)

    logger.debug("Before SUMO")
    ongoing_simulation = self.simulation.step(
        until_end=False, agents=set(action_dict.keys())
    )
    logger.debug("After SUMO")

    # end of the episode
    if not ongoing_simulation:
        logger.info("Reached the end of the SUMO simulation.")
        dones["__all__"] = True

    obs, rewards, infos = {}, {}, {}

    for agent in action_dict:
        # check for collisions
        if self.simulation.collisions[agent] > 0:
            # punish the agent and remove it from the simulation
            dones[agent] = True
            obs[agent] = [0, 0]
            rewards[agent] = -self.agents[agent].config["max_speed"]
            # infos[agent] = "Collision"
            self.simulation.traci_handler.remove(agent, reason=tc.REMOVE_VAPORIZED)
        else:
            dones[agent] = agent not in self.simulation.veh_subscriptions
            obs[agent] = self.get_observation(agent)
            rewards[agent] = self.get_reward(agent)
            # infos[agent] = ""

    logger.debug("Observations: %s", pformat(obs))
    logger.debug("Rewards: %s", pformat(rewards))
    logger.debug("Dones: %s", pformat(dones))
    logger.debug("Info: %s", pformat(infos))
    logger.debug("========================================================")
    return obs, rewards, dones, dones, infos

    ###########################################################################
    # ACTIONS & OBSERATIONS SPACE
8e680c483ce326cefc62e44f68ab1a6948b1c3d2
329
https://github.com/ray-project/ray.git
743
def step(self, action_dict): self.resetted = False self.steps += 1 logger.debug( "====> [SUMOTestMultiAgentEnv:step] Episode: %d - Step: %d <====", self.episodes, self.steps, ) dones = {} dones["__all__"] = False shuffled_agents = sorted( action_dict.keys() ) # it may seem not smar to sort something that # may need to be shuffled afterwards, but it # is a matter of consistency instead of using # whatever insertion order was used in the dict if self._config["scenario_config"]["agent_rnd_order"]: # randomize the agent order to minimize SUMO's # insertion queues impact logger.debug("Shuffling the order of the agents.") self.rndgen.shuffle(shuffled_agents) # in-place shuffle # Take action for agent in shuffled_agents: self.agents[agent].step(action_dict[agent], self.simulation) logger.debug("Before SUMO") ongoing_simulation = self.simulation.step( until_end=False, agents=set(action_dict.keys()) ) logger.debug("After SUMO") # end of the episode
36
549
step
17
0
1
7
packages/syft/src/syft/core/tensor/autodp/phi_tensor.py
1,077
Renamed entities -> data subject, NDEPT -> phi tensor
PySyft
10
Python
13
phi_tensor.py
def __neg__(self) -> PhiTensor:
    return PhiTensor(
        child=self.child * -1,
        min_vals=self.max_vals * -1,
        max_vals=self.min_vals * -1,
        data_subjects=self.entities,
    )
44fa2242416c7131fef4f00db19c5ca36af031dc
44
https://github.com/OpenMined/PySyft.git
74
def __neg__(self) -> PhiTensor: return PhiTensor( child=self.child * -1, min_vals=sel
8
66
__neg__
22
0
3
8
lib/matplotlib/backends/backend_wx.py
107,321
Ensure that all toolbar (old/new) subclasses can be init'ed consistently i.e. with the same signature: with `canvas` as sole arg for the old-style toolbars, with `toolmanager` as sole arg for the new ones. Subclasses that explicitly support setting a parent widget keep that support (except for gtk, which stashed that in the `.win` attribute but never used it), but that argument is always optional now; the default is the canvas' parent. The goal is to later replace all _get_toolbar implementations by a simple call (always with the same signature (dependent on the value of rcParams["toolbar"])) to the correct class in the FigureManagerBase constructor.
matplotlib
12
Python
15
backend_wx.py
def _get_toolbar(self):
    if mpl.rcParams['toolbar'] == 'toolbar2':
        toolbar = NavigationToolbar2Wx(self.canvas)
    elif mpl.rcParams['toolbar'] == 'toolmanager':
        toolbar = ToolbarWx(self.toolmanager)
    else:
        toolbar = None
    return toolbar
8cd60178545b70c82a99504c53332e16713a60bd
48
https://github.com/matplotlib/matplotlib.git
82
def _get_toolbar(self): if mpl.rcParams['t
9
85
_get_toolbar
54
0
1
17
networkx/algorithms/operators/tests/test_all.py
177,258
Make all.py generator friendly (#5984) * Make compose_all generator friendly * Make disjoint_union_all and intersection_all generator friendly * Refactor disjoint_union_all to yield relabeled graphs * Make union_all generator friendly * Fix intersection_all * Fix union_all signature * Allow passing an infinite rename generator to union_all * Copy over generalizations to binary.py * Clean up rename * Simplify first_label in disjoint_union_all * Simplify disjoint_union_all * Add missing R.graph.update in intersection_all
networkx
10
Python
37
test_all.py
def test_intersection_all():
    G = nx.Graph()
    H = nx.Graph()
    R = nx.Graph(awesome=True)
    G.add_nodes_from([1, 2, 3, 4])
    G.add_edge(1, 2)
    G.add_edge(2, 3)
    H.add_nodes_from([1, 2, 3, 4])
    H.add_edge(2, 3)
    H.add_edge(3, 4)
    R.add_nodes_from([1, 2, 3, 4])
    R.add_edge(2, 3)
    R.add_edge(4, 1)
    I = nx.intersection_all([G, H, R])
    assert set(I.nodes()) == {1, 2, 3, 4}
    assert sorted(I.edges()) == [(2, 3)]
    assert I.graph["awesome"]
50ff08de69c6e9541cd6c029bede5dabf56cfe73
175
https://github.com/networkx/networkx.git
101
def test_intersection_all(): G = nx.Graph() H = nx.Graph() R = nx.Graph(awesome=True) G.add_nodes_from([1, 2, 3, 4]) G.add_edge(1, 2) G.add_edge(2, 3) H.add_nodes_from([1, 2, 3, 4]) H
16
262
test_intersection_all
26
0
2
34
python/ray/tune/tuner.py
136,173
[Tune] Fix Jupyter output with Ray Client and `Tuner` (#29956) Ensures that we can have rich Jupyter output with the Tuner API. Signed-off-by: Antoni Baum <antoni.baum@protonmail.com>
ray
13
Python
21
tuner.py
def get_results(self) -> ResultGrid:
    if not self._is_ray_client:
        return self._local_tuner.get_results()
    else:
        (
            progress_reporter,
            string_queue,
        ) = self._prepare_remote_tuner_for_jupyter_progress_reporting()
        fit_future = self._remote_tuner.fit.remote()
        _stream_client_output(
            fit_future,
            progress_reporter,
            string_queue,
        )
        return ray.get(fit_future)
993008e1ed8c592b268f0e66dac3260c8a14a893
63
https://github.com/ray-project/ray.git
199
def get_results(self) -> ResultGrid: if not self._is_ray_c
15
102
get_results
51
0
1
18
tests/core/test_configuration.py
45,149
Change the default auth backend to session (#21640) * Change default backend As part of AIP-42, change the default auth backend to validate using the session, so that the UI can use the API. If auth_backends has been set to a non-default value, include the session in the list of backends. * When updating a deprecated config value from env, set it back to env Otherwise this means the config seen by an execed sub-process would be different (and wrong, taking neither the configured env var value, nor the new default, but instead just what is in the config file!) * Remove the chart auth_backends setting Co-authored-by: Ash Berlin-Taylor <ash@apache.org>
airflow
14
Python
45
test_configuration.py
def test_auth_backends_adds_session(self):
    test_conf = AirflowConfigParser(default_config='')
    # Guarantee we have deprecated settings, so we test the deprecation
    # lookup even if we remove this explicit fallback
    test_conf.deprecated_values = {
        'api': {
            'auth_backends': (
                re.compile(r'^airflow\.api\.auth\.backend\.deny_all$|^$'),
                'airflow.api.auth.backend.session',
                '3.0',
            ),
        },
    }
    test_conf.read_dict({'api': {'auth_backends': 'airflow.api.auth.backend.basic_auth'}})

    with pytest.warns(FutureWarning):
        test_conf.validate()

    assert (
        test_conf.get('api', 'auth_backends')
        == 'airflow.api.auth.backend.basic_auth\nairflow.api.auth.backend.session'
    )
de41ccc922b3d1f407719744168bb6822bde9a58
81
https://github.com/apache/airflow.git
271
def test_auth_backends_adds_session(self): test_conf = AirflowConfigParser(default_config='') # Guarantee we have deprecated settings, so we test the deprecation # lookup even if we remove this explicit fallback test_conf.deprecated_values = { 'api': { 'auth_backends': ( re.compile(r'^airflow\.api\.auth\.backend\.deny_all$|^$'), 'airflow.api.auth.backend.session', '3.0', ), }, } test_conf.read_dict({'api': {'auth_backends': 'airflow.api.auth.backend.basic_auth'}}) with pytest.warns(FutureWarning): test_conf.validate() assert ( test_conf.get('api',
14
152
test_auth_backends_adds_session
23
0
2
8
wagtail/api/v2/tests/test_images.py
72,736
Reformat with black
wagtail
15
Python
22
test_images.py
def test_all_fields_then_remove_something(self):
    response = self.get_response(fields="*,-title,-tags")
    content = json.loads(response.content.decode("UTF-8"))

    for image in content["items"]:
        self.assertEqual(set(image.keys()), {"id", "meta", "width", "height"})
        self.assertEqual(
            set(image["meta"].keys()), {"type", "detail_url", "download_url"}
        )
d10f15e55806c6944827d801cd9c2d53f5da4186
85
https://github.com/wagtail/wagtail.git
91
def test_all_fields_then_remove_something(self): response = self.get_response(fields="*,-tit
13
150
test_all_fields_then_remove_something
16
0
4
7
freqtrade/rpc/api_server/webserver.py
151,661
initial revision
freqtrade
10
Python
12
webserver.py
async def _api_shutdown_event(self):
    if ApiServer._message_stream:
        ApiServer._message_stream = None

    if self._ws_queue:
        self._ws_queue = None

    if self._ws_publisher_task:
        self._ws_publisher_task.cancel()
659c8c237f7a7e30ad0929fed448c449a01fb2bf
37
https://github.com/freqtrade/freqtrade.git
69
async def _api_shutdown_event(self):
7
62
_api_shutdown_event
8
0
1
3
src/transformers/pipelines/visual_question_answering.py
31,239
Add Visual Question Answering (VQA) pipeline (#17286) * wip * rebase * all tests pass * rebase * ready for PR * address comments * fix styles * add require_torch to pipeline test * remove remote image to improve CI consistency * address comments; fix tf/flax tests * address comments; fix tf/flax tests * fix tests; add alias * repo consistency tests * Update src/transformers/pipelines/visual_question_answering.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * address comments * Update src/transformers/pipelines/visual_question_answering.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * merge * Update src/transformers/models/auto/modeling_auto.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * merge Co-authored-by: Sijun He <sijunhe@Sijuns-MacBook-Pro.local> Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
transformers
9
Python
7
visual_question_answering.py
def _forward(self, model_inputs):
    model_outputs = self.model(**model_inputs)
    return model_outputs
66336dc18374cdba550759cc923c36217159d4c9
18
https://github.com/huggingface/transformers.git
21
def _forward(self, model_inputs): model_outputs = self.model(**model_inputs) return model_outputs
5
29
_forward
116
0
1
39
saleor/graphql/checkout/tests/mutations/test_checkout_create.py
27,848
Extract tests to separate files for mutations checkout shipping/billing address, checkout create (#10082) * Extract tests to separate files for mutations checkout shipping/billing address, checkout create * Move mutation files to separate directory * Add missing init file
saleor
14
Python
71
test_checkout_create.py
def test_checkout_create(api_client, stock, graphql_address_data, channel_USD):
    variant = stock.product_variant
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
    test_email = "test@example.com"
    shipping_address = graphql_address_data
    variables = {
        "checkoutInput": {
            "channel": channel_USD.slug,
            "lines": [{"quantity": 1, "variantId": variant_id}],
            "email": test_email,
            "shippingAddress": shipping_address,
        }
    }
    assert not Checkout.objects.exists()
    response = api_client.post_graphql(MUTATION_CHECKOUT_CREATE, variables)
    content = get_graphql_content(response)["data"]["checkoutCreate"]

    new_checkout = Checkout.objects.first()
    assert new_checkout is not None
    checkout_data = content["checkout"]
    assert checkout_data["token"] == str(new_checkout.token)
    assert new_checkout.lines.count() == 1
    checkout_line = new_checkout.lines.first()
    assert checkout_line.variant == variant
    assert checkout_line.quantity == 1
    assert new_checkout.shipping_address is not None
    assert new_checkout.shipping_address.first_name == shipping_address["firstName"]
    assert new_checkout.shipping_address.last_name == shipping_address["lastName"]
    assert (
        new_checkout.shipping_address.street_address_1
        == shipping_address["streetAddress1"]
    )
    assert (
        new_checkout.shipping_address.street_address_2
        == shipping_address["streetAddress2"]
    )
    assert new_checkout.shipping_address.postal_code == shipping_address["postalCode"]
    assert new_checkout.shipping_address.country == shipping_address["country"]
    assert new_checkout.shipping_address.city == shipping_address["city"].upper()
    assert not Reservation.objects.exists()
319a64dabf0c9449833797a089ab6a0bf02b1505
268
https://github.com/saleor/saleor.git
289
def test_checkout_create(api_client, stock, graphql_address_data, channel_USD): variant = stock.product_variant variant_id = graphene.Node.to_global_id("ProductVariant", variant.id) test_email = "test@example.com" shipping_address = graphql_address_data variables = { "checkoutInput": { "channel": channel_USD.slug, "lines": [{"quantity": 1, "variantId": variant_id}], "email": test_email, "shippingAddress": shipping_address, } } assert not Checkout.objects.exists() response = api_client.post_graphql(MUTATION_CHECKOUT_CREATE, variables) content = get_graphql_content(response)["data"]["checkoutCreate"] new_checkout = Checkout.objects.first() assert new_checkout is not None checkout_data = content["checkout"] assert checkout_data["token"] == str(new_checkout.token) assert new_checkout.lines.count() == 1 checkout_line = new_checkout.lines.first() assert checkout_line.variant == variant assert checkout_line.quantity == 1 assert new_checkout.shipping_address is not None assert new_checkout.shipping_address.first_name == shipping_address["firstName"] assert new_checkout.
42
444
test_checkout_create
68
0
1
22
tests/admin_inlines/tests.py
207,229
Refs #33476 -- Reformatted code with Black.
django
11
Python
38
tests.py
def test_help_text(self):
    response = self.client.get(reverse("admin:admin_inlines_holder4_add"))
    self.assertContains(
        response, '<div class="help">Awesome stacked help text is awesome.</div>', 4
    )
    self.assertContains(
        response,
        '<img src="/static/admin/img/icon-unknown.svg" '
        'class="help help-tooltip" width="10" height="10" '
        'alt="(Awesome tabular help text is awesome.)" '
        'title="Awesome tabular help text is awesome.">',
        1,
    )
    # ReadOnly fields
    response = self.client.get(reverse("admin:admin_inlines_capofamiglia_add"))
    self.assertContains(
        response,
        '<img src="/static/admin/img/icon-unknown.svg" '
        'class="help help-tooltip" width="10" height="10" '
        'alt="(Help text for ReadOnlyInline)" '
        'title="Help text for ReadOnlyInline">',
        1,
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
70
https://github.com/django/django.git
281
def test_help_text(self): response = self.client.get(reverse("admin:admin_inlines_holder4_add")) self.assertContains( response, '<div class="help">Awesome stacked help text is awesome.</div>', 4 ) self.assertContains( response, '<img src="/static/admin/img/icon-unknown.svg" ' 'class="help help-tooltip" width="10" height="10" ' 'alt="(Awesome tabular help text is awesome.)" ' 'title="Awesome tabular help text is awesome.">', 1, ) # ReadOnly fields response = self.client.get(reverse("admin:admin_inlines_capofamiglia_add")) self.assertContains( response,
7
130
test_help_text
18
0
1
6
awx/api/views/debug.py
81,403
add debug views for task manager(s) implement https://github.com/ansible/awx/issues/12446 in development environment, enable set of views that run the task manager(s). Also introduce a setting that disables any calls to schedule() that do not originate from the debug views when in the development environment. With guards around both if we are in the development environment and the setting, I think we're pretty safe this won't get triggered unintentionally. use MODE to determine if we are in devel env Also, move test for skipping task managers to the tasks file
awx
8
Python
15
debug.py
def get(self, request, format=None):
    data = OrderedDict()
    data['task_manager'] = '/api/debug/task_manager/'
    data['dependency_manager'] = '/api/debug/dependency_manager/'
    data['workflow_manager'] = '/api/debug/workflow_manager/'
    return Response(data)
ad08eafb9a8ed775dc0cf21eb38e443651e11184
40
https://github.com/ansible/awx.git
60
def get(self, request, format=None): data = OrderedDict() data['task_manager'] = '/api/debug/task_manager/' data['dependency_manager'] = '/api/debug/dependency_manager/' data['workflow_manager'] =
7
76
get
106
1
7
15
ivy_tests/test_core/test_general.py
213,831
renamed dev_str arg to dev for all methods.
ivy
14
Python
76
test_general.py
def test_get_num_dims(object_in, dtype, as_tensor, tensor_fn, dev, call):
    # smoke test
    if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    ret = ivy.get_num_dims(tensor_fn(object_in, dtype, dev), as_tensor)
    # type test
    if as_tensor:
        assert ivy.is_array(ret)
    else:
        assert isinstance(ret, int)
        ret = ivy.array(ret)
    # cardinality test
    assert list(ret.shape) == []
    # value test
    assert np.array_equal(ivy.to_numpy(ret), np.asarray(len(np.asarray(object_in).shape), np.int32))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support Union
        return
    if not ivy.wrapped_mode():
        helpers.assert_compilable(ivy.shape)


# minimum
@pytest.mark.parametrize(
    "xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])
@pytest.mark.parametrize(
    "dtype", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
d743336b1f3654cd0315f380f43eed4116997c1d
@pytest.mark.parametrize( "xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])]) @pytest.mark.parametrize( "dtype", ['float32']) @pytest.mark.parametrize( "tensor_fn", [ivy.array, helpers.var_fn])
153
https://github.com/unifyai/ivy.git
205
def test_get_num_dims(object_in, dtype, as_tensor, tensor_fn, dev, call): # smoke test if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call: # mxnet does not support 0-dimensional variables pytest.skip() ret = ivy.get_num_dims(tensor_fn(object_in, dtype, dev), as_tensor) # type test if as_tensor:
32
375
test_get_num_dims
13
0
1
4
modules/image/Image_editing/super_resolution/swinir_l_real_sr_x4/module.py
51,933
add swinir_l_real_sr_x4 (#2076) * git add swinir_l_real_sr_x4 * fix typo * fix typo Co-authored-by: chenjian <chenjian26@baidu.com>
PaddleHub
9
Python
12
module.py
def serving_method(self, image, **kwargs):
    image = base64_to_cv2(image)
    img_output = self.real_sr(image=image, **kwargs)
    return cv2_to_base64(img_output)
2e373966a7fd3119c205350fb14d0b7bfe74185d
35
https://github.com/PaddlePaddle/PaddleHub.git
41
def serving_method(self, image, **kwargs): image = base64_to_cv2(image) img_output = self.real_sr(image=image, **kwargs)
8
57
serving_method