Columns (name: dtype, min–max):
ast_errors: string, lengths 0–3.2k
d_id: int64, 44–121k
id: int64, 70–338k
n_whitespaces: int64, 3–14k
path: string, lengths 8–134
n_words: int64, 4–4.82k
n_identifiers: int64, 1–131
random_cut: string, lengths 16–15.8k
commit_message: string, lengths 2–15.3k
fun_name: string, lengths 1–84
commit_id: string, lengths 40–40
repo: string, lengths 3–28
file_name: string, lengths 5–79
ast_levels: int64, 6–31
nloc: int64, 1–548
url: string, lengths 31–59
complexity: int64, 1–66
token_counts: int64, 6–2.13k
n_ast_errors: int64, 0–28
vocab_size: int64, 4–1.11k
n_ast_nodes: int64, 15–19.2k
language: string, 1 distinct value
documentation: dict
code: string, lengths 101–62.2k
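For orientation, below is a minimal sketch of how a table with this schema could be loaded and iterated with the Hugging Face `datasets` library. The dataset identifier and the existence of a `train` split are assumptions for illustration, not facts taken from this page.

```python
# A minimal sketch, assuming the table is published on the Hugging Face Hub.
# "some-org/python-commit-functions" is a hypothetical placeholder identifier,
# not the real dataset name.
from datasets import load_dataset

ds = load_dataset("some-org/python-commit-functions", split="train")

# Each record pairs a function's source and docstring with commit metadata.
for row in ds.select(range(3)):
    print(row["repo"], row["path"], row["fun_name"], row["commit_id"][:8])
    print(row["documentation"]["docstring"].strip()[:80])
    print(row["code"][:120])
```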
d_id: 97,748 | id: 298,807 | n_whitespaces: 31 | path: homeassistant/components/somfy/climate.py | n_words: 10 | n_identifiers: 9
def hvac_modes(self) -> list[HVACMode]: hvac_state = HVAC_MODES_MAPPING[self._climat
Use climate enums in somfy (#70739)
fun_name: hvac_modes | commit_id: 9342a1b5777a0d0d5d289c7f5b90cf059152d6af | repo: core | file_name: climate.py | ast_levels: 10 | nloc: 8 | url: https://github.com/home-assistant/core.git | complexity: 1 | token_counts: 31 | n_ast_errors: 0 | vocab_size: 10 | n_ast_nodes: 50 | language: Python
{ "docstring": "Return the list of available hvac operation modes.\n\n HEAT and COOL mode are exclusive. End user has to enable a mode manually within the Somfy application.\n So only one mode can be displayed. Auto mode is a scheduler.\n ", "language": "en", "n_whitespaces": 59, "n_words": 38, "vocab_size": 33 }
def hvac_modes(self) -> list[HVACMode]: hvac_state = HVAC_MODES_MAPPING[self._climate.get_hvac_state()] return [HVACMode.AUTO, hvac_state]
d_id: 8,242 | id: 44,348 | n_whitespaces: 175 | path: tests/operators/test_python.py | n_words: 46 | n_identifiers: 17
def _assert_expected_task_states(self, dagrun, expected_states): tis = dagrun.get_task_instances() for ti in tis: try: expected_state = expected_states[ti.task_id] except KeyError: raise ValueError(f"Invalid task id {ti.task_id} found!") else: assert ti.state == expected_state all_downstream_skipp
Add ShortCircuitOperator configurability for respecting downstream trigger rules (#20044) * Add short-circuit mode handling
fun_name: _assert_expected_task_states | commit_id: 1970845c11ef0cfe4b41a8497a212aebc59bc1e2 | repo: airflow | file_name: test_python.py | ast_levels: 15 | nloc: 9 | url: https://github.com/apache/airflow.git | complexity: 3 | token_counts: 49 | n_ast_errors: 0 | vocab_size: 37 | n_ast_nodes: 160 | language: Python
{ "docstring": "Helper function that asserts `TaskInstances` of a given `task_id` are in a given state.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 12 }
def _assert_expected_task_states(self, dagrun, expected_states): tis = dagrun.get_task_instances() for ti in tis: try: expected_state = expected_states[ti.task_id] except KeyError: raise ValueError(f"Invalid task id {ti.task_id} found!") else: assert ti.state == expected_state all_downstream_skipped_states = { "short_circuit": State.SUCCESS, "op1": State.SKIPPED, "op2": State.SKIPPED, } all_success_states = {"short_circuit": State.SUCCESS, "op1": State.SUCCESS, "op2": State.SUCCESS}
ast_errors: @frappe.whitelist() | d_id: 14,508 | id: 67,378 | n_whitespaces: 63 | path: erpnext/selling/page/point_of_sale/point_of_sale.py | n_words: 91 | n_identifiers: 22
def set_customer_info(fieldname, customer, value=""): if fieldname == "loyalty_program": frappe.db.set_value("Customer", customer, "loyalty_program", value) contact = frappe.get_cached_value("Customer", customer, "customer_primary_contact") if not contact: contact = frappe.db.sql( , (customer), as_dict=1, ) contact = contact[0].get("parent") if contact else None if not contact: new_contact = frappe.new_doc("Contact") new_contact.is_primary_contact = 1 new_contact.first_name = customer new_contact.set("links", [{"link_doctype": "Customer", "link_name": customer}]) new_contact.save() contact = new_contact.name frappe.db.set_value("Customer", customer, "customer_primary_contact", contact) contact_doc = frappe.get_doc("Contact", contact) if fieldname == "email_id": contact_doc.se
style: format code with black
fun_name: set_customer_info | commit_id: 494bd9ef78313436f0424b918f200dab8fc7c20b | repo: erpnext | file_name: point_of_sale.py | ast_levels: 14 | nloc: 34 | url: https://github.com/frappe/erpnext.git | complexity: 7 | token_counts: 233 | n_ast_errors: 1 | vocab_size: 57 | n_ast_nodes: 411 | language: Python
{ "docstring": "\n\t\t\tSELECT parent FROM `tabDynamic Link`\n\t\t\tWHERE\n\t\t\t\tparenttype = 'Contact' AND\n\t\t\t\tparentfield = 'links' AND\n\t\t\t\tlink_doctype = 'Customer' AND\n\t\t\t\tlink_name = %s\n\t\t\t", "language": "en", "n_whitespaces": 15, "n_words": 21, "vocab_size": 16 }
def set_customer_info(fieldname, customer, value=""): if fieldname == "loyalty_program": frappe.db.set_value("Customer", customer, "loyalty_program", value) contact = frappe.get_cached_value("Customer", customer, "customer_primary_contact") if not contact: contact = frappe.db.sql( , (customer), as_dict=1, ) contact = contact[0].get("parent") if contact else None if not contact: new_contact = frappe.new_doc("Contact") new_contact.is_primary_contact = 1 new_contact.first_name = customer new_contact.set("links", [{"link_doctype": "Customer", "link_name": customer}]) new_contact.save() contact = new_contact.name frappe.db.set_value("Customer", customer, "customer_primary_contact", contact) contact_doc = frappe.get_doc("Contact", contact) if fieldname == "email_id": contact_doc.set("email_ids", [{"email_id": value, "is_primary": 1}]) frappe.db.set_value("Customer", customer, "email_id", value) elif fieldname == "mobile_no": contact_doc.set("phone_nos", [{"phone": value, "is_primary_mobile_no": 1}]) frappe.db.set_value("Customer", customer, "mobile_no", value) contact_doc.save() @frappe.whitelist()
d_id: 15,725 | id: 71,742 | n_whitespaces: 152 | path: wagtail/admin/tests/pages/test_unpublish_page.py | n_words: 46 | n_identifiers: 19
def test_unpublish_not_include_children_view_post(self): # Post to the unpublish page response = self.client.post( reverse("wagtailadmin_pages:unpublish", args=(self.test_page.id,)), {} ) # Should be redirected to explorer page self.assertRedirects( response, reverse("wagtailadmin_explore", args=(self.root_page.id,)) ) # Check that the page was unpublished self.assertFalse(SimplePage.objects.get(id=self.test_page.id).live) # Check that the descendant pages wer
Reformat with black
fun_name: test_unpublish_not_include_children_view_post | commit_id: d10f15e55806c6944827d801cd9c2d53f5da4186 | repo: wagtail | file_name: test_unpublish_page.py | ast_levels: 14 | nloc: 10 | url: https://github.com/wagtail/wagtail.git | complexity: 1 | token_counts: 118 | n_ast_errors: 0 | vocab_size: 34 | n_ast_nodes: 192 | language: Python
{ "docstring": "\n This posts to the unpublish view and checks that the page was unpublished but its descendants were not\n ", "language": "en", "n_whitespaces": 33, "n_words": 18, "vocab_size": 17 }
def test_unpublish_not_include_children_view_post(self): # Post to the unpublish page response = self.client.post( reverse("wagtailadmin_pages:unpublish", args=(self.test_page.id,)), {} ) # Should be redirected to explorer page self.assertRedirects( response, reverse("wagtailadmin_explore", args=(self.root_page.id,)) ) # Check that the page was unpublished self.assertFalse(SimplePage.objects.get(id=self.test_page.id).live) # Check that the descendant pages were not unpublished self.assertTrue(SimplePage.objects.get(id=self.test_child_page.id).live) self.assertTrue(SimplePage.objects.get(id=self.test_another_child_page.id).live)
d_id: 39,919 | id: 167,015 | n_whitespaces: 368 | path: pandas/io/json/_json.py | n_words: 75 | n_identifiers: 23
def _get_data_from_filepath(self, filepath_or_buffer): # if it is a string but the file does not exist, it might be a JSON string filepath_or_buffer = stringify_path(filepath_or_buffer) if ( not isinstance(filepath_or_buffer, str) or is_url(filepath_or_buffer) or is_fsspec_url(filepath_or_buffer) or file_exists(filepath_or_buffer) ): self.handles = get_handle( filepath_or_buffer, "r", encoding=self.encoding, compression=self.compression, storage_options=self.storage_options, errors=self.encoding_errors, ) filepath_or_buffer = self.handles.handle elif ( isinstance(filepath_or_buffer, str)
Raise `FileNotFoundError` in `read_json` if input looks like file path but file is missing (#46718) * raise FileNotFoundError in _get_data_from_filepath() * update tests test_read_non_existent + test_read_expands_user_home_dir * add changelog entry in doc/source/whatsnew/v1.5.0.rst * use pandas.io.common._compression_to_extension instead of hard-coded extensions * move changelog entry from IO to other API changes * fix ImportError from _compression_to_extension -> _extension_to_compression rename * add test read_json very long file path * remove extra period in extension checking Co-authored-by: Matthew Roeschke <emailformattr@gmail.com>
fun_name: _get_data_from_filepath | commit_id: 67045903306ac4a1cab108177e92df30d99912b4 | repo: pandas | file_name: _json.py | ast_levels: 16 | nloc: 26 | url: https://github.com/pandas-dev/pandas.git | complexity: 9 | token_counts: 130 | n_ast_errors: 0 | vocab_size: 54 | n_ast_nodes: 213 | language: Python
{ "docstring": "\n The function read_json accepts three input types:\n 1. filepath (string-like)\n 2. file-like object (e.g. open file object, StringIO)\n 3. JSON string\n\n This method turns (1) into (2) to simplify the rest of the processing.\n It returns input types (2) and (3) unchanged.\n\n It raises FileNotFoundError if the input is a string ending in\n one of .json, .json.gz, .json.bz2, etc. but no such file exists.\n ", "language": "en", "n_whitespaces": 140, "n_words": 64, "vocab_size": 55 }
def _get_data_from_filepath(self, filepath_or_buffer): # if it is a string but the file does not exist, it might be a JSON string filepath_or_buffer = stringify_path(filepath_or_buffer) if ( not isinstance(filepath_or_buffer, str) or is_url(filepath_or_buffer) or is_fsspec_url(filepath_or_buffer) or file_exists(filepath_or_buffer) ): self.handles = get_handle( filepath_or_buffer, "r", encoding=self.encoding, compression=self.compression, storage_options=self.storage_options, errors=self.encoding_errors, ) filepath_or_buffer = self.handles.handle elif ( isinstance(filepath_or_buffer, str) and filepath_or_buffer.lower().endswith( (".json",) + tuple(f".json{c}" for c in _extension_to_compression) ) and not file_exists(filepath_or_buffer) ): raise FileNotFoundError(f"File {filepath_or_buffer} does not exist") return filepath_or_buffer
d_id: 54,698 | id: 217,283 | n_whitespaces: 101 | path: python3.10.4/Lib/ensurepip/__init__.py | n_words: 58 | n_identifiers: 10
def _run_pip(args, additional_paths=None): # Run the bootstraping in a subprocess to avoid leaking any state that happens # after pip has executed. Particulary, this avoids the case when pip holds onto # the files in *additional_paths*, preventing us to remove them at the end of the
add python 3.10.4 for windows
fun_name: _run_pip | commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526 | repo: XX-Net | file_name: __init__.py | ast_levels: 10 | nloc: 10 | url: https://github.com/XX-net/XX-Net.git | complexity: 1 | token_counts: 38 | n_ast_errors: 0 | vocab_size: 48 | n_ast_nodes: 77 | language: Python
{ "docstring": "\nimport runpy\nimport sys\nsys.path = {additional_paths or []} + sys.path\nsys.argv[1:] = {args}\nrunpy.run_module(\"pip\", run_name=\"__main__\", alter_sys=True)\n", "language": "en", "n_whitespaces": 12, "n_words": 17, "vocab_size": 14 }
def _run_pip(args, additional_paths=None): # Run the bootstraping in a subprocess to avoid leaking any state that happens # after pip has executed. Particulary, this avoids the case when pip holds onto # the files in *additional_paths*, preventing us to remove them at the end of the # invocation. code = f return subprocess.run([sys.executable, '-W', 'ignore::DeprecationWarning', "-c", code], check=True).returncode
d_id: 52,497 | id: 208,740 | n_whitespaces: 62 | path: IPython/lib/tests/test_pretty.py | n_words: 34 | n_identifiers: 11
def test_pprint_heap_allocated_type(): module_name = "xxlimited" if sys.version_info < (3, 10) else "xxlimited_35" expected_output = ( "xxlimited.Null" if sys.version_info < (3, 11) else "xxlimited_35.Null" ) xxlimited = pyt
xxlimited_35 module now has the same name in repr in Py 3.11 See https://github.com/python/cpython/commit/a87c9b538fbfc42883417c4d5e69f1a5922690e3
fun_name: test_pprint_heap_allocated_type | commit_id: d858213d4088237e1481038865bc52ccdd074053 | repo: ipython | file_name: test_pretty.py | ast_levels: 10 | nloc: 8 | url: https://github.com/ipython/ipython.git | complexity: 3 | token_counts: 59 | n_ast_errors: 0 | vocab_size: 24 | n_ast_nodes: 100 | language: Python
{ "docstring": "\n Test that pprint works for heap allocated types.\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
def test_pprint_heap_allocated_type(): module_name = "xxlimited" if sys.version_info < (3, 10) else "xxlimited_35" expected_output = ( "xxlimited.Null" if sys.version_info < (3, 11) else "xxlimited_35.Null" ) xxlimited = pytest.importorskip(module_name) output = pretty.pretty(xxlimited.Null) assert output == expected_output
d_id: 19,226 | id: 95,663 | n_whitespaces: 219 | path: tests/sentry/api/endpoints/test_organization_metrics.py | n_words: 37 | n_identifiers: 14
def test_orderby_percentile_with_many_fields_transactions_unsupported_fields(self): response = self.get_response( self.organization.slug, field=[ "p50(sentry.transactions.measurements.lcp)", "sum(user_misery)", ], statsPeriod="1h", interval="1h", datasource="snuba", groupBy=["project_id", "transaction"], orderBy="p50(sentry.transactions.measurements.lcp)", ) assert response.status_code == 400 assert ( response.json()["detail"] == "Multi-field select order by queries is not supported for metric user_misery" )
feat(metrics): Support multi-field orderby for performance [INGEST-805] (#31162) * feat(metrics): Support metrics multi-field orderby queries Adds support for the performance table to the metrics organization data endpoint
fun_name: test_orderby_percentile_with_many_fields_transactions_unsupported_fields | commit_id: 9af098891a8243d08ee5ab6e51925a082135e3f2 | repo: sentry | file_name: test_organization_metrics.py | ast_levels: 11 | nloc: 18 | url: https://github.com/getsentry/sentry.git | complexity: 1 | token_counts: 71 | n_ast_errors: 0 | vocab_size: 34 | n_ast_nodes: 123 | language: Python
{ "docstring": "\n Test that contains a field in the `select` that is performance related but currently\n not supported should return a 400\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 18 }
def test_orderby_percentile_with_many_fields_transactions_unsupported_fields(self): response = self.get_response( self.organization.slug, field=[ "p50(sentry.transactions.measurements.lcp)", "sum(user_misery)", ], statsPeriod="1h", interval="1h", datasource="snuba", groupBy=["project_id", "transaction"], orderBy="p50(sentry.transactions.measurements.lcp)", ) assert response.status_code == 400 assert ( response.json()["detail"] == "Multi-field select order by queries is not supported for metric user_misery" )
d_id: 78,547 | id: 266,736 | n_whitespaces: 180 | path: test/lib/ansible_test/_internal/commands/integration/__init__.py | n_words: 67 | n_identifiers: 21
def generate_dependency_map(integration_targets): # type: (t.List[IntegrationTarget]) -> t.Dict[str, t.Set[IntegrationTarget]] targets_dict = dict((target.name, target) for target in integration_targets) target_dependencies = analyze_integration_target_dependencies(integration_targets) dependency_map = {} # type: t.Dict[str, t.Set[IntegrationTarget]] invalid_targets = set() for d
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
fun_name: generate_dependency_map | commit_id: a06fa496d3f837cca3c437ab6e9858525633d147 | repo: ansible | file_name: __init__.py | ast_levels: 14 | nloc: 17 | url: https://github.com/ansible/ansible.git | complexity: 7 | token_counts: 115 | n_ast_errors: 0 | vocab_size: 46 | n_ast_nodes: 192 | language: Python
{ "docstring": "Analyze the given list of integration test targets and return a dictionary expressing target names and the targets on which they depend.", "language": "en", "n_whitespaces": 21, "n_words": 22, "vocab_size": 19 }
def generate_dependency_map(integration_targets): # type: (t.List[IntegrationTarget]) -> t.Dict[str, t.Set[IntegrationTarget]] targets_dict = dict((target.name, target) for target in integration_targets) target_dependencies = analyze_integration_target_dependencies(integration_targets) dependency_map = {} # type: t.Dict[str, t.Set[IntegrationTarget]] invalid_targets = set() for dependency, dependents in target_dependencies.items(): dependency_target = targets_dict.get(dependency) if not dependency_target: invalid_targets.add(dependency) continue for dependent in dependents: if dependent not in dependency_map: dependency_map[dependent] = set() dependency_map[dependent].add(dependency_target) if invalid_targets: raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets))) return dependency_map
d_id: 117,945 | id: 321,852 | n_whitespaces: 44 | path: qutebrowser/misc/sql.py | n_words: 12 | n_identifiers: 5
def text(self) -> str: if self.error is None: return
sql: Add *all* primary sqlite result codes For three reasons: - There are only 31 of them, and we don't really expect any more to turn up (last happened in 2013, and we have a test for it happening) - It makes for nicer debug output - It always felt strange to only have a small subset in the enum
fun_name: text | commit_id: ee4d6e0396a6b570f4d5592a9c4c1a9fee1027b6 | repo: qutebrowser | file_name: sql.py | ast_levels: 9 | nloc: 8 | url: https://github.com/qutebrowser/qutebrowser.git | complexity: 2 | token_counts: 28 | n_ast_errors: 0 | vocab_size: 11 | n_ast_nodes: 48 | language: Python
{ "docstring": "Get a short text description of the error.\n\n This is a string suitable to show to the user as error message.\n ", "language": "en", "n_whitespaces": 35, "n_words": 21, "vocab_size": 18 }
def text(self) -> str: if self.error is None: return str(self) return self.error.databaseText()
d_id: 27,710 | id: 124,881 | n_whitespaces: 27 | path: python/ray/serve/tests/fault_tolerance_tests/test_controller_recovery.py | n_words: 18 | n_identifiers: 2
def test_recover_start_from_replica_actor_names(serve_instance): # Test failed to deploy with tot
[Serve][Part2] Migrate the tests to use deployment graph api (#26507)
fun_name: test_recover_start_from_replica_actor_names | commit_id: 09a6e5336ad6ab3c41e4a16e906c778aee2450bc | repo: ray | file_name: test_controller_recovery.py | ast_levels: 6 | nloc: 62 | url: https://github.com/ray-project/ray.git | complexity: 14 | token_counts: 343 | n_ast_errors: 0 | vocab_size: 17 | n_ast_nodes: 15 | language: Python
{ "docstring": "Test controller is able to recover starting -> running replicas from\n actor names.\n ", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
def test_recover_start_from_replica_actor_names(serve_instance): # Test failed to deploy with total of 2 replicas, # but first constructor call fails.
d_id: 113,185 | id: 314,579 | n_whitespaces: 190 | path: homeassistant/components/zha/core/group.py | n_words: 29 | n_identifiers: 25
def associated_entities(self) -> list[dict[str, Any]]: ha_entity_registry = self.device.gateway.ha_entity_registry zha_device_registry = self.device.gateway.devic
Fix mypy issues in zha core modules (#74028) * Fix mypy issues in zha gateway, group and helpers * Cleanup device * Apply suggestion * Raise ValueError * Use hass.config.path
fun_name: associated_entities | commit_id: fb108533580d5f4c326ca970d8e6fd4998cc5593 | repo: core | file_name: group.py | ast_levels: 17 | nloc: 16 | url: https://github.com/home-assistant/core.git | complexity: 3 | token_counts: 107 | n_ast_errors: 0 | vocab_size: 28 | n_ast_nodes: 164 | language: Python
{ "docstring": "Return the list of entities that were derived from this endpoint.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def associated_entities(self) -> list[dict[str, Any]]: ha_entity_registry = self.device.gateway.ha_entity_registry zha_device_registry = self.device.gateway.device_registry return [ GroupEntityReference( ha_entity_registry.async_get(entity_ref.reference_id).name, ha_entity_registry.async_get(entity_ref.reference_id).original_name, entity_ref.reference_id, )._asdict() for entity_ref in zha_device_registry.get(self.device.ieee) if list(entity_ref.cluster_channels.values())[ 0 ].cluster.endpoint.endpoint_id == self.endpoint_id ]
d_id: 54,896 | id: 217,714 | n_whitespaces: 96 | path: python3.10.4/Lib/http/client.py | n_words: 28 | n_identifiers: 11
def getheader(self, name, default=None): if self.headers is None: raise ResponseNotReady() headers = self.headers.get_all(name) or default if isinstance(headers,
add python 3.10.4 for windows
fun_name: getheader | commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526 | repo: XX-Net | file_name: client.py | ast_levels: 11 | nloc: 8 | url: https://github.com/XX-net/XX-Net.git | complexity: 5 | token_counts: 62 | n_ast_errors: 0 | vocab_size: 24 | n_ast_nodes: 103 | language: Python
{ "docstring": "Returns the value of the header matching *name*.\n\n If there are multiple matching headers, the values are\n combined into a single string separated by commas and spaces.\n\n If no matching header is found, returns *default* or None if\n the *default* is not specified.\n\n If the headers are unknown, raises http.client.ResponseNotReady.\n\n ", "language": "en", "n_whitespaces": 92, "n_words": 50, "vocab_size": 37 }
def getheader(self, name, default=None): if self.headers is None: raise ResponseNotReady() headers = self.headers.get_all(name) or default if isinstance(headers, str) or not hasattr(headers, '__iter__'): return headers else: return ', '.join(headers)
d_id: 43,397 | id: 181,609 | n_whitespaces: 461 | path: tests/export_tests.py | n_words: 42 | n_identifiers: 6
def test_generate_pipeline_code_2(): pipeline = [ 'KNeighborsClassifier', [ 'CombineDFs', [ 'GradientBoostingClassifier', 'input_matrix', 38.0, 5, 5, 5, 0.05, 0.5], [ 'CombineDFs', [ 'MinMaxScaler', 'input_matrix' ], ['ZeroCount', [
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
fun_name: test_generate_pipeline_code_2 | commit_id: 388616b6247ca4ea8de4e2f340d6206aee523541 | repo: tpot | file_name: export_tests.py | ast_levels: 12 | nloc: 46 | url: https://github.com/EpistasisLab/tpot.git | complexity: 1 | token_counts: 78 | n_ast_errors: 0 | vocab_size: 27 | n_ast_nodes: 119 | language: Python
{ "docstring": "Assert that generate_pipeline_code() returns the correct code given a specific pipeline with two CombineDFs.make_pipeline(\n make_union(\n StackingEstimator(estimator=GradientBoostingClassifier(learning_rate=38.0, max_depth=5, max_features=5, min_samples_leaf=5, min_samples_split=0.05, n_estimators=0.5)),\n make_union(\n MinMaxScaler(),\n make_pipeline(\n MaxAbsScaler(),\n ZeroCount()\n )\n )\n ),\n KNeighborsClassifier(n_neighbors=18, p=\"uniform\", weights=2)\n)", "language": "en", "n_whitespaces": 124, "n_words": 33, "vocab_size": 30 }
def test_generate_pipeline_code_2(): pipeline = [ 'KNeighborsClassifier', [ 'CombineDFs', [ 'GradientBoostingClassifier', 'input_matrix', 38.0, 5, 5, 5, 0.05, 0.5], [ 'CombineDFs', [ 'MinMaxScaler', 'input_matrix' ], ['ZeroCount', [ 'MaxAbsScaler', 'input_matrix' ] ] ] ], 18, 'uniform', 2 ] expected_code = assert expected_code == generate_pipeline_code(pipeline, tpot_obj.operators)
d_id: 12,400 | id: 61,054 | n_whitespaces: 134 | path: .venv/lib/python3.8/site-packages/pip/_internal/req/req_uninstall.py | n_words: 46 | n_identifiers: 15
def _script_names(dist, script_name, is_gui): # type: (Distribution, str, bool) -> List[str
upd; format
fun_name: _script_names | commit_id: f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | repo: transferlearning | file_name: req_uninstall.py | ast_levels: 14 | nloc: 15 | url: https://github.com/jindongwang/transferlearning.git | complexity: 4 | token_counts: 87 | n_ast_errors: 0 | vocab_size: 32 | n_ast_nodes: 153 | language: Python
{ "docstring": "Create the fully qualified name of the files created by\n {console,gui}_scripts for the given ``dist``.\n Returns the list of file names\n ", "language": "en", "n_whitespaces": 30, "n_words": 21, "vocab_size": 17 }
def _script_names(dist, script_name, is_gui): # type: (Distribution, str, bool) -> List[str] if dist_in_usersite(dist): bin_dir = get_bin_user() else: bin_dir = get_bin_prefix() exe_name = os.path.join(bin_dir, script_name) paths_to_remove = [exe_name] if WINDOWS: paths_to_remove.append(exe_name + '.exe') paths_to_remove.append(exe_name + '.exe.manifest') if is_gui: paths_to_remove.append(exe_name + '-script.pyw') else: paths_to_remove.append(exe_name + '-script.py') return paths_to_remove
d_id: 52,853 | id: 210,061 | n_whitespaces: 36 | path: ppdet/modeling/bbox_utils.py | n_words: 24 | n_identifiers: 7
def bbox_center(boxes): boxes_cx = (boxes[..., 0] + boxes[..., 2]) / 2 boxes_cy = (boxes[..., 1] + box
Add PP-YOLOv3 code (#5281) * [ppyolov3] add ppyolov3 base code * add ppyolov3 s/m/x * modify ema * modify code to convert onnx successfully * support arbitrary shape * update config to use amp default * refine ppyolo_head code * modify reparameter code * refine act layer * adapter pico_head and tood_head code * remove ppyolov3 yaml * fix codestyle Co-authored-by: wangxinxin08 <wangxinxin08@baidu.com>
fun_name: bbox_center | commit_id: ef83ab8a3f7814e9886a7a22c8dcc55f506b6081 | repo: PaddleDetection | file_name: bbox_utils.py | ast_levels: 10 | nloc: 4 | url: https://github.com/PaddlePaddle/PaddleDetection.git | complexity: 1 | token_counts: 60 | n_ast_errors: 0 | vocab_size: 18 | n_ast_nodes: 88 | language: Python
{ "docstring": "Get bbox centers from boxes.\n Args:\n boxes (Tensor): boxes with shape (..., 4), \"xmin, ymin, xmax, ymax\" format.\n Returns:\n Tensor: boxes centers with shape (..., 2), \"cx, cy\" format.\n ", "language": "en", "n_whitespaces": 52, "n_words": 29, "vocab_size": 22 }
def bbox_center(boxes): boxes_cx = (boxes[..., 0] + boxes[..., 2]) / 2 boxes_cy = (boxes[..., 1] + boxes[..., 3]) / 2 return paddle.stack([boxes_cx, boxes_cy], axis=-1)
d_id: 52,968 | id: 210,756 | n_whitespaces: 260 | path: deploy/python/video_action_infer.py | n_words: 58 | n_identifiers: 36
def predict(self, input): input_names = self.predictor.get_input_names() input_tensor = self.predictor.get_input_handle(input_names[0]) output_names = self.predictor.get_output_names() output_tensor = self.predictor.get_output_handle(output_names[0])
Develop branch: add fight action for pphuman (#6160) * add fight for PP-Human * add short_size and target_size for fight recognition * add short_size and target_size for fight_infer * modify code according to the reviews * add the wrong deleted lines` * Update pipeline.py * Update infer_cfg.yml * visualize fight when fight action occur * 乱码修改 * delete useless parmas * delete useless code str2bool
fun_name: predict | commit_id: 67f16ed9cac254612ddb141fcd8a14db3dbfd6d6 | repo: PaddleDetection | file_name: video_action_infer.py | ast_levels: 13 | nloc: 23 | url: https://github.com/PaddlePaddle/PaddleDetection.git | complexity: 2 | token_counts: 193 | n_ast_errors: 0 | vocab_size: 44 | n_ast_nodes: 318 | language: Python
{ "docstring": "\n Args:\n input (str) or (list): video file path or image data list\n Returns:\n results (dict): \n ", "language": "en", "n_whitespaces": 60, "n_words": 15, "vocab_size": 14 }
def predict(self, input): input_names = self.predictor.get_input_names() input_tensor = self.predictor.get_input_handle(input_names[0]) output_names = self.predictor.get_output_names() output_tensor = self.predictor.get_output_handle(output_names[0]) # preprocess self.recognize_times.preprocess_time_s.start() if type(input) == str: inputs = self.preprocess_video(input) else: inputs = self.preprocess_frames(input) self.recognize_times.preprocess_time_s.end() inputs = np.expand_dims( inputs, axis=0).repeat( self.batch_size, axis=0).copy() input_tensor.copy_from_cpu(inputs) # model prediction self.recognize_times.inference_time_s.start() self.predictor.run() self.recognize_times.inference_time_s.end() output = output_tensor.copy_to_cpu() # postprocess self.recognize_times.postprocess_time_s.start() classes, scores = self.postprocess(output) self.recognize_times.postprocess_time_s.end() return classes, scores
d_id: 51,047 | id: 205,257 | n_whitespaces: 469 | path: django/db/migrations/autodetector.py | n_words: 121 | n_identifiers: 25
def deep_deconstruct(self, obj): if isinstance(obj, list):
Refs #33476 -- Reformatted code with Black.
fun_name: deep_deconstruct | commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | repo: django | file_name: autodetector.py | ast_levels: 13 | nloc: 29 | url: https://github.com/django/django.git | complexity: 14 | token_counts: 220 | n_ast_errors: 0 | vocab_size: 72 | n_ast_nodes: 337 | language: Python
{ "docstring": "\n Recursive deconstruction for a field and its arguments.\n Used for full comparison for rename/alter; sometimes a single-level\n deconstruction will not compare correctly.\n ", "language": "en", "n_whitespaces": 51, "n_words": 22, "vocab_size": 18 }
def deep_deconstruct(self, obj): if isinstance(obj, list): return [self.deep_deconstruct(value) for value in obj] elif isinstance(obj, tuple): return tuple(self.deep_deconstruct(value) for value in obj) elif isinstance(obj, dict): return {key: self.deep_deconstruct(value) for key, value in obj.items()} elif isinstance(obj, functools.partial): return ( obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords), ) elif isinstance(obj, COMPILED_REGEX_TYPE): return RegexObject(obj) elif isinstance(obj, type): # If this is a type that implements 'deconstruct' as an instance method, # avoid treating this as being deconstructible itself - see #22951 return obj elif hasattr(obj, "deconstruct"): deconstructed = obj.deconstruct() if isinstance(obj, models.Field): # we have a field which also returns a name deconstructed = deconstructed[1:] path, args, kwargs = deconstructed return ( path, [self.deep_deconstruct(value) for value in args], {key: self.deep_deconstruct(value) for key, value in kwargs.items()}, ) else: return obj
d_id: 71,509 | id: 247,147 | n_whitespaces: 163 | path: tests/util/test_async_helpers.py | n_words: 54 | n_identifiers: 16
def test_cancellation(self): deferred: "Deferred[str]" = Deferred() wrapper_deferred = stop_cancellation(deferred) # Cancel the new `Deferred`. wrapper_deferred.cancel() self.assertTrue(wrapper_deferred.called) self.failureResultOf(wrapper_deferred, CancelledError) self.assertFalse( deferred.called, "Original `Deferre
Add `stop_cancellation` utility function (#12106)
fun_name: test_cancellation | commit_id: 91bc15c772d22fbe814170ab2e0fdbfa50f9c372 | repo: synapse | file_name: test_async_helpers.py | ast_levels: 10 | nloc: 11 | url: https://github.com/matrix-org/synapse.git | complexity: 1 | token_counts: 69 | n_ast_errors: 0 | vocab_size: 46 | n_ast_nodes: 126 | language: Python
{ "docstring": "Test that cancellation of the new `Deferred` leaves the original running.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
def test_cancellation(self): deferred: "Deferred[str]" = Deferred() wrapper_deferred = stop_cancellation(deferred) # Cancel the new `Deferred`. wrapper_deferred.cancel() self.assertTrue(wrapper_deferred.called) self.failureResultOf(wrapper_deferred, CancelledError) self.assertFalse( deferred.called, "Original `Deferred` was unexpectedly cancelled." ) # Now make the inner `Deferred` fail. # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed # in logs. deferred.errback(ValueError("abc")) self.assertIsNone(deferred.result, "`Failure` was not consumed")
d_id: 55,315 | id: 218,447 | n_whitespaces: 74 | path: python3.10.4/Lib/inspect.py | n_words: 43 | n_identifiers: 13
def getgeneratorlocals(generator): if not isgenerator(generator): raise TypeError("{!r} is not a Python generator".format(generator)) frame = getattr(generator, "gi_frame", None) if frame is not None: return generator.gi_frame.f_locals else: return {} # ------------------------------------------------ coroutine introspection CORO_CREATED = 'CORO_CREATED' CORO_RUNNING = 'CORO_RUNNING' CORO_SUSPENDED = 'CORO_SUSPENDED' CORO_CLOSED = 'CORO_CLOSED'
add python 3.10.4 for windows
fun_name: getgeneratorlocals | commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526 | repo: XX-Net | file_name: inspect.py | ast_levels: 12 | nloc: 8 | url: https://github.com/XX-net/XX-Net.git | complexity: 3 | token_counts: 50 | n_ast_errors: 0 | vocab_size: 33 | n_ast_nodes: 115 | language: Python
{ "docstring": "\n Get the mapping of generator local variables to their current values.\n\n A dict is returned, with the keys the local variable names and values the\n bound values.", "language": "en", "n_whitespaces": 36, "n_words": 27, "vocab_size": 22 }
def getgeneratorlocals(generator): if not isgenerator(generator): raise TypeError("{!r} is not a Python generator".format(generator)) frame = getattr(generator, "gi_frame", None) if frame is not None: return generator.gi_frame.f_locals else: return {} # ------------------------------------------------ coroutine introspection CORO_CREATED = 'CORO_CREATED' CORO_RUNNING = 'CORO_RUNNING' CORO_SUSPENDED = 'CORO_SUSPENDED' CORO_CLOSED = 'CORO_CLOSED'
d_id: 11,180 | id: 54,966 | n_whitespaces: 128 | path: tests/orion/api/test_run_history.py | n_words: 44 | n_identifiers: 29
async def test_last_bin_contains_end_date(client, route): response = await client.post( f"/{route}/history", json=dict( history_start=str(dt), history_end=str(dt.add(days=1, minutes=30)), history_interval_seconds=timedelta(days=1).total_seconds(), ), ) assert r
Use status constants instead of hardcoded values Closes: PrefectHQ/orion#1673
fun_name: test_last_bin_contains_end_date | commit_id: 37549d157007f6eef07ed8b1e2e14efb73134840 | repo: prefect | file_name: test_run_history.py | ast_levels: 18 | nloc: 16 | url: https://github.com/PrefectHQ/prefect.git | complexity: 1 | token_counts: 154 | n_ast_errors: 0 | vocab_size: 32 | n_ast_nodes: 240 | language: Python
{ "docstring": "The last bin contains the end date, so its own end could be after the history end", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 14 }
async def test_last_bin_contains_end_date(client, route): response = await client.post( f"/{route}/history", json=dict( history_start=str(dt), history_end=str(dt.add(days=1, minutes=30)), history_interval_seconds=timedelta(days=1).total_seconds(), ), ) assert response.status_code == status.HTTP_200_OK parsed = pydantic.parse_obj_as(List[responses.HistoryResponse], response.json()) assert len(parsed) == 2 assert parsed[0].interval_start == dt assert parsed[0].interval_end == dt.add(days=1) assert parsed[1].interval_start == dt.add(days=1) assert parsed[1].interval_end == dt.add(days=2)
d_id: 27,644 | id: 124,648 | n_whitespaces: 659 | path: python/ray/train/base_trainer.py | n_words: 168 | n_identifiers: 22
def _validate_attributes(self): # Run config if not isinstance(self.run_config, RunConfig): raise ValueError( f"`run_config` should be an instance of `ray.air.RunConfig`, " f"found {type(self.run_config)} with value `{self.run_config}`." ) # Scaling config # Todo: move to ray.air.ScalingConfig if not isinstance(self.scaling_config, dict): raise ValueError( f"`scaling_config` should be an instance of `dict`, " f"found {type(self.scaling_config)} with value `{self.scaling_config}`." ) # Datasets if not isinstance(self.datasets, dict): raise ValueError( f"`datasets` should be a dict mapping from a string to " f"`ray.data.Dataset` objects, " f"found {type(self.datasets)} with value `{self.datasets}`." ) elif any( not isinstance(ds, ray.data.Dataset) and not callable(ds) for ds in self.datasets.values() ): raise ValueError( f"At least one value in the `datasets` dict is not a " f"`ray.data.Dataset`: {self.datasets}" ) # P
[AIR] Fix `ResourceChangingScheduler` not working with AIR (#26307) This PR ensures that the new trial resources set by `ResourceChangingScheduler` are respected by the train loop logic by modifying the scaling config to match. Previously, even though trials had their resources updated, the scaling config was not modified which lead to eg. new workers not being spawned in the `DataParallelTrainer` even though resources were available. In order to accomplish this, `ScalingConfigDataClass` is updated to allow equality comparisons with other `ScalingConfigDataClass`es (using the underlying PGF) and to create a `ScalingConfigDataClass` from a PGF. Please note that this is an internal only change intended to actually make `ResourceChangingScheduler` work. In the future, `ResourceChangingScheduler` should be updated to operate on `ScalingConfigDataClass`es instead of PGFs as it is now. That will require a deprecation cycle.
fun_name: _validate_attributes | commit_id: b3878e26d765e28dd7c69abadbd856181037db97 | repo: ray | file_name: base_trainer.py | ast_levels: 15 | nloc: 40 | url: https://github.com/ray-project/ray.git | complexity: 11 | token_counts: 167 | n_ast_errors: 0 | vocab_size: 86 | n_ast_nodes: 377 | language: Python
{ "docstring": "Called on __init()__ to validate trainer attributes.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
def _validate_attributes(self): # Run config if not isinstance(self.run_config, RunConfig): raise ValueError( f"`run_config` should be an instance of `ray.air.RunConfig`, " f"found {type(self.run_config)} with value `{self.run_config}`." ) # Scaling config # Todo: move to ray.air.ScalingConfig if not isinstance(self.scaling_config, dict): raise ValueError( f"`scaling_config` should be an instance of `dict`, " f"found {type(self.scaling_config)} with value `{self.scaling_config}`." ) # Datasets if not isinstance(self.datasets, dict): raise ValueError( f"`datasets` should be a dict mapping from a string to " f"`ray.data.Dataset` objects, " f"found {type(self.datasets)} with value `{self.datasets}`." ) elif any( not isinstance(ds, ray.data.Dataset) and not callable(ds) for ds in self.datasets.values() ): raise ValueError( f"At least one value in the `datasets` dict is not a " f"`ray.data.Dataset`: {self.datasets}" ) # Preprocessor if self.preprocessor is not None and not isinstance( self.preprocessor, ray.data.Preprocessor ): raise ValueError( f"`preprocessor` should be an instance of `ray.data.Preprocessor`, " f"found {type(self.preprocessor)} with value `{self.preprocessor}`." ) if self.resume_from_checkpoint is not None and not isinstance( self.resume_from_checkpoint, ray.air.Checkpoint ): raise ValueError( f"`resume_from_checkpoint` should be an instance of " f"`ray.air.Checkpoint`, found {type(self.resume_from_checkpoint)} " f"with value `{self.resume_from_checkpoint}`." )
d_id: 89,089 | id: 289,963 | n_whitespaces: 97 | path: homeassistant/components/mqtt/device_tracker/schema_discovery.py | n_words: 25 | n_identifiers: 5
def longitude(self) -> float | None: if ( self.extra_state_attributes is not None and ATTR_LONG
Improve MQTT type hints part 8 (#81034) * Improve typing device_tracker discovery * Improve typing device_tracker yaml * Add test source_type attribute * Follow up comment * Initialize at `__init__` not at class level. * Use full name for return variable * Correct import, remove assert * Use AsyncSeeCallback
fun_name: longitude | commit_id: bcae6d604e2967c7475f0caa4b1b5e4e76ab88bf | repo: core | file_name: schema_discovery.py | ast_levels: 10 | nloc: 9 | url: https://github.com/home-assistant/core.git | complexity: 3 | token_counts: 40 | n_ast_errors: 0 | vocab_size: 21 | n_ast_nodes: 64 | language: Python
{ "docstring": "Return longitude if provided in extra_state_attributes or None.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def longitude(self) -> float | None: if ( self.extra_state_attributes is not None and ATTR_LONGITUDE in self.extra_state_attributes ): longitude: float = self.extra_state_attributes[ATTR_LONGITUDE] return longitude return None
d_id: 3,291 | id: 20,240 | n_whitespaces: 31 | path: pipenv/patched/notpip/_vendor/platformdirs/windows.py | n_words: 10 | n_identifiers: 9
def user_cache_dir(self) -> str: path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA")) return self._append_parts(path, opinion_value="Cache")
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
fun_name: user_cache_dir | commit_id: f3166e673fe8d40277b804d35d77dcdb760fc3b3 | repo: pipenv | file_name: windows.py | ast_levels: 11 | nloc: 7 | url: https://github.com/pypa/pipenv.git | complexity: 1 | token_counts: 32 | n_ast_errors: 0 | vocab_size: 10 | n_ast_nodes: 63 | language: Python
{ "docstring": "\n :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g.\n ``%USERPROFILE%\\\\AppData\\\\Local\\\\$appauthor\\\\$appname\\\\Cache\\\\$version``\n ", "language": "en", "n_whitespaces": 39, "n_words": 16, "vocab_size": 16 }
def user_cache_dir(self) -> str: path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA")) return self._append_parts(path, opinion_value="Cache")
d_id: 18,254 | id: 87,220 | n_whitespaces: 47 | path: src/sentry/relay/config/__init__.py | n_words: 16 | n_identifiers: 12
def get_project_config(project, full_config=True, project_keys=None): with sentry_sdk.push_scope() as scope: scope.set_tag("project", project.id) with metri
feat(dynamic-sampling): Add new bias for dev envs [TET-491] (#40382) This PR add new bias for dev envs. Also add common approach to adding new rules like: releases or health checks to `generate_rules()` function. Also enable mypy for `src/sentry/dynamic_sampling/` TODO (fix mypy issues after merge conflicts in) : - [x] src/sentry/dynamic_sampling/feature_multiplexer.py - [x] src/sentry/dynamic_sampling/utils.py
fun_name: get_project_config | commit_id: 30e13df85cc296e8eee62eb376a0310c2e0d0261 | repo: sentry | file_name: __init__.py | ast_levels: 12 | nloc: 5 | url: https://github.com/getsentry/sentry.git | complexity: 1 | token_counts: 54 | n_ast_errors: 0 | vocab_size: 15 | n_ast_nodes: 93 | language: Python
{ "docstring": "Constructs the ProjectConfig information.\n :param project: The project to load configuration for. Ensure that\n organization is bound on this object; otherwise it will be loaded from\n the database.\n :param full_config: True if only the full config is required, False\n if only the restricted (for external relays) is required\n (default True, i.e. full configuration)\n :param project_keys: Pre-fetched project keys for performance. However, if\n no project keys are provided it is assumed that the config does not\n need to contain auth information (this is the case when used in\n python's StoreView)\n :return: a ProjectConfig object for the given project\n ", "language": "en", "n_whitespaces": 161, "n_words": 97, "vocab_size": 71 }
def get_project_config(project, full_config=True, project_keys=None): with sentry_sdk.push_scope() as scope: scope.set_tag("project", project.id) with metrics.timer("relay.config.get_project_config.duration"): return _get_project_config(project, full_config=full_config, project_keys=project_keys)
d_id: 74,852 | id: 256,282 | n_whitespaces: 348 | path: haystack/nodes/retriever/text2sparql.py | n_words: 61 | n_identifiers: 19
def _query_kg(self, sparql_query): try: response = self.knowledge_graph.query(sparql_query=sparql_query) # unpack different answer styles if isinstance(re
Apply black formatting (#2115) * Testing black on ui/ * Applying black on docstores * Add latest docstring and tutorial changes * Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too * Remove comments * Relax constraints on pydoc-markdown * Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade * Fix a couple of bugs * Add a type: ignore that was missing somehow * Give path to black * Apply Black * Apply Black * Relocate a couple of type: ignore * Update documentation * Make Linux CI run after applying Black * Triggering Black * Apply Black * Remove dependency, does not work well * Remove manually double trailing commas * Update documentation Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
fun_name: _query_kg | commit_id: a59bca366174d9c692fa19750c24d65f47660ef7 | repo: haystack | file_name: text2sparql.py | ast_levels: 20 | nloc: 20 | url: https://github.com/deepset-ai/haystack.git | complexity: 8 | token_counts: 127 | n_ast_errors: 0 | vocab_size: 41 | n_ast_nodes: 218 | language: Python
{ "docstring": "\n Execute a single SPARQL query on the knowledge graph to retrieve an answer and unpack\n different answer styles for boolean queries, count queries, and list queries.\n\n :param sparql_query: SPARQL query that shall be executed on the knowledge graph\n ", "language": "en", "n_whitespaces": 67, "n_words": 38, "vocab_size": 29 }
def _query_kg(self, sparql_query): try: response = self.knowledge_graph.query(sparql_query=sparql_query) # unpack different answer styles if isinstance(response, list): if len(response) == 0: result = "" else: result = [] for x in response: for k, v in x.items(): result.append(v["value"]) elif isinstance(response, bool): result = str(response) elif "count" in response[0]: result = str(int(response[0]["count"]["value"])) else: result = "" except Exception: result = "" return result, sparql_query
d_id: 55,135 | id: 218,107 | n_whitespaces: 197 | path: python3.10.4/Lib/importlib/_bootstrap_external.py | n_words: 47 | n_identifiers: 11
def _path_importer_cache(cls, path): if path == '': try: path = _os.getcwd() except FileNotFoundError: # Don't cache the failure as the cwd can easily change to # a valid directory later on. return None try: finder = sys.path_importer_cache[path] except KeyError: finder = cls._path_hooks(path) sys.path_importer_cache[path] = finder return finder
add python 3.10.4 for windows
fun_name: _path_importer_cache | commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526 | repo: XX-Net | file_name: _bootstrap_external.py | ast_levels: 12 | nloc: 12 | url: https://github.com/XX-net/XX-Net.git | complexity: 4 | token_counts: 58 | n_ast_errors: 0 | vocab_size: 34 | n_ast_nodes: 100 | language: Python
{ "docstring": "Get the finder for the path entry from sys.path_importer_cache.\n\n If the path entry is not in the cache, find the appropriate finder\n and cache it. If no finder is available, store None.\n\n ", "language": "en", "n_whitespaces": 53, "n_words": 32, "vocab_size": 22 }
def _path_importer_cache(cls, path): if path == '': try: path = _os.getcwd() except FileNotFoundError: # Don't cache the failure as the cwd can easily change to # a valid directory later on. return None try: finder = sys.path_importer_cache[path] except KeyError: finder = cls._path_hooks(path) sys.path_importer_cache[path] = finder return finder
d_id: 51,857 | id: 207,077 | n_whitespaces: 95 | path: tests/admin_docs/test_utils.py | n_words: 28 | n_identifiers: 15
def test_publish_parts(self): import docutils self.asser
Refs #33476 -- Reformatted code with Black.
fun_name: test_publish_parts | commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | repo: django | file_name: test_utils.py | ast_levels: 11 | nloc: 9 | url: https://github.com/django/django.git | complexity: 1 | token_counts: 57 | n_ast_errors: 0 | vocab_size: 25 | n_ast_nodes: 102 | language: Python
{ "docstring": "\n Django shouldn't break the default role for interpreted text\n when ``publish_parts`` is used directly, by setting it to\n ``cmsreference`` (#6681).\n ", "language": "en", "n_whitespaces": 49, "n_words": 20, "vocab_size": 20 }
def test_publish_parts(self): import docutils self.assertNotEqual( docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE, "cmsreference" ) source = "reST, `interpreted text`, default role." markup = "<p>reST, <cite>interpreted text</cite>, default role.</p>\n" parts = docutils.core.publish_parts(source=source, writer_name="html4css1") self.assertEqual(parts["fragment"], markup)
d_id: 72,997 | id: 249,564 | n_whitespaces: 240 | path: tests/storage/test_event_federation.py | n_words: 88 | n_identifiers: 14
def test_get_backfill_points_in_room(self): setup_info = self._setup_room_for_backfill_tests() room_id = setup_info.room_id depth_map = setup_info.depth_map # Try at "B" backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id, depth_map["B"], limit=100) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertListEqual( backfill_event_ids, ["b6", "b5", "b4", "2", "b3", "b2", "b1"] ) # Try at "A" backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_p
Limit and filter the number of backfill points to get from the database (#13879) There is no need to grab thousands of backfill points when we only need 5 to make the `/backfill` request with. We need to grab a few extra in case the first few aren't visible in the history. Previously, we grabbed thousands of backfill points from the database, then sorted and filtered them in the app. Fetching the 4.6k backfill points for `#matrix:matrix.org` from the database takes ~50ms - ~570ms so it's not like this saves a lot of time 🤷. But it might save us more time now that `get_backfill_points_in_room`/`get_insertion_event_backward_extremities_in_room` are more complicated after https://github.com/matrix-org/synapse/pull/13635 This PR moves the filtering and limiting to the SQL query so we just have less data to work with in the first place. Part of https://github.com/matrix-org/synapse/issues/13356
fun_name: test_get_backfill_points_in_room | commit_id: df8b91ed2bba4995c59a5b067e3b252ab90c9a5e | repo: synapse | file_name: test_event_federation.py | ast_levels: 12 | nloc: 16 | url: https://github.com/matrix-org/synapse.git | complexity: 3 | token_counts: 131 | n_ast_errors: 0 | vocab_size: 62 | n_ast_nodes: 219 | language: Python
{ "docstring": "\n Test to make sure only backfill points that are older and come before\n the `current_depth` are returned.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 16 }
def test_get_backfill_points_in_room(self): setup_info = self._setup_room_for_backfill_tests() room_id = setup_info.room_id depth_map = setup_info.depth_map # Try at "B" backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id, depth_map["B"], limit=100) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertListEqual( backfill_event_ids, ["b6", "b5", "b4", "2", "b3", "b2", "b1"] ) # Try at "A" backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] # Event "2" has a depth of 2 but is not included here because we only # know the approximate depth of 5 from our event "3". self.assertListEqual(backfill_event_ids, ["b3", "b2", "b1"])
d_id: 48,149 | id: 196,753 | n_whitespaces: 74 | path: sympy/printing/theanocode.py | n_words: 28 | n_identifiers: 13
def theano_code(expr, cache=None, **kwargs): sympy_deprecation_warning( , deprecated_since_version="1.8", active_deprecations_target='theanocode-deprecated') if not theano: raise Im
Update the deprecation warning for theanocode
fun_name: theano_code | commit_id: d54b0dc8170186cdd447bf40d55f805edd8a8d5a | repo: sympy | file_name: theanocode.py | ast_levels: 11 | nloc: 12 | url: https://github.com/sympy/sympy.git | complexity: 3 | token_counts: 62 | n_ast_errors: 0 | vocab_size: 25 | n_ast_nodes: 105 | language: Python
{ "docstring": "\n Convert a SymPy expression into a Theano graph variable.\n\n .. deprecated:: 1.8\n\n ``sympy.printing.theanocode`` is deprecated. Theano has been renamed to\n Aesara. Use ``sympy.printing.aesaracode`` instead. See\n :ref:`theanocode-deprecated` for more information.\n\n Parameters\n ==========\n\n expr : sympy.core.expr.Expr\n SymPy expression object to convert.\n\n cache : dict\n Cached Theano variables (see :class:`TheanoPrinter.cache\n <TheanoPrinter>`). Defaults to the module-level global cache.\n\n dtypes : dict\n Passed to :meth:`.TheanoPrinter.doprint`.\n\n broadcastables : dict\n Passed to :meth:`.TheanoPrinter.doprint`.\n\n Returns\n =======\n\n theano.gof.graph.Variable\n A variable corresponding to the expression's value in a Theano symbolic\n expression graph.\n\n \n sympy.printing.theanocode is deprecated. Theano has been renamed to\n Aesara. Use sympy.printing.aesaracode instead.", "language": "en", "n_whitespaces": 209, "n_words": 94, "vocab_size": 63 }
def theano_code(expr, cache=None, **kwargs): sympy_deprecation_warning( , deprecated_since_version="1.8", active_deprecations_target='theanocode-deprecated') if not theano: raise ImportError("theano is required for theano_code") if cache is None: cache = global_cache return TheanoPrinter(cache=cache, settings={}).doprint(expr, **kwargs)
d_id: 35,804 | id: 154,139 | n_whitespaces: 518 | path: modin/core/dataframe/pandas/dataframe/dataframe.py | n_words: 147 | n_identifiers: 16
def _validate_axes_lengths(self): if self._row_lengths_cache is not None and len(self.index) > 0: # An empty frame can have 0 rows but a nonempty index. If the frame # does have rows, the number of rows must equal the size of the # index. num_rows = sum(self._row_lengths_cache) if num_rows > 0: ErrorMessage.catch_bugs_and_request_email( num_rows != len(self._index_cache), f"Row lengths: {num_rows} != {len(self._index_cache)}", ) ErrorMessage.catch_bugs_and_request_email( any(val < 0 for val in self._row_lengths_cache), f"Row lengths cannot be negative: {self._row_lengths_cache}", ) if self._column_widths_cache is not None and len(self.columns) > 0: # An empty frame can have 0 column but a nonempty column index. If # the frame does have columns, the number of columns must equal the # size of the columns. num_columns = sum(self._column_widths_cache) if num_columns > 0: ErrorMessage.catch_bugs_and_request_email( num_columns != len(self._columns_cache), f"Column widths: {num_columns} != {len(self._columns_cache)}", ) ErrorMessage.catch_bugs_and_request_email( any(val < 0 for val in self._column_widths_cache), f"Column widths cannot be negative: {self._column_widths_cache}", )
FEAT-#4725: Make index and columns lazy in Modin DataFrame (#4726) Co-authored-by: Mahesh Vashishtha <mvashishtha@users.noreply.github.com> Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru> Signed-off-by: Vasily Litvinov <fam1ly.n4me@yandex.ru>
fun_name: _validate_axes_lengths | commit_id: adb16a17f721048005520388080627975c6852d8 | repo: modin | file_name: dataframe.py | ast_levels: 16 | nloc: 23 | url: https://github.com/modin-project/modin.git | complexity: 9 | token_counts: 142 | n_ast_errors: 0 | vocab_size: 70 | n_ast_nodes: 273 | language: Python
{ "docstring": "Validate that labels are split correctly if split is known.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
def _validate_axes_lengths(self): if self._row_lengths_cache is not None and len(self.index) > 0: # An empty frame can have 0 rows but a nonempty index. If the frame # does have rows, the number of rows must equal the size of the # index. num_rows = sum(self._row_lengths_cache) if num_rows > 0: ErrorMessage.catch_bugs_and_request_email( num_rows != len(self._index_cache), f"Row lengths: {num_rows} != {len(self._index_cache)}", ) ErrorMessage.catch_bugs_and_request_email( any(val < 0 for val in self._row_lengths_cache), f"Row lengths cannot be negative: {self._row_lengths_cache}", ) if self._column_widths_cache is not None and len(self.columns) > 0: # An empty frame can have 0 column but a nonempty column index. If # the frame does have columns, the number of columns must equal the # size of the columns. num_columns = sum(self._column_widths_cache) if num_columns > 0: ErrorMessage.catch_bugs_and_request_email( num_columns != len(self._columns_cache), f"Column widths: {num_columns} != {len(self._columns_cache)}", ) ErrorMessage.catch_bugs_and_request_email( any(val < 0 for val in self._column_widths_cache), f"Column widths cannot be negative: {self._column_widths_cache}", )
d_id: 54,129 | id: 215,735 | n_whitespaces: 24 | path: tests/pytests/unit/utils/win_dacl/test_get_name.py | n_words: 12 | n_identifiers: 9
def test_get_name_capability_sid(): cap_sid = "S-1-15-3-1024-1065365936-1281604716-3511738428-1654721687-432734479-3232135806-4053264122-3456934681" sid_obj = win32security.ConvertStringSidToSid(cap_sid) assert salt.utils.win_dacl.get_name(sid_obj) is No
Add tests, migrate some tests to pytest
fun_name: test_get_name_capability_sid | commit_id: 3bb43882e727b1d36abe2e501759c9c5e9048ecf | repo: salt | file_name: test_get_name.py | ast_levels: 10 | nloc: 4 | url: https://github.com/saltstack/salt.git | complexity: 1 | token_counts: 29 | n_ast_errors: 0 | vocab_size: 11 | n_ast_nodes: 52 | language: Python
{ "docstring": "\n Test get_name with a compatibility SID. Should return `None` as we want to\n ignore these SIDs\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 16 }
def test_get_name_capability_sid(): cap_sid = "S-1-15-3-1024-1065365936-1281604716-3511738428-1654721687-432734479-3232135806-4053264122-3456934681" sid_obj = win32security.ConvertStringSidToSid(cap_sid) assert salt.utils.win_dacl.get_name(sid_obj) is None
d_id: 36,798 | id: 156,894 | n_whitespaces: 65 | path: dask/compatibility.py | n_words: 17 | n_identifiers: 8
def entry_points(group=None): eps = importlib.metadata.entry_points() if group: try: return eps.select(group=group) except AttributeError: return eps.get(group, []) return eps
Add `entry_points` compatibility utility (#9388)
fun_name: entry_points | commit_id: a9ee6c2fdf0a3093747e675997143e0dbe584bad | repo: dask | file_name: compatibility.py | ast_levels: 13 | nloc: 8 | url: https://github.com/dask/dask.git | complexity: 3 | token_counts: 46 | n_ast_errors: 0 | vocab_size: 14 | n_ast_nodes: 77 | language: Python
{ "docstring": "Returns an iterable of entrypoints.\n\n For compatibility with Python 3.8/3.9.\n In 3.10 the return type changed from a dict to an ``importlib.metadata.EntryPoints``.\n This compatibility utility can be removed once Python 3.10 is the minimum.\n ", "language": "en", "n_whitespaces": 46, "n_words": 34, "vocab_size": 29 }
def entry_points(group=None): eps = importlib.metadata.entry_points() if group: try: return eps.select(group=group) except AttributeError: return eps.get(group, []) return eps
d_id: 45,651 | id: 186,900 | n_whitespaces: 53 | path: certbot/certbot/_internal/storage.py | n_words: 14 | n_identifiers: 10
def elliptic_curve(self) -> Optional[str]: key = self._private_key() if isinstance(key, EllipticCurvePrivateKey): return key.cu
error out when --reuse-key conflicts with other flags (#9262) * error out when --reuse-key conflicts with other flags * add unit test * add integration tests * lint
fun_name: elliptic_curve | commit_id: 212c2ba990758cb9acd2b200e55302534988089a | repo: certbot | file_name: storage.py | ast_levels: 9 | nloc: 9 | url: https://github.com/certbot/certbot.git | complexity: 2 | token_counts: 34 | n_ast_errors: 0 | vocab_size: 13 | n_ast_nodes: 56 | language: Python
{ "docstring": "\n :returns: If the private key is an elliptic key, the name of its curve.\n :rtype: str\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 15 }
def elliptic_curve(self) -> Optional[str]: key = self._private_key() if isinstance(key, EllipticCurvePrivateKey): return key.curve.name return None
d_id: 16,691 | id: 77,682 | n_whitespaces: 55 | path: wagtail/models/__init__.py | n_words: 12 | n_identifiers: 5
def page_type_display_name(self): if no
Add a page_type_display_name shortcut property
fun_name: page_type_display_name | commit_id: a3b1cb6c287a2a0c2957c8141c54453928e1b97e | repo: wagtail | file_name: __init__.py | ast_levels: 11 | nloc: 5 | url: https://github.com/wagtail/wagtail.git | complexity: 3 | token_counts: 30 | n_ast_errors: 0 | vocab_size: 11 | n_ast_nodes: 55 | language: Python
{ "docstring": "\n A human-readable version of this page's type\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
def page_type_display_name(self): if not self.specific_class or self.is_root(): return "" else: return self.specific_class.get_verbose_name()
d_id: 7,423 | id: 41,675 | n_whitespaces: 47 | path: seaborn/_core/plot.py | n_words: 19 | n_identifiers: 6
def save(self, fname, **kwargs) -> Plot: # TODO expose important keyword arugments in our signature? se
Add some docstrings and basic API docs
fun_name: save | commit_id: 6357619ec08a59e4ecf00c6b1300ac6e014a753f | repo: seaborn | file_name: plot.py | ast_levels: 9 | nloc: 13 | url: https://github.com/mwaskom/seaborn.git | complexity: 1 | token_counts: 28 | n_ast_errors: 0 | vocab_size: 18 | n_ast_nodes: 47 | language: Python
{ "docstring": "\n Render the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n fname : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.\n\n ", "language": "en", "n_whitespaces": 95, "n_words": 41, "vocab_size": 30 }
def save(self, fname, **kwargs) -> Plot: # TODO expose important keyword arugments in our signature? self.plot().save(fname, **kwargs) return self
d_id: 12,270 | id: 60,741 | n_whitespaces: 141 | path: .venv/lib/python3.8/site-packages/pip/_internal/index/package_finder.py | n_words: 29 | n_identifiers: 13
def get_install_candidate(self, link_evaluator, link): # type: (LinkEvaluator, Link) -> Optional[InstallationCandidate] is_candidate, r
upd; format
fun_name: get_install_candidate | commit_id: f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | repo: transferlearning | file_name: package_finder.py | ast_levels: 12 | nloc: 11 | url: https://github.com/jindongwang/transferlearning.git | complexity: 3 | token_counts: 57 | n_ast_errors: 0 | vocab_size: 27 | n_ast_nodes: 89 | language: Python
{ "docstring": "\n If the link is a candidate for install, convert it to an\n InstallationCandidate and return it. Otherwise, return None.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 18 }
def get_install_candidate(self, link_evaluator, link): # type: (LinkEvaluator, Link) -> Optional[InstallationCandidate] is_candidate, result = link_evaluator.evaluate_link(link) if not is_candidate: if result: self._log_skipped_link(link, reason=result) return None return InstallationCandidate( name=link_evaluator.project_name, link=link, version=result, )
d_id: 40,165 | id: 168,029 | n_whitespaces: 26 | path: pandas/plotting/_core.py | n_words: 12 | n_identifiers: 7
def bar(self, x=None, y=None, **kwargs) -> PlotAccessor: return self(kind="bar", x=x, y=y, **kwargs)
TYP: pandas/plotting annotations from pandas-stubs (#47827) * TYP: pandas/plotting annotations from pandas-stubs * xticks + pyright
fun_name: bar | commit_id: 4d7cfc436f8a7bc65c11770aa16f05e875b74077 | repo: pandas | file_name: _core.py | ast_levels: 9 | nloc: 11 | url: https://github.com/pandas-dev/pandas.git | complexity: 1 | token_counts: 37 | n_ast_errors: 0 | vocab_size: 11 | n_ast_nodes: 57 | language: Python
{ "docstring": "\n Vertical bar plot.\n\n A bar plot is a plot that presents categorical data with\n rectangular bars with lengths proportional to the values that they\n represent. A bar plot shows comparisons among discrete categories. One\n axis of the plot shows the specific categories being compared, and the\n other axis represents a measured value.\n ", "language": "en", "n_whitespaces": 102, "n_words": 52, "vocab_size": 38 }
def bar(self, x=None, y=None, **kwargs) -> PlotAccessor: return self(kind="bar", x=x, y=y, **kwargs)
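A minimal sketch of calling the accessor above through `DataFrame.plot.bar`; the column names and values are made up for illustration.

import pandas as pd

df = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
ax = df.plot.bar(x="lab", y="val", rot=0)  # returns a matplotlib Axes with one bar per row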
@keras_export("keras.backend.map_fn") @doc_controls.do_not_generate_docs
80,228
269,608
205
keras/backend.py
71
40
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1): input_shape = shape(y_pred) num_samples, num_steps = input_shape[0], input_shape[1] y_pred = tf.math.log( tf.compat.v1.transpose(y_pred, perm=[1, 0, 2]) + epsilon() ) input_length = tf.cast(input_length, tf.int32) if greedy: (decoded, log_prob) = tf.nn.ctc_greedy_decoder( inputs=y_pred, sequence_length=input_length ) else: (decoded, log_prob) = tf.compat.v1.nn.ctc_beam_search_decoder( inputs=y_pred, sequence_length=input_length, beam_width=beam_width, top_paths=top_paths, ) decoded_dense = [] for st in decoded: st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps)) decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1)) return (decoded_dense, log_prob) # HIGH
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
ctc_decode
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
backend.py
14
23
https://github.com/keras-team/keras.git
3
197
1
57
309
Python
{ "docstring": "Decodes the output of a softmax.\n\n Can use either greedy search (also known as best path)\n or a constrained dictionary search.\n\n Args:\n y_pred: tensor `(samples, time_steps, num_categories)`\n containing the prediction, or output of the softmax.\n input_length: tensor `(samples, )` containing the sequence length for\n each batch item in `y_pred`.\n greedy: perform much faster best-path search if `true`.\n This does not use a dictionary.\n beam_width: if `greedy` is `false`: a beam search decoder will be used\n with a beam of this width.\n top_paths: if `greedy` is `false`,\n how many of the most probable paths will be returned.\n\n Returns:\n Tuple:\n List: if `greedy` is `true`, returns a list of one element that\n contains the decoded sequence.\n If `false`, returns the `top_paths` most probable\n decoded sequences.\n Each decoded sequence has shape (samples, time_steps).\n Important: blank labels are returned as `-1`.\n Tensor `(top_paths, )` that contains\n the log probability of each decoded sequence.\n ", "language": "en", "n_whitespaces": 373, "n_words": 149, "vocab_size": 99 }
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1): input_shape = shape(y_pred) num_samples, num_steps = input_shape[0], input_shape[1] y_pred = tf.math.log( tf.compat.v1.transpose(y_pred, perm=[1, 0, 2]) + epsilon() ) input_length = tf.cast(input_length, tf.int32) if greedy: (decoded, log_prob) = tf.nn.ctc_greedy_decoder( inputs=y_pred, sequence_length=input_length ) else: (decoded, log_prob) = tf.compat.v1.nn.ctc_beam_search_decoder( inputs=y_pred, sequence_length=input_length, beam_width=beam_width, top_paths=top_paths, ) decoded_dense = [] for st in decoded: st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps)) decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1)) return (decoded_dense, log_prob) # HIGH ORDER FUNCTIONS @keras_export("keras.backend.map_fn") @doc_controls.do_not_generate_docs
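A sketch of greedy decoding with the helper above, assuming TensorFlow 2.x eager execution; the shapes and random softmax inputs are placeholders, not real model output.

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

y_pred = tf.nn.softmax(tf.random.uniform((2, 10, 5)), axis=-1)  # (samples, time_steps, num_categories)
input_length = np.array([10, 10], dtype="int32")                # full-length sequences
decoded, log_prob = K.ctc_decode(y_pred, input_length, greedy=True)
print(decoded[0].numpy())  # blank labels appear as -1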
35,376
153,340
10
modin/core/execution/ray/generic/modin_aqp.py
4
8
def display_time_updates(bar): threading.Thread(target
REFACTOR-#4251: define public interfaces in `modin.core.execution.ray` module (#3868) Signed-off-by: Anatoly Myachev <anatoly.myachev@intel.com>
display_time_updates
e7cb2e82f8b9c7a68f82abdd3b6011d661230b7e
modin
modin_aqp.py
11
2
https://github.com/modin-project/modin.git
1
25
0
4
42
Python
{ "docstring": "\n Start displaying the progress `bar` in a notebook.\n\n Parameters\n ----------\n bar : tqdm.tqdm\n The progress bar wrapper to display in a notebook cell.\n ", "language": "en", "n_whitespaces": 46, "n_words": 23, "vocab_size": 19 }
def display_time_updates(bar): threading.Thread(target=_show_time_updates, args=(bar,)).start()
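A sketch of the same pattern outside Modin — driving a `tqdm` bar from a background thread; the work loop and timings are invented for illustration.

import threading
import time
from tqdm import tqdm

bar = tqdm(total=50)

def _tick(progress_bar):
    for _ in range(50):
        time.sleep(0.1)        # stand-in for real work happening elsewhere
        progress_bar.update(1)

threading.Thread(target=_tick, args=(bar,), daemon=True).start()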
53,792
215,074
513
salt/modules/aixpkg.py
157
38
def remove(name=None, pkgs=None, **kwargs): targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug("Removing these fileset(s)/rpm package(s) %s: %s", name, targets) errors = [] # Get a list of the currently installed pkgs. old = list_pkgs() # Remove the fileset or rpm package(s) for target in targets: try: named, versionpkg, rpmpkg = _check_pkg(target) except CommandExecutionError as exc: if exc.info: errors.append(exc.info["errors"]) continue if rpmpkg: # assume use dnf or yum cmdflags = " -y remove " if pathlib.Path("/opt/freeware/bin/dnf").is_file(): cmdexe = "/opt/freeware/bin/dnf" elif pathlib.Path("/opt/freeware/bin/yum").is_file(): cmdexe = "/opt/freeware/bin/yum" elif pathlib.Path("/usr/bin/yum").is_file(): cmdexe = "/usr/bin/yum" else: cmdexe = "/usr/bin/rpm" cmdflags = " -e " cmd = [cmdexe, cmdflags, named] out = __salt__["cmd.run_all"](cmd, python_shell=False) else: cmd = ["/usr/sbin/installp", "-u", named] out = __salt__["cmd.run_all"](cmd, python_shell=False) # Get a list of the packages after the uninstall __context__.pop("pkg.list_pkgs", None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( "Problems encountered removing filesets(s)/package(s)", info={"changes": ret, "errors": errors}, ) return ret
work in progress while resolve issue of python3_32 usage by dnf and yum
remove
fbcc707e76f11770712e6828155258ac61e00ff8
salt
aixpkg.py
16
40
https://github.com/saltstack/salt.git
12
256
0
106
443
Python
{ "docstring": "\n Remove specified fileset(s)/rpm package(s).\n\n name\n The name of the fileset or rpm package to be deleted.\n\n .. versionadded:: 3005\n\n preference to install rpm packages are to use in the following order:\n /opt/freeware/bin/dnf\n /opt/freeware/bin/yum\n /usr/bin/yum\n /usr/bin/rpm\n\n Multiple Package Options:\n\n pkgs\n A list of filesets and/or rpm packages to delete.\n Must be passed as a python list. The ``name`` parameter will be\n ignored if this option is passed.\n\n\n Returns a list containing the removed packages.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.remove <fileset/rpm package name>\n salt '*' pkg.remove tcsh\n salt '*' pkg.remove xlC.rte\n salt '*' pkg.remove Firefox.base.adt\n salt '*' pkg.remove pkgs='[\"foo\", \"bar\"]'\n ", "language": "en", "n_whitespaces": 243, "n_words": 101, "vocab_size": 72 }
def remove(name=None, pkgs=None, **kwargs): targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug("Removing these fileset(s)/rpm package(s) %s: %s", name, targets) errors = [] # Get a list of the currently installed pkgs. old = list_pkgs() # Remove the fileset or rpm package(s) for target in targets: try: named, versionpkg, rpmpkg = _check_pkg(target) except CommandExecutionError as exc: if exc.info: errors.append(exc.info["errors"]) continue if rpmpkg: # assume use dnf or yum cmdflags = " -y remove " if pathlib.Path("/opt/freeware/bin/dnf").is_file(): cmdexe = "/opt/freeware/bin/dnf" elif pathlib.Path("/opt/freeware/bin/yum").is_file(): cmdexe = "/opt/freeware/bin/yum" elif pathlib.Path("/usr/bin/yum").is_file(): cmdexe = "/usr/bin/yum" else: cmdexe = "/usr/bin/rpm" cmdflags = " -e " cmd = [cmdexe, cmdflags, named] out = __salt__["cmd.run_all"](cmd, python_shell=False) else: cmd = ["/usr/sbin/installp", "-u", named] out = __salt__["cmd.run_all"](cmd, python_shell=False) # Get a list of the packages after the uninstall __context__.pop("pkg.list_pkgs", None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( "Problems encountered removing filesets(s)/package(s)", info={"changes": ret, "errors": errors}, ) return ret
22,693
107,327
313
lib/matplotlib/dates.py
156
31
def _from_ordinalf(x, tz=None): tz = _get_tzinfo(tz) dt = (np.datetime64(get_epoch()) + np.timedelta64(int(np.round(x * MUSECONDS_PER_DAY)), 'us')) if dt < np.datetime64('0001-01-01') or dt >= np.datetime64('10000-01-01'): raise ValueError(f'Date ordinal {x} converts to {dt} (using ' f'epoch {get_epoch()}), but Matplotlib dates must be ' 'between year 0001 and 9999.') # c
All classes and methods in dates support both string and tzinfo as tz-argument
_from_ordinalf
115877861608a869be63110a1e3917c3d1dda04a
matplotlib
dates.py
15
18
https://github.com/matplotlib/matplotlib.git
5
169
0
107
346
Python
{ "docstring": "\n Convert Gregorian float of the date, preserving hours, minutes,\n seconds and microseconds. Return value is a `.datetime`.\n\n The input date *x* is a float in ordinal days at UTC, and the output will\n be the specified `.datetime` object corresponding to that time in\n timezone *tz*, or if *tz* is ``None``, in the timezone specified in\n :rc:`timezone`.\n ", "language": "en", "n_whitespaces": 79, "n_words": 56, "vocab_size": 43 }
def _from_ordinalf(x, tz=None): tz = _get_tzinfo(tz) dt = (np.datetime64(get_epoch()) + np.timedelta64(int(np.round(x * MUSECONDS_PER_DAY)), 'us')) if dt < np.datetime64('0001-01-01') or dt >= np.datetime64('10000-01-01'): raise ValueError(f'Date ordinal {x} converts to {dt} (using ' f'epoch {get_epoch()}), but Matplotlib dates must be ' 'between year 0001 and 9999.') # convert from datetime64 to datetime: dt = dt.tolist() # datetime64 is always UTC: dt = dt.replace(tzinfo=dateutil.tz.gettz('UTC')) # but maybe we are working in a different timezone so move. dt = dt.astimezone(tz) # fix round off errors if np.abs(x) > 70 * 365: # if x is big, round off to nearest twenty microseconds. # This avoids floating point roundoff error ms = round(dt.microsecond / 20) * 20 if ms == 1000000: dt = dt.replace(microsecond=0) + datetime.timedelta(seconds=1) else: dt = dt.replace(microsecond=ms) return dt # a version of _from_ordinalf that can operate on numpy arrays _from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf, otypes="O") # a version of dateutil.parser.parse that can operate on numpy arrays _dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
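The private helper above backs Matplotlib's public conversion functions; a hedged round-trip through `matplotlib.dates.date2num`/`num2date` shows the behaviour it implements.

import datetime
import matplotlib.dates as mdates

x = mdates.date2num(datetime.datetime(2022, 1, 1, 12, 0))
print(mdates.num2date(x))  # 2022-01-01 12:00:00+00:00 (UTC by default)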
55,275
218,387
106
python3.10.4/Lib/inspect.py
31
10
def getdoc(object): try: doc = object.__doc__ except A
add python 3.10.4 for windows
getdoc
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
inspect.py
12
13
https://github.com/XX-net/XX-Net.git
5
56
0
20
93
Python
{ "docstring": "Get the documentation string for an object.\n\n All tabs are expanded to spaces. To clean up docstrings that are\n indented to line up with blocks of code, any whitespace than can be\n uniformly removed from the second line onwards is removed.", "language": "en", "n_whitespaces": 50, "n_words": 41, "vocab_size": 36 }
def getdoc(object): try: doc = object.__doc__ except AttributeError: return None if doc is None: try: doc = _finddoc(object) except (AttributeError, TypeError): return None if not isinstance(doc, str): return None return cleandoc(doc)
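A small sketch of the behaviour described above — `inspect.getdoc` dedents the text and, on modern Pythons, falls back to documentation inherited through the MRO when the object has none of its own; the classes are hypothetical.

import inspect

class Base:
    def method(self):
        """Do the thing.

            Indented continuation line."""

class Child(Base):
    def method(self):  # no docstring of its own
        pass

print(inspect.getdoc(Child().method))  # inherited text, cleaned of extra indentation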
1,017
6,541
62
ludwig/marshmallow/marshmallow_schema_utils.py
37
12
def load_config_with_kwargs(cls, kwargs): assert_is_a_marshmallow_
feat: Modify Trainer to use marshmallow_dataclass syntax for handling hyperparameters. Add basic scripting for docstring extraction to marshmallow schema. Fix some existing marshmallow issues. (#1606)
load_config_with_kwargs
23a33eef3bc7ea3ba33ec56dc9b56ba38462648a
ludwig
marshmallow_schema_utils.py
13
7
https://github.com/ludwig-ai/ludwig.git
5
75
0
24
119
Python
{ "docstring": "Takes a marshmallow class and dict of parameter values and appropriately instantiantes the schema.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
def load_config_with_kwargs(cls, kwargs): assert_is_a_marshmallow_class(cls) schema = cls.Schema() fields = schema.fields.keys() return load_config(cls, **{k: v for k, v in kwargs.items() if k in fields}), { k: v for k, v in kwargs.items() if k not in fields }
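The same keep-only-known-fields idea, sketched with a plain dataclass instead of a marshmallow-generated schema; the config class and parameter names are hypothetical.

from dataclasses import dataclass, fields

@dataclass
class TrainerConfig:
    learning_rate: float = 0.001
    epochs: int = 10

def from_kwargs(cls, **kwargs):
    known = {f.name for f in fields(cls)}
    return cls(**{k: v for k, v in kwargs.items() if k in known})

print(from_kwargs(TrainerConfig, learning_rate=0.01, unknown_flag=True))
# TrainerConfig(learning_rate=0.01, epochs=10) — unknown kwargs are dropped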
@patch("saleor.payment.gateway.refund")
5,046
26,687
140
saleor/payment/tests/test_gateway.py
43
29
def test_payment_refund_or_void_refund_called_txn_exist(refund_mock, payment): # given payment.charge_status = ChargeStatus.FULLY_CHARGED payment.save(update_fi
Fix payment flow (#9504) * Do not capture payment again when it should be refunded or voided * Do not create order when then is ongoing refund
test_payment_refund_or_void_refund_called_txn_exist
0881beec1ac02dfa97525c5173687defb356d85c
saleor
test_gateway.py
11
19
https://github.com/saleor/saleor.git
1
120
1
37
202
Python
{ "docstring": "Ensure that the refund method is called when the refund process\n is already ongoing but not covered full payment captured amount.", "language": "en", "n_whitespaces": 23, "n_words": 21, "vocab_size": 18 }
def test_payment_refund_or_void_refund_called_txn_exist(refund_mock, payment): # given payment.charge_status = ChargeStatus.FULLY_CHARGED payment.save(update_fields=["charge_status"]) assert payment.can_refund() is True payment.captured_amount = payment.total payment.save(update_fields=["captured_amount"]) txn = payment.transactions.create( is_success=True, action_required=False, kind=TransactionKind.REFUND_ONGOING, amount=payment.captured_amount / 2, currency=payment.currency, token="test", gateway_response={}, ) # when gateway.payment_refund_or_void( payment, get_plugins_manager(), None, transaction_id=txn.token ) # then assert refund_mock.called_once() @patch("saleor.payment.gateway.refund")
55,214
218,222
29
python3.10.4/Lib/importlib/metadata/__init__.py
8
10
def _all(self): groups = super(Deprecated, self).values() return EntryPoin
add python 3.10.4 for windows
_all
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
__init__.py
10
3
https://github.com/XX-net/XX-Net.git
1
30
0
8
51
Python
{ "docstring": "\n Reconstruct a list of all entrypoints from the groups.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
def _all(self): groups = super(Deprecated, self).values() return EntryPoints(itertools.chain.from_iterable(groups))
56,380
221,366
30
python3.10.4/Lib/codecs.py
9
7
def readlines(self, sizehint=None, keepends=True): data = self.read() re
add python 3.10.4 for windows
readlines
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
codecs.py
8
3
https://github.com/XX-net/XX-Net.git
1
28
0
9
46
Python
{ "docstring": " Read all lines available on the input stream\n and return them as a list.\n\n Line breaks are implemented using the codec's decoder\n method and are included in the list entries.\n\n sizehint, if given, is ignored since there is no efficient\n way to finding the true end-of-line.\n\n ", "language": "en", "n_whitespaces": 109, "n_words": 46, "vocab_size": 40 }
def readlines(self, sizehint=None, keepends=True): data = self.read() return data.splitlines(keepends)
43,215
180,689
65
gradio/event_queue.py
14
8
async def notify_clients(cls) -> None: while not cls.STOP: await asyncio.sleep(cls.UPDATE_INTERVALS)
Release new queue beta (#1969) * queue-refactor-backend (#1489) * queue-refactor-backend - create a template for the new design * queue-refactor-backend - clean after the old queue * queue-refactor-backend - add basic test to websocket endpoint * queue-refactor-backend - small fix * queue-refactor-backend - debugs&fixes&finalizations - test the flow with postman * queue-refactor-backend - tweaks on websocket closing * queue-refactor-backend - cleanup * queue-refactor-backend - cleanup & tweaks * queue-refactor-backend - cleanup & tweaks * queue-refactor-backend - cleanup & tweaks - correct the exception handling * queue-refactor-backend - add websockets dependency * queue-refactor-backend - reformat * queue-refactor-backend - add single event test * queue-refactor-backend - tweaks - remove outdated tests * queue-refactor-backend - reformat * queue-refactor-backend - reformat * queue-refactor-backend - reformat * queue-refactor-backend - add Queue configurations to Blocks.launch() - add live_queue_update to send estimations whenever a job gets fetched from the Queue * queue-refactor-backend - add Queue configurations to Blocks.launch() - add live_queue_update to send estimations whenever a job gets fetched from the Queue * queue-refactor-backend - tweaks * queue-refactor-backend - make SLEEP_WHEN_FREE shorter Co-authored-by: Ali Abid <aabid94@gmail.com> * Add estimation parameters to queue (#1889) * - tweaks on Estimation * version * Revert "version" This reverts commit bd1f4d7bfe3658a4967b93126859a62a511a70e2. * some fix and tweaks * implement queue frontend (#1950) * implement queue frontend * fix types * fix ws endpoint in build mode * cleanup * Queue tweaks (#1909) * tweaks on estimation payload * Queue keep ws connections open (#1910) * 1. keep ws connections open after the event process is completed 2. do not send estimations periodically if live queue updates is open * fix calculation * 1. tweaks on event_queue * fix issue - create new ws for each request * format * fix * fix tests * fix tests * tets * test * changes * changes * changes * change' * wtf * changes * changes * file perms * Release queue beta v1 (#1971) * - release the new queue * - bypass the issue in the tests - rewrite the lost part in the codebase * - add concurrent queue example (#1978) * rank_eta calc * Queue fixes (#1981) * change * format * - comment out queue tests as they dont work well * - reformat * Update gradio/event_queue.py Co-authored-by: Ömer Faruk Özdemir <farukozderim@gmail.com> * changes * changes * change * weird fix Co-authored-by: Ömer Faruk Özdemir <farukozderim@gmail.com> * release-queue-v3 (#1988) * Fix frontend queuing to target secure WSS (#1996) * change * format * changes * queue-concurrency-tweaks (#2002) 1. make gather_data and broadcast_estimation sequential instead of concurrent because they were deleting elements at the same time and raising expections which was lowering the performance * Update Queue API, documentation (#2026) * changes * changes * fixes * changes * change * fix Co-authored-by: Ömer Faruk Özdemir <farukozderim@gmail.com> Co-authored-by: pngwn <hello@pngwn.io>
notify_clients
b1dfc9a172440e9c9736566f326ba339ff559604
gradio
event_queue.py
12
8
https://github.com/gradio-app/gradio.git
3
34
0
13
61
Python
{ "docstring": "\n Notify clients about events statuses in the queue periodically.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
async def notify_clients(cls) -> None: while not cls.STOP: await asyncio.sleep(cls.UPDATE_INTERVALS) if cls.EVENT_QUEUE: await cls.broadcast_estimations()
42,667
178,336
2,556
nuitka/nodes/ModuleNodes.py
244
32
def _readPyPIFile(self): # Complex stuff, pylint: disable=too-many-branches,too-many-statements if self.used_modules is None: pyi_filename = self.getPyIFilename() if os.path.exists(pyi_filename): pyi_deps = OrderedSet() # Flag signalling multiline import handling in_import = False in_import_part = "" for line in getFileContentByLine(pyi_filename): line = line.strip() if not in_import: if line.startswith("import "): imported = line[7:] pyi_deps.add(imported) elif line.startswith("from "): parts = line.split(None, 3) assert parts[0] == "from" assert parts[2] == "import" origin_name = parts[1] if origin_name == "typing": continue if origin_name == ".": origin_name = self.getFullName() else:
Fix, the parsing of ".pyi" files didn't handle relative imports
_readPyPIFile
1f5a2759dc7a3dda7baa4e599a803a34a0be5444
Nuitka
ModuleNodes.py
31
82
https://github.com/Nuitka/Nuitka.git
26
469
0
118
808
Python
{ "docstring": "Read the .pyi file if present and scan for dependencies.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def _readPyPIFile(self): # Complex stuff, pylint: disable=too-many-branches,too-many-statements if self.used_modules is None: pyi_filename = self.getPyIFilename() if os.path.exists(pyi_filename): pyi_deps = OrderedSet() # Flag signalling multiline import handling in_import = False in_import_part = "" for line in getFileContentByLine(pyi_filename): line = line.strip() if not in_import: if line.startswith("import "): imported = line[7:] pyi_deps.add(imported) elif line.startswith("from "): parts = line.split(None, 3) assert parts[0] == "from" assert parts[2] == "import" origin_name = parts[1] if origin_name == "typing": continue if origin_name == ".": origin_name = self.getFullName() else: dot_count = 0 while origin_name.startswith("."): origin_name = origin_name[1:] dot_count += 1 if dot_count > 0: if origin_name: origin_name = ( self.getFullName() .getRelativePackageName(level=dot_count + 1) .getChildNamed(origin_name) ) else: origin_name = ( self.getFullName().getRelativePackageName( level=dot_count + 1 ) ) if origin_name != self.getFullName(): pyi_deps.add(origin_name) imported = parts[3] if imported.startswith("("): # Handle multiline imports if not imported.endswith(")"): in_import = True imported = imported[1:] in_import_part = origin_name assert in_import_part, ( "Multiline part in file %s cannot be empty" % pyi_filename ) else: in_import = False imported = imported[1:-1] assert imported if imported == "*": continue for name in imported.split(","): if name: name = name.strip() pyi_deps.add(origin_name + "." + name) else: # In import imported = line if imported.endswith(")"): imported = imported[0:-1] in_import = False for name in imported.split(","): name = name.strip() if name: pyi_deps.add(in_import_part + "." + name) if "typing" in pyi_deps: pyi_deps.discard("typing") if "__future__" in pyi_deps: pyi_deps.discard("__future__") if self.getFullName() in pyi_deps: pyi_deps.discard(self.getFullName()) if self.getFullName().getPackageName() in pyi_deps: pyi_deps.discard(self.getFullName().getPackageName()) self.used_modules = tuple((pyi_dep, None) for pyi_dep in pyi_deps) else: self.used_modules = ()
12,774
61,951
378
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py
108
19
def topological_sort(self): result = [] # Make a shallow copy of the adjacency list alist = {} for k, v in self.adjacency_list.items(): alist[k] = v[:] while True: # See what we can remove in this run to_remove = [] for k, v in list(alist.items())[:]: if not v: to_remove.append(k) del alist[k] if not to_rem
upd; format
topological_sort
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
database.py
13
19
https://github.com/jindongwang/transferlearning.git
10
155
0
71
252
Python
{ "docstring": "\n Perform a topological sort of the graph.\n :return: A tuple, the first element of which is a topologically sorted\n list of distributions, and the second element of which is a\n list of distributions that cannot be sorted because they have\n circular dependencies and so form a cycle.\n ", "language": "en", "n_whitespaces": 117, "n_words": 47, "vocab_size": 32 }
def topological_sort(self): result = [] # Make a shallow copy of the adjacency list alist = {} for k, v in self.adjacency_list.items(): alist[k] = v[:] while True: # See what we can remove in this run to_remove = [] for k, v in list(alist.items())[:]: if not v: to_remove.append(k) del alist[k] if not to_remove: # What's left in alist (if anything) is a cycle. break # Remove from the adjacency list of others for k, v in alist.items(): alist[k] = [(d, r) for d, r in v if d not in to_remove] logger.debug('Moving to result: %s', ['%s (%s)' % (d.name, d.version) for d in to_remove]) result.extend(to_remove) return result, list(alist.keys())
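A standalone sketch of the same peel-off-the-leaves algorithm on a plain `{node: dependencies}` mapping; the package names are made up and the cycle handling mirrors the method above (unsortable nodes are reported separately).

def toposort(graph):
    deps = {k: set(v) for k, v in graph.items()}
    ordered, cyclic = [], []
    while deps:
        leaves = [k for k, v in deps.items() if not v]
        if not leaves:              # whatever remains participates in a cycle
            cyclic = list(deps)
            break
        for leaf in leaves:
            del deps[leaf]
        for remaining in deps.values():
            remaining.difference_update(leaves)
        ordered.extend(leaves)
    return ordered, cyclic

print(toposort({"app": ["lib"], "lib": ["base"], "base": []}))
# (['base', 'lib', 'app'], [])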
28,732
128,528
61
rllib/evaluation/episode.py
19
13
def soft_reset(self) -> None: self.length = 0 self.episode_id = random.randrange(
Convert floats to integers before using randrange (#28962) Signed-off-by: Ram Rachum <ram@rachum.com>
soft_reset
f448e33473c19854f47a93d7d55ccf72ad1b7fbf
ray
episode.py
10
11
https://github.com/ray-project/ray.git
1
49
0
15
79
Python
{ "docstring": "Clears rewards and metrics, but retains RNN and other state.\n\n This is used to carry state across multiple logical episodes in the\n same env (i.e., if `soft_horizon` is set).\n ", "language": "en", "n_whitespaces": 50, "n_words": 29, "vocab_size": 27 }
def soft_reset(self) -> None: self.length = 0 self.episode_id = random.randrange(int(2e9)) self.total_reward = 0.0 self.agent_rewards = defaultdict(float) self._agent_reward_history = defaultdict(list)
16,005
73,290
200
wagtail/contrib/modeladmin/views.py
46
14
def get_ordering_field(self, field_name): try: field = self.opts.get_field(field_name) return field.name
Reformat with black
get_ordering_field
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
views.py
15
12
https://github.com/wagtail/wagtail.git
4
77
0
35
126
Python
{ "docstring": "\n Returns the proper model field name corresponding to the given\n field_name to use for ordering. field_name may either be the name of a\n proper model field or the name of a method (on the admin or model) or a\n callable with the 'admin_order_field' attribute. Returns None if no\n proper model field name can be matched.\n ", "language": "en", "n_whitespaces": 98, "n_words": 55, "vocab_size": 32 }
def get_ordering_field(self, field_name): try: field = self.opts.get_field(field_name) return field.name except FieldDoesNotExist: # See whether field_name is a name of a non-field # that allows sorting. if callable(field_name): attr = field_name elif hasattr(self.model_admin, field_name): attr = getattr(self.model_admin, field_name) else: attr = getattr(self.model, field_name) return getattr(attr, "admin_order_field", None)
50,321
203,347
224
django/contrib/admin/checks.py
50
16
def _check_ordering(self, obj): # ordering = None if obj.ordering i
Refs #33476 -- Reformatted code with Black.
_check_ordering
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
checks.py
16
14
https://github.com/django/django.git
4
84
0
43
137
Python
{ "docstring": "Check that ordering refers to existing fields or is random.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def _check_ordering(self, obj): # ordering = None if obj.ordering is None: # The default value is None return [] elif not isinstance(obj.ordering, (list, tuple)): return must_be( "a list or tuple", option="ordering", obj=obj, id="admin.E031" ) else: return list( chain.from_iterable( self._check_ordering_item(obj, field_name, "ordering[%d]" % index) for index, field_name in enumerate(obj.ordering) ) )
45,524
186,612
24
certbot-nginx/certbot_nginx/_internal/parser_obj.py
10
7
def parsing_hooks(cls) -> Tuple[Type["Block"], Type["Sentence"], Type["Statements"]]: return Block, Sentence, Statements
Fully type certbot-nginx module (#9124) * Work in progress * Fix type * Work in progress * Work in progress * Work in progress * Work in progress * Work in progress * Oups. * Fix typing in UnspacedList * Fix logic * Finish typing * List certbot-nginx as fully typed in tox * Fix lint * Fix checks * Organize imports * Fix typing for Python 3.6 * Fix checks * Fix lint * Update certbot-nginx/certbot_nginx/_internal/configurator.py Co-authored-by: alexzorin <alex@zor.io> * Update certbot-nginx/certbot_nginx/_internal/configurator.py Co-authored-by: alexzorin <alex@zor.io> * Fix signature of deploy_cert regarding the installer interface * Update certbot-nginx/certbot_nginx/_internal/obj.py Co-authored-by: alexzorin <alex@zor.io> * Fix types * Update certbot-nginx/certbot_nginx/_internal/parser.py Co-authored-by: alexzorin <alex@zor.io> * Precise type * Precise _coerce possible inputs/outputs * Fix type * Update certbot-nginx/certbot_nginx/_internal/http_01.py Co-authored-by: ohemorange <ebportnoy@gmail.com> * Fix type * Remove an undesirable implementation. * Fix type Co-authored-by: alexzorin <alex@zor.io> Co-authored-by: ohemorange <ebportnoy@gmail.com>
parsing_hooks
16aad35d31a887dab157f9d4f5e0fe9218d06064
certbot
parser_obj.py
7
8
https://github.com/certbot/certbot.git
1
30
0
10
50
Python
{ "docstring": "Returns object types that this class should be able to `parse` recusrively.\n The order of the objects indicates the order in which the parser should\n try to parse each subitem.\n :returns: A list of Parsable classes.\n :rtype list:\n ", "language": "en", "n_whitespaces": 73, "n_words": 38, "vocab_size": 32 }
def parsing_hooks(cls) -> Tuple[Type["Block"], Type["Sentence"], Type["Statements"]]: return Block, Sentence, Statements
40,371
169,009
98
pandas/core/computation/ops.py
28
13
def _cast_inplace(terms, acceptable_dtypes, dtype) -> None: dt = np.dtype(dtype
TYP: Autotyping (#48191) * annotate-magics * annotate-imprecise-magics * none-return * scalar-return * pyi files * ignore vendored file * manual changes * ignore pyright in pickle_compat (these errors would be legit if the current __new__ methods were called but I think these pickle tests call older __new__ methods which allowed providing multiple positional arguments) * run autotyping in pre-commit * remove final and expand safe (and add annotate-imprecise-magics)
_cast_inplace
54347fe684e0f7844bf407b1fb958a5269646825
pandas
ops.py
14
22
https://github.com/pandas-dev/pandas.git
4
64
0
24
104
Python
{ "docstring": "\n Cast an expression inplace.\n\n Parameters\n ----------\n terms : Op\n The expression that should cast.\n acceptable_dtypes : list of acceptable numpy.dtype\n Will not cast if term's dtype in this list.\n dtype : str or numpy.dtype\n The dtype to cast to.\n ", "language": "en", "n_whitespaces": 82, "n_words": 39, "vocab_size": 31 }
def _cast_inplace(terms, acceptable_dtypes, dtype) -> None: dt = np.dtype(dtype) for term in terms: if term.type in acceptable_dtypes: continue try: new_value = term.value.astype(dt) except AttributeError: new_value = dt.type(term.value) term.update(new_value)
18,567
89,746
1,928
src/sentry/integrations/vercel/webhook.py
360
60
def _deployment_created(self, external_id, request): payload = request.data["payload"] vercel_project_id = ( payload["projectId"] if payload.get("projectId") else payload["project"]["id"] ) # Only create releases for production deploys for now if payload["target"] != "production": logger.info( f"Ignoring deployment for environment: {payload['target']}", extra={"external_id": external_id, "vercel_project_id": vercel_project_id}, ) return self.respond(status=204) logging_params = {"external_id": external_id, "vercel_project_id": vercel_project_id} org_integrations = OrganizationIntegration.objects.select_related("organization").filter( integration__external_id=external_id, integration__provider=self.provider ) if not org_integrations: logger.info("Integration not found", extra=logging_params) return self.respond({"detail": "Integration not found"}, status=404) # for each org integration, search the configs to find one that matches the vercel project of the webhook for org_integration in org_integrations: project_mappings = org_integration.config.get("project_mappings") or [] matched_mappings = list(filter(lambda x: x[1] == vercel_project_id, project_mappings)) if matched_mappings: organization = org_integration.organization sentry_project_id = matched_mappings[0][0] logging_params["organization_id"] = organization.id logging_params["project_id"] = sentry_project_id try: release_payload, token = get_payload_and_token( payload, organization.id, sentry_project_id ) except Project.DoesNotExist: logger.info("Project not found", extra=logging_params) return self.respond({"detail": "Project not found"}, status=404) except SentryAppInstallationForProvider.DoesNotExist: logger.info("Installation not found", extra=logging_params) return self.respond({"detail": "Installation not found"}, status=404) except SentryAppInstallationToken.DoesNotExist: logger.info("Token not found", extra=logging_params) return self.respond({"detail": "Token not found"}, status=404) except NoCommitFoundError: logger.info("No commit found", extra=logging_params) return self.respond({"detail": "No commit found"}, status=404) except MissingRepositoryError: logger.info("Could not determine repository", extra=logging_params) return self.respond({"detail": "Could not determine repository"}, status=400) url = absolute_uri(f"/api/0/organizations/{organization.slug}/releases/") headers = { "Accept": "application/json", "Authorization": f"Bearer {token}", "User-Agent": f"sentry_vercel/{VERSION}", } json_error = None # create the basic release payload without refs no_ref_payload = release_payload.copy() del no_ref_payload["refs"] with http.build_session() as session: try:
fix: Add functionality for new Vercel webhook payloads (#42340) Fixes WOR-2493
_deployment_created
199dee4680dcecac0c485576f3933d9de49d6e44
sentry
webhook.py
20
94
https://github.com/getsentry/sentry.git
14
571
0
200
1,010
Python
{ "docstring": "\n Steps:\n 1. Find all org integrations that match the external id\n 2. Search the configs to find one that matches the vercel project of the webhook\n 3. Look up the Sentry project that matches\n 4. Look up the connected internal integration\n 5. Find the token associated with that installation\n 6. Determine the commit sha and repo based on what provider is used\n 7. Create the release using the token WITHOUT refs\n 8. Update the release with refs\n ", "language": "en", "n_whitespaces": 180, "n_words": 77, "vocab_size": 55 }
def _deployment_created(self, external_id, request): payload = request.data["payload"] vercel_project_id = ( payload["projectId"] if payload.get("projectId") else payload["project"]["id"] ) # Only create releases for production deploys for now if payload["target"] != "production": logger.info( f"Ignoring deployment for environment: {payload['target']}", extra={"external_id": external_id, "vercel_project_id": vercel_project_id}, ) return self.respond(status=204) logging_params = {"external_id": external_id, "vercel_project_id": vercel_project_id} org_integrations = OrganizationIntegration.objects.select_related("organization").filter( integration__external_id=external_id, integration__provider=self.provider ) if not org_integrations: logger.info("Integration not found", extra=logging_params) return self.respond({"detail": "Integration not found"}, status=404) # for each org integration, search the configs to find one that matches the vercel project of the webhook for org_integration in org_integrations: project_mappings = org_integration.config.get("project_mappings") or [] matched_mappings = list(filter(lambda x: x[1] == vercel_project_id, project_mappings)) if matched_mappings: organization = org_integration.organization sentry_project_id = matched_mappings[0][0] logging_params["organization_id"] = organization.id logging_params["project_id"] = sentry_project_id try: release_payload, token = get_payload_and_token( payload, organization.id, sentry_project_id ) except Project.DoesNotExist: logger.info("Project not found", extra=logging_params) return self.respond({"detail": "Project not found"}, status=404) except SentryAppInstallationForProvider.DoesNotExist: logger.info("Installation not found", extra=logging_params) return self.respond({"detail": "Installation not found"}, status=404) except SentryAppInstallationToken.DoesNotExist: logger.info("Token not found", extra=logging_params) return self.respond({"detail": "Token not found"}, status=404) except NoCommitFoundError: logger.info("No commit found", extra=logging_params) return self.respond({"detail": "No commit found"}, status=404) except MissingRepositoryError: logger.info("Could not determine repository", extra=logging_params) return self.respond({"detail": "Could not determine repository"}, status=400) url = absolute_uri(f"/api/0/organizations/{organization.slug}/releases/") headers = { "Accept": "application/json", "Authorization": f"Bearer {token}", "User-Agent": f"sentry_vercel/{VERSION}", } json_error = None # create the basic release payload without refs no_ref_payload = release_payload.copy() del no_ref_payload["refs"] with http.build_session() as session: try: resp = session.post(url, json=no_ref_payload, headers=headers) json_error = safe_json_parse(resp) resp.raise_for_status() except RequestException as e: # errors here should be uncommon but we should be aware of them logger.error( f"Error creating release: {e} - {json_error}", extra=logging_params, exc_info=True, ) # 400 probably isn't the right status code but oh well return self.respond({"detail": f"Error creating release: {e}"}, status=400) # set the refs try: resp = session.post( url, json=release_payload, headers=headers, ) json_error = safe_json_parse(resp) resp.raise_for_status() except RequestException as e: # errors will probably be common if the user doesn't have repos set up logger.info( f"Error setting refs: {e} - {json_error}", extra=logging_params, exc_info=True, ) # 400 probably isn't the right status code but oh well return self.respond({"detail": f"Error setting refs: {e}"}, status=400) # we are going to quit after the first project match as there shouldn't be multiple matches return self.respond(status=201) return self.respond(status=204)
18,378
88,354
27
src/sentry/auth/helper.py
13
5
def _app_user(self) -> User | None: return self.user if isinstance(self.user, Us
ref(auth): Type hints on auth/helper.py and related modules (#41158)
_app_user
e451a0a5b06d082b9515406d933c78e5a3f6253a
sentry
helper.py
9
3
https://github.com/getsentry/sentry.git
2
25
0
13
40
Python
{ "docstring": "The user, if they are represented persistently in our app.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def _app_user(self) -> User | None: return self.user if isinstance(self.user, User) else None
23,790
109,881
33
lib/mpl_toolkits/axes_grid1/axes_divider.py
19
5
def new_locator(self, nx, nx1=None): return AxesL
Improve mpl_toolkit documentation
new_locator
df6f95703b60348e01603f98a439b133da2938a0
matplotlib
axes_divider.py
9
2
https://github.com/matplotlib/matplotlib.git
2
34
0
17
48
Python
{ "docstring": "\n Create a new `.AxesLocator` for the specified cell.\n\n Parameters\n ----------\n nx, nx1 : int\n Integers specifying the column-position of the\n cell. When *nx1* is None, a single *nx*-th column is\n specified. Otherwise, location of columns spanning between *nx*\n to *nx1* (but excluding *nx1*-th column) is specified.\n ", "language": "en", "n_whitespaces": 126, "n_words": 46, "vocab_size": 37 }
def new_locator(self, nx, nx1=None): return AxesLocator(self, nx, 0, nx1 if nx1 is not None else nx + 1, 1)
17,602
83,152
1,529
zerver/tests/test_message_edit.py
310
35
def test_edit_cases(self) -> None: self.login("hamlet") hamlet = self.example_user("hamlet") msg_id = self.send_stream_message( self.example_user("hamlet"), "Denmark", topic_name="topic 1", content="content 1" ) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "content": "content 2", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0]["prev_content"], "content 1") self.assertEqual(history[0]["user_id"], hamlet.id) self.assertEqual( set(history[0].keys()), { "timestamp", "prev_content", "user_id", "prev_rendered_content", "prev_rendered_content_version", }, ) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "topic": "topic 2", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 1") self.assertEqual(history[0]["user_id"], hamlet.id) self.assertEqual(set(history[0].keys()), {"timestamp", LEGACY_PREV_TOPIC, "user_id"}) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "content": "content 3", "topic": "topic 3", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0]["prev_content"], "content 2") self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 2") self.assertEqual(history[0]["user_id"], hamlet.id) self.assertEqual( set(history[0].keys()), { "timestamp", LEGACY_PREV_TOPIC, "prev_content",
python: Replace string concatenations with f-strings.
test_edit_cases
d560d124a304a2f6dd467200aab7f070a78bf155
zulip
test_message_edit.py
13
128
https://github.com/zulip/zulip.git
4
1,019
0
136
1,737
Python
{ "docstring": "This test verifies the accuracy of construction of Zulip's edit\n history data structures.", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 12 }
def test_edit_cases(self) -> None: self.login("hamlet") hamlet = self.example_user("hamlet") msg_id = self.send_stream_message( self.example_user("hamlet"), "Denmark", topic_name="topic 1", content="content 1" ) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "content": "content 2", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0]["prev_content"], "content 1") self.assertEqual(history[0]["user_id"], hamlet.id) self.assertEqual( set(history[0].keys()), { "timestamp", "prev_content", "user_id", "prev_rendered_content", "prev_rendered_content_version", }, ) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "topic": "topic 2", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 1") self.assertEqual(history[0]["user_id"], hamlet.id) self.assertEqual(set(history[0].keys()), {"timestamp", LEGACY_PREV_TOPIC, "user_id"}) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "content": "content 3", "topic": "topic 3", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0]["prev_content"], "content 2") self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 2") self.assertEqual(history[0]["user_id"], hamlet.id) self.assertEqual( set(history[0].keys()), { "timestamp", LEGACY_PREV_TOPIC, "prev_content", "user_id", "prev_rendered_content", "prev_rendered_content_version", }, ) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "content": "content 4", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0]["prev_content"], "content 3") self.assertEqual(history[0]["user_id"], hamlet.id) self.login("iago") result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "topic": "topic 4", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 3") self.assertEqual(history[0]["user_id"], self.example_user("iago").id) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 3") self.assertEqual(history[2][LEGACY_PREV_TOPIC], "topic 2") self.assertEqual(history[3][LEGACY_PREV_TOPIC], "topic 1") self.assertEqual(history[1]["prev_content"], "content 3") self.assertEqual(history[2]["prev_content"], "content 2") self.assertEqual(history[4]["prev_content"], "content 1") # Now, we verify that the edit history data sent back has the # correct filled-out fields message_edit_history = self.client_get(f"/json/messages/{msg_id}/history") json_response = orjson.loads(message_edit_history.content) # We reverse the message history view output so that the IDs line up with the above. message_history = list(reversed(json_response["message_history"])) i = 0 for entry in message_history: expected_entries = {"content", "rendered_content", "topic", "timestamp", "user_id"} if i in {0, 2, 3}: expected_entries.add("prev_topic") if i in {1, 2, 4}: expected_entries.add("prev_content") expected_entries.add("prev_rendered_content") expected_entries.add("content_html_diff") i += 1 self.assertEqual(expected_entries, set(entry.keys())) self.assert_length(message_history, 6) self.assertEqual(message_history[0]["prev_topic"], "topic 3") self.assertEqual(message_history[0]["topic"], "topic 4") self.assertEqual(message_history[1]["topic"], "topic 3") self.assertEqual(message_history[2]["topic"], "topic 3") self.assertEqual(message_history[2]["prev_topic"], "topic 2") self.assertEqual(message_history[3]["topic"], "topic 2") self.assertEqual(message_history[3]["prev_topic"], "topic 1") self.assertEqual(message_history[4]["topic"], "topic 1") self.assertEqual(message_history[0]["content"], "content 4") self.assertEqual(message_history[1]["content"], "content 4") self.assertEqual(message_history[1]["prev_content"], "content 3") self.assertEqual(message_history[2]["content"], "content 3") self.assertEqual(message_history[2]["prev_content"], "content 2") self.assertEqual(message_history[3]["content"], "content 2") self.assertEqual(message_history[4]["content"], "content 2") self.assertEqual(message_history[4]["prev_content"], "content 1") self.assertEqual(message_history[5]["content"], "content 1") self.assertEqual(message_history[5]["topic"], "topic 1")
@frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
14,971
69,102
47
erpnext/controllers/queries.py
69
28
def get_project_name(doctype, txt, searchfield, start, page_len, filters): doctype = "Project" cond = "" if filters and filters.get("customer"): cond = % ( frappe.db.escape(filters.get("customer")) ) fields = get_fields(doctype, ["name", "project_name"]) searchfields = frappe.get_meta(doctype).get_search_fields() searchfields = " or ".join(["`tabProject`." + field + " li
fix: specify allowed doctype in queries (#31761)
get_project_name
9baa2229761c5415f29646a1a5bed4a3f4981e05
erpnext
queries.py
16
30
https://github.com/frappe/erpnext.git
5
171
1
56
304
Python
{ "docstring": "(`tabProject`.customer = %s or\n\t\t\tifnull(`tabProject`.customer,\"\")=\"\") andselect {fields} from `tabProject`\n\t\twhere\n\t\t\t`tabProject`.status not in ('Completed', 'Cancelled')\n\t\t\tand {cond} {scond} {match_cond}\n\t\torder by\n\t\t\t(case when locate(%(_txt)s, `tabProject`.name) > 0 then locate(%(_txt)s, `tabProject`.name) else 99999 end),\n\t\t\t`tabProject`.idx desc,\n\t\t\t`tabProject`.name asc\n\t\tlimit {page_len} offset {start}", "language": "en", "n_whitespaces": 31, "n_words": 41, "vocab_size": 39 }
def get_project_name(doctype, txt, searchfield, start, page_len, filters): doctype = "Project" cond = "" if filters and filters.get("customer"): cond = % ( frappe.db.escape(filters.get("customer")) ) fields = get_fields(doctype, ["name", "project_name"]) searchfields = frappe.get_meta(doctype).get_search_fields() searchfields = " or ".join(["`tabProject`." + field + " like %(txt)s" for field in searchfields]) return frappe.db.sql( .format( fields=", ".join(["`tabProject`.{0}".format(f) for f in fields]), cond=cond, scond=searchfields, match_cond=get_match_cond(doctype), start=start, page_len=page_len, ), {"txt": "%{0}%".format(txt), "_txt": txt.replace("%", "")}, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
52,508
208,754
60
IPython/tests/test_shortcuts.py
21
13
def test_autosuggest_at_EOL(text, cursor, suggestion, called): event = make_event(text, cursor, suggestion) event.current_buffer.insert_text = Mock() _apply_autosuggest(event) if called: event.current_buffer.insert_t
Apply autosuggestion only at EOL. As they are displayed only at EOL. Fixes #13724
test_autosuggest_at_EOL
517a92f878588484116edd6b88dfc738dcfe3cfb
ipython
test_shortcuts.py
12
8
https://github.com/ipython/ipython.git
2
58
0
19
95
Python
{ "docstring": "\n test that autosuggest is only applied at end of line.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
def test_autosuggest_at_EOL(text, cursor, suggestion, called): event = make_event(text, cursor, suggestion) event.current_buffer.insert_text = Mock() _apply_autosuggest(event) if called: event.current_buffer.insert_text.assert_called() else: event.current_buffer.insert_text.assert_not_called() # event.current_buffer.document.get_end_of_line_position.assert_called()
@keras_export( "keras.applications.resnet50.ResNet50", "keras.applications.resnet.ResNet50", "keras.applications.ResNet50", )
80,067
269,419
131
keras/applications/resnet.py
43
14
def stack3(x, filters, blocks, stride1=2, groups=32, name=None): x = block3(x, filters, stride=stride1, groups=groups, name=name + "_block1") for i in range(2, blocks + 1): x = block3( x, filters, groups=groups, conv_shortcut=False, name=name + "_block" + s
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
stack3
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
resnet.py
14
11
https://github.com/keras-team/keras.git
2
86
1
32
145
Python
{ "docstring": "A set of stacked residual blocks.\n\n Args:\n x: input tensor.\n filters: integer, filters of the bottleneck layer in a block.\n blocks: integer, blocks in the stacked blocks.\n stride1: default 2, stride of the first layer in the first block.\n groups: default 32, group size for grouped convolution.\n name: string, stack label.\n\n Returns:\n Output tensor for the stacked blocks.\n ", "language": "en", "n_whitespaces": 102, "n_words": 58, "vocab_size": 40 }
def stack3(x, filters, blocks, stride1=2, groups=32, name=None): x = block3(x, filters, stride=stride1, groups=groups, name=name + "_block1") for i in range(2, blocks + 1): x = block3( x, filters, groups=groups, conv_shortcut=False, name=name + "_block" + str(i), ) return x @keras_export( "keras.applications.resnet50.ResNet50", "keras.applications.resnet.ResNet50", "keras.applications.ResNet50", )
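The stack helpers in this module build the exported ResNet-family application models; a typical, hedged instantiation of the `ResNet50` re-exported just above (weights and classes are arbitrary here):

from tensorflow.keras.applications import ResNet50

model = ResNet50(weights=None, input_shape=(224, 224, 3), classes=10)
model.summary()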
40,247
168,235
100
pandas/core/indexes/base.py
28
11
def is_mixed(self) -> bool: warnings.warn( "Index.is_m
PERF cache find_stack_level (#48023) cache stacklevel
is_mixed
2f8d0a36703e81e4dca52ca9fe4f58c910c1b304
pandas
base.py
12
36
https://github.com/pandas-dev/pandas.git
1
37
0
27
66
Python
{ "docstring": "\n Check if the Index holds data with mixed data types.\n\n Returns\n -------\n bool\n Whether or not the Index holds data with mixed data types.\n\n See Also\n --------\n is_boolean : Check if the Index only consists of booleans.\n is_integer : Check if the Index only consists of integers.\n is_floating : Check if the Index is a floating type.\n is_numeric : Check if the Index only consists of numeric data.\n is_object : Check if the Index is of the object dtype.\n is_categorical : Check if the Index holds categorical data.\n is_interval : Check if the Index holds Interval objects.\n\n Examples\n --------\n >>> idx = pd.Index(['a', np.nan, 'b'])\n >>> idx.is_mixed()\n True\n\n >>> idx = pd.Index([1.0, 2.0, 3.0, 5.0])\n >>> idx.is_mixed()\n False\n ", "language": "en", "n_whitespaces": 284, "n_words": 118, "vocab_size": 56 }
def is_mixed(self) -> bool: warnings.warn( "Index.is_mixed is deprecated and will be removed in a future version. " "Check index.inferred_type directly instead.", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) return self.inferred_type in ["mixed"]
@pytest.mark.parametrize( "responder, read_method, parquet_engine", [ (CSVUserAgentResponder, pd.read_csv, None), (JSONUserAgentResponder, pd.read_json, None), (ParquetPyArrowUserAgentResponder, pd.read_parquet, "pyarrow"), pytest.param( ParquetFastParquetUserAgentResponder, pd.read_parquet, "fastparquet", # TODO(ArrayManager) fastparquet marks=[ td.skip_array_manager_not_yet_implemented, pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10"), ], ), (PickleUserAgentResponder, pd.read_pickle, None), (StataUserAgentResponder, pd.read_stata, None), (GzippedCSVUserAgentResponder, pd.read_csv, None), (GzippedJSONUserAgentResponder, pd.read_json, None), ], indirect=["responder"], )
39,497
163,775
380
pandas/tests/io/test_user_agent.py
93
48
def responder(request): # Find an available port with socket.socket() as sock: sock.bind(("localhost", 0)) port = sock.getsockname()[1] server_process = multiprocessing.Process( target=process_server, args=(request.param, port) ) server_process.start() yield port server_process.join(10) server_process.terminate() kill_time = 5 wait_time = 0 while server_process.is_alive(): if wait_time > kill_time: server_process.kill() break
CI/TST: Call join on server process test (#45628)
responder
c5ff649b11bd625ca36ad218539badb1c2057668
pandas
test_user_agent.py
15
21
https://github.com/pandas-dev/pandas.git
3
117
1
75
366
Python
{ "docstring": "\n Fixture that starts a local http server in a separate process on localhost\n and returns the port.\n\n Running in a separate process instead of a thread to allow termination/killing\n of http server upon cleanup.\n ", "language": "en", "n_whitespaces": 50, "n_words": 34, "vocab_size": 25 }
def responder(request): # Find an available port with socket.socket() as sock: sock.bind(("localhost", 0)) port = sock.getsockname()[1] server_process = multiprocessing.Process( target=process_server, args=(request.param, port) ) server_process.start() yield port server_process.join(10) server_process.terminate() kill_time = 5 wait_time = 0 while server_process.is_alive(): if wait_time > kill_time: server_process.kill() break else: wait_time += 0.1 time.sleep(0.1) server_process.close() @pytest.mark.parametrize( "responder, read_method, parquet_engine", [ (CSVUserAgentResponder, pd.read_csv, None), (JSONUserAgentResponder, pd.read_json, None), (ParquetPyArrowUserAgentResponder, pd.read_parquet, "pyarrow"), pytest.param( ParquetFastParquetUserAgentResponder, pd.read_parquet, "fastparquet", # TODO(ArrayManager) fastparquet marks=[ td.skip_array_manager_not_yet_implemented, pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10"), ], ), (PickleUserAgentResponder, pd.read_pickle, None), (StataUserAgentResponder, pd.read_stata, None), (GzippedCSVUserAgentResponder, pd.read_csv, None), (GzippedJSONUserAgentResponder, pd.read_json, None), ], indirect=["responder"], )
51,525
206,452
419
django/test/testcases.py
72
29
def _pre_setup(self): super()._pre_setup() if self.available_apps is not None: apps.set_available_apps(self.available_apps) setting_changed.send( sender=settings._wrapped.__class__, setting="INSTALLED_APPS", value=self.available_apps, enter=True, ) for db_name in self._databases_names(include_mirrors=False):
Refs #33476 -- Reformatted code with Black.
_pre_setup
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
testcases.py
15
26
https://github.com/django/django.git
6
155
0
56
247
Python
{ "docstring": "\n Perform pre-test setup:\n * If the class has an 'available_apps' attribute, restrict the app\n registry to these applications, then fire the post_migrate signal --\n it must run with the correct set of applications for the test case.\n * If the class has a 'fixtures' attribute, install those fixtures.\n ", "language": "en", "n_whitespaces": 95, "n_words": 48, "vocab_size": 38 }
def _pre_setup(self): super()._pre_setup() if self.available_apps is not None: apps.set_available_apps(self.available_apps) setting_changed.send( sender=settings._wrapped.__class__, setting="INSTALLED_APPS", value=self.available_apps, enter=True, ) for db_name in self._databases_names(include_mirrors=False): emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name) try: self._fixture_setup() except Exception: if self.available_apps is not None: apps.unset_available_apps() setting_changed.send( sender=settings._wrapped.__class__, setting="INSTALLED_APPS", value=settings.INSTALLED_APPS, enter=False, ) raise # Clear the queries_log so that it's less likely to overflow (a single # test probably won't execute 9K queries). If queries_log overflows, # then assertNumQueries() doesn't work. for db_name in self._databases_names(include_mirrors=False): connections[db_name].queries_log.clear()
51,970
207,475
173
tests/admin_views/test_actions.py
51
14
def test_multiple_actions_form(self): action_data = { ACTION_CHECKBOX_NAME: [self.s1.pk], # Two differen
Refs #33476 -- Reformatted code with Black.
test_multiple_actions_form
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
test_actions.py
11
11
https://github.com/django/django.git
1
73
0
46
126
Python
{ "docstring": "\n Actions come from the form whose submit button was pressed (#10618).\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
def test_multiple_actions_form(self): action_data = { ACTION_CHECKBOX_NAME: [self.s1.pk], # Two different actions selected on the two forms... "action": ["external_mail", "delete_selected"], # ...but "go" was clicked on the top form. "index": 0, } self.client.post( reverse("admin:admin_views_externalsubscriber_changelist"), action_data ) # The action sends mail rather than deletes. self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].subject, "Greetings from a function action")
17,659
83,350
90
zerver/tests/test_message_send.py
26
9
def test_empty_message(self) -> None: self.login("hamlet") othello = self.example_user("othello") result = se
tests: Remove `client` parameter if test can use default `User-Agent`. Removes `client` parameter from backend tests using the `POST /messages` endpoint when the test can use the default `User-Agent` as the client, which is set to `ZulipMobile` for API requests and a browser user agent string for web app requests.
test_empty_message
47056ef06fff67388ebe1bd09846280fc84f9660
zulip
test_message_send.py
11
11
https://github.com/zulip/zulip.git
1
55
0
25
104
Python
{ "docstring": "\n Sending a message that is empty or only whitespace should fail\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
def test_empty_message(self) -> None: self.login("hamlet") othello = self.example_user("othello") result = self.client_post( "/json/messages", {"type": "private", "content": " ", "to": othello.email}, ) self.assert_json_error(result, "Message must not be empty")
35,160
151,914
50
freqtrade/templates/FreqaiExampleStrategy.py
15
5
def freqai_feature_engineering_generic(self, dataframe, **kwargs): dataframe["%-pct-change"] = dataframe["close"].pct_change() dataframe["%-raw_volume"] = dataframe["volume"] dataframe["%-raw_price"] = dataframe["close"] return dat
freqAI Strategy - improve user experience
freqai_feature_engineering_generic
8227b4aafe51b30e5942d293e8d0052c968442dd
freqtrade
FreqaiExampleStrategy.py
10
5
https://github.com/freqtrade/freqtrade.git
1
44
0
13
80
Python
{ "docstring": "\n This optional function will be called for all include_timeframes (including corr_pairs).\n After that, the features will be shifted by the number of candles in the\n include_shifted_candles.\n :param df: strategy dataframe which will receive the features\n dataframe[\"%-pct-change\"] = dataframe[\"close\"].pct_change()\n ", "language": "en", "n_whitespaces": 81, "n_words": 38, "vocab_size": 31 }
def freqai_feature_engineering_generic(self, dataframe, **kwargs): dataframe["%-pct-change"] = dataframe["close"].pct_change() dataframe["%-raw_volume"] = dataframe["volume"] dataframe["%-raw_price"] = dataframe["close"] return dataframe
@keras_export("keras.utils.GeneratorEnqueuer")
81,723
276,754
10
keras/utils/data_utils.py
5
5
def next_sample(uid): return next(_SHARED_SEQUENCES[uid]) @keras_export("keras.utils.G
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
next_sample
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
data_utils.py
8
2
https://github.com/keras-team/keras.git
1
14
1
5
36
Python
{ "docstring": "Gets the next value from the generator `uid`.\n\n To allow multiple generators to be used at the same time, we use `uid` to\n get a specific one. A single generator would cause the validation to\n overwrite the training generator.\n\n Args:\n uid: int, generator identifier\n\n Returns:\n The next value of generator `uid`.\n ", "language": "en", "n_whitespaces": 83, "n_words": 51, "vocab_size": 39 }
def next_sample(uid): return next(_SHARED_SEQUENCES[uid]) @keras_export("keras.utils.GeneratorEnqueuer")
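Illustrative aside (not part of the record above): the docstring describes a uid-keyed registry of generators so that training and validation generators do not overwrite each other. A minimal, self-contained sketch of that pattern, with a hypothetical _registry standing in for the module-level _SHARED_SEQUENCES dict:

_registry = {}  # hypothetical stand-in for _SHARED_SEQUENCES

def register_sequence(uid, generator):
    # each enqueuer registers its generator under its own integer uid
    _registry[uid] = generator

def next_sample(uid):
    # pull the next value from the generator registered under uid
    return next(_registry[uid])

register_sequence(0, iter(range(3)))   # e.g. the training generator
register_sequence(1, iter("abc"))     # e.g. the validation generator
assert next_sample(0) == 0
assert next_sample(1) == "a"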
50,250
203,215
234
django/contrib/staticfiles/utils.py
83
10
def check_settings(base_url=None): if base_url is None: base_url = settings.STATIC_URL if not base_url: raise ImproperlyConfigured( "You're using the staticfiles app " "without having set the required STATIC_URL setting.") if settings.MEDIA_URL == base_url: raise ImproperlyConfigured( "The MEDIA_URL and STATIC_URL
Refs #33476 -- Refactored problematic code before reformatting by Black. In these cases Black produces unexpected results, e.g. def make_random_password( self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789', ): or cursor.execute(""" SELECT ... """, [table name], )
check_settings
c5cd8783825b5f6384417dac5f3889b4210b7d08
django
utils.py
11
21
https://github.com/django/django.git
11
99
0
49
169
Python
{ "docstring": "\n Check if the staticfiles settings have sane values.\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
def check_settings(base_url=None): if base_url is None: base_url = settings.STATIC_URL if not base_url: raise ImproperlyConfigured( "You're using the staticfiles app " "without having set the required STATIC_URL setting.") if settings.MEDIA_URL == base_url: raise ImproperlyConfigured( "The MEDIA_URL and STATIC_URL settings must have different values" ) if (settings.DEBUG and settings.MEDIA_URL and settings.STATIC_URL and settings.MEDIA_URL.startswith(settings.STATIC_URL)): raise ImproperlyConfigured( "runserver can't serve media if MEDIA_URL is within STATIC_URL." ) if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and (settings.MEDIA_ROOT == settings.STATIC_ROOT)): raise ImproperlyConfigured( "The MEDIA_ROOT and STATIC_ROOT settings must have different values" )
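Hedged usage sketch (an illustration added here, not part of the record): calling check_settings() against a deliberately broken configuration in which MEDIA_URL equals STATIC_URL; the URL values are assumptions chosen only to trigger the error path shown above.

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured

# configure minimal settings before anything reads them
settings.configure(STATIC_URL="/assets/", MEDIA_URL="/assets/")  # intentionally identical

from django.contrib.staticfiles.utils import check_settings

try:
    check_settings()
except ImproperlyConfigured as exc:
    print(exc)  # "The MEDIA_URL and STATIC_URL settings must have different values"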
@PublicAPI(stability="beta")
28,782
128,704
471
python/ray/serve/schema.py
89
16
def kubernetes_dict(self, **kwargs) -> Dict: config = self.dict(**kwargs) for idx, deployment in enumerate(config["deployments"]): if isinstance(deployment.get("ray_actor_options"), dict): # JSON-serialize ray_actor_options' resources dictionary if isinstance(deployment["ray_actor_options"].get("resources"), dict): deployment["ray_actor_options"]["resources"] = json.dumps( deployment["ray_actor_options"]["resources"] ) # JSON-serialize ray_actor_options' runtime_env dictionary if isinstance(deployment["ray_actor_options"].get("runtime_env"), dict): deployment["ray_actor_options"]["runtime_env"] = json.dumps( deployment["ray_actor_options"]["runtime_env"] ) # Convert ray_actor_options' keys deployment["ray_actor_options"] = dict_keys_snake_to_camel_case( deployment["ray_actor_options"] ) # JSON-serialize user_config dictionary if isinstance(deployment.get("user_config"), dict): deployment["user_config"] = json.dumps(deployment["user_config"]) # Convert deployment's keys config["deployments"][idx] = dict_keys_snake_to_camel_case(deployment) # Convert top-level runtime_env if isinstance(config.get("runtime_env"), dict): config["runtime_env"] = json.dumps(config["runtime_env"]) # Convert top-level option's keys config = dict_keys_snake_to_camel_case(config) return config @PublicAPI(stability="b
[Serve] [KubeRay] Add flag that allows `serve build` to print Kubernetes-formatted output (#28918)
kubernetes_dict
05e623ecc22788cfe3b8ebe7933135350d3e0a2d
ray
schema.py
17
29
https://github.com/ray-project/ray.git
7
204
1
47
378
Python
{ "docstring": "Returns dictionary in Kubernetes format.\n\n Dictionary can be yaml-dumped to a Serve config file directly and then\n copy-pasted into a RayService Kubernetes config.\n\n Args: all kwargs are passed directly into schema's dict() function.\n ", "language": "en", "n_whitespaces": 61, "n_words": 33, "vocab_size": 29 }
def kubernetes_dict(self, **kwargs) -> Dict: config = self.dict(**kwargs) for idx, deployment in enumerate(config["deployments"]): if isinstance(deployment.get("ray_actor_options"), dict): # JSON-serialize ray_actor_options' resources dictionary if isinstance(deployment["ray_actor_options"].get("resources"), dict): deployment["ray_actor_options"]["resources"] = json.dumps( deployment["ray_actor_options"]["resources"] ) # JSON-serialize ray_actor_options' runtime_env dictionary if isinstance(deployment["ray_actor_options"].get("runtime_env"), dict): deployment["ray_actor_options"]["runtime_env"] = json.dumps( deployment["ray_actor_options"]["runtime_env"] ) # Convert ray_actor_options' keys deployment["ray_actor_options"] = dict_keys_snake_to_camel_case( deployment["ray_actor_options"] ) # JSON-serialize user_config dictionary if isinstance(deployment.get("user_config"), dict): deployment["user_config"] = json.dumps(deployment["user_config"]) # Convert deployment's keys config["deployments"][idx] = dict_keys_snake_to_camel_case(deployment) # Convert top-level runtime_env if isinstance(config.get("runtime_env"), dict): config["runtime_env"] = json.dumps(config["runtime_env"]) # Convert top-level option's keys config = dict_keys_snake_to_camel_case(config) return config @PublicAPI(stability="beta")
20,689
101,270
430
tools/sort/sort.py
135
34
def reload_images(self, group_method, img_list): logger.info("Preparing to group...") if group_method == 'group_blur': filename_list, image_list = self._get_images() blurs = [self.estimate_blur(img) for img in image_list] temp_list = list(zip(filename_list, blurs)) elif group_method == 'group_blur_fft': filename_list, image_list = self._get_images() fft_bl
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
reload_images
5e73437be47f2410439a3c6716de96354e6a0c94
faceswap
sort.py
19
29
https://github.com/deepfakes/faceswap.git
12
301
0
64
480
Python
{ "docstring": "\n Reloads the image list by replacing the comparative values with those\n that the chosen grouping method expects.\n :param group_method: str name of the grouping method that will be used.\n :param img_list: image list that has been sorted by one of the sort\n methods.\n :return: img_list but with the comparative values that the chosen\n grouping method expects.\n ", "language": "en", "n_whitespaces": 113, "n_words": 56, "vocab_size": 33 }
def reload_images(self, group_method, img_list): logger.info("Preparing to group...") if group_method == 'group_blur': filename_list, image_list = self._get_images() blurs = [self.estimate_blur(img) for img in image_list] temp_list = list(zip(filename_list, blurs)) elif group_method == 'group_blur_fft': filename_list, image_list = self._get_images() fft_blurs = [self.estimate_blur_fft(img) for img in image_list] temp_list = list(zip(filename_list, fft_blurs)) elif group_method == 'group_face_cnn': filename_list, image_list, landmarks = self._get_landmarks() temp_list = list(zip(filename_list, landmarks)) elif group_method == 'group_face_yaw': filename_list, image_list, landmarks = self._get_landmarks() yaws = [self.calc_landmarks_face_yaw(mark) for mark in landmarks] temp_list = list(zip(filename_list, yaws)) elif group_method == 'group_hist': filename_list, image_list = self._get_images() histograms = [cv2.calcHist([img], [0], None, [256], [0, 256]) for img in image_list] temp_list = list(zip(filename_list, histograms)) elif group_method == 'group_black_pixels': filename_list, image_list = self._get_images() black_pixels = [np.ndarray.all(img == [0, 0, 0], axis=2).sum()/img.size*100*3 for img in image_list] temp_list = list(zip(filename_list, black_pixels)) else: raise ValueError(f"{group_method} group_method not found.") return self.splice_lists(img_list, temp_list)
14,529
67,455
17
erpnext/selling/report/territory_wise_sales/territory_wise_sales.py
27
13
def get_sales_orders(quotations): if not quotations: return [] quotation_names = [q.name for q in quotations] return frappe.db.sql( .form
style: format code with black
get_sales_orders
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
territory_wise_sales.py
14
15
https://github.com/frappe/erpnext.git
3
59
0
26
98
Python
{ "docstring": "\n\tSELECT so.`name`, so.`base_grand_total`, soi.prevdoc_docname as quotation\n\tFROM `tabSales Order` so, `tabSales Order Item` soi\n\tWHERE so.docstatus=1 AND so.name = soi.parent AND soi.prevdoc_docname in ({0})\n\t", "language": "en", "n_whitespaces": 21, "n_words": 24, "vocab_size": 21 }
def get_sales_orders(quotations): if not quotations: return [] quotation_names = [q.name for q in quotations] return frappe.db.sql( .format( ", ".join(["%s"] * len(quotation_names)) ), tuple(quotation_names), as_dict=1, ) # nosec
74,310
253,927
95
d2l/mxnet.py
47
19
def download_extract(name, folder=None): fname = download(name) base_dir = os.path.dirname(fname) data_dir, ext = os.path.splitext(fname) if ext == '.zip': fp = zipfile.ZipFile(fname, 'r') elif ext in ('.t
JAX: Fix CI bug; enable build all sections
download_extract
2b1acfbfe84b6c9c4756a615620f9b376d48085a
d2l-en
mxnet.py
12
12
https://github.com/d2l-ai/d2l-en.git
4
99
0
38
166
Python
{ "docstring": "Download and extract a zip/tar file.\n\n Defined in :numref:`sec_utils`", "language": "en", "n_whitespaces": 11, "n_words": 9, "vocab_size": 9 }
def download_extract(name, folder=None): fname = download(name) base_dir = os.path.dirname(fname) data_dir, ext = os.path.splitext(fname) if ext == '.zip': fp = zipfile.ZipFile(fname, 'r') elif ext in ('.tar', '.gz'): fp = tarfile.open(fname, 'r') else: assert False, 'Only zip/tar files can be extracted.' fp.extractall(base_dir) return os.path.join(base_dir, folder) if folder else data_dir
80,913
271,957
112
keras/engine/training_v1.py
48
4
def sample_weights_mismatch(self): # If there is a mismatch between sample weight mode and the placeholders # created, then recompile the sub-graphs that depend on sample weights. return ( self.sample_weight_mode is not None and self.sample_weight is N
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
sample_weights_mismatch
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
training_v1.py
10
6
https://github.com/keras-team/keras.git
4
36
0
31
59
Python
{ "docstring": "Check if the sample weight and the mode match or not.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
def sample_weights_mismatch(self): # If there is a mismatch between sample weight mode and the placeholders # created, then recompile the sub-graphs that depend on sample weights. return ( self.sample_weight_mode is not None and self.sample_weight is None ) or ( self.sample_weight_mode is None and self.sample_weight is not None )
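Illustrative aside (not part of the record): the return expression is an exclusive-or on "is None": recompilation is needed exactly when one of sample_weight_mode and sample_weight is set and the other is not. A standalone sketch of the same predicate:

def needs_recompile(sample_weight_mode, sample_weight):
    # True when exactly one of the two is None
    return (sample_weight_mode is not None and sample_weight is None) or (
        sample_weight_mode is None and sample_weight is not None
    )

assert needs_recompile("temporal", None) is True
assert needs_recompile(None, [0.3, 0.7]) is True
assert needs_recompile(None, None) is False
assert needs_recompile("temporal", [0.3, 0.7]) is False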
52,324
208,426
241
IPython/core/magics/script.py
69
16
def shebang(self, line, cell): # Create the event loop in which to run script magics # this operates on a background thread if self.event_loop is None: if sys.platform == "win32": # don't override the current policy, # just create an event loop event_loop = asyncio.WindowsProactorEventLoopPolicy().new_event_loop() else: event_loop = asyncio.new_event_loop() self.event_loop =
avoid deprecated get_event_loop use our own `async_helpers.get_asyncio_loop` to track the global event loop script magics use dedicated background asyncio loop instead of trying to work on the main loop, which may or may not exist _AsyncIOProxy wraps background script objects to transfer awaitables across loops only works for coroutine methods, which might be good enough? Works for read, etc.
shebang
ce62a7a4b2c97bf8a30e8074e8fc18103a0718a0
ipython
script.py
14
79
https://github.com/ipython/ipython.git
18
515
0
45
131
Python
{ "docstring": "Run a cell via a shell command\n\n The `%%script` line is like the #! line of script,\n specifying a program (bash, perl, ruby, etc.) with which to run.\n\n The rest of the cell is run by that program.\n\n Examples\n --------\n ::\n\n In [1]: %%script bash\n ...: for i in 1 2 3; do\n ...: echo $i\n ...: done\n 1\n 2\n 3\n ", "language": "en", "n_whitespaces": 198, "n_words": 61, "vocab_size": 49 }
def shebang(self, line, cell): # Create the event loop in which to run script magics # this operates on a background thread if self.event_loop is None: if sys.platform == "win32": # don't override the current policy, # just create an event loop event_loop = asyncio.WindowsProactorEventLoopPolicy().new_event_loop() else: event_loop = asyncio.new_event_loop() self.event_loop = event_loop # start the loop in a background thread asyncio_thread = Thread(target=event_loop.run_forever, daemon=True) asyncio_thread.start() else: event_loop = self.event_loop
13,503
63,781
66
.venv/lib/python3.8/site-packages/pip/_vendor/tenacity/__init__.py
12
4
def statistics(self): try: return self._local.statistics
upd; format
statistics
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
__init__.py
11
6
https://github.com/jindongwang/transferlearning.git
2
31
0
9
53
Python
{ "docstring": "Return a dictionary of runtime statistics.\n\n This dictionary will be empty when the controller has never been\n ran. When it is running or has ran previously it should have (but\n may not) have useful and/or informational keys and values when\n running is underway and/or completed.\n\n .. warning:: The keys in this dictionary **should** be some what\n stable (not changing), but there existence **may**\n change between major releases as new statistics are\n gathered or removed so before accessing keys ensure that\n they actually exist and handle when they do not.\n\n .. note:: The values in this dictionary are local to the thread\n running call (so if multiple threads share the same retrying\n object - either directly or indirectly) they will each have\n there own view of statistics they have collected (in the\n future we may provide a way to aggregate the various\n statistics from each thread).\n ", "language": "en", "n_whitespaces": 359, "n_words": 145, "vocab_size": 103 }
def statistics(self): try: return self._local.statistics except AttributeError: self._local.statistics = {} return self._local.statistics
51,281
205,919
116
django/dispatch/dispatcher.py
26
11
def send(self, sender, **named): if ( not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS ): return [] return [ (receiver, receiver(signal=self, sender=sender, **named)) for receiver in self._live_receivers(sender) ]
Refs #33476 -- Reformatted code with Black.
send
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
dispatcher.py
11
10
https://github.com/django/django.git
4
62
0
25
95
Python
{ "docstring": "\n Send signal from sender to all connected receivers.\n\n If any receiver raises an error, the error propagates back through send,\n terminating the dispatch loop. So it's possible that all receivers\n won't be called if an error is raised.\n\n Arguments:\n\n sender\n The sender of the signal. Either a specific object or None.\n\n named\n Named arguments which will be passed to receivers.\n\n Return a list of tuple pairs [(receiver, response), ... ].\n ", "language": "en", "n_whitespaces": 172, "n_words": 70, "vocab_size": 58 }
def send(self, sender, **named): if ( not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS ): return [] return [ (receiver, receiver(signal=self, sender=sender, **named)) for receiver in self._live_receivers(sender) ]
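Usage sketch (illustrative, not part of the record): connecting a receiver to a custom Signal and collecting the (receiver, response) pairs that send() returns. The names order_placed and notify are invented for the example.

from django.dispatch import Signal, receiver

order_placed = Signal()

@receiver(order_placed)
def notify(sender, **kwargs):
    return f"notified about order {kwargs.get('order_id')}"

responses = order_placed.send(sender=object, order_id=42)
# send() returns a list of (receiver, return value) pairs
assert responses[0][1] == "notified about order 42"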
35,293
153,192
95
modin/core/execution/ray/implementations/pandas_on_ray/partitioning/virtual_partition.py
55
14
def deploy_ray_func(func, *args): # pragma: no cover result = func(*args) ip = get_node_ip_address() if isinstance(result, pandas.DataFrame): return result, len(result), len(result.columns), ip elif all(isinstance(r, pandas.DataFrame) for r in result): return [i for r in result for i in [r, len(r), l
FIX-#3675: Expand virtual partitioning utility (#3886) Co-authored-by: mvashishtha <mahesh@ponder.io> Co-authored-by: jeffreykennethli <jkli@ponder.io> Co-authored-by: Anatoly Myachev <anatoly.myachev@intel.com> Co-authored-by: Vasily Litvinov <vasilij.n.litvinov@intel.com> Co-authored-by: Alexey Prutskov <alexey.prutskov@intel.com> Co-authored-by: Mahesh Vashishtha <mvashishtha@users.noreply.github.com> Co-authored-by: Naren Krishna <92325366+naren-ponder@users.noreply.github.com> Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru> Co-authored-by: Dmitry Chigarev <62142979+dchigarev@users.noreply.github.com> Co-authored-by: Yaroslav Igoshev <yaroslav.igoshev@intel.com> Co-authored-by: Doris Lee <dorisjunglinlee@gmail.com> Co-authored-by: Aditya Parameswaran <adityagp@gmail.com> Co-authored-by: Rehan Sohail Durrani <rehan@ponder.io> Co-authored-by: Susmit Vengurlekar <susmit.py@gmail.com> Signed-off-by: Devin Petersohn <devin.petersohn@gmail.com>
deploy_ray_func
8d1004fdbdaa05700613c8e6287641a732acf606
modin
virtual_partition.py
14
9
https://github.com/modin-project/modin.git
8
114
0
34
169
Python
{ "docstring": "\n Execute a function on an axis partition in a worker process.\n\n Parameters\n ----------\n func : callable\n Function to be executed on an axis partition.\n *args : iterable\n Additional arguments that need to passed in ``func``.\n\n Returns\n -------\n list\n The result of the function ``func`` and metadata for it.\n\n Notes\n -----\n Ray functions are not detected by codecov (thus pragma: no cover).\n ", "language": "en", "n_whitespaces": 119, "n_words": 61, "vocab_size": 53 }
def deploy_ray_func(func, *args): # pragma: no cover result = func(*args) ip = get_node_ip_address() if isinstance(result, pandas.DataFrame): return result, len(result), len(result.columns), ip elif all(isinstance(r, pandas.DataFrame) for r in result): return [i for r in result for i in [r, len(r), len(r.columns), ip]] else: return [i for r in result for i in [r, None, None, ip]]
@add_start_docstrings( "The bare ViLT Model transformer outputting raw hidden-states without any specific head on top.", VILT_START_DOCSTRING, )
6,254
34,307
54
src/transformers/models/vilt/modeling_vilt.py
36
11
def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, ViltEncoder): module.gradient_checkpointing = value VILT_START_DOCSTRING = r VILT_INPUTS_DOCST
Add ViLT (#14895) * First commit * Add conversion script * Make conversion script work for base model * More improvements * Update conversion script, works for vqa * Add indexing argument to meshgrid * Make conversion script work for ViltForPreTraining * Add ViltForPreTraining to docs * Fix device issue * Add processor * Add MinMaxResize to feature extractor * Implement call method of ViltProcessor * Fix tests * Add integration test * Add loss calculation for VQA * Improve tests * Improve some more tests * Debug tests * Small improvements * Add support for attention_mask * Remove mask_it * Add pixel_mask * Add tests for ViltFeatureExtractor * Improve tests * Add ViltForNaturalLanguageVisualReasoning * Add ViltForNaturalLanguageVisualReasoning to conversion script * Minor fixes * Add support for image_embeds, update docstrings to markdown * Update docs to markdown * Improve conversion script * Rename ViltForPreTraining to ViltForMaskedLM * Improve conversion script * Convert docstrings to markdown * Fix code example of retrieval model * Properly convert masked language model * Add integration test for nlvr * Fix code quality * Apply suggestions from code review * Add copied from statements * Fix pretrained_config_archive_map * Fix docs * Add model to README * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Apply more suggestions from code review * Make code more readable * Add ViltForNaturalLanguageVisualReasoning to the tests * Rename ViltForVisualQuestionAnswering to ViltForQuestionAnswering * Replace pixel_values_2 by single tensor * Add hidden_states and attentions * Fix one more test * Fix all tests * Update year * Fix rebase issues * Fix another rebase issue * Remove ViltForPreTraining from auto mapping * Rename ViltForImageRetrievalTextRetrieval to ViltForImageAndTextRetrieval * Make it possible to use BertTokenizerFast in the processor * Use BertTokenizerFast by default * Rename ViltForNaturalLanguageVisualReasoning, define custom model output Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
_set_gradient_checkpointing
ac227093e41cecb07c7e0f2fc9a504850907bd06
transformers
modeling_vilt.py
9
3
https://github.com/huggingface/transformers.git
2
24
1
31
71
Python
{ "docstring": "\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ subclass. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ViltConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See\n [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input\n IDs?](../glossary#input-ids)\n\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n [What are attention masks?](../glossary#attention-mask)\n\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n [What are token type IDs?](../glossary#token-type-ids)\n\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`ViltFeatureExtractor`]. See\n [`ViltFeatureExtractor.__call__`] for details.\n\n pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):\n Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:\n\n - 1 for pixels that are real (i.e. **not masked**),\n - 0 for pixels that are padding (i.e. **masked**).\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):\n Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `pixel_values` into patch embeddings.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See\n [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input\n IDs?](../glossary#input-ids)\n\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n [What are attention masks?](../glossary#attention-mask)\n\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n [What are token type IDs?](../glossary#token-type-ids)\n\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_images, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`ViltFeatureExtractor`]. See\n [`ViltFeatureExtractor.__call__`] for details.\n\n pixel_mask (`torch.LongTensor` of shape `(batch_size, num_images, height, width)`, *optional*):\n Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:\n\n - 1 for pixels that are real (i.e. **not masked**),\n - 0 for pixels that are padding (i.e. **masked**).\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):\n Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `pixel_values` into patch embeddings.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n", "language": "en", "n_whitespaces": 1685, "n_words": 802, "vocab_size": 200 }
def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, ViltEncoder): module.gradient_checkpointing = value VILT_START_DOCSTRING = r VILT_INPUTS_DOCSTRING = r VILT_IMAGES_AND_TEXT_CLASSIFICATION_INPUTS_DOCSTRING = r @add_start_docstrings( "The bare ViLT Model transformer outputting raw hidden-states without any specific head on top.", VILT_START_DOCSTRING, )
117,818
321,601
122
qutebrowser/browser/greasemonkey.py
29
13
def needs_document_end_workaround(self): if objects.backend == usertypes.Backend.QtWebKit: return False assert
Drop Qt < 5.15 Fixes #7091 TODO: Add changelog
needs_document_end_workaround
c5a51eb0bcbab0b68cdfbf3eba2e681cff2adf7a
qutebrowser
greasemonkey.py
10
10
https://github.com/qutebrowser/qutebrowser.git
3
71
0
25
112
Python
{ "docstring": "Check whether to force @run-at document-end.\n\n This needs to be done on QtWebEngine for known-broken scripts.\n\n On Qt 5.12, accessing the DOM isn't possible with \"@run-at\n document-start\". It was documented to be impossible before, but seems\n to work fine.\n\n However, some scripts do DOM access with \"@run-at document-start\". Fix\n those by forcing them to use document-end instead.\n ", "language": "en", "n_whitespaces": 106, "n_words": 57, "vocab_size": 48 }
def needs_document_end_workaround(self): if objects.backend == usertypes.Backend.QtWebKit: return False assert objects.backend == usertypes.Backend.QtWebEngine, objects.backend broken_scripts = [ ('http://userstyles.org', None), ('https://github.com/ParticleCore', 'Iridium'), ] return any(self._matches_id(namespace=namespace, name=name) for namespace, name in broken_scripts)
112,812
314,204
67
homeassistant/components/weather/__init__.py
17
6
def _temperature_unit(self) -> str: if ( weather_option_temperature_unit := self._weather_o
Weather unit conversion (#73441) Co-authored-by: Erik <erik@montnemery.com>
_temperature_unit
90e1fb6ce2faadb9a35fdbe1774fce7b4456364f
core
__init__.py
9
10
https://github.com/home-assistant/core.git
2
26
0
15
43
Python
{ "docstring": "Return the converted unit of measurement for temperature.\n\n Should not be set by integrations.\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 14 }
def _temperature_unit(self) -> str: if ( weather_option_temperature_unit := self._weather_option_temperature_unit ) is not None: return weather_option_temperature_unit return self._default_temperature_unit
23,476
109,202
327
lib/matplotlib/backends/backend_pdf.py
78
23
def fontName(self, fontprop): if isinstance(fontprop, str): filenames = [fontprop] elif mpl.rcParams['pdf.use14corefonts']: filenames = _fontManager._find_fonts_by_props( fontprop, fontext='afm', directory=RendererPdf._afm_font_dir ) else: filenames = _fontManager._find_fonts_by_props(fontprop) first_Fx = None for fname
ENH: implement font fallback for PDF
fontName
c5fd8804204ee715ee008c35f96d6e95f8dfcc29
matplotlib
backend_pdf.py
13
21
https://github.com/matplotlib/matplotlib.git
7
122
0
53
200
Python
{ "docstring": "\n Select a font based on fontprop and return a name suitable for\n Op.selectfont. If fontprop is a string, it will be interpreted\n as the filename of the font.\n ", "language": "en", "n_whitespaces": 57, "n_words": 28, "vocab_size": 24 }
def fontName(self, fontprop): if isinstance(fontprop, str): filenames = [fontprop] elif mpl.rcParams['pdf.use14corefonts']: filenames = _fontManager._find_fonts_by_props( fontprop, fontext='afm', directory=RendererPdf._afm_font_dir ) else: filenames = _fontManager._find_fonts_by_props(fontprop) first_Fx = None for fname in filenames: Fx = self.fontNames.get(fname) if not first_Fx: first_Fx = Fx if Fx is None: Fx = next(self._internal_font_seq) self.fontNames[fname] = Fx _log.debug('Assigning font %s = %r', Fx, fname) if not first_Fx: first_Fx = Fx # find_fontsprop's first value always adheres to # findfont's value, so technically no behaviour change return first_Fx
5,912
32,353
45
src/transformers/models/owlvit/feature_extraction_owlvit.py
33
11
def center_to_corners_format(x): x_center, y_center, width, he
Add OWL-ViT model for zero-shot object detection (#17938) * add owlvit model skeleton * add class and box predictor heads * convert modified flax clip to pytorch * fix box and class predictors * add OwlViTImageTextEmbedder * convert class and box head checkpoints * convert image text embedder checkpoints * add object detection head * fix bugs * update conversion script * update conversion script * fix q,v,k,out weight conversion conversion * add owlvit object detection output * fix bug in image embedder * fix bugs in text embedder * fix positional embeddings * fix bug in inference mode vision pooling * update docs, init tokenizer and processor files * support batch processing * add OwlViTProcessor * remove merge conflicts * readd owlvit imports * fix bug in OwlViTProcessor imports * fix bugs in processor * update docs * fix bugs in processor * update owlvit docs * add OwlViTFeatureExtractor * style changes, add postprocess method to feature extractor * add feature extractor and processor tests * add object detection tests * update conversion script * update config paths * update config paths * fix configuration paths and bugs * fix bugs in OwlViT tests * add import checks to processor * fix docs and minor issues * fix docs and minor issues * fix bugs and issues * fix bugs and issues * fix bugs and issues * fix bugs and issues * update docs and examples * fix bugs and issues * update conversion script, fix positional embeddings * process 2D input ids, update tests * fix style and quality issues * update docs * update docs and imports * update OWL-ViT index.md * fix bug in OwlViT feature ext tests * fix code examples, return_dict by default * return_dict by default * minor fixes, add tests to processor * small fixes * add output_attentions arg to main model * fix bugs * remove output_hidden_states arg from main model * update self.config variables * add option to return last_hidden_states * fix bug in config variables * fix copied from statements * fix small issues and bugs * fix bugs * fix bugs, support greyscale images * run fixup * update repo name * merge OwlViTImageTextEmbedder with obj detection head * fix merge conflict * fix merge conflict * make fixup * fix bugs * fix bugs * add additional processor test
center_to_corners_format
12d66b47012c9258f9557e6d3a0c13bcd1c72871
transformers
feature_extraction_owlvit.py
10
4
https://github.com/huggingface/transformers.git
1
76
0
22
103
Python
{ "docstring": "\n Converts a PyTorch tensor of bounding boxes of center format (center_x, center_y, width, height) to corners format\n (left, top, right, bottom).\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 19 }
def center_to_corners_format(x): x_center, y_center, width, height = x.unbind(-1) boxes = [(x_center - 0.5 * width), (y_center - 0.5 * height), (x_center + 0.5 * width), (y_center + 0.5 * height)] return torch.stack(boxes, dim=-1)
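Illustrative usage (not part of the record): converting a single (center_x, center_y, width, height) box to (left, top, right, bottom) corners with the same unbind/stack logic.

import torch

def center_to_corners_format(x):
    x_center, y_center, width, height = x.unbind(-1)
    boxes = [x_center - 0.5 * width, y_center - 0.5 * height,
             x_center + 0.5 * width, y_center + 0.5 * height]
    return torch.stack(boxes, dim=-1)

box = torch.tensor([[0.5, 0.5, 0.2, 0.4]])   # one box: centered at (0.5, 0.5), 0.2 wide, 0.4 tall
print(center_to_corners_format(box))          # tensor([[0.4000, 0.3000, 0.6000, 0.7000]])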
50,726
204,395
49
django/core/cache/backends/base.py
17
8
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): raise NotImplementedError( "subclasses of BaseCache must pro
Refs #33476 -- Reformatted code with Black.
add
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
base.py
8
4
https://github.com/django/django.git
1
23
0
17
37
Python
{ "docstring": "\n Set a value in the cache if the key does not already exist. If\n timeout is given, use that timeout for the key; otherwise use the\n default cache timeout.\n\n Return True if the value was stored, False otherwise.\n ", "language": "en", "n_whitespaces": 74, "n_words": 38, "vocab_size": 29 }
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): raise NotImplementedError( "subclasses of BaseCache must provide an add() method" )
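Usage sketch (illustrative, not part of the record; assumes a concrete backend such as Django's LocMemCache is configured): add() only writes when the key is absent, unlike set().

from django.conf import settings

settings.configure(
    CACHES={"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}}
)

from django.core.cache import cache

assert cache.add("greeting", "hello", timeout=60) is True     # key was absent, value stored
assert cache.add("greeting", "goodbye", timeout=60) is False  # key exists, value left untouched
assert cache.get("greeting") == "hello"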
42,351
177,332
156
networkx/linalg/laplacianmatrix.py
94
30
def normalized_laplacian_matrix(G, nodelist=None, weight="weight"): r import numpy as np import scipy as sp import scipy.sparse # call as sp.sparse if nodelist is None: nodelist = list(G) A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") n, m = A.shape diags = A.sum(axis=1) # TODO: rm csr_array wrapper when spdiags can produce arrays D = sp.sparse.csr_array(sp.sparse.spdiags(diags, 0, m, n, format="csr")) L = D - A with sp.errstate(divide="ignore"): diags_sqrt = 1.0 / np.sqrt(diags) diags_sqrt[np.isinf(diags_sqrt)] = 0 # TODO: rm csr_array wrapper when spdi
Use scipy.sparse array datastructure (#6037) * Use scipy.sparse array datastructure * Add reminder to rm wrapper when scipy adds creation fns. * Rm mention of np matrix from code comment. * Update networkx/algorithms/bipartite/matrix.py Co-authored-by: Stefan van der Walt <sjvdwalt@gmail.com> Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> Co-authored-by: Stefan van der Walt <sjvdwalt@gmail.com>
normalized_laplacian_matrix
8a325d26aa7fdd3a72580c4720fa97f971bbefcb
networkx
laplacianmatrix.py
12
66
https://github.com/networkx/networkx.git
2
175
0
61
276
Python
{ "docstring": "Returns the normalized Laplacian matrix of G.\n\n The normalized graph Laplacian is the matrix\n\n .. math::\n\n N = D^{-1/2} L D^{-1/2}\n\n where `L` is the graph Laplacian and `D` is the diagonal matrix of\n node degrees [1]_.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n weight : string or None, optional (default='weight')\n The edge data key used to compute each value in the matrix.\n If None, then each edge has weight 1.\n\n Returns\n -------\n N : SciPy sparse array\n The normalized Laplacian matrix of G.\n\n Notes\n -----\n For MultiGraph, the edges weights are summed.\n See :func:`to_numpy_array` for other options.\n\n If the Graph contains selfloops, D is defined as ``diag(sum(A, 1))``, where A is\n the adjacency matrix [2]_.\n\n See Also\n --------\n laplacian_matrix\n normalized_laplacian_spectrum\n\n References\n ----------\n .. [1] Fan Chung-Graham, Spectral Graph Theory,\n CBMS Regional Conference Series in Mathematics, Number 92, 1997.\n .. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized\n Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98,\n March 2007.\n ", "language": "en", "n_whitespaces": 331, "n_words": 190, "vocab_size": 126 }
def normalized_laplacian_matrix(G, nodelist=None, weight="weight"): r import numpy as np import scipy as sp import scipy.sparse # call as sp.sparse if nodelist is None: nodelist = list(G) A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") n, m = A.shape diags = A.sum(axis=1) # TODO: rm csr_array wrapper when spdiags can produce arrays D = sp.sparse.csr_array(sp.sparse.spdiags(diags, 0, m, n, format="csr")) L = D - A with sp.errstate(divide="ignore"): diags_sqrt = 1.0 / np.sqrt(diags) diags_sqrt[np.isinf(diags_sqrt)] = 0 # TODO: rm csr_array wrapper when spdiags can produce arrays DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, m, n, format="csr")) return DH @ (L @ DH)
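Usage sketch (illustrative, not part of the record): the normalized Laplacian of the path graph on three nodes, computed through the public networkx API.

import networkx as nx

G = nx.path_graph(3)                    # 0 - 1 - 2, degrees 1, 2, 1
N = nx.normalized_laplacian_matrix(G)   # SciPy sparse result
print(N.toarray())
# [[ 1.         -0.70710678  0.        ]
#  [-0.70710678  1.         -0.70710678]
#  [ 0.         -0.70710678  1.        ]]

Off-diagonal entries are -1/sqrt(d_i * d_j) for each edge, matching the D^{-1/2} L D^{-1/2} formula in the docstring.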
76,282
260,486
293
sklearn/feature_extraction/image.py
136
28
def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None): i_h, i_w = image.shape[:2] p_h, p_w = patch_size if p_h > i_h: raise ValueError( "Height of the patch should be less than the height of the image." ) if p_w > i_w: raise ValueError( "Width of the patch should be less than the width of the image." ) image = check_array(image, allow_nd=True) image = image.reshape((i_h, i_w, -1)) n_colors = image.shape[-1] extracted_patches = _extract_patches( image, patch_shape=(p_h, p_w, n_colors), extraction_step=1 ) n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches) if max_patches: rng = check_random_state(random_state) i_s = rng.randint(i_h - p_h + 1, size=n_patches) j_s = rng.randint(i_w - p_w + 1, size=n_patches) patches = extracted_patches[i_s, j_s, 0] else: patches = extracted_patches patches = patches.reshape(-1, p_h, p_w, n_colors) # remove the color dimension if useless if patches.s
DOC Ensures that extract_patches_2d passes numpydoc validation (#23926) Co-authored-by: Olivor Holman <olivorholman@Olivors-MacBook-Air.local>
extract_patches_2d
01e6449e653a058206e7a2a1aa3270f851769c4b
scikit-learn
image.py
12
30
https://github.com/scikit-learn/scikit-learn.git
5
221
0
80
336
Python
{ "docstring": "Reshape a 2D image into a collection of patches.\n\n The resulting patches are allocated in a dedicated array.\n\n Read more in the :ref:`User Guide <image_feature_extraction>`.\n\n Parameters\n ----------\n image : ndarray of shape (image_height, image_width) or \\\n (image_height, image_width, n_channels)\n The original image data. For color images, the last dimension specifies\n the channel: a RGB image would have `n_channels=3`.\n\n patch_size : tuple of int (patch_height, patch_width)\n The dimensions of one patch.\n\n max_patches : int or float, default=None\n The maximum number of patches to extract. If `max_patches` is a float\n between 0 and 1, it is taken to be a proportion of the total number\n of patches.\n\n random_state : int, RandomState instance, default=None\n Determines the random number generator used for random sampling when\n `max_patches` is not None. Use an int to make the randomness\n deterministic.\n See :term:`Glossary <random_state>`.\n\n Returns\n -------\n patches : array of shape (n_patches, patch_height, patch_width) or \\\n (n_patches, patch_height, patch_width, n_channels)\n The collection of patches extracted from the image, where `n_patches`\n is either `max_patches` or the total number of patches that can be\n extracted.\n\n Examples\n --------\n >>> from sklearn.datasets import load_sample_image\n >>> from sklearn.feature_extraction import image\n >>> # Use the array data from the first image in this dataset:\n >>> one_image = load_sample_image(\"china.jpg\")\n >>> print('Image shape: {}'.format(one_image.shape))\n Image shape: (427, 640, 3)\n >>> patches = image.extract_patches_2d(one_image, (2, 2))\n >>> print('Patches shape: {}'.format(patches.shape))\n Patches shape: (272214, 2, 2, 3)\n >>> # Here are just two of these patches:\n >>> print(patches[1])\n [[[174 201 231]\n [174 201 231]]\n [[173 200 230]\n [173 200 230]]]\n >>> print(patches[800])\n [[[187 214 243]\n [188 215 244]]\n [[187 214 243]\n [188 215 244]]]\n ", "language": "en", "n_whitespaces": 483, "n_words": 266, "vocab_size": 165 }
def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None): i_h, i_w = image.shape[:2] p_h, p_w = patch_size if p_h > i_h: raise ValueError( "Height of the patch should be less than the height of the image." ) if p_w > i_w: raise ValueError( "Width of the patch should be less than the width of the image." ) image = check_array(image, allow_nd=True) image = image.reshape((i_h, i_w, -1)) n_colors = image.shape[-1] extracted_patches = _extract_patches( image, patch_shape=(p_h, p_w, n_colors), extraction_step=1 ) n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches) if max_patches: rng = check_random_state(random_state) i_s = rng.randint(i_h - p_h + 1, size=n_patches) j_s = rng.randint(i_w - p_w + 1, size=n_patches) patches = extracted_patches[i_s, j_s, 0] else: patches = extracted_patches patches = patches.reshape(-1, p_h, p_w, n_colors) # remove the color dimension if useless if patches.shape[-1] == 1: return patches.reshape((n_patches, p_h, p_w)) else: return patches
51,124
205,422
694
django/db/models/base.py
165
37
def refresh_from_db(self, using=None, fields=None): if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, "_prefetched_objects_cache", ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if a
Refs #33476 -- Reformatted code with Black.
refresh_from_db
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
base.py
15
43
https://github.com/django/django.git
17
278
0
101
448
Python
{ "docstring": "\n Reload field values from the database.\n\n By default, the reloading happens from the database this instance was\n loaded from, or by the read router if this instance wasn't loaded from\n any database. The using parameter will override the default.\n\n Fields can be used to specify which fields to reload. The fields\n should be an iterable of field attnames. If fields is None, then\n all non-deferred fields are reloaded.\n\n When accessing deferred fields of an instance, the deferred loading\n of the field will call this method.\n ", "language": "en", "n_whitespaces": 156, "n_words": 85, "vocab_size": 58 }
def refresh_from_db(self, using=None, fields=None): if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, "_prefetched_objects_cache", ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found "%s" in fields argument. Relations and transforms ' "are not allowed in fields." % LOOKUP_SEP ) hints = {"instance": self} db_instance_qs = self.__class__._base_manager.db_manager( using, hints=hints ).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [ f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields ] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db
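Usage sketch (illustrative; Author is a hypothetical model defined elsewhere): reloading a stale instance after the row changed in the database, optionally restricted to specific fields.

# assumes a hypothetical model:  class Author(models.Model): name = models.CharField(max_length=50)
author = Author.objects.create(name="Ada")

Author.objects.filter(pk=author.pk).update(name="Grace")  # updates the row, not the in-memory instance
assert author.name == "Ada"                               # the loaded copy is now stale

author.refresh_from_db(fields=["name"])                    # reload only that field
assert author.name == "Grace"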
54,894
217,711
95
python3.10.4/Lib/http/client.py
27
12
def set_tunnel(self, host, port=None, headers=None): if self.sock: raise RuntimeError("Can't set up tunnel for established connection") self._tunnel_host, self._tunnel_port = self._get_hostport(host, port) if headers
add python 3.10.4 for windows
set_tunnel
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
client.py
11
8
https://github.com/XX-net/XX-Net.git
3
59
0
25
96
Python
{ "docstring": "Set up host and port for HTTP CONNECT tunnelling.\n\n In a connection that uses HTTP CONNECT tunneling, the host passed to the\n constructor is used as a proxy server that relays all communication to\n the endpoint passed to `set_tunnel`. This done by sending an HTTP\n CONNECT request to the proxy server when the connection is established.\n\n This method must be called before the HTTP connection has been\n established.\n\n The headers argument should be a mapping of extra HTTP headers to send\n with the CONNECT request.\n ", "language": "en", "n_whitespaces": 148, "n_words": 85, "vocab_size": 54 }
def set_tunnel(self, host, port=None, headers=None): if self.sock: raise RuntimeError("Can't set up tunnel for established connection") self._tunnel_host, self._tunnel_port = self._get_hostport(host, port) if headers: self._tunnel_headers = headers else: self._tunnel_headers.clear()
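Usage sketch (illustrative; proxy.example.com:3128 and the credentials are made up): tunnelling an HTTPS request through a forward proxy with http.client. set_tunnel() must be called before the connection is established, as the docstring above requires.

import http.client

conn = http.client.HTTPSConnection("proxy.example.com", 3128)      # hypothetical proxy address
conn.set_tunnel(
    "www.python.org", 443,
    headers={"Proxy-Authorization": "Basic dXNlcjpwYXNz"},          # extra headers for the CONNECT request
)
conn.request("GET", "/")
print(conn.getresponse().status)
conn.close()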
26,780
120,111
31
jax/_src/config.py
19
7
def explicit_device_get_scope() -> Iterator[None]: state = transfer_
Bump minimum jaxlib version to 0.3.2 and remove transfer guard compatibility code
explicit_device_get_scope
36df8619d74672b0072e7880bcdd257c4a83e9f1
jax
config.py
10
9
https://github.com/google/jax.git
2
37
0
13
66
Python
{ "docstring": "Indicates that the current context is an explicit device_get() call.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def explicit_device_get_scope() -> Iterator[None]: state = transfer_guard_lib.thread_local_state() prev = state.explicit_device_get state.explicit_device_get = True try: yield finally: state.explicit_device_get = prev
102,302
303,482
40
homeassistant/components/homekit_controller/entity.py
8
9
def accessory_info(self) -> Service: return self.accessory.services.first( service_type=ServicesTypes.ACCESSORY_INFORMATION )
Move HKC entity classes into entity.py (#76333)
accessory_info
c580bce879b6c2f68c4ea45707b5a05ee88c6ecc
core
entity.py
9
5
https://github.com/home-assistant/core.git
1
23
0
8
39
Python
{ "docstring": "Information about the make and model of an accessory.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def accessory_info(self) -> Service: return self.accessory.services.first( service_type=ServicesTypes.ACCESSORY_INFORMATION )
13,188
63,181
29
.venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py
13
3
def _always_object(classes): if object not in classes: return classes + (o
upd; format
_always_object
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
__init__.py
9
4
https://github.com/jindongwang/transferlearning.git
2
21
0
11
35
Python
{ "docstring": "\n Ensure object appears in the mro even\n for old-style classes.\n ", "language": "en", "n_whitespaces": 20, "n_words": 10, "vocab_size": 10 }
def _always_object(classes): if object not in classes: return classes + (object,) return classes
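Illustrative aside (not part of the record): the helper simply guarantees that object terminates the class tuple, which matters when walking an MRO-like sequence for old-style classes.

def _always_object(classes):
    if object not in classes:
        return classes + (object,)
    return classes

assert _always_object((int, str)) == (int, str, object)
assert _always_object((int, object)) == (int, object)   # already present, returned unchanged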
42,857
178,909
20
nuitka/freezer/IncludedDataFiles.py
7
4
def addIncludedDataFilesFromFileOptions(): for included_datafile in _addIncludedDataFilesFromFileOptions(): ad
Plugins: Massive cleanup of data file handling * Move data file handling out of standalone only, allowing support for other modes as well. * Attach logger and tags to data file objects.
addIncludedDataFilesFromFileOptions
abfb99b0a05dd76d2ecc6ebc20732a271857c6c8
Nuitka
IncludedDataFiles.py
9
3
https://github.com/Nuitka/Nuitka.git
2
16
0
7
30
Python
{ "docstring": "Early data files, from user options that work with file system.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def addIncludedDataFilesFromFileOptions(): for included_datafile in _addIncludedDataFilesFromFileOptions(): addIncludedDataFile(included_datafile)
78,617
266,837
24
lib/ansible/utils/_junit_xml.py
18
7
def _attributes(**kwargs) -> dict[str, str]: return {key: str(value) for key, valu
Simplify existing type hints.
_attributes
871b2ca73adcba3a35551247cf839246cf121231
ansible
_junit_xml.py
9
3
https://github.com/ansible/ansible.git
3
38
0
17
60
Python
{ "docstring": "Return the given kwargs as a dictionary with values converted to strings. Items with a value of None will be omitted.", "language": "en", "n_whitespaces": 20, "n_words": 21, "vocab_size": 19 }
def _attributes(**kwargs) -> dict[str, str]: return {key: str(value) for key, value in kwargs.items() if value is not None}
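Illustrative aside (not part of the record): None-valued keyword arguments are dropped and everything else is stringified, which is the shape an XML attribute map needs.

def _attributes(**kwargs):
    return {key: str(value) for key, value in kwargs.items() if value is not None}

assert _attributes(name="case", time=1.25, classname=None) == {"name": "case", "time": "1.25"}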
56,530
222,132
105
python3.10.4/Lib/ctypes/test/test_pointers.py
35
16
def test_charpp(self): dll = CDLL(_ctypes_test.__file__) func = dll._testfunc_c_p_p func.restype = c_char_p argv = (c_char_p * 2)() argc = c_int( 2 ) arg
add python 3.10.4 for windows
test_charpp
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
test_pointers.py
10
10
https://github.com/XX-net/XX-Net.git
1
73
0
26
120
Python
{ "docstring": "Test that a character pointer-to-pointer is correctly passed", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def test_charpp(self): dll = CDLL(_ctypes_test.__file__) func = dll._testfunc_c_p_p func.restype = c_char_p argv = (c_char_p * 2)() argc = c_int( 2 ) argv[0] = b'hello' argv[1] = b'world' result = func( byref(argc), argv ) self.assertEqual(result, b'world')
38,792
160,904
145
numpy/testing/tests/test_utils.py
50
18
def test_error_message_unsigned(self): # Ensure to test for potential overflow in the case of: # x - y
TST: Add a failing test case to demonstrate the bug gh2176
test_error_message_unsigned
57d04d883e874c611091933c4c36e1cd43ea0e04
numpy
test_utils.py
11
7
https://github.com/numpy/numpy.git
1
84
0
39
143
Python
{ "docstring": "Check the the message is formatted correctly when overflow can occur\n (gh21768)", "language": "en", "n_whitespaces": 21, "n_words": 12, "vocab_size": 11 }
def test_error_message_unsigned(self): # Ensure to test for potential overflow in the case of: # x - y # and # y - x x = np.asarray([0, 1, 8], dtype='uint8') y = np.asarray([4, 4, 4], dtype='uint8') with pytest.raises(AssertionError) as exc_info: assert_allclose(x, y, atol=3) msgs = str(exc_info.value).split('\n') assert_equal(msgs[4], 'Max absolute difference: 4')
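Illustrative aside (not part of the record): why the unsigned case needs care. uint8 subtraction wraps around instead of going negative, so a naive x - y (or y - x) overflows when computing the reported difference.

import numpy as np

x = np.asarray([0, 1, 8], dtype="uint8")
y = np.asarray([4, 4, 4], dtype="uint8")

print(x - y)   # [252 253   4]  wrapped values, not [-4 -3 4]
print(np.abs(x.astype(np.int16) - y.astype(np.int16)))   # [4 3 4], so the max absolute difference is 4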
92,712
293,655
292
homeassistant/components/matrix/__init__.py
64
18
def _join_or_get_room(self, room_id_or_alias): rooms = self._client.get_rooms() if room_id_or_alias in rooms: _LOGGER.debug("Already in room %s", room_id_or_alias) return rooms[room_id_or_alias] for r
Fix finding matrix room that is already joined (#67967) After some debugging, it seems room.canonical_alias contains the room alias that matches the room_id_or_alias value but is not contained in room.aliases (which is empty). As a result, the matrix component thought the room wasn't alread joined, joins again, and this replaces the previous room which had the listener. This resulted in the component callback not being called for new messages in the room. This fixes #66372
_join_or_get_room
2aaeb1fa99f3f691a5c4adfff984e25bf96d787d
core
__init__.py
12
20
https://github.com/home-assistant/core.git
6
122
0
39
196
Python
{ "docstring": "Join a room or get it, if we are already in the room.\n\n We can't just always call join_room(), since that seems to crash\n the client if we're already in the room.\n ", "language": "en", "n_whitespaces": 53, "n_words": 32, "vocab_size": 26 }
def _join_or_get_room(self, room_id_or_alias): rooms = self._client.get_rooms() if room_id_or_alias in rooms: _LOGGER.debug("Already in room %s", room_id_or_alias) return rooms[room_id_or_alias] for room in rooms.values(): if room.room_id not in self._aliases_fetched_for: room.update_aliases() self._aliases_fetched_for.add(room.room_id) if ( room_id_or_alias in room.aliases or room_id_or_alias == room.canonical_alias ): _LOGGER.debug( "Already in room %s (known as %s)", room.room_id, room_id_or_alias ) return room room = self._client.join_room(room_id_or_alias) _LOGGER.info("Joined room %s (known as %s)", room.room_id, room_id_or_alias) return room
48,321
197,067
192
sympy/solvers/solveset.py
52
18
def _is_function_class_equation(func_class, f, symbol): if f.is_Mul or f.is_A
Refactored import ordering in functions
_is_function_class_equation
e0dc14eca132f37c5f49369eb4051eae37c9b119
sympy
solveset.py
14
19
https://github.com/sympy/sympy.git
9
119
0
35
185
Python
{ "docstring": " Tests whether the equation is an equation of the given function class.\n\n The given equation belongs to the given function class if it is\n comprised of functions of the function class which are multiplied by\n or added to expressions independent of the symbol. In addition, the\n arguments of all such functions must be linear in the symbol as well.\n\n Examples\n ========\n\n >>> from sympy.solvers.solveset import _is_function_class_equation\n >>> from sympy import tan, sin, tanh, sinh, exp\n >>> from sympy.abc import x\n >>> from sympy.functions.elementary.trigonometric import TrigonometricFunction\n >>> from sympy.functions.elementary.hyperbolic import HyperbolicFunction\n >>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x)\n False\n >>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x)\n True\n >>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x)\n False\n >>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x)\n True\n >>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x)\n True\n ", "language": "en", "n_whitespaces": 190, "n_words": 123, "vocab_size": 73 }
def _is_function_class_equation(func_class, f, symbol): if f.is_Mul or f.is_Add: return all(_is_function_class_equation(func_class, arg, symbol) for arg in f.args) if f.is_Pow: if not f.exp.has(symbol): return _is_function_class_equation(func_class, f.base, symbol) else: return False if not f.has(symbol): return True if isinstance(f, func_class): try: g = Poly(f.args[0], symbol) return g.degree() <= 1 except PolynomialError: return False else: return False
75,349
258,647
30
sklearn/isotonic.py
9
11
def get_feature_names_out(self, input_features=None): class_name = self.__class__.__name__.lower() return np.as
ENH Adds get_feature_names to isotonic module (#22249)
get_feature_names_out
8991c3d7870df692fe01510e0fe6de62ea550cad
scikit-learn
isotonic.py
10
3
https://github.com/scikit-learn/scikit-learn.git
1
35
0
9
61
Python
{ "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Ignored.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n An ndarray with one string i.e. [\"isotonicregression0\"].\n ", "language": "en", "n_whitespaces": 103, "n_words": 32, "vocab_size": 28 }
def get_feature_names_out(self, input_features=None): class_name = self.__class__.__name__.lower() return np.asarray([f"{class_name}0"], dtype=object)
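Usage sketch (illustrative, not part of the record; requires a scikit-learn version that provides this method): after fitting, the transformer reports a single output feature named after the lower-cased class name.

import numpy as np
from sklearn.isotonic import IsotonicRegression

iso = IsotonicRegression().fit(np.arange(5), np.array([1, 2, 2, 3, 5]))
print(iso.get_feature_names_out())   # ['isotonicregression0']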
50,555
203,847
376
django/contrib/gis/db/backends/postgis/operations.py
80
17
def get_distance(self, f, dist_val, lookup_type): # Getting
Refs #33476 -- Reformatted code with Black.
get_distance
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
operations.py
18
21
https://github.com/django/django.git
5
99
0
57
167
Python
{ "docstring": "\n Retrieve the distance parameters for the given geometry field,\n distance lookup value, and the distance lookup type.\n\n This is the most complex implementation of the spatial backends due to\n what is supported on geodetic geometry columns vs. what's available on\n projected geometry columns. In addition, it has to take into account\n the geography column type.\n ", "language": "en", "n_whitespaces": 106, "n_words": 55, "vocab_size": 41 }
def get_distance(self, f, dist_val, lookup_type): # Getting the distance parameter value = dist_val[0] # Shorthand boolean flags. geodetic = f.geodetic(self.connection) geography = f.geography if isinstance(value, Distance): if geography: dist_param = value.m elif geodetic: if lookup_type == "dwithin": raise ValueError( "Only numeric values of degree units are " "allowed on geographic DWithin queries." ) dist_param = value.m else: dist_param = getattr( value, Distance.unit_attname(f.units_name(self.connection)) ) else: # Assuming the distance is in the units of the field. dist_param = value return [dist_param]