column          dtype          min      max
n_words         int64          3        1.95k
n_ast_errors    int64          0        2
complexity      int64          1        151
nloc            int64          2        546
path            stringlengths  8        125
id              int64          280      339k
commit_message  stringlengths  3        18.1k
repo            stringlengths  3        28
ast_levels      int64          4        28
language        stringclasses  1 value
vocab_size      int64          3        677
file_name       stringlengths  5        67
code            stringlengths  101      24k
commit_id       stringlengths  40       40
ast_errors      stringlengths  0        2.76k
token_counts    int64          7        3.77k
url             stringlengths  31       61
n_whitespaces   int64          4        13.9k
random_cut      stringlengths  21       13.9k
n_identifiers   int64          1        157
n_ast_nodes     int64          10       3.6k
fun_name        stringlengths  3        72
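The block above reads like a Hugging Face datasets preview: one entry per column giving its dtype and the min/max of the value (for int64 columns) or of the string length (for stringlengths columns), with the sample rows that follow laid out in the same column order (empty ast_errors values are simply omitted). A minimal inspection sketch with the `datasets` library — the dataset id "user/python-commit-functions" is a hypothetical placeholder, not the real name:

    # Hedged sketch: the dataset id below is a placeholder, not the real one.
    from datasets import load_dataset

    ds = load_dataset("user/python-commit-functions", split="train")
    print(ds.features)                    # column dtypes as tabulated above
    row = ds[0]
    print(row["repo"], row["path"], row["fun_name"])
    print(row["code"][:200])              # the extracted function source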
46
0
2
6
bokeh/models/callbacks.py
212,384
Generalize tooltips and add description to widgets (#12154)

* Generalize tooltips and add description to widgets
* Fail in old compiler if TS had errors
* Update font-awesome icon's implementation
* Add models representing element queries
* Introduce a ViewManager for a Document
* Make LayoutDOM a UIElement
* Expose Tooltip's target property
* Make embed_items (etc.) return ViewManager
* Ignore Undefined/Intrinsic initializer values
* Update visual baselines
* Use explicit __all__ exports in models.ui
* Use the correct DOM element in mousedown
* Allow attach tooltips to UIElements
* Redesign and generalize menus
* Add HelpButton and BuiltinIcon models
* Fix eager eval in Instance's sphinx_link
* Add LayoutDOM.context_menu property
* Let TooltipView handle initial rendering
* Add position to a Tooltip to display it
* Move icons to the ui module
* Make menu items UI elements
* Allow to reference property values in DOM
* Redesign stylesheet management, avoid style
* Add experimental support for third-party icons
* Add a stub Dialog UI component implementation
* Add SetValue(obj, attr, value) callback model
* Allow Callback models in js_{event,property}_callbacks
* Allow to display Inspector in a dialog box
* Rename Separator->Divider for consistent naming
* Preliminary support for icon sizing
* Split up models/dom/index.ts into sub-modules
* Update font-awesome example
* Add more unit tests for core/util/iterator
* Add SVGIcon and update docstrings
* Add/Improve docstrings in models/dom.py
* More docstrings
* Apply review suggestions

Co-authored-by: Bryan Van de Ven <bryan@bokeh.org>
Co-authored-by: Bryan Van de Ven <bryan@bokeh.org>
bokeh
11
Python
32
callbacks.py
def _check_if_provided_a_valid_value(self):
    descriptor = self.obj.lookup(self.attr)
    if descriptor.property.is_valid(self.value):
        return None
    else:
        return f"{self.value!r} is not a valid value for {self.obj}.{self.attr}"

# TODO:
class Show(Callback):
    target = Required(Either(Instance(DOMNode), Instance(UIElement)))

# TODO:
class Hide(Callback):
    ...

#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
700b5be79008ec576dc9994ee48850da43277d3e
36
https://github.com/bokeh/bokeh.git
77
def _check_if_provided_a_valid_value(self):
    descriptor = self.obj.lookup(self.attr)
    if descriptor.property.is_valid(self.value):
        return None
    else:
        return f"{self.value!r} is not a valid value for {self.obj}.{self.attr}"

# TODO:
class Show(Callback):
    target = Required(Either(Instance(DOMNode), Instance(UIElement)))

# TODO:
class Hide(Callback):
    ...

#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Code
#---
9
92
_check_if_provided_a_valid_value
8
0
1
3
homeassistant/components/lock/__init__.py
290,849
Adjust type hints for LockEntityFeature (#82256)
core
6
Python
8
__init__.py
def supported_features(self) -> LockEntityFeature | int:
    return self._attr_supported_features
8570d3aabeb0bde1345659087849acd9de946ce5
14
https://github.com/home-assistant/core.git
22
def supported_features(self) -> LockEntityFeature | int:
    return self._attr_supported_features
5
25
supported_features
36
1
1
17
tests/components/samsungtv/conftest.py
292,958
Use async rest api in SamsungTV (#67369)

Co-authored-by: epenet <epenet@users.noreply.github.com>
core
13
Python
35
conftest.py
def rest_api_fixture() -> Mock:
    with patch(
        "homeassistant.components.samsungtv.bridge.SamsungTVAsyncRest",
        autospec=True,
    ) as rest_api_class:
        rest_api_class.return_value.rest_device_info.return_value = {
            "id": "uuid:be9554b9-c9fb-41f4-8920-22da015376a4",
            "device": {
                "modelName": "82GXARRS",
                "wifiMac": "aa:bb:cc:dd:ee:ff",
                "name": "[TV] Living Room",
                "type": "Samsung SmartTV",
                "networkType": "wireless",
            },
        }
        yield rest_api_class.return_value


@pytest.fixture(name="remotews")
1556868d562b0426effd0d556b9665d2865a8018
@pytest.fixture(name="remotews")
63
https://github.com/home-assistant/core.git
187
def rest_api_fixture() -> Mock:
    with patch(
        "homeassistant.components.samsungtv.bridge.SamsungTVAsyncRest",
10
142
rest_api_fixture
29
0
1
10
Lib/test/test_posix.py
175,829
bpo-46426: Improve tests for the dir_fd argument (GH-30668)

Ensure that directory file descriptors refer to directories different
from the current directory, and that src_dir_fd and dst_dir_fd refer to
different directories.

Add context manager open_dir_fd() in test.support.os_helper.
cpython
10
Python
27
test_posix.py
def prepare(self):
    TestPosixDirFd.count += 1
    name = f'{os_helper.TESTFN}_{self.count}'
    base_dir = f'{os_helper.TESTFN}_{self.count}base'
    posix.mkdir(base_dir)
    self.addCleanup(posix.rmdir, base_dir)
    fullname = os.path.join(base_dir, name)
    assert not os.path.exists(fullname)
    with os_helper.open_dir_fd(base_dir) as dir_fd:
        yield (dir_fd, name, fullname)
54610bb448a9cf5be77d53b66169fca4c11be6cb
74
https://github.com/python/cpython.git
95
def prepare(self):
    TestPosixDirFd.count
19
148
prepare
83
0
1
4
d2l/mxnet.py
253,721
Refactoring Attention scoring functions and Bahdanau Attention (#2095)

* Attention scoring funcs
* attn
* sequence mask
* remove accents
d2l-en
10
Python
36
mxnet.py
def transpose_qkv(X, num_heads):
    # Shape of input X: (batch_size, no. of queries or key-value pairs,
    # num_hiddens). Shape of output X: (batch_size, no. of queries or
    # key-value pairs, num_heads, num_hiddens / num_heads)
    X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)

    # Shape of output X: (batch_size, num_heads, no. of queries or key-value
    # pairs, num_hiddens / num_heads)
    X = X.transpose(0, 2, 1, 3)

    # Shape of output: (batch_size * num_heads, no. of queries or key-value
    # pairs, num_hiddens / num_heads)
    return X.reshape(-1, X.shape[2], X.shape[3])
098975b3dce69956e5ebc5f95a04589d2bfc8c22
69
https://github.com/d2l-ai/d2l-en.git
116
def transpose_qkv(X, num_heads):
    # Shape of input X: (batch_size, no. of queries or key-value pairs,
6
107
transpose_qkv
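Since the mxnet ndarray used here mirrors NumPy's reshape/transpose semantics, the shape comments in transpose_qkv can be sanity-checked directly; a minimal sketch with arbitrary illustrative sizes (batch_size=2, 4 queries, num_hiddens=8, num_heads=2):

    # Shape check only; sizes are illustrative, not taken from the dataset row.
    import numpy as np

    X = np.zeros((2, 4, 8))
    out = transpose_qkv(X, num_heads=2)
    print(out.shape)  # (4, 4, 4) == (batch_size * num_heads, queries, num_hiddens / num_heads)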
24
0
2
9
lib/mpl_toolkits/axisartist/tests/test_axislines.py
110,601
Add facecolor to axisline style
matplotlib
11
Python
23
test_axislines.py
def test_axisline_style_size_color():
    fig = plt.figure(figsize=(2, 2))
    ax = fig.add_subplot(axes_class=AxesZero)
    ax.axis["xzero"].set_axisline_style("-|>", size=2.0, facecolor='r')
    ax.axis["xzero"].set_visible(True)
    ax.axis["yzero"].set_axisline_style("->, size=1.5")
    ax.axis["yzero"].set_visible(True)
    for direction in ("left", "right", "bottom", "top"):
        ax.axis[direction].set_visible(False)
3fee7584e2f03049f4dec0ccdf4055cb0e38e05b
106
https://github.com/matplotlib/matplotlib.git
51
def test_axisline_style_size_color():
    f
15
178
test_axisline_style_size_color
22
1
1
9
tests/components/zwave_js/conftest.py
303,343
Fix zwave_js addon info (#76044)

* Add add-on store info command
* Use add-on store info command in zwave_js
* Fix init tests
* Update tests
* Fix method for addon store info
* Fix response parsing
* Fix store addon installed response parsing
* Remove addon info log that can contain network keys
* Add supervisor store addon info test
* Default to version None if add-on not installed

Co-authored-by: Mike Degatano <michael.degatano@gmail.com>
Co-authored-by: Mike Degatano <michael.degatano@gmail.com>
core
9
Python
19
conftest.py
def mock_addon_running(addon_store_info, addon_info):
    addon_store_info.return_value = {
        "installed": "1.0.0",
        "state": "started",
        "version": "1.0.0",
    }
    addon_info.return_value["state"] = "started"
    addon_info.return_value["version"] = "1.0.0"
    return addon_info


@pytest.fixture(name="addon_installed")
842cc060f80a632032dacbe1e2eaa8ca6421eda0
@pytest.fixture(name="addon_installed")
44
https://github.com/home-assistant/core.git
60
def mock_addon_running(addon_store_info, addon_info):
    addon_store_info.return_value = {
        "installed": "1.0.0",
        "state": "started",
        "version": "1.0.0",
    }
    addon_info.return_value["state"] = "started"
    addon_info.return_value["version"] = "1.0.0"
    retur
7
104
mock_addon_running
54
0
4
17
release/tune_tests/cloud_tests/workloads/_tune_script.py
133,618
[CI] Format Python code with Black (#21975)

See #21316 and #21311 for the motivation behind these changes.
ray
19
Python
41
_tune_script.py
def fn_trainable(config, checkpoint_dir=None):
    if checkpoint_dir:
        with open(os.path.join(checkpoint_dir, "checkpoint.json"), "rt") as fp:
            state = json.load(fp)
    else:
        state = {"internal_iter": 0}

    for i in range(state["internal_iter"], config["max_iterations"]):
        state["internal_iter"] = i
        time.sleep(config["sleep_time"])

        if i % config["checkpoint_freq"] == 0:
            with tune.checkpoint_dir(step=i) as cd:
                with open(os.path.join(cd, "checkpoint.json"), "wt") as fp:
                    json.dump(state, fp)

        tune.report(
            score=i * 10 * config["score_multiplied"],
            internal_iter=state["internal_iter"],
        )
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
151
https://github.com/ray-project/ray.git
189
def fn_trainable(config, checkpoint_dir=None):
    if checkpoint_dir:
        with open(os.path.join(checkpoint_dir, "checkpoint.json"), "rt") as fp:
            state = json.load(fp)
    else:
        state = {"internal_iter": 0}

    for i in range(state["internal_iter"], config["max_iterations"]):
        state["internal_iter"] = i
        time.sleep(config["sleep_time"])

        if i % config["checkpoint_freq"] == 0:
            with tune.checkpoint_dir(step=i) as cd:
                with open(os.path.join(cd, "checkpoint.json"), "wt") as fp:
22
260
fn_trainable
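fn_trainable is written against Ray Tune's legacy function API (tune.checkpoint_dir and tune.report, superseded in later Ray releases). A hedged usage sketch under that assumption, with config values chosen purely for illustration:

    # Assumes a Ray version where tune.run and the checkpoint_dir function API exist.
    from ray import tune

    analysis = tune.run(
        fn_trainable,
        config={
            "max_iterations": 10,
            "sleep_time": 0.0,
            "checkpoint_freq": 2,
            "score_multiplied": 1,
        },
        metric="score",
        mode="max",
    )
    print(analysis.best_config)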
8
0
1
2
wagtail/search/backends/elasticsearch5.py
75,569
Reformat with black
wagtail
10
Python
7
elasticsearch5.py
def get_content_type(self):
    return self.model._meta.app_label + "." + self.model.__name__
d10f15e55806c6944827d801cd9c2d53f5da4186
22
https://github.com/wagtail/wagtail.git
22
def get_content_type(self):
    return self.model.
6
39
get_content_type
22
0
1
6
pandas/tests/arrays/test_datetimes.py
171,848
BUG: DatetimeArray.astype(Sparse) (#50082)

* BUG: DatetimeArray.astype(Sparse)
* GH ref
pandas
9
Python
18
test_datetimes.py
def test_astype_to_sparse_dt64(self):
    # GH#50082
    dti = pd.date_range("2016-01-01", periods=4)
    dta = dti._data

    result = dta.astype("Sparse[datetime64[ns]]")

    assert result.dtype == "Sparse[datetime64[ns]]"
    assert (result == dta).all()
7c208c8907f5ab18f807366c0c5e26ae1dbca299
46
https://github.com/pandas-dev/pandas.git
63
def test_astype_to_sparse_dt64(self):
    # GH#50082
    dti = pd.date_range("2016-01-01", periods=4)
    dta = dti._data

    result =
12
80
test_astype_to_sparse_dt64
42
0
2
12
src/pip/_vendor/colorama/tests/ansitowin32_test.py
175,059
Upgrade colorama to 0.4.6
pip
12
Python
34
ansitowin32_test.py
def testCallWin32UsesLookup(self):
    listener = Mock()
    stream = AnsiToWin32(listener)
    stream.win32_calls = {
        1: (lambda *_, **__: listener(11),),
        2: (lambda *_, **__: listener(22),),
        3: (lambda *_, **__: listener(33),),
    }
    stream.call_win32('m', (3, 1, 99, 2))
    self.assertEqual(
        [a[0][0] for a in listener.call_args_list],
        [33, 11, 22])
8cda1c34eb865c1be5b72e8295f7ca7adfdeb113
117
https://github.com/pypa/pip.git
138
def testCallWin32UsesLookup(self):
    listener = Mock()
    stream = AnsiToWin32(listener)
    stream.win32_calls = {
        1: (lambda *_, **__: listener(11),),
        2: (lambda *_, **__: listener(22),),
        3: (lambda *_, **__: listener(33),),
    }
    stream.call_win32('m', (3, 1, 99, 2))
    self.assertEqual(
        [a[0][0] for a in listener.call_args_list],
        [33, 11
13
174
testCallWin32UsesLookup
8
0
1
5
wagtail/core/models/__init__.py
73,776
Reformat with black
wagtail
12
Python
8
__init__.py
def active(self):
    return self.filter(
        Q(status=WorkflowState.STATUS_IN_PROGRESS)
        | Q(status=WorkflowState.STATUS_NEEDS_CHANGES)
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
29
https://github.com/wagtail/wagtail.git
51
def active(self):
    return self.filter(
        Q(status=
8
49
active
58
0
6
27
homeassistant/components/traccar/device_tracker.py
317,883
Bump pytraccar to 1.0.0 (#75671)
core
19
Python
49
device_tracker.py
async def import_events(self):
    start_intervel = datetime.utcnow()
    events = await self._api.get_reports_events(
        devices=[device.id for device in self._devices],
        start_time=start_intervel,
        end_time=start_intervel - self._scan_interval,
        event_types=self._event_types.keys(),
    )
    if events is not None:
        for event in events:
            self._hass.bus.async_fire(
                f"traccar_{self._event_types.get(event.type)}",
                {
                    "device_traccar_id": event.device_id,
                    "device_name": next(
                        (
                            dev.name
                            for dev in self._devices
                            if dev.id == event.device_id
                        ),
                        None,
                    ),
                    "type": event.type,
                    "serverTime": event.event_time,
                    "attributes": event.attributes,
                },
            )
e87c2b9e2590eadcaa14f7256388675fcc64918d
137
https://github.com/home-assistant/core.git
547
async def import_events(self):
    start_intervel = datetime.utcnow()
    events = await self._api.get_reports_events(
        devices=[device.id for device in self._devices],
        start_time=start_intervel,
        end_time=start_intervel - self._scan_interval,
        event_types=self._event_types.keys(),
    )
    if events is not None:
        for event in events:
            self._hass.bus.async_fire(
                f"traccar_{self._event_types.get(event.type)}",
                {
                    "device_traccar_id": event.device_id,
                    "device_name": next(
                        (
                            dev.name
                            for dev in self._devices
30
231
import_events
26
0
3
9
sandbox/vertical_container.py
183,261
[layouts] Fix vertical layout bug with centered content
textual
14
Python
24
vertical_container.py
async def action_remove_placeholder(self):
    placeholders = self.query("Placeholder")
    placeholders_count = len(placeholders)
    for i, placeholder in enumerate(placeholders):
        if i == placeholders_count - 1:
            await self.remove(placeholder)
            placeholder.parent.children._nodes.remove(placeholder)
    self.refresh(repaint=True, layout=True)
    self.refresh_css()
2d25807f495d2fbd64eb0909b1320ad0cc8e7b7d
72
https://github.com/Textualize/textual.git
101
async def action_remove_placeholder(self):
    placeholders = self.query(
17
119
action_remove_placeholder
51
0
1
9
doc/source/tutorial/examples/morphology_binary_dilation_erosion.py
241,816
DOC: added example for morphology: generate_binary_structure, binary_dilation and erosion (#15244)

Co-authored-by: Pamphile Roy <roy.pamphile@gmail.com>
scipy
9
Python
28
morphology_binary_dilation_erosion.py
def ball(radius, dtype=np.uint8):
    n = 2 * radius + 1
    Z, Y, X = np.mgrid[
        -radius: radius: n * 1j,
        -radius: radius: n * 1j,
        -radius: radius: n * 1j
    ]
    s = X ** 2 + Y ** 2 + Z ** 2
    return np.array(s <= radius * radius, dtype=dtype)
a0d44d5be19a84b4cb1984eb795dd0266f665c76
83
https://github.com/scipy/scipy.git
86
def ball(radius, dtype=np.uint8):
    n = 2 * radius + 1
    Z, Y, X = np.mgrid[
        -radius: radius: n * 1j,
        -radius: radius: n * 1j,
        -radius: radius: n * 1j
    ]
    s = X ** 2 + Y ** 2 + Z ** 2
    return np.array(s
12
121
ball
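Because np.mgrid with an imaginary step count n * 1j yields n points inclusive of both endpoints, ball(1) lands on a 3x3x3 grid; a quick check, assuming ball and numpy as np from the snippet above are in scope:

    # ball(1): x**2 + y**2 + z**2 <= 1 over {-1, 0, 1}**3 keeps the centre plus
    # its six face neighbours, i.e. seven ones.
    print(ball(1).sum())  # 7
    print(ball(1)[1])     # middle slice: [[0 1 0] [1 1 1] [0 1 0]]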
21
0
1
7
tests/unit_tests/chains/test_react.py
191,405
Harrison/add react chain (#24)

from https://arxiv.org/abs/2210.03629

still need to think if docstore abstraction makes sense
langchain
10
Python
19
test_react.py
def test_predict_until_observation_error() -> None:
    outputs = ["foo\nAction 1: foo"]
    fake_llm = FakeListLLM(outputs)
    fake_llm_chain = LLMChain(llm=fake_llm, prompt=_FAKE_PROMPT)
    with pytest.raises(ValueError):
        predict_until_observation(fake_llm_chain, "", 1)
ce7b14b84381c766ae42a0f71953b2a56c024dbb
46
https://github.com/hwchase17/langchain.git
43
def test_predict_until_observation_error() -> None:
    outputs = ["foo\nAction
13
82
test_predict_until_observation_error
108
0
6
31
tests/onnx/test_onnx_v2.py
30,917
Add support for Perceiver ONNX export (#17213)

* Start adding perceiver support for ONNX
* Fix pad token bug for fast tokenizers
* Fix formatting
* Make get_preprocesor more opinionated (processor priority, otherwise tokenizer/feature extractor)
* Clean docs format
* Minor cleanup following @sgugger's comments
* Fix typo in docs
* Fix another docs typo
* Fix one more typo in docs
* Update src/transformers/onnx/utils.py

Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>

* Update src/transformers/onnx/utils.py

Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>

* Update src/transformers/onnx/utils.py

Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>

Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
transformers
15
Python
89
test_onnx_v2.py
def _onnx_export(self, test_name, name, model_name, feature, onnx_config_class_constructor, device="cpu"):
    from transformers.onnx import export

    model_class = FeaturesManager.get_model_class_for_feature(feature)
    config = AutoConfig.from_pretrained(model_name)
    model = model_class.from_config(config)
    onnx_config = onnx_config_class_constructor(model.config)

    if is_torch_available():
        from transformers.utils import torch_version

        if torch_version < onnx_config.torch_onnx_minimum_version:
            pytest.skip(
                "Skipping due to incompatible PyTorch version. Minimum required is"
                f" {onnx_config.torch_onnx_minimum_version}, got: {torch_version}"
            )

    preprocessor = get_preprocessor(model_name)

    # Useful for causal lm models that do not use pad tokens.
    if isinstance(preprocessor, PreTrainedTokenizerBase) and not getattr(config, "pad_token_id", None):
        config.pad_token_id = preprocessor.eos_token_id

    with NamedTemporaryFile("w") as output:
        try:
            onnx_inputs, onnx_outputs = export(
                preprocessor, model, onnx_config, onnx_config.default_onnx_opset, Path(output.name), device=device
            )
            validate_model_outputs(
                onnx_config,
                preprocessor,
                model,
                Path(output.name),
                onnx_outputs,
                onnx_config.atol_for_validation,
            )
        except (RuntimeError, ValueError) as e:
            self.fail(f"{name}, {feature} -> {e}")
babeff5524bf3d5d62cfa70e1297158a755b0810
189
https://github.com/huggingface/transformers.git
508
def _onnx_export(self, test_name, name, model_name, feature, onnx_config_class_constructor, device="cpu"):
    from transformers.onnx import export

    model_class = FeaturesManager.get_model_class_for_feature(feature)
    config = AutoConfig.from_pretrained(model_name)
    model = model_class.from_config(config)
    onnx_config = onnx_config_class_constructor(model.config)

    if is_torch_available():
        from transformers.utils import torch_version

        if torch_version < onnx_config.torch_onnx_minimum_version:
            pytest.skip(
                "Skipping due to incompatible PyTorch version. Minimum required is"
                f" {onnx_config.torch_onnx_minimum_version}, got: {torch_version}"
            )

    preprocessor = get_preprocessor(model_name)

    # Useful for causal lm models that do not use pad tokens.
    if isinstance(preprocessor, PreTrainedTokenizerBase) and not getattr(config, "pad_token_id", None):
        config.pad_token_id = preprocessor.eos_token_id

    with NamedTemporaryFile("w") as output:
        try:
            onnx_inputs, onnx_outputs = export(
                preprocessor, model, onnx_config, onnx_config.default_onnx_opset, Path(output.name), device=device
            )
            validate_model_outputs(
                onnx_config,
                preprocessor,
                model,
                Path(output.name),
                onnx_outputs,
                onnx_config.atol_for_validation,
            )
        except (RuntimeError, Valu
45
319
_onnx_export
16
0
2
4
.venv/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/candidates.py
61,094
upd; format
transferlearning
10
Python
15
candidates.py
def __eq__(self, other):
    # type: (Any) -> bool
    if isinstance(other, self.__class__):
        return links_equivalent(self._link, other._link)
    return False
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
30
https://github.com/jindongwang/transferlearning.git
47
def __eq__(self, other):
    # type: (Any) -> bool
    if isinstance(other, self.__class__):
        return links_equivalent(self._link, other._link)
    ret
7
46
__eq__
46
0
2
16
wagtail/admin/tests/tests.py
78,648
Refine dashboard design - summary panels

* Update colours and icons for summary panels
* add h1 id for aria referencing
* rework layout to use flex box & not floats
* move summary styles to own component scss file
* now functions correctly in RTL mode
wagtail
11
Python
38
tests.py
def test_summary_items(self):
    response = self.client.get(reverse("wagtailadmin_home"))
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, "<li>0 broken links</li>")

    # check that media attached to summary items is correctly pulled in
    if DJANGO_VERSION >= (4, 1):
        self.assertContains(
            response,
            '<link href="/static/testapp/css/broken-links.css" media="all" rel="stylesheet">',
            html=True,
        )
    else:
        self.assertContains(
            response,
            '<link href="/static/testapp/css/broken-links.css" type="text/css" media="all" rel="stylesheet">',
            html=True,
        )
22e904fb01e0206f95019f0c8ea0db80c7d4efc8
73
https://github.com/wagtail/wagtail.git
221
def test_summary_items(self):
    response = self.client.get(reverse("wagtailadmin_home"))
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, "<li>0 broken links</li>")

    #
11
119
test_summary_items
48
0
3
8
src/pip/_internal/metadata/importlib.py
174,321
Try to cover Path interface differences
pip
10
Python
39
importlib.py
def canonical_name(self) -> NormalizedName:
    # Try to get the name from the metadata directory name. This is much
    # faster than reading metadata.
    if self._info_location is None:
        return self._get_dist_normalized_name()
    stem, suffix = os.path.splitext(self._info_location.name)
    if suffix not in (".dist-info", ".egg-info"):
        return self._get_dist_normalized_name()
    name, _, _ = stem.partition("-")
    return canonicalize_name(name)
846d8e59654078ecc020933ca82e6f1ff2bb44ca
69
https://github.com/pypa/pip.git
118
def canonical_name(self) -> NormalizedName:
    # Try to get the name from the metadata directory name. This is much
    # faster than reading metadata.
    if self._info_location is None:
        return self._get_dist_normalized_name()
    stem, suffix = os.path.splitext(self._info_location.name)
    if suffix not in (".dist-info", ".egg-info"):
        return self._get_dist_normalized_name()
    name, _, _ = stem.partition("-")
14
116
canonical_name
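The directory-name fast path in canonical_name is plain stdlib string handling; a small trace on a typical .dist-info directory name (the example name is illustrative only):

    import os

    stem, suffix = os.path.splitext("pip-22.0.dist-info")  # ("pip-22.0", ".dist-info")
    name, _, _ = stem.partition("-")
    print(suffix, name)  # .dist-info pip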
455
1
33
122
cps/web.py
172,809
Better epub cover parsing with multiple cover-image items

Code cosmetics
renamed variables
refactored xml page generation
refactored prepare author
calibre-web
23
Python
251
web.py
def render_adv_search_results(term, offset=None, order=None, limit=None):
    sort_param = order[0] if order else [db.Books.sort]
    pagination = None

    cc = get_cc_columns(filter_config_custom_read=True)
    calibre_db.session.connection().connection.connection.create_function("lower", 1, db.lcase)
    if not config.config_read_column:
        query = (calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, ub.ReadBook).select_from(db.Books)
                 .outerjoin(ub.ReadBook, and_(db.Books.id == ub.ReadBook.book_id,
                                              int(current_user.id) == ub.ReadBook.user_id)))
    else:
        try:
            read_column = cc[config.config_read_column]
            query = (calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, read_column.value)
                     .select_from(db.Books)
                     .outerjoin(read_column, read_column.book == db.Books.id))
        except (KeyError, AttributeError):
            log.error("Custom Column No.%d is not existing in calibre database", config.config_read_column)
            # Skip linking read column
            query = calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, None)
    query = query.outerjoin(ub.ArchivedBook, and_(db.Books.id == ub.ArchivedBook.book_id,
                                                  int(current_user.id) == ub.ArchivedBook.user_id))

    q = query.outerjoin(db.books_series_link, db.Books.id == db.books_series_link.c.book) \
        .outerjoin(db.Series) \
        .filter(calibre_db.common_filters(True))

    # parse multiselects to a complete dict
    tags = dict()
    elements = ['tag', 'serie', 'shelf', 'language', 'extension']
    for element in elements:
        tags['include_' + element] = term.get('include_' + element)
        tags['exclude_' + element] = term.get('exclude_' + element)

    author_name = term.get("author_name")
    book_title = term.get("book_title")
    publisher = term.get("publisher")
    pub_start = term.get("publishstart")
    pub_end = term.get("publishend")
    rating_low = term.get("ratinghigh")
    rating_high = term.get("ratinglow")
    description = term.get("comment")
    read_status = term.get("read_status")
    if author_name:
        author_name = author_name.strip().lower().replace(',', '|')
    if book_title:
        book_title = book_title.strip().lower()
    if publisher:
        publisher = publisher.strip().lower()

    search_term = []
    cc_present = False
    for c in cc:
        if c.datatype == "datetime":
            column_start = term.get('custom_column_' + str(c.id) + '_start')
            column_end = term.get('custom_column_' + str(c.id) + '_end')
            if column_start:
                search_term.extend([u"{} >= {}".format(
                    c.name,
                    format_date(datetime.strptime(column_start, "%Y-%m-%d").date(),
                                format='medium', locale=get_locale())
                )])
                cc_present = True
            if column_end:
                search_term.extend([u"{} <= {}".format(
                    c.name,
                    format_date(datetime.strptime(column_end, "%Y-%m-%d").date(),
                                format='medium', locale=get_locale())
                )])
                cc_present = True
        elif term.get('custom_column_' + str(c.id)):
            search_term.extend([(u"{}: {}".format(c.name, term.get('custom_column_' + str(c.id))))])
            cc_present = True

    if any(tags.values()) or author_name or book_title or \
            publisher or pub_start or pub_end or rating_low or rating_high \
            or description or cc_present or read_status:
        search_term, pub_start, pub_end = extend_search_term(search_term,
                                                             author_name,
                                                             book_title,
                                                             publisher,
                                                             pub_start,
                                                             pub_end,
                                                             tags,
                                                             rating_high,
                                                             rating_low,
                                                             read_status)
        # q = q.filter()
        if author_name:
            q = q.filter(db.Books.authors.any(func.lower(db.Authors.name).ilike("%" + author_name + "%")))
        if book_title:
            q = q.filter(func.lower(db.Books.title).ilike("%" + book_title + "%"))
        if pub_start:
            q = q.filter(func.datetime(db.Books.pubdate) > func.datetime(pub_start))
        if pub_end:
            q = q.filter(func.datetime(db.Books.pubdate) < func.datetime(pub_end))
        q = adv_search_read_status(q, read_status)
        if publisher:
            q = q.filter(db.Books.publishers.any(func.lower(db.Publishers.name).ilike("%" + publisher + "%")))
        q = adv_search_text(q, tags['include_tag'], tags['exclude_tag'], db.Tags.id)
        q = adv_search_text(q, tags['include_serie'], tags['exclude_serie'], db.Series.id)
        q = adv_search_text(q, tags['include_extension'], tags['exclude_extension'], db.Data.format)
        q = adv_search_shelf(q, tags['include_shelf'], tags['exclude_shelf'])
        q = adv_search_language(q, tags['include_language'], tags['exclude_language'], )
        q = adv_search_ratings(q, rating_high, rating_low, )

        if description:
            q = q.filter(db.Books.comments.any(func.lower(db.Comments.text).ilike("%" + description + "%")))

        # search custom culumns
        try:
            q = adv_search_custom_columns(cc, term, q)
        except AttributeError as ex:
            log.error_or_exception(ex)
            flash(_("Error on search for custom columns, please restart Calibre-Web"), category="error")

    q = q.order_by(*sort_param).all()
    flask_session['query'] = json.dumps(term)
    ub.store_combo_ids(q)
    result_count = len(q)
    if offset is not None and limit is not None:
        offset = int(offset)
        limit_all = offset + int(limit)
        pagination = Pagination((offset / (int(limit)) + 1), limit, result_count)
    else:
        offset = 0
        limit_all = result_count
    entries = calibre_db.order_authors(q[offset:limit_all], list_return=True, combined=True)
    return render_title_template('search.html',
                                 adv_searchterm=search_term,
                                 pagination=pagination,
                                 entries=entries,
                                 result_count=result_count,
                                 title=_(u"Advanced Search"), page="advsearch",
                                 order=order[1])


@web.route("/advsearch", methods=['GET'])
@login_required_if_no_ano
4545f4a20d9ff90b99bbd4e3e34b6de4441d6367
@web.route("/advsearch", methods=['GET']) @login_required_if_no_ano
1,247
https://github.com/janeczku/calibre-web.git
2,465
def render_adv_search_results(term, offset=None, order=None, limit=None):
    sort_param = order[0] if order else [db.Books.sort]
    pagination = None

    cc = get_cc_columns(filter_config_custom_read=True)
    calibre_db.session.connection().connection.connection.create_function("lower", 1, db.lcase)
    if not config.config_read_column:
        query = (calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, ub.ReadBook).select_from(db.Books)
                 .outerjoin(ub.ReadBook, and_(db.Books.id == ub.ReadBook.book_id,
                                              int(current_user.id) == ub.ReadBook.user_id)))
    else:
        try:
            read_column = cc[config.config_read_column]
            query = (calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, read_column.value)
                     .select_from(db.Books)
                     .outerjoin(read_column, read_column.book == db.Books.id))
        except (KeyError, AttributeError):
            log.error("Custom Column No.%d is not existing in calibre database", config.config_read_column)
            # Skip linking read column
            query = calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, None)
    query = query.outerjoin(ub.ArchivedBook, and_(db.Books.id == ub.ArchivedBook.book_id,
                                                  int(current_user.id) == ub.ArchivedBook.user_id))

    q = query.outerjoin(db.books_series_link, db.Books.id == db.books_series_link.c.book) \
        .outerjoin(db.Series) \
        .filter(calibre_db.common_filters(True))

    # parse multiselects to a complete dict
    tags = dict()
    elements = ['tag', 'serie', 'shelf', 'language', 'extension']
    for element in elements:
        tags['include_' + element] = term.get('include_' + element)
        tags['exclude_' + element] = term.get('exclude_' + element)

    author_name = term.get("author_name")
    book_title = term.get("book_title")
    publisher = term.get("publisher")
    pub_start = term.get("publishstart")
    pub_end = term.get("publishend")
    rating_low = term.get("ratinghigh")
    rating_high = term.get("ratinglow")
    description = term.get("comment")
    read_status = term.get("read_status")
    if author_name:
        author_name = author_name.strip().lower().replace(',', '|')
    if book_title:
        book_title = book_title.strip().lower()
    if publisher:
        publisher = publisher.strip().lower()

    s
126
2,034
render_adv_search_results
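One detail of the pagination arithmetic in render_adv_search_results is easy to miss: Python 3's true division makes the computed page number a float. A tiny illustration with arbitrary numbers:

    offset, limit = 20, 10
    page = offset / (int(limit)) + 1  # mirrors Pagination((offset / (int(limit)) + 1), ...)
    print(page)  # 3.0 -- a float page index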
141
0
16
39
django/core/management/commands/migrate.py
204,673
Refs #33476 -- Reformatted code with Black.
django
19
Python
54
migrate.py
def migration_progress_callback(self, action, migration=None, fake=False):
    if self.verbosity >= 1:
        compute_time = self.verbosity > 1
        if action == "apply_start":
            if compute_time:
                self.start = time.monotonic()
            self.stdout.write(" Applying %s..." % migration, ending="")
            self.stdout.flush()
        elif action == "apply_success":
            elapsed = (
                " (%.3fs)" % (time.monotonic() - self.start) if compute_time else ""
            )
            if fake:
                self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
            else:
                self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
        elif action == "unapply_start":
            if compute_time:
                self.start = time.monotonic()
            self.stdout.write(" Unapplying %s..." % migration, ending="")
            self.stdout.flush()
        elif action == "unapply_success":
            elapsed = (
                " (%.3fs)" % (time.monotonic() - self.start) if compute_time else ""
            )
            if fake:
                self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
            else:
                self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
        elif action == "render_start":
            if compute_time:
                self.start = time.monotonic()
            self.stdout.write(" Rendering model states...", ending="")
            self.stdout.flush()
        elif action == "render_success":
            elapsed = (
                " (%.3fs)" % (time.monotonic() - self.start) if compute_time else ""
            )
            self.stdout.write(self.style.SUCCESS(" DONE" + elapsed))
9c19aff7c7561e3a82978a272ecdaad40dda5c00
314
https://github.com/django/django.git
717
def migration_progress_callback(self, action, migration=None, fake=False):
    if self.verbosity >= 1:
        compute_time = self.verbosity > 1
        if action == "apply_start":
            if compute_time:
                self.start = time.monotonic()
            self.stdout.write(" Applying %s..." % migration, ending="")
            self.stdout.flush()
        elif action == "apply_success":
            elapsed = (
                " (%.3fs)" % (time.monotonic() - self.start) if compute_time else ""
            )
            if fake:
                self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
            else:
                self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
        elif action == "unapply_start":
            if compute_time:
                self.start = time.monotonic()
            self.stdout.write(" Unapplying %s..." % migration, ending="")
            self.stdout.flush()
        elif action == "unapply_success":
            elapsed = (
                " (%.3fs)" % (time.monotonic() - self.start) if compute_time else ""
            )
            if fake:
                self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
            else:
                self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
        elif action == "render_start":
            if compute_time:
                self.start = time.monotonic()
            self.stdout.write(" Rendering model states...", ending="")
            self.stdout.flush()
        elif action == "render_success":
            elapsed = (
                " (%.3fs)" % (time.monotonic() - self.start) if compute_time else ""
            )
            self.stdout.write(self.style.SUCCESS(" DONE" + elapsed))
17
542
migration_progress_callback
19
0
1
3
test/test_pipeline.py
256,737
Refactor Pipeline peripherals (#2253)

* move peripheral stuff to utils, add more and better tests
* Update Documentation & Code Style
* move config related peripherals to config module, fix tests
* Update Documentation & Code Style
* remove unnecessary list comprehensions
* apply ZanSara's feedback
* remove classes in pipeline utils

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
haystack
17
Python
18
test_pipeline.py
def test_validate_pipeline_config_invalid_component_param_key():
    with pytest.raises(ValueError, match="is not a valid config variable name"):
        validate_config({"components": [{"name": "test", "type": "test", "params": {"\btest": "test"}}]})
f7a01624e0581a05800ed31ddb1bbb32b169b256
42
https://github.com/deepset-ai/haystack.git
28
def test_validate_pipeline_config_invalid_component_param_key():
    with pytest.raises(ValueError, match="is not a valid config variable name"):
        validate_config({"components"
6
85
test_validate_pipeline_config_invalid_component_param_key
105
0
5
41
erpnext/payroll/doctype/additional_salary/additional_salary.py
66,870
style: format code with black
erpnext
18
Python
82
additional_salary.py
def get_additional_salaries(employee, start_date, end_date, component_type):
    comp_type = "Earning" if component_type == "earnings" else "Deduction"

    additional_sal = frappe.qb.DocType("Additional Salary")
    component_field = additional_sal.salary_component.as_("component")
    overwrite_field = additional_sal.overwrite_salary_structure_amount.as_("overwrite")

    additional_salary_list = (
        frappe.qb.from_(additional_sal)
        .select(
            additional_sal.name,
            component_field,
            additional_sal.type,
            additional_sal.amount,
            additional_sal.is_recurring,
            overwrite_field,
            additional_sal.deduct_full_tax_on_selected_payroll_date,
        )
        .where(
            (additional_sal.employee == employee)
            & (additional_sal.docstatus == 1)
            & (additional_sal.type == comp_type)
        )
        .where(
            additional_sal.payroll_date[start_date:end_date]
            | ((additional_sal.from_date <= end_date) & (additional_sal.to_date >= end_date))
        )
        .run(as_dict=True)
    )

    additional_salaries = []
    components_to_overwrite = []

    for d in additional_salary_list:
        if d.overwrite:
            if d.component in components_to_overwrite:
                frappe.throw(
                    _(
                        "Multiple Additional Salaries with overwrite property exist for Salary Component {0} between {1} and {2}."
                    ).format(frappe.bold(d.component), start_date, end_date),
                    title=_("Error"),
                )

            components_to_overwrite.append(d.component)

        additional_salaries.append(d)

    return additional_salaries
494bd9ef78313436f0424b918f200dab8fc7c20b
228
https://github.com/frappe/erpnext.git
64
def get_additional_salaries(employee, start_date, end_date, component_type):
    comp_type = "Earning" if component_type == "earnings" else "Deduction"

    additional_sal = frappe.qb.DocType("Additional Salary")
    component_field = additional_sal.salary_component.as_("component")
    overwrite_field = additional_sal.overwrite_salary_structure_amount.as_("overwrite")

    additional_salary_list = (
        frappe.qb.from_(additional_sal)
        .select(
            additional_sal.name,
            component_field,
            additional_sal.type,
            additional_sal.amount,
            additional_sal.is_recurring,
            overwrite_field,
            additional_sal.deduct_full_tax_on_selected_payroll_date,
        )
        .where(
            (additional_sal.employee == employee)
            & (additional_sal.docstatus == 1)
            & (additional_sal.type == comp_type)
        )
        .where(
            additional_sal.payroll_date[start_date:end_date]
            |
41
359
get_additional_salaries
12
0
1
5
tests/admin_scripts/management/commands/label_command.py
207,274
Refs #33476 -- Reformatted code with Black.
django
13
Python
12
label_command.py
def handle_label(self, label, **options):
    print(
        "EXECUTE:LabelCommand label=%s, options=%s"
        % (label, sorted(options.items()))
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
27
https://github.com/django/django.git
47
def handle_label(self, label, **options):
7
44
handle_label
11
0
1
3
youtube_dl/jsinterp.py
106,429
[jsinterp] Improve JS language support (#31175)

* operator ??
* operator ?.
* operator **
* accurate operator functions
* `undefined` handling
* object literals {a: 1, "b": expr}
* more tests for weird JS comparisons: see https://github.com/ytdl-org/youtube-dl/issues/31173#issuecomment-1217854397.
youtube-dl
7
Python
11
jsinterp.py
def _all_operators():
    return itertools.chain(
        # Ref: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence
        _SC_OPERATORS, _LOG_OPERATORS, _COMP_OPERATORS, _OPERATORS)
b0a60ce2032172aeaaf27fe3866ab72768f10cb2
17
https://github.com/ytdl-org/youtube-dl.git
39
def _all_operators():
    return itertools.chain(
        # Ref: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence
7
26
_all_operators
78
0
9
13
tests/end2end/fixtures/quteprocess.py
321,575
qt6 tests: Fix remaining PyQt5 references
qutebrowser
12
Python
54
quteprocess.py
def wait_scroll_pos_changed(self, x=None, y=None):
    __tracebackhide__ = (lambda e: e.errisinstance(testprocess.WaitForTimeout))
    if (x is None and y is not None) or (y is None and x is not None):
        raise ValueError("Either both x/y or neither must be given!")

    if x is None and y is None:
        point = 'Py*.QtCore.QPoint(*, *)'  # not counting 0/0 here
    elif x == '0' and y == '0':
        point = 'Py*.QtCore.QPoint()'
    else:
        point = 'Py*.QtCore.QPoint({}, {})'.format(x, y)
    self.wait_for(category='webview', message='Scroll position changed to ' + point)
deb21acdebd77c6dc6d5fe4d8cad75e4ca074138
107
https://github.com/qutebrowser/qutebrowser.git
221
def wait_scroll_pos_changed(self, x=None, y=None):
    __tracebackhide__ = (lambda e: e.errisinstance(testprocess.WaitForTimeout))
    if (x is None and y is not None) or (y is None and x is not None):
        raise ValueError("Either both x/y or neither must be given!")

    if x is None and y is None:
        point = 'Py*.QtCore.QPoint(*, *)'  # not counting 0/0 here
    elif x == '0' and y
15
184
wait_scroll_pos_changed
56
0
1
22
tests/plugins/test_rtve.py
187,882
plugins.rtve: fix ZTNR.translate
streamlink
10
Python
43
test_rtve.py
def test_translate_has_streams():
    # real payload with modified end (IEND chunk of size 0), to reduce test size
    data = \
        "iVBORw0KGgoAAAANSUhEUgAAAVQAAAFUCAIAAAD08FPiAAACr3RFWHRXczlVSWdtM2ZPTGY4b2R4" \
        "dWo5aHZnRlRhOndvZEtxN3pLOG5oNGRpbT1vREBTWHhOMGtzUVomNndAWkV5cz1GOUlCSiYxdDcy" \
        "QmdDOFM2NGFVJmh1Nzk2bUpwOFVJOE1DJlpAY2lzdGcmbEUmRE5DZFV4SHpEOFgvLmppZ1l4b3M1" \
        "QU1lOnl3ZS04VlBwQkZvLlFMUWZHTy1vQjNVeHhfVDF1JkRSQTpPP2J4Wm0zbFlxS3IjAEhEX1JF" \
        "QURZJSUwNTYwNzI4Mjg4MzUyNjQyMzUxMTA0Mzg0NzI4NzY4NDEyODAzODU0ODMwMDQ3NzcwNDEx" \
        "MDAyODE1MzM3NDU3ODAxMDg3MjgxNTg1MzMzNDE3MTYxMTE4NzQ1MTU3MjYxOTUwNzI4NzEyNDgw" \
        "MzI4NTM1ODM1ODU3MzQyNzE0NjcyODE2NTgzNDI4NTE0NTg1MzIwMzgxODU3NDY0NzUwODI3OTQ0" \
        "ODg3NjEzMTUzNDMxMTUxNzYzNDU1NzE0MDA1MDUzNDIxODE0ODYyNDIzODM2MTczMzQ0NjAwNTIw" \
        "NTU2NDYyNDgxODYzNDA2MzA4MTE0ODUxMTQ2Mzg2MzYyMjQ4Mjc3MjIyMjUzNjMxMjI1MjEzMTU0" \
        "NjI1NjIyMjM3MTA4NjEwNjI0NTYyNTMxNTA2ODEyMjQ2MzYzNzE0MzY4MDU1MTgxNTQ2NTU3MTMx" \
        "NTI0NzU4MTU2NjAxMjY0MjA1MDU2MzcwMDM3NzcwMjA0MTYxMzE3MjQxMTI2NzYzMzUyNjY3NTQ1" \
        "NTA1MTUxNTc2NTEzMTUwNjcxNDcyMDI2MTQyMjczNTI4NzExNjA4NTU3NjIzMzMxMzU0NDM1Mzgw" \
        "MTI0MTQzMTU1MTMyNzc4ODI1MjcyMjUwMjY4MzYyMDUzMjQzNjA0MTYyMzkhB8fSAAAAAElFTkQAAAAACg=="
    assert list(ZTNR.translate(data)) == [
        (
            "HD_READY",
            "https://rtvehlsvodlote7modo2.rtve.es/mediavodv2/resources/TE_NGVA/mp4/5/3/1656573649835.mp4/video.m3u8"
            + "?hls_no_audio_only=true&idasset=6638770"
        ),
    ]
8bd58c047ca38e9bb639a6948e45b4387fc5d147
53
https://github.com/streamlink/streamlink.git
205
def test_translate_has_streams():
    # real payload with modified end (IEND chunk of size 0), to reduce test size
    data = \
        "iVBORw0KGgoAAAANSUhEUgAAAVQAAAFUCAIAAAD08FPiAAACr3RFWHRXczlVSWdtM2ZPTGY4b2R4" \
        "dWo5aHZnRlRhOndvZEtxN3pLOG5oNGRpbT1vREBTWHhOMGtzUVomNndAWkV5cz1GOUlCSiYxdDcy" \
        "QmdDOFM2NGFVJmh1Nzk2bUpwOFVJOE1DJlpAY2lzdGcmbEUmRE5DZFV4SHpEO
5
90
test_translate_has_streams
83
0
3
18
tests/storage/test_event_federation.py
249,590
Fix overflows in /messages backfill calculation (#13936)

* Reproduce bug
* Compute `least_function` first
* Substitute `least_function` with an f-string
* Bugfix: avoid overflow

Co-authored-by: Eric Eastwood <erice@element.io>
synapse
12
Python
56
test_event_federation.py
def test_get_insertion_event_backward_extremities_in_room(self):
    setup_info = self._setup_room_for_insertion_backfill_tests()
    room_id = setup_info.room_id
    depth_map = setup_info.depth_map

    # Try at "insertion_eventB"
    backfill_points = self.get_success(
        self.store.get_insertion_event_backward_extremities_in_room(
            room_id, depth_map["insertion_eventB"], limit=100
        )
    )
    backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
    self.assertEqual(backfill_event_ids, ["insertion_eventB", "insertion_eventA"])

    # Try at "insertion_eventA"
    backfill_points = self.get_success(
        self.store.get_insertion_event_backward_extremities_in_room(
            room_id, depth_map["insertion_eventA"], limit=100
        )
    )
    backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
    # Event "2" has a depth of 2 but is not included here because we only
    # know the approximate depth of 5 from our event "3".
    self.assertListEqual(backfill_event_ids, ["insertion_eventA"])
e8f30a76caa4394ebb3e77c56df951e3626b3fdd
117
https://github.com/matrix-org/synapse.git
269
def test_get_insertion_event_backward_extremities_in_room(self):
    setup_info = self._setup_room_for_insertion_backfill_tests()
    room_id = setup_info.room_id
    depth_map = setup_info.depth_map

    # Try at "insertion_eventB"
    backfill_points = self.get_success(
        self.store.get_insertion_event_backward_extremities_in_room(
            room_id, depth_map["insertion_eventB"], limit=100
        )
    )
    backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
    self.assertEqual(backfill_event_ids, ["insertion_eventB", "insertion_eventA"])

    # Try at "insertion_eventA"
    backfill_points = self.get_success(
        self.store.get_insertion_event_backward_extremities_in_room(
            room_id, depth_map["insertion_eventA"], limit=100
        )
    )
    backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
    # Event "2" has a depth of 2 but is not included here because we only
    # know the approximate depth of 5 from our event "3".
    self.assertListEqual(backfill_event_ids, ["insertion_eventA"])
15
191
test_get_insertion_event_backward_extremities_in_room
44
1
2
15
erpnext/education/doctype/topic/topic.py
65,915
style: format code with black
erpnext
12
Python
41
topic.py
def add_topic_to_courses(topic, courses, mandatory=False):
    courses = json.loads(courses)
    for entry in courses:
        course = frappe.get_doc("Course", entry)
        course.append("topics", {"topic": topic, "topic_name": topic})
        course.flags.ignore_mandatory = True
        course.save()
    frappe.db.commit()
    frappe.msgprint(
        _("Topic {0} has been added to all the selected courses successfully.").format(
            frappe.bold(topic)
        ),
        title=_("Courses updated"),
        indicator="green",
    )


@frappe.whitelist()
494bd9ef78313436f0424b918f200dab8fc7c20b
@frappe.whitelist()
100
https://github.com/frappe/erpnext.git
28
def add_topic_to_courses(topic, courses, mandatory=False):
    courses = json.loads(courses)
    for entry in courses:
        course = frappe.get_doc("Course", entry)
        course.append("topics", {"topic": topic, "topic_name": topic})
        course.flags.ignore_mandatory = True
        course.save()
    frappe.db.commit()
    frappe.msgprint(
        _("Topic {0} has been added to all the selected courses successfully.").format(
            frappe.bold(
23
178
add_topic_to_courses
35
0
7
15
lib/matplotlib/widgets.py
107,061
Fix drawing animated artists changed in selector callback
matplotlib
14
Python
26
widgets.py
def update(self):
    if not self.ax.get_visible() or self.ax.figure._cachedRenderer is None:
        return False
    if self.useblit:
        if self.background is not None:
            self.canvas.restore_region(self.background)
        else:
            self.update_background(None)
        for artist in self.artists + self._get_animated_artists():
            if artist.stale:
                self.ax.draw_artist(artist)
        self.canvas.blit(self.ax.bbox)
    else:
        self.canvas.draw_idle()
    return False
da31ed386482845629a8505f81810ddb341514fb
108
https://github.com/matplotlib/matplotlib.git
200
def update(self):
    if not self.ax.get_visible() or self.ax.figure._cachedRenderer is None:
        return False
    if self.useblit:
        if self.background is not None:
            self.canvas.restore_region(self.background)
        else:
            self.update_background(None)
        for artist in self.artists + self._get_animated_artists():
            if artist.stale:
19
177
update
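The update method above is matplotlib's standard blitting loop: cache the static background once, restore it each frame, redraw only the animated artists, then blit the bounding box. A self-contained sketch of that pattern outside the widget class, using only documented canvas calls:

    # Generic blitting sketch; not the widgets.py code itself.
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    (ln,) = ax.plot([0, 1], [0, 1], animated=True)
    fig.canvas.draw()
    background = fig.canvas.copy_from_bbox(ax.bbox)  # cache the static scene
    fig.canvas.restore_region(background)            # wipe the previous animated frame
    ax.draw_artist(ln)                               # redraw only the animated artist
    fig.canvas.blit(ax.bbox)                         # push the updated region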
19
0
2
7
langchain/llms/huggingface_hub.py
191,649
Harrison/llm saving (#331)

Co-authored-by: Akash Samant <70665700+asamant21@users.noreply.github.com>
langchain
10
Python
19
huggingface_hub.py
def _identifying_params(self) -> Mapping[str, Any]:
    _model_kwargs = self.model_kwargs or {}
    return {
        **{"repo_id": self.repo_id, "task": self.task},
        **{"model_kwargs": _model_kwargs},
    }
9bb7195085a843db3f53e4fd9a51c79f3698a19d
46
https://github.com/hwchase17/langchain.git
69
def _identifying_params(self) -> Mapping[str, Any]:
    _mod
9
77
_identifying_params
18
0
1
3
mkdocs/tests/config/config_options_tests.py
224,790
Refactor config_options_tests to use less implementation details

Prevent missed warnings, test more error messages.
And minor fixes to error messages themselves.
mkdocs
18
Python
17
config_options_tests.py
def test_invalid_children_config_int(self):
    with self.expect_error(option="Expected nav to be a list, got a int: 1"):
        self.get_config(self.Schema, {'option': [{"foo.md": [{"bar.md": 1}]}]})
3fd48806aa3fe6c571e0669afcbc4327f4c032e4
41
https://github.com/mkdocs/mkdocs.git
35
def test_invalid_children_config_int(self):
    with self.expect_error(option="Expected nav to be a list, got a int: 1"):
        self.get
6
74
test_invalid_children_config_int
10
1
1
5
octavia-cli/integration_tests/conftest.py
4,516
🐛 octavia-cli: use `list` endpoint instead of `list_latest` (#11505)
airbyte
8
Python
8
conftest.py
def destination_state_path(octavia_test_project_directory):
    state_path = f"{octavia_test_project_directory}/destinations/postgres/state.yaml"
    silent_remove(state_path)
    yield state_path
    silent_remove(state_path)


@pytest.fixture(scope="session")
9f21fae6684e6833e8896d017cd5c859046b61a7
@pytest.fixture(scope="session")
19
https://github.com/airbytehq/airbyte.git
20
def destination_state_path(octavia_test_project_directory):
    state_path = f"{octavia_test_project_directory}/destinations/postgres/state.yaml"
    silent_remove(state_path)
    yield state_path
7
55
destination_state_path
125
0
8
37
homeassistant/components/squeezebox/media_player.py
301,542
Update integrations to pass target player when resolving media (#72597)
core
14
Python
77
media_player.py
async def async_play_media(self, media_type, media_id, **kwargs):
    cmd = "play"
    index = None

    if kwargs.get(ATTR_MEDIA_ENQUEUE):
        cmd = "add"

    if media_source.is_media_source_id(media_id):
        media_type = MEDIA_TYPE_MUSIC
        play_item = await media_source.async_resolve_media(
            self.hass, media_id, self.entity_id
        )
        media_id = play_item.url

    if media_type in MEDIA_TYPE_MUSIC:
        if not media_id.startswith(SQUEEZEBOX_SOURCE_STRINGS):
            # do not process special squeezebox "source" media ids
            media_id = async_process_play_media_url(self.hass, media_id)

        await self._player.async_load_url(media_id, cmd)
        return

    if media_type == MEDIA_TYPE_PLAYLIST:
        try:
            # a saved playlist by number
            payload = {
                "search_id": int(media_id),
                "search_type": MEDIA_TYPE_PLAYLIST,
            }
            playlist = await generate_playlist(self._player, payload)
        except ValueError:
            # a list of urls
            content = json.loads(media_id)
            playlist = content["urls"]
            index = content["index"]
    else:
        payload = {
            "search_id": media_id,
            "search_type": media_type,
        }
        playlist = await generate_playlist(self._player, payload)

    _LOGGER.debug("Generated playlist: %s", playlist)
    await self._player.async_load_playlist(playlist, cmd)
    if index is not None:
        await self._player.async_index(index)
087c0b59edb4f6233849e2cf6eb9057474251934
214
https://github.com/home-assistant/core.git
593
async def async_play_media(self, media_type, media_id, **kwargs):
    cmd = "play"
    index = None

    if kwargs.get(ATTR_MEDIA_ENQUEUE):
        cmd = "add"

    if media_source.is_media_source_id(media_id):
        media_type = MEDIA_TYPE_MUSIC
        play_item = await media_source.async_resolve_media(
            self.hass, media_id, self.entity_id
        )
        media_id = play_item.url

    if media_type in MEDIA_TYPE_MUSIC:
        if not media_id.startswith(SQUEEZEBOX_SOURCE_STRINGS):
            # do not process special squeezebox "source" media ids
            media_id = async_process_play_media_url(self.hass, media_id)

        await se
35
357
async_play_media
96
0
7
25
cps/helper.py
172,765
Better epub cover parsing with multiple cover-image items

Code cosmetics
renamed variables
refactored xml page generation
refactored prepare author
calibre-web
22
Python
52
helper.py
def get_sorted_author(value):
    value2 = None
    try:
        if ',' not in value:
            regexes = [r"^(JR|SR)\.?$", r"^I{1,3}\.?$", r"^IV\.?$"]
            combined = "(" + ")|(".join(regexes) + ")"
            value = value.split(" ")
            if re.match(combined, value[-1].upper()):
                if len(value) > 1:
                    value2 = value[-2] + ", " + " ".join(value[:-2]) + " " + value[-1]
                else:
                    value2 = value[0]
            elif len(value) == 1:
                value2 = value[0]
            else:
                value2 = value[-1] + ", " + " ".join(value[:-1])
        else:
            value2 = value
    except Exception as ex:
        log.error("Sorting author %s failed: %s", value, ex)
        if isinstance(list, value2):
            value2 = value[0]
        else:
            value2 = value
    return value2
4545f4a20d9ff90b99bbd4e3e34b6de4441d6367
189
https://github.com/janeczku/calibre-web.git
339
def get_sorted_author(value):
    value2 = None
    try:
        if ',' not in value:
            regexes = [r"^(JR|SR)\.?$", r"^I{1,3}\.?$", r"^IV\.?$"]
            combined = "(" + ")|(".join(regexes) + ")"
            value = value.split(" ")
            if re.match(combined, value[-1].upper()):
                if len(value) > 1:
                    value2 = value[-2] + ", " + " ".join(value[:-2]) + " " + value[-1]
                else:
                    value2 = value[0]
            elif len(value) == 1:
                value2 = value[0]
            else:
                value2 = value[-1] + ", " + " ".join(value[:-1])
        else:
            value2 = value
    except Exception as ex:
        log.error("Sorting author %s failed: %s", value, ex)
        if isins
17
322
get_sorted_author
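Hand-traced examples of get_sorted_author's intent, assuming the function and its re/log imports are in scope (note the isinstance(list, value2) call in the recorded except branch has its arguments reversed; it is kept verbatim above):

    # Illustrative traces; none of these inputs hit the exception path.
    assert get_sorted_author("John Doe") == "Doe, John"
    assert get_sorted_author("John Doe Jr.") == "Doe, John Jr."   # suffix regex matches
    assert get_sorted_author("Doe, John") == "Doe, John"          # comma: returned as-is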
49
0
1
14
modules/image/semantic_segmentation/lseg/module.py
51,162
Add LSeg Module (#2038)

* add LSeg
* add LSeg README
* add requirements.txt
* update README
* update module
* update
* update
* update
* update
* pre-commit
* update
* save jpg -> save png
* bgr -> bgra
* fix typo
* pre-commit
PaddleHub
12
Python
45
module.py
def run_cmd(self, argvs):
    self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
                                          prog='hub run {}'.format(self.name),
                                          usage='%(prog)s',
                                          add_help=True)
    self.parser.add_argument('--input_path', type=str, help="path to image.")
    self.parser.add_argument('--labels', type=str, nargs='+', help="segmentation labels.")
    self.parser.add_argument('--output_dir',
                             type=str,
                             default='lseg_output',
                             help="The directory to save output images.")
    args = self.parser.parse_args(argvs)
    self.segment(image=args.input_path, labels=args.labels, visualization=True, output_dir=args.output_dir)
    return 'segmentation results are saved in %s' % args.output_dir
98d598b7fe14ddca68f8107a66a1f8a3e4ce2bd8
144
https://github.com/PaddlePaddle/PaddleHub.git
336
def run_cmd(self, argvs):
    self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
                                          prog='hub run {}'.format(self.name),
                                          usage='%(prog)s',
                                          add_help=True)
    self.parser.add_argument('--input_path', type=str, help="path to image.")
    self.parser.a
26
236
run_cmd
10
0
1
11
code/deep/BJMMD/caffe/python/caffe/test/test_python_layer.py
60,351
Balanced joint maximum mean discrepancy for deep transfer learning
transferlearning
12
Python
10
test_python_layer.py
def python_net_file():
    with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
        f.write()
    return f.name
cc4d0564756ca067516f71718a3d135996525909
30
https://github.com/jindongwang/transferlearning.git
26
def python_net_file():
    with tempfile.NamedTemporaryFile(mode='w+', delet
8
55
python_net_file
110
0
5
19
keras/engine/training.py
268,840
When interactive logging is disabled (using absl logging) and verbose="auto", we will use verbose=2, which does not print the progress bar.

PiperOrigin-RevId: 420939364
keras
17
Python
70
training.py
def _detect_save_format(filepath):
    filepath = io_utils.path_to_string(filepath)
    if saving_utils.is_hdf5_filepath(filepath):
        return filepath, 'h5'

    # Filepath could be a TensorFlow checkpoint file prefix or SavedModel
    # directory. It's possible for filepath to be both a prefix and directory.
    # Prioritize checkpoint over SavedModel.
    if _is_readable_tf_checkpoint(filepath):
        save_format = 'tf'
    elif tf.saved_model.contains_saved_model(filepath):
        ckpt_path = os.path.join(filepath, tf.saved_model.VARIABLES_DIRECTORY,
                                 tf.saved_model.VARIABLES_FILENAME)
        if _is_readable_tf_checkpoint(ckpt_path):
            filepath = ckpt_path
            save_format = 'tf'
        else:
            raise ValueError('Unable to load weights. filepath {} appears to be a '
                             'SavedModel directory, but checkpoint either doesn\'t '
                             'exist, or is incorrectly formatted.'.format(filepath))
    else:
        # Not a TensorFlow checkpoint. This filepath is likely an H5 file that
        # doesn't have the hdf5/keras extensions.
        save_format = 'h5'
    return filepath, save_format
ba6fddb32d20ccd0759c90d19978b02da6568fe7
102
https://github.com/keras-team/keras.git
231
def _detect_save_format(filepath):
    filepath = io_utils.path_to_string(filepath)
    if saving_utils.is_hdf5_filepath(filepath):
        return filepath, 'h5'

    # Filepath could be a TensorFlow checkpoint file prefix or SavedModel
    # directory. It's possible for filepath to be both a prefix and directory.
    # Prioritize checkpoint over SavedModel.
    if _is_readable_tf_checkpoint(filepath):
        save_format = 'tf'
    elif tf.saved_model.contains_saved_model(filepath):
        ckpt_path = os.path.join(filepath, tf.saved_model.VARIABLES_DIRECTORY,
                                 tf.saved_model.VARIABLES_FILENAME)
        if _is_readable_tf_checkpoint(ckpt_
19
186
_detect_save_format
9
0
1
3
modin/core/execution/dispatching/factories/test/test_dispatcher.py
155,108
FEAT-#5230: Support external query compiler and IO (#5231)

Signed-off-by: Devin Petersohn <devin.petersohn@gmail.com>
Co-authored-by: Vasily Litvinov <fam1ly.n4me@yandex.ru>
modin
10
Python
9
test_dispatcher.py
def test_set_execution():
    with _switch_execution("Bar", "Foo"):
        assert FactoryDispatcher.get_factory() == FooOnBarFactory
fd776a5a4faf7695d461f369bb2470dcb8aa2745
20
https://github.com/modin-project/modin.git
18
def test_set_execution():
    with _switch_execution("Bar", "Foo"):
        assert FactoryDispatcher.get_factory() == FooOnBarFactory
5
39
test_set_execution
54
0
4
18
saleor/graphql/meta/mutations.py
26,291
Catch update_fields did not affect any rows errors and return response with message (#9225)

* Add select_for_update() to querysets in checkout and meta mutations
* Remove redundant select_for_update
* Remove reduntant file after merging conflicts
* Catch update_fields did not affect any rows errors and return response with message
* Review changes
* Update products.py
saleor
17
Python
45
mutations.py
def get_instance(cls, info, **data):
    object_id = data.get("id")
    qs = data.get("qs", None)

    try:
        type_name, _ = from_global_id_or_error(object_id)
        # ShippingMethodType represents the ShippingMethod model
        if type_name == "ShippingMethodType":
            qs = shipping_models.ShippingMethod.objects
        return cls.get_node_or_error(info, object_id, qs=qs)
    except GraphQLError as e:
        if instance := cls.get_instance_by_token(object_id, qs):
            return instance
        raise ValidationError(
            {
                "id": ValidationError(
                    str(e), code=MetadataErrorCode.GRAPHQL_ERROR.value
                )
            }
        )
e85f83b4c7ef25e18509577dbc808893a3484f61
105
https://github.com/saleor/saleor.git
279
def get_instance(cls, info, **data):
    object_id = data.get("id")
    qs = data.get("qs", None)

    try:
        type_name, _ = from_global_id_or_error(object_id)
        # ShippingMethodType represents the ShippingMethod model
        if type_name == "ShippingMethodType":
            qs = shipping_models.ShippingMethod.objects
        return cls.get_node_or_error(info, object_id, qs=qs)
    except GraphQLError as e:
        if instance := cls.get_instance_by_token(object_id, qs):
            return instance
        raise ValidationError(
            {
                "id": ValidationError(
                    str(e), code=MetadataErrorCode.GRAPHQL_ERROR.value
                )
            }
        )
24
171
get_instance
15
0
1
9
src/documents/tests/test_tasks.py
319,271
added tests: - barcode-39 - barcode-128 - qr barcodes - test for consumption Signed-off-by: Florian Brandes <florian.brandes@posteo.de>
paperless-ngx
11
Python
14
test_tasks.py
def test_barcode_reader_unreadable(self): test_file = os.path.join( os.path.dirname(__file__), "samples", "barcodes", "barcode-39-PATCHT-unreadable.png", ) img = Image.open(test_file) self.assertEqual(tasks.barcode_reader(img), [])
10ca515ac527b746d6a948d6aebca5d253923b64
51
https://github.com/paperless-ngx/paperless-ngx.git
86
def test_barcode_reader_unreadable(self): test_file = os.path.join( os.path.dirname(__file__), "samples", "barcodes", "barcode-39-PATCHT-unreadable.png", ) img = Image.open(test_file)
14
84
test_barcode_reader_unreadable
14
0
1
4
homeassistant/components/mqtt/device_tracker/schema_discovery.py
289,957
Improve MQTT type hints part 8 (#81034) * Improve typing device_tracker discovery * Improve typing device_tracker yaml * Add test source_type attribute * Follow up comment * Initialize at `__init__` not at class level. * Use full name for return variable * Correct import, remove assert * Use AsyncSeeCallback
core
8
Python
12
schema_discovery.py
def source_type(self) -> SourceType | str: source_type: SourceType | str = self._config[CONF_SOURCE_TYPE] return source_type
bcae6d604e2967c7475f0caa4b1b5e4e76ab88bf
24
https://github.com/home-assistant/core.git
35
def source_type(self) -> SourceType | str: sour
6
40
source_type
16
0
1
6
tests/sentry/api/test_client_state.py
97,814
Client State endpoint (#33135) * Client State endpoint * fixes * use configured cluster key * Add tests * make endpoint private * updates * fix tests
sentry
12
Python
16
test_client_state.py
def test_large_payload(self): resp = self.client.put( self.path, {"test": 300 * "Dummy Data"}, ) assert resp.status_code == 413
cef95dd7be74a43fb986e7bb3cd638481de0fc2f
32
https://github.com/getsentry/sentry.git
58
def test_large_payload(self): resp = self.client.put(
7
53
test_large_payload
28
0
1
5
wagtail/admin/tests/tests.py
72,314
Reformat with black
wagtail
11
Python
25
tests.py
def test_not_logged_in_redirect(self): response = self.client.get("/admin/sdfgdsfgdsfgsdf/") # Check that the user was redirected to the login page and that next was set correctly self.assertRedirects( response, reverse("wagtailadmin_login") + "?next=/admin/sdfgdsfgdsfgsdf/" )
d10f15e55806c6944827d801cd9c2d53f5da4186
28
https://github.com/wagtail/wagtail.git
66
def test_not_logged_in_redirect(self): response = self.
7
52
test_not_logged_in_redirect
10
0
2
5
keras/optimizers/optimizer_v2/utils.py
275,622
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
9
Python
10
utils.py
def make_global_gradient_clipnorm_fn(clipnorm): if clipnorm is None: return lambda grads_and_vars: grads_and_vars
84afc5193d38057e2e2badf9c889ea87d80d8fbf
20
https://github.com/keras-team/keras.git
23
def make_global_gradient_clipnorm_fn(clipnorm): if clipnorm is None: retur
3
29
make_global_gradient_clipnorm_fn
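The code field above is cut off after the `clipnorm is None` fast path. For orientation, a hedged sketch of what the remaining body of such a factory typically looks like, built on the real `tf.clip_by_global_norm` API; the body is an illustration, not the verbatim Keras source:

```python
import tensorflow as tf

def make_global_gradient_clipnorm_fn_sketch(clipnorm):
    if clipnorm is None:
        return lambda grads_and_vars: grads_and_vars

    def gradient_clipnorm_fn(grads_and_vars):
        grads, variables = zip(*grads_and_vars)
        # Rescale all gradients jointly so their *global* norm is <= clipnorm.
        clipped_grads, _ = tf.clip_by_global_norm(grads, clipnorm)
        return list(zip(clipped_grads, variables))

    return gradient_clipnorm_fn
```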
43
0
1
8
pandas/tests/frame/methods/test_reindex.py
168,916
BUG: reindex using wrong fill value when indexing cols and index for uint dtypes (#48185)
pandas
12
Python
32
test_reindex.py
def test_reindex_uint_dtypes_fill_value(self, any_unsigned_int_numpy_dtype): # GH#48184 df = DataFrame({"a": [1, 2], "b": [1, 2]}, dtype=any_unsigned_int_numpy_dtype) result = df.reindex(columns=list("abcd"), index=[0, 1, 2, 3], fill_value=10) expected = DataFrame( {"a": [1, 2, 10, 10], "b": [1, 2, 10, 10], "c": 10, "d": 10}, dtype=any_unsigned_int_numpy_dtype, ) tm.assert_frame_equal(result, expected)
d0268e719f899789f9606beb4592a17d27086b4c
113
https://github.com/pandas-dev/pandas.git
106
def test_reindex_uint_dtypes_fill_value(self, any_unsigned_int_numpy_dtype): # GH#48184 df = DataFrame({"a": [1, 2], "b":
15
169
test_reindex_uint_dtypes_fill_value
28
0
1
13
keras/layers/rnn/time_distributed.py
274,134
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
10
Python
19
time_distributed.py
def compute_output_shape(self, input_shape): input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False) child_input_shape = tf.nest.map_structure( self._remove_timesteps, input_shape ) child_output_shape = self.layer.compute_output_shape(child_input_shape) child_output_shape = tf_utils.convert_shapes( child_output_shape, to_tuples=False ) timesteps = tf_utils.convert_shapes(input_shape) timesteps = tf.nest.flatten(timesteps)[1]
84afc5193d38057e2e2badf9c889ea87d80d8fbf
89
https://github.com/keras-team/keras.git
105
def compute_output_shape(self, input_shape): input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False) child_input_shape = tf.nest.map_structure( self._remove_timesteps, input_shape )
15
117
compute_output_shape
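The (truncated) method above removes the timestep dimension, delegates shape inference to the wrapped layer, and splices the timesteps back in. The net effect is visible from the public API; a quick check, assuming a standard TensorFlow/Keras install:

```python
from tensorflow import keras

layer = keras.layers.TimeDistributed(keras.layers.Dense(8))
# (batch, timesteps, features): timesteps pass through untouched, Dense maps 16 -> 8
print(layer.compute_output_shape((None, 10, 16)))  # (None, 10, 8)
```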
17
0
2
6
sklearn/tests/test_random_projection.py
260,446
MAINT Use `_validate_params` in RandomProjection family (#23831) Co-authored-by: Meekail Zain <34613774+Micky774@users.noreply.github.com>
scikit-learn
14
Python
16
test_random_projection.py
def test_random_projection_transformer_invalid_input(): n_components = "auto" fit_data = [[0, 1, 2]] for RandomProjection in all_RandomProjection: with pytest.raises(ValueError): RandomProjection(n_components=n_components).fit(fit_data)
dcd0d4f054b4586f617d35885df05eaae4708876
42
https://github.com/scikit-learn/scikit-learn.git
43
def test_random_projection_transformer_invalid_input(): n_components = "auto" fit_data = [[0, 1, 2]] for RandomProjection in all_RandomProjection: with pytest.raises(ValueError): RandomProjection(n
9
70
test_random_projection_transformer_invalid_input
17
0
2
6
homeassistant/components/demo/vacuum.py
295,394
Add EntityFeature enum to Vacuum (#69121)
core
8
Python
16
vacuum.py
def turn_off(self, **kwargs): if self.supported_features & VacuumEntityFeature.TURN_OFF == 0: return self._state = False self._status = "Charging" self.schedule_update_ha_state()
2d37066ce59064ed6121720e03424e4dc73c2b43
36
https://github.com/home-assistant/core.git
63
def turn_off(self, **kwargs):
9
63
turn_off
95
0
1
16
sklearn/tests/test_pipeline.py
261,300
MAINT Clean deprecation for 1.2: normalize in linear models (#24391)
scikit-learn
12
Python
58
test_pipeline.py
def test_pipeline_raise_set_params_error(): # Test pipeline raises set params error message for nested models. pipe = Pipeline([("cls", LinearRegression())]) # expected error message error_msg = re.escape( "Invalid parameter 'fake' for estimator Pipeline(steps=[('cls'," " LinearRegression())]). Valid parameters are: ['memory', 'steps', 'verbose']." ) with pytest.raises(ValueError, match=error_msg): pipe.set_params(fake="nope") # invalid outer parameter name for compound parameter: the expected error message # is the same as above. with pytest.raises(ValueError, match=error_msg): pipe.set_params(fake__estimator="nope") # expected error message for invalid inner parameter error_msg = re.escape( "Invalid parameter 'invalid_param' for estimator LinearRegression(). Valid" " parameters are: ['copy_X', 'fit_intercept', 'n_jobs', 'positive']." ) with pytest.raises(ValueError, match=error_msg): pipe.set_params(cls__invalid_param="nope")
e41753ebd57c44ae91b389f190c43ddc0b384a75
96
https://github.com/scikit-learn/scikit-learn.git
182
def test_pipeline_raise_set_params_error(): # Test pipeline raises set params error message for nested models. pipe = Pipeline([("cls", LinearRegression())]) # expected error message error_msg = re.escape( "Invalid parameter 'fake' for estimator Pipeline(steps=[('cls'," " LinearRegression())]). Valid parameters are: ['memory', 'steps', 'verbose']." ) with pytest.raises(ValueError, match=error_msg): pipe.set_params(fake="nope") # invalid outer parameter name for compound parameter: the expected error message # is the same as above. with pytest.raises(ValueError, match=error_msg): pipe.set_params(fake__estimator="nope") # expected error message for invalid inner parameter error_msg = re.escape( "Invalid parameter 'invalid_param' for estimator LinearRegression(). Valid" " parameters
15
180
test_pipeline_raise_set_params_error
93
0
1
34
tests/rpc/test_rpc_webhook.py
149,435
webhookbuy -> webhookentry
freqtrade
12
Python
67
test_rpc_webhook.py
def test_exception_send_msg(default_conf, mocker, caplog): default_conf["webhook"] = get_webhook_dict() del default_conf["webhook"]["webhookentry"] webhook = Webhook(RPC(get_patched_freqtradebot(mocker, default_conf)), default_conf) webhook.send_msg({'type': RPCMessageType.ENTRY}) assert log_has(f"Message type '{RPCMessageType.ENTRY}' not configured for webhooks", caplog) default_conf["webhook"] = get_webhook_dict() default_conf["webhook"]["webhookentry"]["value1"] = "{DEADBEEF:8f}" msg_mock = MagicMock() mocker.patch("freqtrade.rpc.webhook.Webhook._send_msg", msg_mock) webhook = Webhook(RPC(get_patched_freqtradebot(mocker, default_conf)), default_conf) msg = { 'type': RPCMessageType.ENTRY, 'exchange': 'Binance', 'pair': 'ETH/BTC', 'limit': 0.005, 'order_type': 'limit', 'stake_amount': 0.8, 'stake_amount_fiat': 500, 'stake_currency': 'BTC', 'fiat_currency': 'EUR' } webhook.send_msg(msg) assert log_has("Problem calling Webhook. Please check your webhook configuration. " "Exception: 'DEADBEEF'", caplog) msg_mock = MagicMock() mocker.patch("freqtrade.rpc.webhook.Webhook._send_msg", msg_mock) msg = { 'type': 'DEADBEEF', 'status': 'whatever' } with pytest.raises(NotImplementedError): webhook.send_msg(msg)
7d3116f9fbe446a31837f483aa9bef550d7a1d3d
207
https://github.com/freqtrade/freqtrade.git
269
def test_exception_send_msg(default_conf, mocker, caplog): default_conf["webhook"] = get_webhook_dict() del default_conf["webhook"]["webhookentry"] webhook = Webhook(RPC(get_patched_freqtradebot(mocker, default_conf)), default_conf) webhook.send_msg({'type': RPCMessageType.ENTRY}) assert log_has(f"Message type '{RPCMessageType.ENTRY}' not configured for webhooks", caplog) default_conf["webhook"] = get_webhook_dict() default_conf["webhook"]["webhookentry"]["value1"] = "{DEADBEEF:8f}" msg_mock = MagicMock() mocker.patch("freqtrade.rpc.webhook.Webhook._send_msg", msg_mock) webhook = Webhook(RPC(get_patched_freqtradebot(mocker, default_conf)), default_conf) msg = { 'type': RPCMessageType.ENTRY, 'exchange': 'Binance', 'pair': 'ETH/BTC', 'limit': 0
20
374
test_exception_send_msg
55
0
2
12
keras/optimizers/optimizer_experimental/optimizer.py
275,268
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
11
Python
44
optimizer.py
def learning_rate(self, learning_rate): if isinstance( self._learning_rate, learning_rate_schedule.LearningRateSchedule ): raise TypeError( "This optimizer was created with a `LearningRateSchedule`" " object as its `learning_rate` constructor argument, " "hence its learning rate is not settable. If you need the" " learning rate to be settable, you should instantiate " "the optimizer with a float `learning_rate` argument." ) self._learning_rate.assign(learning_rate)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
36
https://github.com/keras-team/keras.git
183
def learning_rate(self, learning_rate): if isinstance( self._learning_rate, learning_rate_schedule.LearningRateSchedule ): raise TypeError( "This optimizer was created with a `LearningRateSchedule`" " object as its `learning_rate` constructor argument, " "hence its learning rate is not settable. If you need
8
65
learning_rate
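A small repro of both setter paths, sketched below. It assumes a TF version (2.11+) in which `tf.keras.optimizers.Adam` is this experimental optimizer class; on older versions the schedule assignment may not raise:

```python
import tensorflow as tf

opt = tf.keras.optimizers.Adam(learning_rate=0.01)
opt.learning_rate = 0.005  # fine: the float path assigns into a tf.Variable

schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=0.1, decay_steps=1000, decay_rate=0.9)
opt = tf.keras.optimizers.Adam(learning_rate=schedule)
try:
    opt.learning_rate = 0.005  # hits the TypeError branch above
except TypeError as e:
    print(e)
```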
15
0
1
5
keras/tests/model_architectures.py
276,476
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
8
Python
11
model_architectures.py
def call(self, inputs, **kwargs): x = self.dense1(inputs) x = self.dp(x) x = self.bn(x) return self.dense2(x)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
41
https://github.com/keras-team/keras.git
42
def call(self, inputs, **kwargs): x = self.dense1(inputs)
9
65
call
31
0
1
8
dashboard/modules/job/tests/test_job_manager.py
145,243
[jobs] Rename JobData -> JobInfo (#22499) `JobData` could be confused with the actual output data of a job; `JobInfo` makes it clearer that this is status information + metadata.
ray
12
Python
28
test_job_manager.py
async def test_failed_runtime_env_validation(self, job_manager): run_cmd = f"python {_driver_script_path('override_env_var.py')}" job_id = job_manager.submit_job( entrypoint=run_cmd, runtime_env={"working_dir": "path_not_exist"} ) data = job_manager.get_job_info(job_id) assert data.status == JobStatus.FAILED assert "path_not_exist is not a valid URI" in data.message
58e5f0140d247059ca45b249446614929930c126
52
https://github.com/ray-project/ray.git
91
async def test_failed_runtime_env_validation(self, job_manager): run_cmd = f"python {_driver_script_path('override_env_var.py')}" job_id = job_manager.submit
15
101
test_failed_runtime_env_validation
215
0
7
70
jaxlib/lapack.py
120,615
feat: refactor code using pyupgrade This PR upgrades legacy Python code to 3.7+ code using pyupgrade: ```sh pyupgrade --py37-plus --keep-runtime-typing **.py ```
jax
16
Python
105
lapack.py
def syevd_mhlo(dtype, a, lower=False): a_type = ir.RankedTensorType(a.type) dims = a_type.shape assert len(dims) >= 2 m, n = dims[-2:] assert m == n batch_dims = tuple(dims[:-2]) num_bd = len(batch_dims) b = 1 for d in batch_dims: b *= d layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1)) i32_type = ir.IntegerType.get_signless(32) if dtype == np.float32: fn = b"lapack_ssyevd" eigvals_type = ir.F32Type.get() workspace = [ ir.RankedTensorType.get([_lapack.syevd_work_size(n)], a_type.element_type), ir.RankedTensorType.get([_lapack.syevd_iwork_size(n)], i32_type), ] workspace_layouts = [[0], [0]] elif dtype == np.float64: fn = b"lapack_dsyevd" eigvals_type = ir.F64Type.get() workspace = [ ir.RankedTensorType.get([_lapack.syevd_work_size(n)], a_type.element_type), ir.RankedTensorType.get([_lapack.syevd_iwork_size(n)], i32_type), ] workspace_layouts = [[0], [0]] elif dtype == np.complex64: fn = b"lapack_cheevd" eigvals_type = ir.F32Type.get() workspace = [ ir.RankedTensorType.get([_lapack.heevd_work_size(n)], a_type.element_type), ir.RankedTensorType.get([_lapack.heevd_rwork_size(n)], eigvals_type), ir.RankedTensorType.get([_lapack.syevd_iwork_size(n)], i32_type), ] workspace_layouts = [[0], [0], [0]] elif dtype == np.complex128: fn = b"lapack_zheevd" eigvals_type = ir.F64Type.get() workspace = [ ir.RankedTensorType.get([_lapack.heevd_work_size(n)], a_type.element_type), ir.RankedTensorType.get([_lapack.heevd_rwork_size(n)], eigvals_type), ir.RankedTensorType.get([_lapack.syevd_iwork_size(n)], i32_type), ] workspace_layouts = [[0], [0], [0]] else: raise NotImplementedError(f"Unsupported dtype {dtype}") scalar_layout = [] layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1)) out = custom_call( fn, [ a.type, ir.RankedTensorType.get(batch_dims + (n,), eigvals_type), ir.RankedTensorType.get(batch_dims, i32_type), ] + workspace, [_mhlo_s32(1 if lower else 0), _mhlo_s32(b), _mhlo_s32(n), a], operand_layouts=[scalar_layout] * 3 + [layout], result_layouts=[ layout, tuple(range(num_bd, -1, -1)), tuple(range(num_bd - 1, -1, -1)), ] + workspace_layouts) return out[:3] # # geev: Nonsymmetric eigendecomposition
17de89b16ac5ee05aee03115d858e67489eab973
606
https://github.com/google/jax.git
582
def syevd_mhlo(dtype, a, lower=False): a_type = ir.RankedTensorType(a.type) dims = a_type.shape assert len(dims) >= 2 m, n = dims[-2:] assert m == n batch_dims = tuple(dims[:-2]) num_bd = len(batch_dims) b = 1 for d in batch_dims: b *= d layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1)) i32_type = ir.IntegerType.get_signless(32) if dtype == np.float32: fn = b"lapack_ssyevd" eigvals_type = ir.F32Type.get() workspace = [ ir.RankedTensorType.get([_lapack.syevd_work_size(n)],
48
911
syevd_mhlo
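The lowering above picks one of four LAPACK kernels (ssyevd/dsyevd/cheevd/zheevd) by dtype and sizes dtype-specific workspaces. User code never calls it directly; the operation being lowered is just a symmetric/Hermitian eigendecomposition. A sanity check of that math via the public API (this exercises `eigh`, not the MHLO builder itself):

```python
import numpy as np
import jax.numpy as jnp

a = jnp.array([[2.0, 1.0], [1.0, 2.0]])  # symmetric; eigenvalues 1 and 3
w, v = jnp.linalg.eigh(a)                # on CPU this bottoms out in syevd-style kernels
print(np.allclose(np.asarray(w), [1.0, 3.0]))  # True
```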
89
0
1
29
zerver/tests/test_message_edit.py
83,151
python: Replace string concatenations with f-strings.
zulip
13
Python
66
test_message_edit.py
def test_move_message_to_stream_and_topic(self) -> None: (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics( "iago", "test move stream", "new stream", "test" ) with queries_captured() as queries, cache_tries_captured() as cache_tries: result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "stream_id": new_stream.id, "propagate_mode": "change_all", "topic": "new topic", }, ) self.assert_length(queries, 52) self.assert_length(cache_tries, 13) messages = get_topic_messages(user_profile, old_stream, "test") self.assert_length(messages, 1) self.assertEqual( messages[0].content, f"This topic was moved by @_**Iago|{user_profile.id}** to #**new stream>new topic**", ) messages = get_topic_messages(user_profile, new_stream, "new topic") self.assert_length(messages, 4) self.assertEqual( messages[3].content, f"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**", ) self.assert_json_success(result)
d560d124a304a2f6dd467200aab7f070a78bf155
163
https://github.com/zulip/zulip.git
384
def test_move_message_to_stream_and_topic(self) -> None: (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics( "iago", "
21
283
test_move_message_to_stream_and_topic
61
0
3
17
homeassistant/components/matter/entity.py
291,868
Add matter integration BETA (#83064) * Add matter base (#79372) Co-authored-by: Marcel van der Veldt <m.vanderveldt@outlook.com> * Add matter server add-on flow (#82698) * Add matter server add-on flow * Fix stale error argument * Clean docstrings * Use localhost as default address * Add matter websocket api foundation (#82848) * Add matter config entry add-on management (#82865) * Use matter refactored server/client library (#83003) Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Bump python-matter-server to 1.0.6 (#83059) * Extend matter websocket api (#82948) * Extend matter websocket api * Finish docstring * Fix pin type * Adjust api after new client * Adjust api to frontend for now Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
core
16
Python
53
entity.py
async def async_added_to_hass(self) -> None: await super().async_added_to_hass() # Subscribe to attribute updates. for attr_cls in self.entity_description.subscribe_attributes: if matter_attr := self.get_matter_attribute(attr_cls): self._attributes_map[attr_cls] = matter_attr.path self._unsubscribes.append( self.matter_client.subscribe( self._on_matter_event, EventType.ATTRIBUTE_UPDATED, self._device_type_instance.node.node_id, matter_attr.path, ) ) continue # not sure if this can happen, but just in case log it. LOGGER.warning("Attribute not found on device: %s", attr_cls) # make sure to update the attributes once self._update_from_device()
e2308fd15cec4dfdd25d843b72cd3071657fd5b8
93
https://github.com/home-assistant/core.git
326
async def async_added_to_hass(self) -> None: await super().async_added_to_hass() # Subscribe to attribute updates. for attr_cls in self.entity_description.subscribe_attributes: if matter_attr := self.get_matter_attribute(attr_cls): self._attributes_map[attr_cls] = matter_attr.path self._unsubscribes.append( self.matter_client.subscribe( self._on_matter_event, EventType.ATTRIBUT
23
153
async_added_to_hass
31
0
4
7
python3.10.4/Lib/_pydecimal.py
219,683
add python 3.10.4 for windows
XX-Net
13
Python
27
_pydecimal.py
def _sqrt_nearest(n, a): if n <= 0 or a <= 0: raise ValueError("Both arguments to _sqrt_nearest should be positive.") b=0 while a != b: b, a = a, a--n//a>>1 return a
8198943edd73a363c266633e1aa5b2a9e9c9f526
42
https://github.com/XX-net/XX-Net.git
60
def _sqrt_nearest(n, a): i
5
77
_sqrt_nearest
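The dense update `b, a = a, a--n//a>>1` parses as `b, a = a, (a - (-n // a)) >> 1`; since `-(-n // a)` is a ceiling division, each step is the Newton iteration a ← (a + ⌈n/a⌉) >> 1, and the ceil/floor pairing rules out a two-cycle, so the `a != b` loop terminates. A spelled-out equivalent (the readable helper name is mine):

```python
def sqrt_nearest_readable(n, a):
    """Equivalent of _sqrt_nearest above with the ceiling division unpacked."""
    if n <= 0 or a <= 0:
        raise ValueError("Both arguments should be positive.")
    b = 0
    while a != b:
        ceil_div = -(-n // a)             # ceil(n / a)
        b, a = a, (a + ceil_div) >> 1     # Newton step, halved with a shift
    return a

print(sqrt_nearest_readable(10**10, 12345))  # 100000, even from a poor start
```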
14
0
2
6
apps/applications/serializers/application.py
188,091
perf: Optimize account backup (#7503) * perf: Optimize account backup * feat: Optimize fetching the ordered list of backup fields for account backup Co-authored-by: feng626 <1304903146@qq.com> Co-authored-by: Michael Bai <baijiangjie@gmail.com>
jumpserver
9
Python
11
application.py
def app(self): if isinstance(self.instance, models.Application): instance = self.instance else: instance = None return instance
def9bedd3093e88f56618b068f3db53a042e6c1e
29
https://github.com/jumpserver/jumpserver.git
56
def app(self): if isinstance(self.instance, models.Application): instance = self.instance else: instance = None return instance
6
47
app
102
0
8
25
pandas/core/indexes/base.py
163,518
DEPR: DatetimeIndex.intersection with mixed timezones cast to UTC, not object (#45357) * DEPR: DatetimeIndex.intersection with mixed timezones cast to UTC instead of object * GH ref * mypy fixup Co-authored-by: Jeff Reback <jeff@reback.net>
pandas
15
Python
76
base.py
def union(self, other, sort=None): self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): if ( isinstance(self, ABCMultiIndex) and not is_object_dtype(unpack_nested_dtype(other)) and len(other) > 0 ): raise NotImplementedError( "Can only union MultiIndex with MultiIndex or Index of tuples, " "try mi.to_flat_index().union(other) instead." ) self._deprecate_dti_setop(other, "union") dtype = self._find_common_type_compat(other) left = self.astype(dtype, copy=False) right = other.astype(dtype, copy=False) return left.union(right, sort=sort) elif not len(other) or self.equals(other): # NB: whether this (and the `if not len(self)` check below) come before # or after the is_dtype_equal check above affects the returned dtype return self._get_reconciled_name_object(other) elif not len(self): return other._get_reconciled_name_object(self) result = self._union(other, sort=sort) return self._wrap_setop_result(other, result)
4e034ec0006b6c05160ce67ea1420ce28f295c91
186
https://github.com/pandas-dev/pandas.git
400
def union(self, other, sort=None): self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): if ( isinstance(self, ABCMultiIndex) and not is_object_dtype(unpack_nested_dtype(other)) and len(other) > 0 ): raise NotImplementedError( "Can only union MultiIndex with MultiIndex or Index of tuples, " "try mi.to_flat_index().union(other) instead." ) self._deprecate_dti_setop(other, "union") dtype = self._find_common_type_compat(other) left = self.astype(dtype, copy=False) right = other.astype(dtype, copy=False) return left.union(right, sort=sort) elif not len(other) or self.equals(other): # NB: whether this (and the `if not len(self)` check below) come before # or after the is_dtype_equal check above affects the returned dtype return self._get_reconciled_name_object(other) elif not len(self): return other._get_reconciled_name_object(self) result = self._union(other, sort=sort) return self._wrap_
27
300
union
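Before delegating, the method above reconciles dtypes: on a mismatch it computes a common type via `_find_common_type_compat`, casts both operands, and retries the union. That promotion is observable from the public API with no internals involved:

```python
import pandas as pd

left = pd.Index([1, 2, 3])    # int64
right = pd.Index([2.5, 3.5])  # float64
# dtypes differ -> both sides are cast to the common type, then unioned
print(left.union(right))      # Index([1.0, 2.0, 2.5, 3.0, 3.5], dtype='float64')
```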
24
0
2
9
erpnext/manufacturing/doctype/workstation_type/test_workstation_type.py
69,547
test: test case to check workstation type
erpnext
12
Python
20
test_workstation_type.py
def create_workstation_type(**args): args = frappe._dict(args) if workstation_type := frappe.db.exists("Workstation Type", args.workstation_type): return frappe.get_doc("Workstation Type", workstation_type) else: doc = frappe.new_doc("Workstation Type") doc.update(args) doc.insert() return doc
7bd06e6fbc30ebd92b18055983e3b88fa9545e2a
62
https://github.com/frappe/erpnext.git
15
def create_workstation_type(**args): args = frappe._dict(args) if workstation_type := frappe.db.exists("Workstation Type", args.workstation_type): return frappe.get_doc("Workstation Type", workstation_type) else: doc = frappe.new_doc("Workstation Type") doc.update(args) doc.insert() retur
12
106
create_workstation_type
18
0
1
6
cps/editbooks.py
172,728
Better epub cover parsing with multiple cover-image items. Code cosmetics: renamed variables, refactored xml page generation, refactored prepare author.
calibre-web
17
Python
17
editbooks.py
def edit_single_cc_data(book_id, book, column_id, to_save): cc = (calibre_db.session.query(db.CustomColumns) .filter(db.CustomColumns.datatype.notin_(db.cc_exceptions)) .filter(db.CustomColumns.id == column_id) .all()) return edit_cc_data(book_id, book, to_save, cc)
4545f4a20d9ff90b99bbd4e3e34b6de4441d6367
67
https://github.com/janeczku/calibre-web.git
50
def edit_single_cc_data(book_id, book, column_id, to_save): cc = (calibre_db.session.query(db.CustomColumns) .filter(db.CustomColumns
18
100
edit_single_cc_data
30
0
2
7
modules/image/text_to_image/stable_diffusion/diffusers/schedulers/scheduling_karras_ve.py
50,763
Add stable diffusion module
PaddleHub
14
Python
25
scheduling_karras_ve.py
def set_timesteps(self, num_inference_steps): self.num_inference_steps = num_inference_steps self.timesteps = np.arange(0, self.num_inference_steps)[::-1].copy() self.schedule = [(self.sigma_max * (self.sigma_min**2 / self.sigma_max**2)**(i / (num_inference_steps - 1))) for i in self.timesteps] self.schedule = np.array(self.schedule, dtype=np.float32) self.set_format(tensor_format=self.tensor_format)
a6790a651a12eb391060e533868bf0ba197f6f7e
104
https://github.com/PaddlePaddle/PaddleHub.git
88
def set_timesteps(self, num_inference_steps): self.num_inference_steps = num_inference_steps self.timesteps = np.arange(0, self.num_inference_steps)[::-1].copy() self.schedule = [(self.sigma_max * (self.sigma_min**2 / self.sigma_max**2)**(i / (num_inference_steps - 1))) for i in self.timest
16
160
set_timesteps
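The schedule above is the Karras VE noise ladder: for reversed index i, σ_i = σ_max · (σ_min²/σ_max²)^(i/(N−1)). A standalone numpy transcription, with arbitrary illustrative sigma values:

```python
import numpy as np

sigma_min, sigma_max, num_inference_steps = 0.02, 100.0, 5  # illustrative values
timesteps = np.arange(0, num_inference_steps)[::-1]          # [4, 3, 2, 1, 0]
schedule = np.array(
    [sigma_max * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))
     for i in timesteps],
    dtype=np.float32,
)
print(schedule)  # ascends from sigma_min**2 / sigma_max (i = N-1) to sigma_max (i = 0)
```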
19
0
1
10
tests/maskformer/test_feature_extraction_maskformer.py
35,842
Maskformer (#15682) * maskformer * conflicts * conflicts * minor fixes * feature extractor test fix refactor MaskFormerLoss following conversation MaskFormer related types should not trigger a module time import error missed one removed all the types that are not used update config mapping minor updates in the doc resolved conversation that doesn't need a discussion minor changes resolved conversations fixed DetrDecoder * minor changes minor changes fixed mdx file test feature_extractor return types functional losses -> classes removed the return type test for the feature extractor minor changes + style + quality * conflicts? * rebase master * readme * added missing files * deleted poolformers test that were in the wrong place * CI * minor changes * Apply suggestions from code review Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * resolved conversations * minor changes * conversations [Unispeech] Fix slow tests (#15818) * remove soundfile old way of loading audio * Adapt slow test [Barthez Tokenizer] Fix saving (#15815) [TFXLNet] Correct tf xlnet generate (#15822) * [TFXLNet] Correct tf xlnet * adapt test comment Fix the push run (#15807) Fix semantic segmentation pipeline test (#15826) Fix dummy_inputs() to dummy_inputs in symbolic_trace doc (#15776) Add model specific output classes to PoolFormer model docs (#15746) * Added model specific output classes to poolformer docs * Fixed Segformer typo in Poolformer docs Adding the option to return_timestamps on pure CTC ASR models. (#15792) * Adding the option to return_timestamps on pure CTC ASR models. * Remove `math.prod` which was introduced in Python 3.8 * int are not floats. * Reworking the PR to support "char" vs "word" output. * Fixup! * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Quality. 
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> HFTracer.trace should use/return self.graph to be compatible with torch.fx.Tracer (#15824) Fix tf.concatenate + test past_key_values for TF models (#15774) * fix wrong method name tf.concatenate * add tests related to causal LM / decoder * make style and quality * clean-up * Fix TFBertModel's extended_attention_mask when past_key_values is provided * Fix tests * fix copies * More tf.int8 -> tf.int32 in TF test template * clean-up * Update TF test template * revert the previous commit + update the TF test template * Fix TF template extended_attention_mask when past_key_values is provided * Fix some styles manually * clean-up * Fix ValueError: too many values to unpack in the test * Fix more: too many values to unpack in the test * Add a comment for extended_attention_mask when there is past_key_values * Fix TFElectra extended_attention_mask when past_key_values is provided * Add tests to other TF models * Fix for TF Electra test: add prepare_config_and_inputs_for_decoder * Fix not passing training arg to lm_head in TFRobertaForCausalLM * Fix tests (with past) for TF Roberta * add testing for past_key_values for TFElectra model Co-authored-by: ydshieh <ydshieh@users.noreply.github.com> [examples/summarization and translation] fix readme (#15833) Add ONNX Runtime quantization for text classification notebook (#15817) Re-enable doctests for the quicktour (#15828) * Re-enable doctests for the quicktour * Re-enable doctests for task_summary (#15830) * Remove & Framework split model report (#15825) Add TFConvNextModel (#15750) * feat: initial implementation of convnext in tensorflow. * fix: sample code for the classification model. * chore: added checked for from the classification model. * chore: set bias initializer in the classification head. * chore: updated license terms. * chore: removed unused imports * feat: enabled argument during using drop_path. * chore: replaced tf.identity with layers.Activation(linear). * chore: edited default checkpoint. * fix: minor bugs in the initializations. * partial-fix: tf model errors for loading pretrained pt weights. * partial-fix: call method updated * partial-fix: cross loading of weights (4x3 variables to be matched) * chore: removed unneeded comment. * removed playground.py * rebasing * rebasing and removing playground.py. * fix: renaming TFConvNextStage conv and layer norm layers * chore: added initializers and other minor additions. * chore: added initializers and other minor additions. * add: tests for convnext. * fix: integration tester class. * fix: issues mentioned in pr feedback (round 1). * fix: how output_hidden_states arg is propagated inside the network. * feat: handling of arg for pure cnn models. * chore: added a note on equal contribution in model docs. * rebasing * rebasing and removing playground.py. * feat: encapsulation for the convnext trunk. * Fix variable naming; Test-related corrections; Run make fixup * chore: added Joao as a contributor to convnext. * rebasing * rebasing and removing playground.py. * rebasing * rebasing and removing playground.py. * chore: corrected copyright year and added comment on NHWC. * chore: fixed the black version and ran formatting. * chore: ran make style. * chore: removed from_pt argument from test, ran make style. * rebasing * rebasing and removing playground.py. * rebasing * rebasing and removing playground.py. * fix: tests in the convnext subclass, ran make style. * rebasing * rebasing and removing playground.py. 
* rebasing * rebasing and removing playground.py. * chore: moved convnext test to the correct location * fix: locations for the test file of convnext. * fix: convnext tests. * chore: applied sgugger's suggestion for dealing w/ output_attentions. * chore: added comments. * chore: applied updated quality environment style. * chore: applied formatting with quality environment. * chore: revert to the previous tests/test_modeling_common.py. * chore: revert to the original test_modeling_common.py * chore: revert to previous states for test_modeling_tf_common.py and modeling_tf_utils.py * fix: tests for convnext. * chore: removed output_attentions argument from convnext config. * chore: revert to the earlier tf utils. * fix: output shapes of the hidden states * chore: removed unnecessary comment * chore: reverting to the right test_modeling_tf_common.py. * Styling nits Co-authored-by: ariG23498 <aritra.born2fly@gmail.com> Co-authored-by: Joao Gante <joao@huggingface.co> Co-authored-by: Sylvain Gugger <Sylvain.gugger@gmail.com> * minor changes * doc fix in feature extractor * doc * typos * removed detr logic from config * removed detr logic from config * removed num_labels * small fix in the config * auxilary -> auxiliary * make style * some test is failing * fix a weird char in config preventing doc-builder * retry to fix the doc-builder issue * make style * new try to fix the doc builder * CI * change weights to facebook Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: ariG23498 <aritra.born2fly@gmail.com> Co-authored-by: Joao Gante <joao@huggingface.co> Co-authored-by: Sylvain Gugger <Sylvain.gugger@gmail.com>
transformers
8
Python
19
test_feature_extraction_maskformer.py
def prepare_feat_extract_dict(self): return { "do_resize": self.do_resize, "size": self.size, "max_size": self.max_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "size_divisibility": self.size_divisibility, }
d83d22f578276e9f201b0b3b0f8f9bd68e86c133
50
https://github.com/huggingface/transformers.git
109
def prepare_feat_extract_dict(self): return { "do_resize": self.do_resize, "size": self.size, "max_size": self.max_size, "do_no
9
84
prepare_feat_extract_dict
8
0
1
2
test/mitmproxy/tools/console/test_statusbar.py
253,030
clean up `statusbar.py`
mitmproxy
9
Python
8
test_statusbar.py
def test_shorten_message(message, ready_message): assert statusbar.shorten_message(message, max_width=30) == ready_message
db7074a37dd9d696b6ca085df90a9c84d434f97a
20
https://github.com/mitmproxy/mitmproxy.git
10
def test_shorten_message(message, ready_message): assert statusbar.shorten_message(message, max_width=30) == ready_message
6
30
test_shorten_message
79
0
5
16
jax/interpreters/pxla.py
121,183
Move _get_array_mapping from gda.py to pxla.py PiperOrigin-RevId: 459891853
jax
16
Python
64
pxla.py
def _check_gda_or_array_xla_sharding_match(args, in_array_mappings): from jax.experimental.global_device_array import GlobalDeviceArray from jax.experimental.array import Array for arg, inp_array_mapping in safe_zip(args, in_array_mappings): if not isinstance(arg, (GlobalDeviceArray, Array)): continue # TODO(yashkatariya): For `Array` check the `sharding` directly when pxla # takes sharding instances. arr_type, arr_mapping = ( ('GDA', _get_array_mapping(arg.mesh_axes)) if isinstance(arg, GlobalDeviceArray) else ('Array', _get_array_mapping(arg.sharding.spec)) ) if inp_array_mapping != arr_mapping: raise ValueError( f"{arr_type} sharding does not match the input sharding. " f"Got {arr_type} spec: {array_mapping_to_axis_resources(arr_mapping)} and " f"auto sharding spec: {array_mapping_to_axis_resources(inp_array_mapping)} " f"for {arr_type}: {arg}")
09ba51f323a2675d5f31b3a8829f7dcd7d989e24
102
https://github.com/google/jax.git
159
def _check_gda_or_array_xla_sharding_match(args, in_array_mappings):
21
191
_check_gda_or_array_xla_sharding_match
16
0
1
5
erpnext/manufacturing/doctype/operation/operation_dashboard.py
66,407
style: format code with black
erpnext
13
Python
16
operation_dashboard.py
def get_data(): return { "fieldname": "operation", "transactions": [{"label": _("Manufacture"), "items": ["BOM", "Work Order", "Job Card"]}], }
494bd9ef78313436f0424b918f200dab8fc7c20b
34
https://github.com/frappe/erpnext.git
11
def get_data(): return { "fieldname": "operation", "transactions": [{"label": _("Manuf
2
67
get_data
24
0
1
6
airbyte-integrations/connectors/source-orb/unit_tests/test_source.py
3,908
🎉 New Source: Orb (#9985) * V1 of source_orb connector * add bootstrap.md file * add clause on Pagination to bootstrap.md * add SUMMARY documentation * add lookback_window_days connector parameter * Add support for start_date parameter * Add ability to transform record in order to un-nest IDs * Add support for extracting event properties based on connector configuration
airbyte
11
Python
21
test_source.py
def test_check_connection_fail(mocker): responses.add(responses.GET, "https://api.billwithorb.com/v1/ping", json={"error": "Unauthorized"}, status=401) source = SourceOrb() logger_mock = MagicMock() (ok, err) = source.check_connection(logger_mock, MagicMock()) assert (ok, type(err)) == (False, HTTPError)
1e0ac30ebdcfce55a5644bcd486044da45c93dd6
68
https://github.com/airbytehq/airbyte.git
38
def test_check_connection_fail(mocker): responses.a
16
109
test_check_connection_fail
123
0
6
38
tests/integration/pods/test_pod.py
11,289
fix: return responses (#4343)
jina
12
Python
83
test_pod.py
async def test_pods_with_replicas_advance_faster(port_generator): head_port = port_generator() port_expose = port_generator() graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}' pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}' # create a single head pod head_pod = _create_head_pod(head_port, 'head') head_pod.start() # create a single gateway pod gateway_pod = _create_gateway_pod(graph_description, pod_addresses, port_expose) gateway_pod.start() # create the shards pods = [] for i in range(10): # create worker worker_port = port_generator() # create a single worker pod worker_pod = _create_worker_pod(worker_port, f'pod0/{i}', 'FastSlowExecutor') pods.append(worker_pod) worker_pod.start() await asyncio.sleep(0.1) head_pod.wait_start_success() gateway_pod.wait_start_success() for pod in pods: # this would be done by the Pod, its adding the worker to the head pod.wait_start_success() activate_msg = ControlRequest(command='ACTIVATE') activate_msg.add_related_entity('worker', '127.0.0.1', pod.args.port_in) GrpcConnectionPool.send_request_sync(activate_msg, f'127.0.0.1:{head_port}') c = Client(return_responses=True, host='localhost', port=port_expose, asyncio=True) input_docs = [Document(text='slow'), Document(text='fast')] responses = c.post('/', inputs=input_docs, request_size=1, return_results=True) response_list = []
2efe175c975975532f6e3fd326ed280addf20eba
291
https://github.com/jina-ai/jina.git
266
async def test_pods_with_replicas_advance_faster(port_generator): head_port = port_generator() port_expose = port_generator() graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}' pod_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}' # create a single head pod head_pod =
44
369
test_pods_with_replicas_advance_faster
93
0
3
20
ludwig/utils/data_utils.py
6,961
fix: restore existing credentials when exiting use_credentials context manager (#2112)
ludwig
18
Python
65
data_utils.py
def use_credentials(creds): if creds is None: with contextlib.nullcontext(): yield return # https://filesystem-spec.readthedocs.io/en/latest/features.html#configuration # This allows us to avoid having to plumb the `storage_options` kwargs through # every remote FS call in Ludwig. This implementation is restricted to one thread # in the process acquiring the lock at once. with GLOBAL_CRED_LOCK: with tempfile.TemporaryDirectory() as tmpdir: fname = os.path.join(tmpdir, "conf.json") with open(fname, "w") as f: json.dump(creds, f) # Backup any existing credentials old_conf = dict(**conf) conf.clear() set_conf_files(tmpdir, conf) try: yield finally: # Restore previous credentials with open(fname, "w") as f: json.dump(old_conf, f) conf.clear() set_conf_files(tmpdir, conf)
2471b6b3f925303f337e0de9ded2cba8e23c9be9
113
https://github.com/ludwig-ai/ludwig.git
343
def use_credentials(creds): if creds is None: with contextlib.nullcontext():
21
210
use_credentials
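Intended usage of the context manager above: swap fsspec's global config for the duration of the block (under the lock, via a temp conf.json) and restore whatever was there before; `None` takes the `nullcontext` fast path. The credential payload below is hypothetical — real keys depend on the target protocol; the nesting mirrors fsspec's per-protocol config layout:

```python
from ludwig.utils.data_utils import use_credentials

# Hypothetical S3 credentials for illustration only.
creds = {"s3": {"key": "AKIA...", "secret": "..."}}

with use_credentials(creds):
    ...  # remote reads/writes in this block see the temporary credentials

with use_credentials(None):
    ...  # no-op path: contextlib.nullcontext(), global config untouched
```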
39
0
1
10
onnx/backend/test/case/node/quantizelinear.py
254,926
Use Python type annotations rather than comments (#3962) * These have been supported since Python 3.5. ONNX doesn't support Python < 3.6, so we can use the annotations. Diffs generated by https://pypi.org/project/com2ann/. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Remove MYPY conditional logic in gen_proto.py It breaks the type annotations and shouldn't be needed. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Get rid of MYPY bool from more scripts Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * move Descriptors class above where it's referenced in type annotation Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fixes Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * remove extra blank line Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotations Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotation in gen_docs Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix Operators.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix TestCoverage.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix protoc-gen-mypy.py Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
onnx
12
Python
35
quantizelinear.py
def export() -> None: node = onnx.helper.make_node('QuantizeLinear', inputs=['x', 'y_scale', 'y_zero_point'], outputs=['y'],) x = np.array([0, 2, 3, 1000, -254, -1000]).astype(np.float32) y_scale = np.float32(2) y_zero_point = np.uint8(128) y = np.array([128, 129, 130, 255, 1, 0]).astype(np.uint8) expect(node, inputs=[x, y_scale, y_zero_point], outputs=[y], name='test_quantizelinear')
83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd
129
https://github.com/onnx/onnx.git
166
def export() -> None: node = onnx.helper.make_node(
18
196
export
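The expected tensor in that test follows the QuantizeLinear definition y = saturate(round(x / y_scale) + y_zero_point) with round-half-to-even. A numpy re-derivation of the same six values:

```python
import numpy as np

x = np.array([0, 2, 3, 1000, -254, -1000], dtype=np.float32)
y_scale, y_zero_point = np.float32(2), np.uint8(128)

# round half to even (np.rint), shift by the zero point, saturate to uint8 range
y = np.clip(np.rint(x / y_scale) + y_zero_point, 0, 255).astype(np.uint8)
print(y)  # [128 129 130 255   1   0] -- the expected tensor above
```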
23
0
3
6
body/human_pose/ambiguity_aware/lib/core/config.py
8,894
update
insightface
15
Python
20
config.py
def _update_dict(k, v): for vk, vv in v.items(): if vk in config[k]: config[k][vk] = vv else: raise ValueError("{}.{} not exist in config.py".format(k, vk))
4b3c8211b3e3eca5f9fdf6553bbd45c9c7587b0d
49
https://github.com/deepinsight/insightface.git
61
def _update_dict(k, v): for vk, vv in v.items(): if vk in config[k]: config[k][vk] = vv
9
77
_update_dict
21
0
1
10
pipenv/patched/notpip/_internal/commands/search.py
19,858
checkpoint progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyproject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
pipenv
9
Python
21
search.py
def add_options(self) -> None: self.cmd_opts.add_option( "-i", "--index", dest="index", metavar="URL", default=PyPI.pypi_url, help="Base URL of Python Package Index (default %default)", ) self.parser.insert_option_group(0, self.cmd_opts)
f3166e673fe8d40277b804d35d77dcdb760fc3b3
48
https://github.com/pypa/pipenv.git
107
def add_options(self) -> None: self.cmd_opts.add_option( "-i", "--index", dest="index", metavar="URL", default=PyPI.pypi_url,
12
79
add_options
24
0
1
7
sklearn/decomposition/tests/test_pca.py
259,086
FIX Reduces memory usage of `PCA.transform` (#22553)
scikit-learn
11
Python
20
test_pca.py
def test_variance_correctness(copy): rng = np.random.RandomState(0) X = rng.randn(1000, 200) pca = PCA().fit(X) pca_var = pca.explained_variance_ / pca.explained_variance_ratio_ true_var = np.var(X, ddof=1, axis=0).sum() np.testing.assert_allclose(pca_var, true_var)
2e213c618841f3635885bab034606512c40a7fd4
75
https://github.com/scikit-learn/scikit-learn.git
45
def test_variance_correctness(copy): rng = np.random.RandomState(0) X = rng.randn(1000, 200) pca = PCA().fit(X) pca_var = pca.explained_variance_ / pca.exp
21
120
test_variance_correctness
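The identity being tested: `explained_variance_ratio_` is `explained_variance_` divided by the total sample variance, so the elementwise quotient of the two attributes recovers that total for every component. Standalone:

```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(200, 10)

pca = PCA().fit(X)
total_var = np.var(X, ddof=1, axis=0).sum()
# every component yields the same quotient: the total sample variance
print(np.allclose(pca.explained_variance_ / pca.explained_variance_ratio_, total_var))  # True
```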
41
0
3
13
dashboard/modules/snapshot/snapshot_head.py
124,115
[dashboard] Add `component_activities` API (#25996) Add /api/component_activities to the dashboard snapshot router, which returns whether various Ray components are considered active. This currently only contains a response entry for drivers; entries for other components will be added on request as follow-ups.
ray
11
Python
34
snapshot_head.py
async def get_component_activities(self, req) -> aiohttp.web.Response: # Get activity information for driver timeout = req.query.get("timeout", None) if timeout and timeout.isdigit(): timeout = int(timeout) else: timeout = 5 driver_activity_info = await self._get_job_activity_info(timeout=timeout) resp = {"driver": dataclasses.asdict(driver_activity_info)} return aiohttp.web.Response( text=json.dumps(resp), content_type="application/json", status=aiohttp.web.HTTPOk.status_code, )
8fc340967654a09cfe00abfc325471258ea5b4e8
99
https://github.com/ray-project/ray.git
151
async def get_component_activities(self, req) -> aiohttp.web.Response: # Get activity information for driver timeout = req.query.get("timeout", None) if timeout and timeout.isdigit(): timeout = int(timeout) else: timeout = 5 dr
23
162
get_component_activities
57
0
5
15
parlai/core/build_data.py
194,804
[circle] Fixing broken unit tests (#4343)
ParlAI
15
Python
45
build_data.py
def download_from_google_drive(gd_id, destination): URL = 'https://docs.google.com/uc?export=download' with get_http_session() as session: response = session.get(URL, params={'id': gd_id}, stream=True) token = _get_confirm_token(response) or 't' if token: response.close() params = {'id': gd_id, 'confirm': token} response = session.get(URL, params=params, stream=True) CHUNK_SIZE = 32768 with PathManager.open(destination, 'wb') as f: for chunk in response.iter_content(CHUNK_SIZE): if chunk: # filter out keep-alive new chunks f.write(chunk) response.close()
5322cd4f5821e339bf1edab98d93b5a008b97a2b
120
https://github.com/facebookresearch/ParlAI.git
187
def download_from_google_drive(gd_id, destination): URL = 'https://docs.google.com/uc?export=download' with get_http_session() as session: response = session.get(URL, params={'id': gd_id}, stream=True) token = _get_confirm_token(response) or 't' if token: response.close() params = {'id': gd_id, 'confirm': token} response = session.get(URL, p
20
208
download_from_google_drive
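The two-request shape above works around Google Drive's large-file interstitial: the first GET may yield a confirmation token (falling back to 't'), which is replayed as the `confirm` parameter before streaming the body in 32 KiB chunks. A plain-requests sketch of the same flow; `_get_confirm_token` is ParlAI-internal, so the cookie-based stand-in below is an assumption:

```python
import requests

def get_confirm_token_sketch(response):
    # Stand-in for ParlAI's private helper: Drive historically signals the
    # interstitial via a download_warning* cookie.
    for key, value in response.cookies.items():
        if key.startswith("download_warning"):
            return value
    return None

def download_from_google_drive_sketch(gd_id, destination):
    url = "https://docs.google.com/uc?export=download"
    with requests.Session() as session:
        response = session.get(url, params={"id": gd_id}, stream=True)
        token = get_confirm_token_sketch(response) or "t"
        response.close()
        response = session.get(url, params={"id": gd_id, "confirm": token}, stream=True)
        with open(destination, "wb") as f:
            for chunk in response.iter_content(32768):
                if chunk:  # skip keep-alive chunks
                    f.write(chunk)
        response.close()
```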
63
0
1
46
tests/sentry/snuba/test_tasks.py
93,398
refs(metric_alerts): Consolidate `QueryDatasets` and `Dataset` (#36894) This refactor pr removes `QueryDatasets` and just uses `Dataset` everywhere. `QueryDatasets` existed before `Dataset`, but `Dataset` is now more widely used and is more up to date. The values here are the same, `Dataset` just supports a few more datasets. We already make sure that only datasets that are valid for alerts can be passed to the alert rules api, so this won't allow people to attempt to create alerts on datasets that don't support them.
sentry
23
Python
39
test_tasks.py
def test_event_types(self): self.create_release(self.project, version="something") expected_conditions = [ And( [ Or( [ Condition(Column(name="type"), Op.EQ, "error"), Condition(Column(name="type"), Op.EQ, "default"), ] ), Or( [ Condition( Function( "ifNull", parameters=[Column(name="tags[sentry:release]"), ""] ), Op.IN, ["something"], ), Condition( Function( "ifNull", parameters=[Column(name="tags[sentry:release]"), ""] ), Op.IN, ["123"], ), ] ), ] ), Condition(Column(name="project_id"), Op.IN, (self.project.id,)), ] self.run_test( SnubaQuery.Type.ERROR, Dataset.Events, "count_unique(user)", "release:latest OR release:123", expected_conditions, entity_extra_fields={ "event_types": [ SnubaQueryEventType.EventType.ERROR, SnubaQueryEventType.EventType.DEFAULT, ] }, )
e1482001662b446c7c2be7c9daa19cba562c615c
196
https://github.com/getsentry/sentry.git
953
def test_event_types(self): self.create_release(self.project, version="something") expected_conditions = [ And( [ Or( [ Condition
27
310
test_event_types
21
0
3
10
lib/matplotlib/figure.py
110,278
DOC: improve grammar and consistency
matplotlib
10
Python
19
figure.py
def add_artist(self, artist, clip=False): artist.set_figure(self) self.artists.append(artist) artist._remove_method = self.artists.remove if not artist.is_transform_set(): artist.set_transform(self.transSubfigure) if clip: artist.set_clip_path(self.patch) self.stale = True return artist
9b6abd0b4933811e0a45c2535ab8fd107db65dd9
69
https://github.com/matplotlib/matplotlib.git
99
def add_artist(self, artist, clip=False): artist.set_figure(self) self.artists.append(artist) artist._remove_method = self.artists.remove if not artist.is_transform_set():
15
113
add_artist
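`add_artist` attaches an artist at the figure level rather than to an axes; as the snippet shows, an artist with no transform set gets `transSubfigure`, i.e. fractional figure coordinates. Usage with stock matplotlib:

```python
import matplotlib.pyplot as plt
from matplotlib.patches import Circle

fig = plt.figure()
# No transform set on the artist, so add_artist applies fig.transSubfigure:
# (0.5, 0.5) is the figure center, 0.1 is a tenth of the figure width.
fig.add_artist(Circle((0.5, 0.5), 0.1, color="tab:blue"))
fig.savefig("figure_level_artist.png")
```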
60
0
9
10
netbox/ipam/forms/models.py
265,290
Clean up validation
netbox
11
Python
41
models.py
def clean(self): super().clean() interface = self.cleaned_data.get('interface') vminterface = self.cleaned_data.get('vminterface') vlan = self.cleaned_data.get('vlan') if not (interface or vminterface or vlan): raise ValidationError('A termination must specify an interface or VLAN.') if len([x for x in (interface, vminterface, vlan) if x]) > 1: raise ValidationError('A termination can only have one terminating object (an interface or VLAN).') self.instance.assigned_object = interface or vminterface or vlan
4bb4bbce1461bee0644e97900006e3fe6d71a3e4
95
https://github.com/netbox-community/netbox.git
130
def clean(self): super().clean() interface = self.cleaned_data.get('interface') vminterface = self.cleaned_data.get('vminterface') vlan = self.cleaned_data.get('vlan') if not (interface or vminterface or vlan): raise ValidationError('A termination must specify an inter
13
160
clean
51
0
4
10
mitmproxy/connection.py
251,338
make it black!
mitmproxy
13
Python
46
connection.py
def __setattr__(self, name, value): if name in ("address", "via"): connection_open = ( self.__dict__.get("state", ConnectionState.CLOSED) is ConnectionState.OPEN ) # assigning the current value is okay, that may be an artifact of calling .set_state(). attr_changed = self.__dict__.get(name) != value if connection_open and attr_changed: raise RuntimeError(f"Cannot change server.{name} on open connection.") return super().__setattr__(name, value)
b3587b52b25077f68116b9852b041d33e7fc6601
72
https://github.com/mitmproxy/mitmproxy.git
164
def __setattr__(self, name, value): if name in ("address", "via"): connection_open = ( self.__dict__.get("state", ConnectionState.CLOSED) is ConnectionState.OPEN ) # assigning the current value is okay, that may be an artifact of calling .set_state(). attr_changed = self.__dict__.get(name) != value if connection_open and attr_changed:
13
121
__setattr__
25
0
1
5
django/db/models/expressions.py
205,473
Refs #33476 -- Reformatted code with Black.
django
9
Python
24
expressions.py
def as_sql(self, *args, **kwargs): raise ValueError( "This queryset contains a reference to an outer query and may " "only be used in a subquery." )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
17
https://github.com/django/django.git
60
def as_sql(self, *args, **kwargs): raise ValueError( "This queryset contains a
5
31
as_sql
43
0
6
10
saleor/graphql/app/types.py
27,367
Improved metadata permissions for apps (#9726) * Add improvement for handling permissions for apps * Fix failing tests * Apply changes after review * Add missing space in query description * Use permission name fields * Apply changes after review * Use + instead of f for field descriptions
saleor
13
Python
33
types.py
def has_access_to_app_public_meta(root, info) -> bool: auth_token = info.context.decoded_auth_token or {} if auth_token.get("type") == JWT_THIRDPARTY_ACCESS_TYPE: _, app_id = from_global_id_or_error(auth_token["app"], "App") else: app_id = info.context.app.id if info.context.app else None if app_id is not None and int(app_id) == root.id: return True requester = get_user_or_app_from_context(info.context) return requester.has_perm(AppPermission.MANAGE_APPS)
20675c93d276101412b22439794d645a0a2fb3bd
95
https://github.com/saleor/saleor.git
81
def has_access_to_app_public_meta(root, info) -> bool: auth_token = info.context.decoded_auth_token or {} if auth_token.get("type") == JWT_THIRDPARTY_ACCESS_TYPE: _, app_id = from_global_id_or_error(auth_token["app"], "App") else: app_id = info.context.app.id if info.context.app else None if app_id is not None and int(app_id) == root.id: return True requester = get_user_or_app_from_context(info.context) return requester.has_perm(AppPermission.MANAGE_APPS)
20
153
has_access_to_app_public_meta
108
0
6
48
saleor/graphql/order/mutations/order_confirm.py
28,777
Use dataloader for plugin manager (#10581) * Use dataloader for plugin manager * Temporary fix for context fixtures * Change layer where inactive user is rejected during auth * Missed merge * Rename 'load_pllugins' to 'load_plugin_manager' * Refactor middleware tests that turned to dataloaders tests to separate file * Use anonymous dataloader, remove TODOs * Use relative imports
saleor
15
Python
85
order_confirm.py
def perform_mutation(cls, root, info, **data): order = cls.get_instance(info, **data) order.status = OrderStatus.UNFULFILLED order.save(update_fields=["status", "updated_at"]) order_info = fetch_order_info(order) payment = order_info.payment manager = load_plugin_manager(info.context) app = load_app(info.context) if payment_transactions := list(order.payment_transactions.all()): try: # We use the last transaction as we don't have a possibility to # provide way of handling multiple transaction here payment_transaction = payment_transactions[-1] request_charge_action( transaction=payment_transaction, manager=manager, charge_value=payment_transaction.authorized_value, channel_slug=order.channel.slug, user=info.context.user, app=app, ) except PaymentError as e: raise ValidationError( str(e), code=OrderErrorCode.MISSING_TRANSACTION_ACTION_REQUEST_WEBHOOK, ) elif payment and payment.is_authorized and payment.can_capture(): gateway.capture(payment, manager, channel_slug=order.channel.slug) site = load_site(info.context) transaction.on_commit( lambda: order_captured( order_info, info.context.user, app, payment.total, payment, manager, site.settings, ) ) transaction.on_commit( lambda: order_confirmed( order, info.context.user, app, manager, send_confirmation_email=True, ) ) return OrderConfirm(order=order)
ac2d4ac172d37dd8e866b679b1a6538745b43c2b
251
https://github.com/saleor/saleor.git
774
def perform_mutation(cls, root, info, **data): order = cls.get_instance(info, **data) order.status = OrderStatus.UNFULFILLED order.save(update_fields=["status", "updated_at"]) order_info = fetch_order_info(order) payment = order_info.payment manager = load_plugin_manager(info.context) app = load_app(info.context) if payment_transactions := list(order.payment_transactions.all()): try: # We use the last transaction as we don't have a possibility to # provide way of handling multiple transaction here payment_transaction = payment_transactions[-1] request_charge_action( transaction=payment_transaction, manager=manager, charge_value=payment_transaction.authorized_value, channel_slug=order.channel.slug, user=info.context.user, app=app, ) except PaymentError as e: raise ValidationError( str(e), code=OrderErrorCode.MISSING_TRANSACTION_ACTION_REQUEST_WEBHOOK, ) elif payment and payment.is_authorized and payment.can_capture(): gateway.capture(payment, manager, channel_slug=order.channel.slug) site = load_
52
380
perform_mutation
78
0
7
22
sympy/physics/continuum_mechanics/beam.py
199,275
Added solving for torsion for circular cross-sections
sympy
16
Python
34
beam.py
def apply_moment_load(self, value, start, order, dir="y"): x = self.variable value = sympify(value) start = sympify(start) order = sympify(order) if dir == "x": if not order == -2: self._moment_load_vector[0] += value else: if start in list(self._torsion_moment): self._torsion_moment[start] += value else: self._torsion_moment[start] = value self._load_Singularity[0] += value*SingularityFunction(x, start, order) elif dir == "y": if not order == -2: self._moment_load_vector[1] += value self._load_Singularity[0] += value*SingularityFunction(x, start, order) else: if not order == -2: self._moment_load_vector[2] += value self._load_Singularity[0] += value*SingularityFunction(x, start, order)
efb5f1f2b5a90d9542a4b4be7af75c9af079fa92
177
https://github.com/sympy/sympy.git
324
def apply_moment_load(self, value, start, order, dir="y"): x = self.variable value = sympify(value) start = sympify(start) order = sympify(order) if dir == "x": if not order == -2: self._moment_load_vector[0] += value else: if start in list(self._torsion_moment): self._torsion_moment[start] += value else: self._torsion_moment[start] = value self._load_Singularity[0] += value*SingularityFunction(x, start, order) elif dir == "y": if not order == -2: self._moment_load_vector[1] += value self._load_Singularity[0] += value*SingularityFunction(x, start, order) else: if not order == -2: self._moment_load_vector[2] += value self._load_Singularity[0] += value*SingularityFunction(x, start, order)
14
280
apply_moment_load
25
1
1
2
tests/unit/keyinput/test_keyutils.py
321,830
keyutils: Move public functions to KeyInfo

This avoids the temptation of creating a Qt.Key() manually, which needs to be checked for ValueError with PyQt 6.2 due to its handling of unknown enum values.

This is exactly what happened in RegisterKeyParser, which caused such a ValueError: https://github.com/qutebrowser/qutebrowser/issues/7047#issuecomment-1163288560

Closes #7047
qutebrowser
10
Python
24
test_keyutils.py
def test_is_special(key, modifiers, special):
    assert keyutils.KeyInfo(key, modifiers).is_special() == special


@pytest.mark.parametrize('key, ismodifier', [
    (Qt.Key.Key_Control, True),
    (Qt.Key.Key_X, False),
    (Qt.Key.Key_Super_L, False),  # Modifier but not in _MODIFIER_MAP
])
f7753550f2c1dcb2348e4779fd5287166754827e
@pytest.mark.parametrize('key, ismodifier', [
    (Qt.Key.Key_Control, True),
    (Qt.Key.Key_X, False),
    (Qt.Key.Key_Super_L, False),  # Modifier but not in _MODIFIER_MAP
])
24
https://github.com/qutebrowser/qutebrowser.git
35
def test_is_special(key, modifiers, special):
    assert keyutils.KeyInfo(key, modifiers).is_special() == special


@pytest.mark.parametrize('key, ismodifier', [
    (Qt.Key.Key_Control, True),
    (Qt.Key.Key_X, False),
    (Qt.Key.Key_Super_L, False),  # Modifier but not in _MODIFIER_MAP
])
15
97
test_is_special
30
0
1
15
tests/snuba/api/endpoints/test_organization_events_mep.py
93,303
ref(MEP): Add new option to query tag values as strings from clickhouse (#36397)

Co-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com>
sentry
12
Python
29
test_organization_events_mep.py
def test_non_metrics_tag_with_implicit_format_metrics_dataset(self):
        self.store_transaction_metric(
            1,
            tags={"environment": "staging", "transaction": "foo_transaction"},
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["test", "p50(transaction.duration)"],
                "query": "event.type:transaction",
                "dataset": "metrics",
                "per_page": 50,
            }
        )
        assert response.status_code == 400, response.content
5ceaca63890c6c660b4d061e800ccbf8c90c6e20
69
https://github.com/getsentry/sentry.git
179
def test_non_metrics_tag_with_implicit_format_metrics_dataset(self):
        self.store_transaction_metric(
            1,
            tags={"environment": "staging", "transaction": "foo_transaction"},
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["test", "p50(transaction.duration)"],
                "query": "event.type:transaction",
                "dataset": "metrics",
                "per_page": 50,
            }
        )
        assert response.status_code == 400, response.content
10
122
test_non_metrics_tag_with_implicit_format_metrics_dataset
15
0
2
5
python3.10.4/Lib/distutils/command/sdist.py
222,798
add python 3.10.4 for windows
XX-Net
12
Python
14
sdist.py
def _add_defaults_optional(self):
        optional = ['test/test*.py', 'setup.cfg']
        for pattern in optional:
            files = filter(os.path.isfile, glob(pattern))
            self.filelist.extend(files)
8198943edd73a363c266633e1aa5b2a9e9c9f526
40
https://github.com/XX-net/XX-Net.git
50
def _add_defaults_optional(self):
        optional = ['test/test*.py',
12
66
_add_defaults_optional
19
0
1
16
plugins/extract/pipeline.py
101,460
extract: Add batch processing mode
faceswap
10
Python
17
pipeline.py
def input_queue(self) -> EventQueue:
        qname = f"extract{self._instance}_{self._current_phase[0]}_in"
        retval = self._queues[qname]
        logger.trace("%s: %s", qname, retval)  # type: ignore
        return retval
13cfb3f39e72e9ca181f173b7b3db2a048db0d08
32
https://github.com/deepfakes/faceswap.git
55
def input_queue(self) -> EventQueue:
        qname = f"extract{self._instance}_{self._current_phase[0]}_in"
        retval = self._queu
10
73
input_queue
12
0
1
4
tests/admin_views/tests.py
207,682
Refs #33476 -- Reformatted code with Black.
django
11
Python
12
tests.py
def test_custom_admin_site_view(self):
        self.client.force_login(self.superuser)
        response = self.client.get(reverse("admin2:my_view"))
        self.assertEqual(response.content, b"Django is a magical pony!")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
39
https://github.com/django/django.git
32
def test_custom_admin_site_view(self):
        self.client.
10
65
test_custom_admin_site_view
36
0
3
9
pandas/tests/extension/test_arrow.py
169,146
BUG/TST: fix a bunch of arraymanager+pyarrow tests (#48428)

* BUG/TST: fix a bunch of arraymanager+pyarrow tests

* remove unnecessary using_array_manager
pandas
15
Python
33
test_arrow.py
def test_setitem_mask_aligned(self, data, as_callable, setter, request):
        tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
        if pa_version_under2p0 and tz not in (None, "UTC"):
            request.node.add_marker(
                pytest.mark.xfail(
                    reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
                )
            )
        super().test_setitem_mask_aligned(data, as_callable, setter)
12dce19a74b7cd5badad0f61ca079b873c1b6089
71
https://github.com/pandas-dev/pandas.git
127
def test_setitem_mask_aligned(self, data, as_callable, setter, request):
        tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
        if pa_version_under2p0 and tz not in (None, "UTC"):
            request.node.add_marker(
                pytest.mark.xfail(
                    reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
                )
            )
        super().test_setitem_mask_aligned(data, as
18
112
test_setitem_mask_aligned
100
0
1
45
wagtail/admin/tests/test_contentstate.py
71,907
Reformat with black
wagtail
16
Python
62
test_contentstate.py
def test_wrapped_block_retains_key(self):
        # Test a block which uses a wrapper correctly receives the key defined on the inner element
        converter = ContentstateConverter(features=["h1", "ol", "bold", "italic"])
        result = converter.to_database_format(
            json.dumps(
                {
                    "entityMap": {},
                    "blocks": [
                        {
                            "inlineStyleRanges": [],
                            "text": "The rules of Fight Club",
                            "depth": 0,
                            "type": "header-one",
                            "key": "00000",
                            "entityRanges": [],
                        },
                        {
                            "inlineStyleRanges": [],
                            "text": "You do not talk about Fight Club.",
                            "depth": 0,
                            "type": "ordered-list-item",
                            "key": "00001",
                            "entityRanges": [],
                        },
                        {
                            "inlineStyleRanges": [],
                            "text": "You do not talk about Fight Club.",
                            "depth": 0,
                            "type": "ordered-list-item",
                            "key": "00002",
                            "entityRanges": [],
                        },
                    ],
                }
            )
        )
        self.assertHTMLEqual(
            result,
            ,
        )
d10f15e55806c6944827d801cd9c2d53f5da4186
141
https://github.com/wagtail/wagtail.git
896
def test_wrapped_block_retains_key(self):
        # Test a block which uses a wrapper correctly receives the key defined on the inner element
        converter = ContentstateConverter(features=["h1", "ol", "bold", "italic"])
        result = converter.to_database_format(
            json.dumps(
                {
                    "entityMap": {},
                    "blocks": [
                        {
                            "inlineStyleRanges": [],
                            "text": "The rules of Fight Club",
                            "depth": 0,
                            "type": "header-one",
                            "key": "00000",
                            "entityRanges": [],
                        },
                        {
                            "inlineStyleRanges": [],
                            "text": "You do not talk about Fight Club.",
                            "depth": 0,
                            "type": "ordered-list-item",
                            "key": "00001",
                            "entityRanges": [],
                        },
                        {
                            "inlineStyleRanges": [],
                            "text": "You do not talk about Fight Clu
10
264
test_wrapped_block_retains_key
22
0
1
13
airbyte-integrations/connectors/source-mixpanel/unit_tests/test_property_transformation.py
5,688
:tada: Source Mixpanel: Beta preparation (#13372)

* Add extra mode to Source, to allow run acceptance tests
* move streams into distinct modules
* Add property name transformation for Export stream for avoiding collisions
* Update doc
* Add `date_window_size`
airbyte
12
Python
20
test_property_transformation.py
def export_response():
    return setup_response(
        200,
        {
            "event": "Problem event",
            "properties": {
                "distinct_id": "1d694fd9-31a5-4b99-9eef-ae63112063ed",
                "$userName": "1",
                "userName": "2",
                "username": "3",
            },
        },
    )
d79b319819650f99fae2ab8c6c8d3ab25d474cf1
38
https://github.com/airbytehq/airbyte.git
141
def export_response():
    return setup_response(
        200,
        {
            "event": "Problem event",
            "properties": {
                "distinct_id": "1d694fd9-31a5-4b99-9eef-ae6
2
75
export_response
20
1
1
7
airbyte-integrations/connectors/source-google-ads/unit_tests/test_source.py
5,054
Source Google Ads: Improve unit and integration tests (#12651)

* #12650 source Googel ads: tests

* #12650 source google ads: add changelog item

* #12650 source google ads: add comments to tests

* auto-bump connector version

Co-authored-by: Octavia Squidington III <octavia-squidington-iii@users.noreply.github.com>
airbyte
11
Python
17
test_source.py
def client_mock(config):
    google_api = GoogleAds(credentials=config["credentials"], customer_id=config["customer_id"])
    client = AdGroupAdReport(
        start_date=config["start_date"], api=google_api, conversion_window_days=config["conversion_window_days"], time_zone="local"
    )
    client._customer_id = "1234567890"
    return client


@pytest.fixture()
d4f8b25b8e3e109db866352cf1dcec0d73c92cbd
@pytest.fixture()
56
https://github.com/airbytehq/airbyte.git
40
def client_mock(config):
    google_api = GoogleAds(credentials=config["credentials"], customer_id=config["customer_id"])
    client = AdGroupAdReport(
        start_date=config["start_date"], api=google_api, conversion_window_days=config["
15
105
client_mock
90
0
1
63
erpnext/patches/v14_0/rearrange_company_fields.py
66,855
style: format code with black
erpnext
13
Python
54
rearrange_company_fields.py
def execute():
	custom_fields = {
		"Company": [
			dict(
				fieldname="hra_section",
				label="HRA Settings",
				fieldtype="Section Break",
				insert_after="asset_received_but_not_billed",
				collapsible=1,
			),
			dict(
				fieldname="basic_component",
				label="Basic Component",
				fieldtype="Link",
				options="Salary Component",
				insert_after="hra_section",
			),
			dict(
				fieldname="hra_component",
				label="HRA Component",
				fieldtype="Link",
				options="Salary Component",
				insert_after="basic_component",
			),
			dict(fieldname="hra_column_break", fieldtype="Column Break", insert_after="hra_component"),
			dict(
				fieldname="arrear_component",
				label="Arrear Component",
				fieldtype="Link",
				options="Salary Component",
				insert_after="hra_column_break",
			),
			dict(
				fieldname="non_profit_section",
				label="Non Profit Settings",
				fieldtype="Section Break",
				insert_after="arrear_component",
				collapsible=1,
			),
			dict(
				fieldname="company_80g_number",
				label="80G Number",
				fieldtype="Data",
				insert_after="non_profit_section",
			),
			dict(
				fieldname="with_effect_from",
				label="80G With Effect From",
				fieldtype="Date",
				insert_after="company_80g_number",
			),
			dict(
				fieldname="non_profit_column_break", fieldtype="Column Break", insert_after="with_effect_from"
			),
			dict(
				fieldname="pan_details",
				label="PAN Number",
				fieldtype="Data",
				insert_after="non_profit_column_break",
			),
		]
	}

	create_custom_fields(custom_fields, update=True)
494bd9ef78313436f0424b918f200dab8fc7c20b
230
https://github.com/frappe/erpnext.git
27
def execute():
	custom_fields = {
		"Company": [
			dict(
				fieldname="hra_section",
				label="HRA Settings",
				fieldtype="Section Break",
				insert_after="asset_received_but_not_billed",
				collapsible=1,
			),
			dict(
				fieldname="basic_component",
				label="Basic Component",
				fieldtype="Link",
				options="Salary Component",
				insert_after="hra_section",
			),
			dict(
				fieldname="hra_component",
				label="HRA Component",
				fieldtype="Link",
				options="Salary Compo
11
390
execute
41
1
3
7
django/templatetags/i18n.py
206,308
Refs #33476 -- Reformatted code with Black.
django
11
Python
39
i18n.py
def do_get_available_languages(parser, token):
    # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
    args = token.contents.split()
    if len(args) != 3 or args[1] != "as":
        raise TemplateSyntaxError(
            "'get_available_languages' requires 'as variable' (got %r)" % args
        )
    return GetAvailableLanguagesNode(args[2])


@register.tag("get_language_info")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
@register.tag("get_language_info")
47
https://github.com/django/django.git
80
def do_get_available_languages(parser, token):
    # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
    args = token.contents.split()
    if len(args) != 3 or args[1] != "as":
11
95
do_get_available_languages
162
0
2
46
setup.py
111,655
Bump Python version to 3.7~3.9 (#4475)
nni
14
Python
114
setup.py
def _setup():
    setuptools.setup(
        name = 'nni',
        version = release or '999.dev0',
        description = 'Neural Network Intelligence project',
        long_description = open('README.md', encoding='utf-8').read(),
        long_description_content_type = 'text/markdown',
        url = 'https://github.com/Microsoft/nni',
        author = 'Microsoft NNI Team',
        author_email = 'nni@microsoft.com',
        license = 'MIT',
        classifiers = [
            'License :: OSI Approved :: MIT License',
            'Operating System :: MacOS :: MacOS X',
            'Operating System :: Microsoft :: Windows :: Windows 10',
            'Operating System :: POSIX :: Linux',
            'Programming Language :: Python :: 3 :: Only',
            'Topic :: Scientific/Engineering :: Artificial Intelligence',
        ],
        packages = _find_python_packages(),
        package_data = {
            'nni': _find_requirements_txt() + _find_default_config(),  # setuptools issue #1806
            'nni_node': _find_node_files()  # note: this does not work before building
        },
        data_files = _get_data_files(),
        python_requires = '>=3.7',
        install_requires = _read_requirements_txt('dependencies/required.txt'),
        extras_require = {
            'SMAC': _read_requirements_txt('dependencies/required_extra.txt', 'SMAC'),
            'BOHB': _read_requirements_txt('dependencies/required_extra.txt', 'BOHB'),
            'PPOTuner': _read_requirements_txt('dependencies/required_extra.txt', 'PPOTuner'),
            'DNGO': _read_requirements_txt('dependencies/required_extra.txt', 'DNGO'),
        },
        setup_requires = ['requests'],
        entry_points = {
            'console_scripts' : [
                'nnictl = nni.tools.nnictl.nnictl:parse_args'
            ]
        },
        cmdclass = {
            'build': Build,
            'build_ts': BuildTs,
            'clean': Clean,
            'develop': Develop,
        }
    )
c56568c9733fa286c1753a54fab2fd892a5cf6d5
195
https://github.com/microsoft/nni.git
550
def _setup():
    setuptools.setup(
        name = 'nni',
        version = release or '999.dev0',
        description = 'Neural Network Intelligence project',
        long_description = open('README.md', encoding='utf-8').read(),
        long_description_content_type = 'text/markdown',
        url = 'https://github.com/Microsoft/nni',
        author = 'Microsoft NNI Team',
        author_email = 'nni@microsoft.com',
        license = 'MIT',
        classifiers = [
            'License :: OSI Approved :: MIT License',
            'Operating System :: MacOS :: MacOS X',
            'Operating System :: Microsoft :: Windows :: Windows 10',
            'Operating System :: POSIX :: Linux',
            'Programming Language :: Python :: 3 :: Only',
            'Topic :: Scientific/Engineering :: Artificial Intelligence',
        ],
        packages = _find_python_packages(),
        package_data = {
            'nni': _find_requirements_txt() + _find_default_config(),  # setuptools issue #1806
            'nni_node': _find_node_files()  # note: this does not work before building
        },
        data_files = _get_data_files(),
        python_requires = '>=3.7',
        install_requires = _read_requirements_txt('dependencies/required.txt'),
        extras_require = {
            'SMAC': _read_requirements_txt('dependencies/required_extra.txt', 'SMAC'),
            'BOHB': _read_requirements_txt('dependencies/required_extra.txt', 'BOHB'),
            'PPOTuner': _read_requirements_txt('dependencies/required_extra.txt', 'PPOTuner'),
            'DNGO': _read_requirements_txt('dependencies/required_extra.txt', 'DNGO'),
        },
        setup_requires = ['requests'],
        entry_points
36
348
_setup
9
0
1
4
tests/models/cvt/test_modeling_cvt.py
38,679
Add CvT (#17299)

* Adding cvt files

* Adding cvt files

* changes in init file

* Adding cvt files

* changes in init file

* Style fixes

* Address comments from code review

* Apply suggestions from code review

Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>

* Format lists in docstring

* Fix copies

* Apply suggestion from code review

Co-authored-by: AnugunjNaman <anugunjjha@gmail.com>
Co-authored-by: Ayushman Singh <singhayushman13@protonmail.com>
Co-authored-by: Niels Rogge <nielsrogge@Nielss-MacBook-Pro.local>
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
transformers
10
Python
8
test_modeling_cvt.py
def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
adc0ff25028d29af30386f2d7d3f85e290fbef57
42
https://github.com/huggingface/transformers.git
29
def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(c
8
71
create_and_test_config_common_properties
36
0
4
33
gensim/models/keyedvectors.py
9,777
Fix typo in word2vec and KeyedVectors docstrings (#3365)

* Add missing word in word2vec docstring

* Fix docstring typo in KeyedVectors distances()

  word_or_vector, not word_or_vectors
gensim
12
Python
28
keyedvectors.py
def distances(self, word_or_vector, other_words=()):
        if isinstance(word_or_vector, _KEY_TYPES):
            input_vector = self.get_vector(word_or_vector)
        else:
            input_vector = word_or_vector
        if not other_words:
            other_vectors = self.vectors
        else:
            other_indices = [self.get_index(word) for word in other_words]
            other_vectors = self.vectors[other_indices]
        return 1 - self.cosine_similarities(input_vector, other_vectors)
7f314ee10c9ad83816aa795c5ef6ebc378de3acf
78
https://github.com/RaRe-Technologies/gensim.git
133
def distances(self, word_or_vector, other_words=()):
        if isinstance(word_or_vector, _KEY_TYPES):
            input_vector = self.get_vector(word_or_vector)
        else:
            input_vector = word_or_vector
        if not other_words:
            other_vectors = self.vectors
        else:
            other_indices = [self.get_index(word) for word in
14
124
distances
4
0
1
2
tests/admin_default_site/tests.py
207,066
Refs #33476 -- Reformatted code with Black.
django
10
Python
4
tests.py
def test_use_custom_admin_site(self):
        self.assertEqual(admin.site.__class__.__name__, "CustomAdminSite")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
19
https://github.com/django/django.git
10
def test_use_custom_admin_site(self):
        self.assertEqual(admin.site.__class__.__name__, "CustomAdminSite")
7
32
test_use_custom_admin_site