column           dtype          min      max
n_words          int64          3        1.95k
n_ast_errors     int64          0        2
complexity       int64          1        151
nloc             int64          2        546
path             stringlengths  8        125
id               int64          280      339k
commit_message   stringlengths  3        18.1k
repo             stringlengths  3        28
ast_levels       int64          4        28
language         stringclasses  1 value
vocab_size       int64          3        677
file_name        stringlengths  5        67
code             stringlengths  101      24k
commit_id        stringlengths  40       40
ast_errors       stringlengths  0        2.76k
token_counts     int64          7        3.77k
url              stringlengths  31       61
n_whitespaces    int64          4        13.9k
random_cut       stringlengths  21       13.9k
n_identifiers    int64          1        157
n_ast_nodes      int64          10       3.6k
fun_name         stringlengths  3        72
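Each row pairs one extracted Python function (`code`, `fun_name`, `file_name`, `path`) with its commit provenance (`repo`, `commit_id`, `commit_message`, `url`) and simple size/AST statistics (`nloc`, `complexity`, `token_counts`, and so on). A minimal sketch of reading such rows, assuming the Hugging Face `datasets` library and using a placeholder dataset path (the actual dataset name is not given here):

from datasets import load_dataset

# "user/code-commit-dataset" is a hypothetical placeholder, not the real dataset id.
ds = load_dataset("user/code-commit-dataset", split="train")

row = ds[0]
print(row["repo"], row["path"], row["fun_name"])  # provenance of the sample
print(row["nloc"], row["complexity"])             # size and complexity stats
print(row["code"][:200])                          # start of the extracted function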
29
0
3
7
.venv/lib/python3.8/site-packages/pip/_vendor/toml/decoder.py
63,871
upd; format
transferlearning
9
Python
23
decoder.py
def _getpath(p):
    if (3, 6) <= sys.version_info:
        import os

        return os.fspath(p)
    if _detect_pathlib_path(p):
        return str(p)
    return p


try:
    FNFError = FileNotFoundError
except NameError:
    FNFError = IOError


TIME_RE = re.compile(r"([0-9]{2}):([0-9]{2}):([0-9]{2})(\.([0-9]{3,6}))?")
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
38
https://github.com/jindongwang/transferlearning.git
61
def _getpath(p): if (3, 6) <= sys.version_info: import os return os.fspath(p) if _detect_pathlib_path(p): return str(p) return p try: FNFError = FileNotFoundError except NameError:
15
95
_getpath
20
0
1
9
wagtail/core/tests/test_blocks.py
74,068
Reformat with black
wagtail
12
Python
19
test_blocks.py
def test_validate_non_required_choice_block(self):
    block = blocks.ChoiceBlock(
        choices=[("tea", "Tea"), ("coffee", "Coffee")], required=False
    )
    self.assertEqual(block.clean("coffee"), "coffee")

    with self.assertRaises(ValidationError):
        block.clean("whisky")

    self.assertEqual(block.clean(""), "")
    self.assertEqual(block.clean(None), "")
d10f15e55806c6944827d801cd9c2d53f5da4186
84
https://github.com/wagtail/wagtail.git
83
def test_validate_non_required_choice_block(self): block = blocks.ChoiceBlock( choices=[("tea", "Tea"), ("coffee", "Coffee")], required=False ) self.assertEqual(block.clean("coffee"), "coffee") with self.a
11
150
test_validate_non_required_choice_block
48
0
1
11
seaborn/tests/_core/test_subplots.py
40,673
Refactor figure setup and subplot metadata tracking into Subplots class Squashed commit of the following: commit e6f99078d46947eab678b9dd0303657a3129f9fc Author: Michael Waskom <mwaskom@nyu.edu> Date: Sun Aug 1 17:56:49 2021 -0400 Address a couple TODOs commit c48ba3af8095973b7dca9554934a695751f58726 Author: Michael Waskom <mwaskom@nyu.edu> Date: Mon Jul 26 06:42:29 2021 -0400 Add docstrings in Subplots commit 97e6465b0f998f541b445b189682fbf134869391 Author: Michael Waskom <mwaskom@nyu.edu> Date: Sun Jul 25 17:53:22 2021 -0400 Fix unshared label visibility test commit e2d93a28313c2cb9170e56b2e4b373987993be7c Author: Michael Waskom <mwaskom@nyu.edu> Date: Sun Jul 25 17:16:41 2021 -0400 Add more label visibility tests commit 698ee72b5d5f9f3939c50cde9e2baacdf5487807 Author: Michael Waskom <mwaskom@nyu.edu> Date: Sat Jul 24 11:08:32 2021 -0400 Begin adding label visibility tests commit 97167b4701532eeccadaa899520d57e38c26dd43 Author: Michael Waskom <mwaskom@nyu.edu> Date: Mon Jul 19 06:55:48 2021 -0400 Fix interior tick labels with unshared axes commit 9331d5d91a7861aebfe03fa86ee122902c0d1d8a Author: Michael Waskom <mwaskom@nyu.edu> Date: Sat Jul 17 17:03:48 2021 -0400 Fix interior labels for wrapped plots commit 38f2efa7e732958430c006f24827c6ac69640ef3 Author: Michael Waskom <mwaskom@nyu.edu> Date: Sat Jul 17 16:03:34 2021 -0400 Fix non-cartesian interior labels commit 3c07f981110890d38aee19b38c43080863132122 Author: Michael Waskom <mwaskom@nyu.edu> Date: Sat Jul 17 15:44:48 2021 -0400 Integrate Subplots into Plot commit 841a3c998eae8f8cc85fd65af7ea8e6f32fc5510 Author: Michael Waskom <mwaskom@nyu.edu> Date: Sat Jul 17 13:00:09 2021 -0400 Complete subplots tests commit 8ceb7e6c35ea0cbcd014067035d7ea219204f464 Author: Michael Waskom <mwaskom@nyu.edu> Date: Fri Jul 16 19:45:29 2021 -0400 Continue building out subplot tests commit b0ce0e7a9e3534fdad04ef9e287e4c6bb19fe684 Author: Michael Waskom <mwaskom@nyu.edu> Date: Thu Jul 15 21:35:21 2021 -0400 Continue building out subplots tests commit 5f4b67d4d90cde7d0d899527b1fd8607348a5f5b Author: Michael Waskom <mwaskom@nyu.edu> Date: Wed Jul 14 20:57:35 2021 -0400 Add some tests for Subplots functionality commit 58fbf8e3f349174f4d1d29f71fa867ad4b49d264 Author: Michael Waskom <mwaskom@nyu.edu> Date: Sun Jul 11 20:49:29 2021 -0400 Begin refactoring figure setup into Subplots class commit 6bb853e20ad3b42b2728d212a51ed8de2ff47bde Author: Michael Waskom <mwaskom@nyu.edu> Date: Sun Jul 11 16:02:26 2021 -0400 Fix overlooked lint and test
seaborn
11
Python
32
test_subplots.py
def test_col_facet_wrapped(self, long_df):
    key = "b"
    wrap = 3
    data = PlotData(long_df, {"col": key})
    s = Subplots({}, {"wrap": wrap}, {}, data)

    n_levels = len(categorical_order(long_df[key]))
    assert s.n_subplots == n_levels
    assert s.subplot_spec["ncols"] == wrap
    assert s.subplot_spec["nrows"] == n_levels // wrap + 1
    assert s.subplot_spec["sharex"] is True
    assert s.subplot_spec["sharey"] is True
c16180493bd44fd76092fdd9ea0060bac91e47fe
98
https://github.com/mwaskom/seaborn.git
117
def test_col_facet_wrapped(self, long_df): key = "b" wrap = 3 data = PlotData(long_df, {"col": key}) s = Subplots({}, {"wrap": wrap}, {}, data) n_levels = len(categorical_order(long_df[key])) assert s.n_subplots == n_levels assert s.subplot_spec["ncols"] == wrap assert s.subplot_spec["nrows"] == n_levels // wrap + 1 assert s.subplot_spec["sharex"] is True as
14
165
test_col_facet_wrapped
20
0
4
8
homeassistant/components/command_line/switch.py
313,249
Improve code quality command_line (#65333)
core
12
Python
15
switch.py
def _query_state(self) -> str | int | None:
    if self._command_state:
        if self._value_template:
            return self._query_state_value(self._command_state)
        return self._query_state_code(self._command_state)
    if TYPE_CHECKING:
        return None
3771c154fa0ea8e0b49d41ece55a7a18c444ee6a
45
https://github.com/home-assistant/core.git
89
def _query_state(self) -> str | int | None:
9
74
_query_state
22
0
1
12
mkdocs/tests/config/config_options_tests.py
224,601
MarkdownExtensions' default is an empty list
mkdocs
10
Python
20
config_options_tests.py
def test_missing_default(self):
    option = config_options.MarkdownExtensions()
    config = {}
    config['markdown_extensions'] = option.validate(None)
    option.post_validation(config, 'markdown_extensions')
    self.assertEqual(
        {
            'markdown_extensions': [],
            'mdx_configs': {},
        },
        config,
    )
2c986996d041f0059b4d3c2ff4bd647cadeb68de
55
https://github.com/mkdocs/mkdocs.git
126
def test_missing_default(self): option = config_options.MarkdownExtensions() config = {} config['markdown_extensions'] = option.validate(None) option.post_validation(config, 'markdown_extensions') self.assertEqual( {
9
94
test_missing_default
26
0
3
11
tests/components/number/test_init.py
290,875
Align number and sensor device classes (#81909) * Align number and sensor device classes * Add tests * Tweak tests
core
12
Python
23
test_init.py
def test_device_classes_aligned():
    non_numeric_device_classes = {
        SensorDeviceClass.DATE,
        SensorDeviceClass.DURATION,
        SensorDeviceClass.TIMESTAMP,
    }

    for device_class in SensorDeviceClass:
        if device_class in non_numeric_device_classes:
            continue

        assert hasattr(NumberDeviceClass, device_class.name)
        assert getattr(NumberDeviceClass, device_class.name).value == device_class.value
b6586d5c34bf7ea5c30fbb1b62c438078ea14f39
56
https://github.com/home-assistant/core.git
91
def test_device_classes_aligned(): non_numeric_device_classes = { SensorDeviceClass.DATE, SensorDeviceClass.DURATION, SensorDeviceClass.TIMESTAMP, } for device_class in SensorDeviceClass: if device_class in non_numeric_device_classes: continue assert hasattr(NumberDeviceClass, device_class.name) assert getattr(NumberDeviceClass, device_class.name).va
12
86
test_device_classes_aligned
194
0
6
56
python/ray/internal/internal_api.py
130,772
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
17
Python
101
internal_api.py
def store_stats_summary(reply):
    store_summary = "--- Aggregate object store stats across all nodes ---\n"
    # TODO(ekl) it would be nice if we could provide a full memory usage
    # breakdown by type (e.g., pinned by worker, primary, etc.)
    store_summary += (
        "Plasma memory usage {} MiB, {} objects, {}% full, {}% "
        "needed\n".format(
            int(reply.store_stats.object_store_bytes_used / (1024 * 1024)),
            reply.store_stats.num_local_objects,
            round(
                100
                * reply.store_stats.object_store_bytes_used
                / reply.store_stats.object_store_bytes_avail,
                2,
            ),
            round(
                100
                * reply.store_stats.object_store_bytes_primary_copy
                / reply.store_stats.object_store_bytes_avail,
                2,
            ),
        )
    )
    if reply.store_stats.object_store_bytes_fallback > 0:
        store_summary += "Plasma filesystem mmap usage: {} MiB\n".format(
            int(reply.store_stats.object_store_bytes_fallback / (1024 * 1024))
        )
    if reply.store_stats.spill_time_total_s > 0:
        store_summary += (
            "Spilled {} MiB, {} objects, avg write throughput {} MiB/s\n".format(
                int(reply.store_stats.spilled_bytes_total / (1024 * 1024)),
                reply.store_stats.spilled_objects_total,
                int(
                    reply.store_stats.spilled_bytes_total
                    / (1024 * 1024)
                    / reply.store_stats.spill_time_total_s
                ),
            )
        )
    if reply.store_stats.restore_time_total_s > 0:
        store_summary += (
            "Restored {} MiB, {} objects, avg read throughput {} MiB/s\n".format(
                int(reply.store_stats.restored_bytes_total / (1024 * 1024)),
                reply.store_stats.restored_objects_total,
                int(
                    reply.store_stats.restored_bytes_total
                    / (1024 * 1024)
                    / reply.store_stats.restore_time_total_s
                ),
            )
        )
    if reply.store_stats.consumed_bytes > 0:
        store_summary += "Objects consumed by Ray tasks: {} MiB.\n".format(
            int(reply.store_stats.consumed_bytes / (1024 * 1024))
        )
    if reply.store_stats.object_pulls_queued:
        store_summary += "Object fetches queued, waiting for available memory."
    return store_summary
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
272
https://github.com/ray-project/ray.git
800
def store_stats_summary(reply): store_summary = "--- Aggregate object store stats across all nodes ---\n" # TODO(ekl) it would be nice if we could provide a full memory usage # breakdown by type (e.g., pinned by worker, primary, etc.) store_summary += ( "Plasma memory usage {} MiB, {} objects, {}% full, {}% " "needed\n".format( int(reply.store_stats.object_store_bytes_used / (1024 * 1024)), reply.store_stats.num_local_objects, round( 100 * reply.store_stats.object_store_bytes_used / reply.store_stats.object_store_bytes_avail, 2, ), round( 100 * reply.store_stats.object_store_bytes_primary_copy / reply.store_stats.object_store_bytes_avail, 2, ), ) ) if reply.store_stats.object_store_bytes_fallback > 0: store_summary += "Plasma filesystem mmap usage: {} MiB\n".format( int(reply.store_stats.object_store_bytes_fallback / (1024 * 1024)) ) if reply.store_stats.spill_time_total_s > 0: store_summary += ( "Spilled {} MiB, {} objects, avg write throughput {} MiB/s\n".format( int(reply.store_stats.spilled_bytes_total / (1024 * 1024)), reply.store_stats.spilled_objects_total, int( reply.store_stats.spilled_bytes_total / (1024 * 1024) / reply.store_stats.spill_time_total_s ), )
20
438
store_stats_summary
74
0
7
20
mitmproxy/contrib/kaitaistruct/png.py
252,406
update kaitai definitions
mitmproxy
13
Python
48
png.py
def _read(self):
    self.magic = self._io.read_bytes(8)
    if not self.magic == b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A":
        raise kaitaistruct.ValidationNotEqualError(b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A", self.magic, self._io, u"/seq/0")
    self.ihdr_len = self._io.read_u4be()
    if not self.ihdr_len == 13:
        raise kaitaistruct.ValidationNotEqualError(13, self.ihdr_len, self._io, u"/seq/1")
    self.ihdr_type = self._io.read_bytes(4)
    if not self.ihdr_type == b"\x49\x48\x44\x52":
        raise kaitaistruct.ValidationNotEqualError(b"\x49\x48\x44\x52", self.ihdr_type, self._io, u"/seq/2")
    self.ihdr = Png.IhdrChunk(self._io, self, self._root)
    self.ihdr_crc = self._io.read_bytes(4)
    self.chunks = []
    i = 0
    while True:
        _ = Png.Chunk(self._io, self, self._root)
        self.chunks.append(_)
        if ((_.type == u"IEND") or (self._io.is_eof())):
            break
        i += 1
002f919dda5f01d067c2e786426c68751551d15c
214
https://github.com/mitmproxy/mitmproxy.git
243
def _read(self): self.magic = self._io.read_bytes(8) if not self.magic == b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A": raise kaitaistruct.ValidationNotEqualError(b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A", self.magic, self._io, u"/seq/0") self.ihdr_len = self._io.read_u4be() if not self.ihdr_len == 13: raise kaitaistruct.ValidationNotEqualError(13, self.ihdr_len, self._io, u"/seq/1") self.ihdr_type = self._io.read_bytes(4) if not self.ihdr_type == b"\x49\x48\x44\x52": raise kaitaistruct.ValidationNotEqualError(b"\x49\x48\x44\x52", self.ihdr_type, self._io, u"/seq/2") self.ihdr = Png.IhdrChunk(self._io, self, self._root) self.ihdr_crc = self._io.read_bytes(4) self.chunks = [] i = 0 while True: _ = Png.Chunk(self._io, self, self._root) self.chunks.append(_) if ((_.type == u"IEND") or (self._io.is_eof())) : break i += 1
22
354
_read
38
0
2
6
keras/optimizers/optimizer_v2/adamax_test.py
277,931
resolve line-too-long in optimizer
keras
11
Python
30
adamax_test.py
def testSlotsUniqueEager(self):
    v1 = tf.Variable(1.0)
    v2 = tf.Variable(1.0)
    opt = adamax.Adamax(1.0)
    opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
    # There should be iteration, and two unique slot variables for v1 and
    # v2.
    self.assertLen({id(v) for v in opt.variables()}, 5)
406774b60ac6b505ae9bf7e8728b00a1523ad4a3
74
https://github.com/keras-team/keras.git
86
def testSlotsUniqueEager(self): v1 = tf.Var
15
108
testSlotsUniqueEager
47
0
2
10
ivy/backends/jax/core/general.py
213,485
renamed dev_str arg to dev for all methods.
ivy
13
Python
38
general.py
def identity(n, dtype='float32', batch_shape=None, dev=None):
    dtype = _jnp.__dict__[dtype]
    mat = _jnp.identity(n, dtype=dtype)
    if batch_shape is None:
        return_mat = mat
    else:
        reshape_dims = [1]*len(batch_shape) + [n, n]
        tile_dims = list(batch_shape) + [1, 1]
        return_mat = _jnp.tile(_jnp.reshape(mat, reshape_dims), tile_dims)
    return to_dev(return_mat, default_device(dev))


meshgrid = lambda *xs, indexing='ij': _jnp.meshgrid(*xs, indexing=indexing)
d743336b1f3654cd0315f380f43eed4116997c1d
102
https://github.com/unifyai/ivy.git
88
def identity(n, dtype='float32', batch_shape=None, dev=None): dtype = _jnp.__dict__[dtype] mat = _jnp.iden
20
189
identity
137
1
5
17
jax/experimental/sparse/bcoo.py
122,207
[sparse] Move broadcasting_vmap to sparse util. PiperOrigin-RevId: 478566197
jax
15
Python
97
bcoo.py
def _bcoo_multiply_dense(data, indices, v, *, spinfo):
    # TODO(jakevdp): the logic here is similar to bcoo_extract... can we reuse that?
    shape = spinfo.shape
    if v.ndim == 0:
        return lax.mul(data, v)
    if shape == v.shape:
        # Note: due to distributive property, no deduplication necessary!
        return lax.mul(data, bcoo_extract(indices, v))

    if lax.broadcast_shapes(v.shape, shape) != shape:
        raise NotImplementedError(
            "multiplication between sparse and dense is only implemented for cases "
            "where the output shape matches the sparse matrix shape. Got "
            f"shape={shape}, v.shape={v.shape}")
    v = lax.expand_dims(v, range(len(shape) - v.ndim))

    props = _validate_bcoo(data, indices, shape)

    def _mul(data, indices, v):
        assert indices.shape[1] == v.ndim - props.n_dense
        ind = tuple(indices[:, i] for i in range(indices.shape[1]))
        ind = tuple(i if s != 1 else 0 for i, s in zip(ind, v.shape))
        return data * v[ind]

    for _ in range(props.n_batch):
        _mul = _broadcasting_vmap(_mul)
    return _mul(data, indices, v)


@tree_util.register_pytree_node_class
58a2abe1b5496acb177a5fd10394e001c381bff9
@tree_util.register_pytree_node_class
135
https://github.com/google/jax.git
189
def _bcoo_multiply_dense(data, indices, v, *, spinfo): # TODO(jakevdp): the logic here is similar to bcoo_extract... can we reuse that? shape = spinfo.shape if v.ndim == 0: return lax.mul(data, v) if shape == v.shape: # Note: due to distributive property, no deduplication necessary! return lax.mul(data, bcoo_extract(indices, v)) if lax.broadcast_shapes(v.shape, shape) != shape: raise NotImplementedError( "multiplication between sparse and dense is only implemented for cases " "where the output shape matches the sparse matrix shape. Got " f"shape={shape}, v.shape={v.shape}") v = lax.expand_dims(v, range(len(shape) - v.ndim)) props = _validate_bcoo(data, indices, shape) def _mul(data, indices, v): assert indices.shape[1] == v.ndim - props.n_dense ind = tuple(indices[:, i] for i in range(indices.shape[1])) ind = tuple(i if s != 1 else 0 for i, s in zip(ind, v.shape)) return data * v[ind] for _ in range(props.n_batch): _mul = _broadcasting_vmap(_mul) return _m
29
340
_bcoo_multiply_dense
43
0
1
19
rllib/evaluation/tests/test_env_runner_v2.py
125,409
[RLlib] Make sure we step() after adding init_obs. (#26827)
ray
15
Python
36
test_env_runner_v2.py
def test_sample_batch_rollout_single_agent_env(self):
    config = (
        PPOConfig()
        .framework("torch")
        .training(
            # Specifically ask for a batch of 200 samples.
            train_batch_size=200,
        )
        .rollouts(
            num_envs_per_worker=1,
            horizon=4,
            num_rollout_workers=0,
            # Enable EnvRunnerV2.
            enable_connectors=True,
        )
    )

    algo = PPO(config, env=DebugCounterEnv)
    rollout_worker = algo.workers.local_worker()
    sample_batch = rollout_worker.sample()

    self.assertEqual(sample_batch.env_steps(), 200)
    self.assertEqual(sample_batch.agent_steps(), 200)
0bc560bd541c320b0699464e8d23134c07899c18
95
https://github.com/ray-project/ray.git
262
def test_sample_batch_rollout_single_agent_env(self): config = ( PPOConfig() .framework("torch") .training( # Specifically ask for a batch of 200 samples. train_batch_size=200, ) .rollouts( num_envs_per_worker=1, horizon=4,
24
152
test_sample_batch_rollout_single_agent_env
160
0
6
51
freqtrade/wallets.py
148,964
Add dry-run position wallet calculation
freqtrade
15
Python
94
wallets.py
def _update_dry(self) -> None:
    # Recreate _wallets to reset closed trade balances
    _wallets = {}
    _positions = {}
    open_trades = Trade.get_trades_proxy(is_open=True)
    # If not backtesting...
    # TODO: potentially remove the ._log workaround to determine backtest mode.
    if self._log:
        tot_profit = Trade.get_total_closed_profit()
    else:
        tot_profit = LocalTrade.total_profit
    tot_in_trades = sum(trade.stake_amount for trade in open_trades)
    used_stake = 0.0

    if self._config.get('trading_mode', 'spot') != TradingMode.FUTURES:
        current_stake = self.start_cap + tot_profit - tot_in_trades
        total_stake = current_stake
        for trade in open_trades:
            curr = self._exchange.get_pair_base_currency(trade.pair)
            _wallets[curr] = Wallet(
                curr,
                trade.amount,
                0,
                trade.amount
            )
    else:
        tot_in_trades = 0
        for position in open_trades:
            # size = self._exchange._contracts_to_amount(position.pair, position['contracts'])
            size = position.amount
            # TODO-lev: stake_amount in real trades does not include the leverage ...
            collateral = position.stake_amount / position.leverage
            leverage = position.leverage
            tot_in_trades -= collateral
            _positions[position.pair] = PositionWallet(
                position.pair, position=size,
                leverage=leverage,
                collateral=collateral,
                side=position.trade_direction
            )
        current_stake = self.start_cap + tot_profit
        used_stake = tot_in_trades
        total_stake = current_stake - tot_in_trades

    _wallets[self._config['stake_currency']] = Wallet(
        currency=self._config['stake_currency'],
        free=current_stake,
        used=used_stake,
        total=total_stake
    )
    self._wallets = _wallets
    self._positions = _positions
13e74c5693e68ddb6b7afa4559ac23d2ec8ee26c
247
https://github.com/freqtrade/freqtrade.git
750
def _update_dry(self) -> None: # Recreate _wallets to reset closed trade balances _wallets = {} _positions = {} open_trades = Trade.get_trades_proxy(is_open=True) # If not backtesting... # TODO: potentially remove the ._log workaround to determine backtest mode. if self._log: tot_profit = Trade.get_total_closed_profit() else: tot_profit = LocalTrade.total_profit tot_in_trades = sum(trade.stake_amount for trade in open_trades) used_stake = 0.0 if self._config.get('trading_mode', 'spot') != TradingMode.FUTURES: current_stake = self.start_cap + tot_profit - tot_in_trades total_stake = current_stake for trade in open_trades: curr = self._exchange.get_pair_base_currency(trade.pair) _wallets[curr] = Wallet( curr, trade.amount, 0, trade.amount ) else: tot_in_trades = 0 for position in open_trades: # size = self._exchange._contracts_to_amount(position.pair, position['contracts']) size = position.amount # TODO-lev: stake_amount in real trades does not include the leverage ... collateral = position.stake_amount / position.leverage leverage = position.leverage tot_in_trades -= collateral _positions[position.pair] = PositionWallet( position.pair, position=size, leverage=leverage, collateral=collateral, side=position.trade_direction ) current_stake = self.start_cap + tot_profit used_stak
42
389
_update_dry
66
0
5
19
jina/orchestrate/deployments/__init__.py
13,205
feat: distributed replicas across different hosts (#5217)
jina
12
Python
54
__init__.py
def update_pod_args(self):
    if self.args.runtime_cls == 'GatewayRuntime':
        _set_gateway_uses(self.args)
    if isinstance(self.args, Dict):
        # This is used when a Deployment is created in a remote context, where pods & their connections are already given.
        self.pod_args = self.args
    else:
        self.pod_args = self._parse_args(self.args)

    if self.external:
        for pod, port, host, scheme, tls in zip(
            self.pod_args['pods'][0],
            self.ext_repl_ports,
            self.ext_repl_hosts,
            self.ext_repl_schemes,
            self.ext_repl_tls,
        ):
            pod.port = port
            pod.host = host
            pod.scheme = scheme
            pod.tls = tls
82960f105149c478e4fc88e8b4fef8bbe2454429
118
https://github.com/jina-ai/jina.git
302
def update_pod_args(self): if self.args.runtime_cls == 'GatewayRuntime': _set_gateway_uses(self.args) if isinstance(self.args, Dict): # This is used when a Deployment is created in a remote context, where pods & their connections are already given. self.pod_args = self.args else: self.pod_args = self._parse
20
184
update_pod_args
25
0
3
8
python/ray/tune/trial.py
132,833
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
12
Python
19
trial.py
def get_json_state(self) -> str:
    if not self._state_json or not self._state_valid:
        json_state = json.dumps(
            self.__getstate__(), indent=2, cls=TuneFunctionEncoder
        )
        self._state_json = json_state
        self._state_valid = True
    return self._state_json
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
52
https://github.com/ray-project/ray.git
97
def get_json_state(self) -> str:
12
82
get_json_state
28
0
1
15
mkdocs/tests/plugin_tests.py
224,347
Format code with `black -l100 --skip-string-normalization`
mkdocs
14
Python
24
plugin_tests.py
def test_plugin_config_multivalue_dict(self, mock_class):
    cfg = {
        'plugins': [
            {
                'sample': {
                    'foo': 'foo value',
                    'bar': 42,
                },
                'extra_key': 'baz',
            }
        ],
    }
    option = config.config_options.Plugins()
    with self.assertRaises(config.base.ValidationError):
        option.validate(cfg['plugins'])
dca7cbb43fcd6ea7c677c98ba585395b070d387b
65
https://github.com/mkdocs/mkdocs.git
221
def test_plugin_config_multivalue_dict(self, mock_class): cfg = { 'plugins': [ { 'sample': { 'foo': 'foo value', 'bar': 42, }, 'extra_key':
12
116
test_plugin_config_multivalue_dict
11
0
2
3
homeassistant/components/justnimbus/entity.py
297,451
Fix Just Nimbus error codes (#83856)
core
9
Python
11
entity.py
def available(self) -> bool:
    return super().available and self.coordinator.data is not None
cc5d3193698c107d6b56f6001ffb7707fb77bdef
23
https://github.com/home-assistant/core.git
25
def available(self) -> bool: retu
6
39
available
23
1
1
2
pandas/tests/util/test_assert_almost_equal.py
170,704
DEPR: Remove check_less_precise in asserters (#49461)
pandas
8
Python
23
test_assert_almost_equal.py
def test_assert_almost_equal_numbers_atol(a, b):
    # Equivalent to the deprecated check_less_precise=True, enforced in 2.0
    _assert_almost_equal_both(a, b, rtol=0.5e-3, atol=0.5e-3)


@pytest.mark.parametrize("a,b", [(1.1, 1.11), (0.1, 0.101), (0.000011, 0.001012)])
490c5d049890d8ea71ec5e2dc4ffa6196c10cc63
@pytest.mark.parametrize("a,b", [(1.1, 1.11), (0.1, 0.101), (0.000011, 0.001012)])
29
https://github.com/pandas-dev/pandas.git
27
def test_assert_almost_equal_numbers_atol(a, b): # Equivalent to the deprecated check_less_precise=True, enfor
9
72
test_assert_almost_equal_numbers_atol
27
0
1
6
pandas/tests/io/parser/test_converters.py
165,141
BUG: read_csv not respecting converter in all cases for index col (#46053)
pandas
13
Python
24
test_converters.py
def test_converter_identity_object(all_parsers):
    # GH#40589
    parser = all_parsers
    data = "A,B\n1,2\n3,4"

    rs = parser.read_csv(StringIO(data), converters={"A": lambda x: x})
    xp = DataFrame({"A": ["1", "3"], "B": [2, 4]})
    tm.assert_frame_equal(rs, xp)
7ee8ab07e538de55bd02f1ed5c2d211c7e342ddc
63
https://github.com/pandas-dev/pandas.git
44
def test_converter_identity_object(all_parsers): # GH#40589 parser = all_parsers data = "A,B\n1,2\n3,4" rs = parser.re
13
111
test_converter_identity_object
57
0
1
18
keras/layers/preprocessing/discretization_test.py
272,929
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
10
Python
38
discretization_test.py
def test_one_hot_output(self):
    input_data = np.array([-1.5, 1.0, 3.4, 3.5])
    expected_output = [
        [1.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
        [0.0, 0.0, 0.0, 1.0],
    ]
    expected_output_shape = [None, 4]

    inputs = keras.Input(shape=(1,))
    layer = discretization.Discretization(
        bin_boundaries=[0.0, 1.0, 2.0], output_mode="one_hot"
    )
    outputs = layer(inputs)
    self.assertAllEqual(expected_output_shape, outputs.shape.as_list())

    model = keras.Model(inputs, outputs)
    output_data = model(input_data)
    self.assertAllEqual(expected_output, output_data)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
196
https://github.com/keras-team/keras.git
195
def test_one_hot_output(self): input_data = np.array([-1.5, 1.0, 3.4, 3.5]) expected_output = [ [1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], ] expected_output_shape = [None, 4] inputs = keras.Input(shape=(1,)) layer = discretization.Discretization( bin_boundaries=[0.0, 1.0, 2.0], output_mode="one_hot" ) outputs = layer(inputs) self.assertAllEqual(expected_output_shape, outputs.shape.as_list()) model = keras.Model(inputs, outputs) output_data = model(input_data) self.assertAllEqual(expect
22
213
test_one_hot_output
43
0
1
19
python/ray/tune/tests/test_trial_scheduler.py
132,716
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
11
Python
30
test_trial_scheduler.py
def testMedianStoppingSoftStop(self):
    rule = MedianStoppingRule(
        metric="episode_reward_mean",
        mode="max",
        grace_period=0,
        min_samples_required=1,
        hard_stop=False,
    )
    t1, t2 = self.basicSetup(rule)
    runner = mock_trial_runner()
    rule.on_trial_complete(runner, t1, result(10, 1000))
    rule.on_trial_complete(runner, t2, result(10, 1000))
    t3 = Trial("PPO")
    self.assertEqual(
        rule.on_trial_result(runner, t3, result(1, 260)), TrialScheduler.CONTINUE
    )
    self.assertEqual(
        rule.on_trial_result(runner, t3, result(2, 260)), TrialScheduler.PAUSE
    )
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
129
https://github.com/ray-project/ray.git
196
def testMedianStoppingSoftStop(self): rule = MedianStoppingRule( metric="episode_reward_mean", mode="max", grace_period=0, min_samples_required=1, hard_stop=False, ) t1, t2 = self.basicSetup(rule) runner = mock_trial_runner() rule.on_trial_complete(runner, t1, result(10, 1000)) rule.on_trial_complete(runner, t2, result(10, 1000)) t3 = Trial("PPO") self.assertEqual( rule.on_trial_result(runner, t3, result(1, 260)), TrialScheduler.CONTINUE ) self.assertEqual( rule.on_trial_result(runner, t3, result(2, 260)), TrialScheduler.
23
194
testMedianStoppingSoftStop
17
0
1
7
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
63,458
upd; format
transferlearning
11
Python
13
pyparsing.py
def copy(self):
    ret = ParseResults(self.__toklist)
    ret.__tokdict = dict(self.__tokdict.items())
    ret.__parent = self.__parent
    ret.__accumNames.update(self.__accumNames)
    ret.__name = self.__name
    return ret
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
54
https://github.com/jindongwang/transferlearning.git
66
def copy(self): ret = ParseResults(self.__toklist) ret.__tokdict = dict(self.__tokdict.items()) ret.__parent = self.__parent ret.
12
90
copy
27
0
3
5
d2l/mxnet.py
157,706
sync lib
d2l-zh
11
Python
24
mxnet.py
def read_csv_labels(fname):
    with open(fname, 'r') as f:
        # Skip the header line (column names)
        lines = f.readlines()[1:]
    tokens = [l.rstrip().split(',') for l in lines]
    return dict(((name, label) for name, label in tokens))
1c2e25a557db446b5691c18e595e5664cc254730
62
https://github.com/d2l-ai/d2l-zh.git
53
def read_csv_labels(fname): with open(fname, 'r') as f:
13
106
read_csv_labels
21
0
1
9
tests/unit/config/test_configexc.py
320,824
Display close matches for invalid settings
qutebrowser
9
Python
19
test_configexc.py
def test_no_option_error(deleted, renamed, all_names, expected):
    e = configexc.NoOptionError(
        'opt',
        deleted=deleted,
        renamed=renamed,
        all_names=all_names,
    )
    assert e.option == 'opt'
    assert str(e) == expected
c9380605a1240748769c012403520323b4d2c3be
45
https://github.com/qutebrowser/qutebrowser.git
60
def test_no_option_error(deleted, renamed, all_names, expected): e = configexc.NoOptionError( 'opt', deleted=deleted, renamed=renamed, all_names=all_names, ) assert e.option == 'opt' assert str(e) == expected
10
68
test_no_option_error
38
0
1
20
tests/pytests/functional/utils/win_dacl/test_file.py
216,443
Add changelong
salt
11
Python
26
test_file.py
def test_has_permission_missing(test_file):
    result = win_dacl.set_permissions(
        obj_name=str(test_file),
        principal="Backup Operators",
        permissions="read_execute",
        access_mode="grant",
        obj_type="file",
        reset_perms=False,
        protected=None,
    )
    assert result is True

    # Test has_permission not exact
    result = win_dacl.has_permission(
        obj_name=str(test_file),
        principal="Backup Operators",
        permission="write",
        access_mode="grant",
        obj_type="file",
        exact=False,
    )
    assert result is False
5550d1823e9cb571740ae9e57b25424cfe6a919e
85
https://github.com/saltstack/salt.git
149
def test_has_permission_missing(test_file):
16
137
test_has_permission_missing
28
1
1
5
modin/pandas/test/test_series.py
153,924
FIX-#4411: Fix binary_op between datetime64 Series and pandas timedelta (#4592) Signed-off-by: Karthik Velayutham <vkarthik@ponder.io>
modin
11
Python
22
test_series.py
def test_add_series_to_timedeltaindex():
    # Make a pandas.core.indexes.timedeltas.TimedeltaIndex
    deltas = pd.to_timedelta([1], unit="h")
    test_series = create_test_series(np.datetime64("2000-12-12"))
    eval_general(*test_series, lambda s: s + deltas)
    eval_general(*test_series, lambda s: s - deltas)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
af7f4ed8ff0033a9a4e7d35a948f2057033bd826
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
53
https://github.com/modin-project/modin.git
41
def test_add_series_to_timedeltaindex(): # Make a pandas.core.indexes.timedeltas.TimedeltaIndex deltas = pd.to_timedelta([1], unit="h") test_series = create_test_series(np.datetim
17
114
test_add_series_to_timedeltaindex
37
0
5
15
pandas/core/indexing.py
163,268
TYP: Ignore numpy related issues (#45244)
pandas
16
Python
27
indexing.py
def _ensure_iterable_column_indexer(self, column_indexer):
    ilocs: Sequence[int]
    if is_integer(column_indexer):
        ilocs = [column_indexer]
    elif isinstance(column_indexer, slice):
        ilocs = np.arange(len(self.obj.columns))[  # type: ignore[assignment]
            column_indexer
        ]
    elif isinstance(column_indexer, np.ndarray) and is_bool_dtype(
        column_indexer.dtype
    ):
        ilocs = np.arange(len(column_indexer))[column_indexer]
    else:
        ilocs = column_indexer
    return ilocs
d603d43df2057ecdf74010d9dadc735e37f8f7b5
89
https://github.com/pandas-dev/pandas.git
175
def _ensure_iterable_column_indexer(self, column_indexer): ilocs: Sequence[int] if is_integer(column_indexer): ilocs = [column_indexer] elif isinstance(column_indexer, slice): ilocs = np.arange(len(self.obj.columns))[ # type: ignore[assignment] column_
17
144
_ensure_iterable_column_indexer
9
0
1
3
test/nodes/test_prompt_node.py
258,373
feat: Expand LLM support with PromptModel, PromptNode, and PromptTemplate (#3667) Co-authored-by: ZanSara <sarazanzo94@gmail.com>
haystack
11
Python
9
test_prompt_node.py
def test_run_invalid_template(prompt_node):
    with pytest.raises(ValueError, match="invalid-task not supported"):
        prompt_node.prompt("invalid-task", {})
9ebf164cfdfb320503b7161493420c1b0ec577a3
26
https://github.com/deepset-ai/haystack.git
18
def test_run_invalid_template(prompt_node): with pytest.raises(
7
47
test_run_invalid_template
18
0
1
7
tests/unit/bokeh/test_objects.py
212,106
Redesign serialization protocol (#11960) * Redesign serialization in bokeh * Redesign deserialization in bokehjs * Resolve type issues and test failures * Make 'bytes' serialization work in bokeh * Partially update bokeh's serialization tests * Resolve issues with cyclic references * Don't limit StaticGraphProvider to tuples * Make FixedTicker.ticks' type more flexible * Use np.array instead of np.ndarray * Remove references to BokehJSONEncoder * Resolve sphinx warnings related to JSON * Implement hybrid serialization for map/dict * Use === or !== with unset symbol * Finalize deserialization of refs * Remove 'old' attribute from ModelChangedEvent * Make ButtonClick.__init__ less restrictive * Use Map<number, ...> in StaticLayoutProvider.graph_layout * Start using Map<K, V> for non-string keys * Fix plotting/file/line_on_off example * Don't denormalize specs in bokehjs * Hack around issues with resolving figure model * Remove special cases from defaults' tests * Temporarily update unit/bokeh/test_objects * Promote streaming/patching events and remove hints * Allow to stream/patch any property in bokehjs * Drop unneeded Property.serializable_value() * Set callback_invoker on hinted events * Simplify unit/bokeh/test_objects.py * Always preserve ndarrays even for dtype="object" * Refine and normalize naming conventions * Remove unused functions * Move Model.to_json() to sphinxext.bokeh_model * Include references in serialized values * Actually encode data when streaming/patching * Robustify differential serialization * Allow bokehjs to send binary buffers * Add dtype=object code path to ColorSpec * Simplify definitions of data specs * Remove meaningless code comments * Introduce Bytes and replace Base64String * Add support for serialization of slices * Remove obsolete comment from property/dataspec.py * Add a comment regarding ndarray.tobytes() * Try serializing pandas' types last * Standardize error reporting * Resturucture bokehjs serialization code * Redesign default model resolution * Refactor 'kind' in document events * Don't depend on Document in Deserializer * Make Deserializer.encode() re-entrant * Move *Buffer to serialization/buffer * Finalize differential serialization * Serialize vectorized values as structures * Rename Event.{decode_json->from_serializable} * Don't use has_ref() in Model.to_serializable() * Handle circular object references in bokehjs * Reorganize serialization unit tests * Redesign model registry and qualified names * Remove the need for StaticSerializer * Make 'attributes' optional in type reps * Allow to serialize typed arrays as binary * Finalize handling of binary buffers * Use memoryview to further defer encoding * Test dict serialization and ordering * Downcast ndarrays {u}int{64->32} if possible * Add preliminary release/migration notes * Robustify encoding of objects and object refs * Remove support for serialization of relativedelta * Import pandas only if really necessary * Statically type bokeh.core.serialization * Add preliminary serialization's documentation * Add Deserializer.deserialize() for symmetric APIs * Handle streaming/patching/data events in io.notebook * Update handling of buffers in io.notebook * Properly serialize MessageSent event * Add a regression test for issue #11694 * Preserve order of inherited properties * Add support for serialization of symbols * Update defaults' tests to use type="object" * Move DocJson.version to the first entry * Add a preliminary regression test for #11930 * Fix integration/glyphs/rect_log_axis.py * Fix 
value detection in dataspecs involving String * Remove an unnecessary type assertion
bokeh
10
Python
18
test_objects.py
def test_get_class(self) -> None:
    from bokeh.model import get_class
    self.mkclass()
    tclass = get_class('test_objects.TestModelCls.mkclass.Test_Class')
    assert hasattr(tclass, 'foo')

    with pytest.raises(KeyError):
        get_class('Imaginary_Class')
fca16442ae90afcd2ac61f4e554e538776730831
43
https://github.com/bokeh/bokeh.git
63
def test_get_class(self) -> None: from bokeh.model import get_class self.mkclass() tclass = get_class('test_objects.TestModelCls.mkclass.Test_Class') assert hasattr(tclass, 'foo') with pytest.raises
11
78
test_get_class
21
0
3
5
src/transformers/models/xglm/modeling_tf_xglm.py
33,104
Add TF implementation of `XGLMModel` (#16543) * Add TFXGLM models * Add todo: self.supports_xla_generation = False Co-authored-by: Daniel Stancl <stancld@Daniels-MacBook-Pro.local> Co-authored-by: Daniel Stancl <stancld@daniels-mbp.home> Co-authored-by: Joao Gante <joaofranciscocardosogante@gmail.com> Co-authored-by: Daniel <daniel.stancl@rossum.ai> Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
transformers
14
Python
17
modeling_tf_xglm.py
def _reorder_cache(past, beam_idx):
    reordered_past = ()
    for layer_past in past:
        reordered_past += (tuple(tf.gather(past_state, beam_idx, axis=0) for past_state in layer_past),)
    return reordered_past
c72d7d91bf4899760725793421eff9da640c8527
42
https://github.com/huggingface/transformers.git
52
def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(tf.gather(past_sta
10
62
_reorder_cache
23
0
2
11
nni/compression/pytorch/quantization/observer_quantizer.py
113,641
[Compression] remove pruning v1 & refactor directory (#5228)
nni
11
Python
21
observer_quantizer.py
def quantize_input(self, inputs, wrapper, **kwargs):
    if self.compressed:
        module = wrapper.module
        inputs = self._quantize(inputs,
                                module.input_scale,
                                module.input_zero_point,
                                module.input_qmin,
                                module.input_qmax)
    else:
        self.record(wrapper, 'input', inputs)
    return inputs
d68c786ff81bad19c04619d6a999ff34aaa724e7
60
https://github.com/microsoft/nni.git
224
def quantize_input(self, inputs, wrapper, **kwargs): if self.compressed: module = wrapper.module
13
89
quantize_input
118
0
1
3
django/db/backends/postgresql/base.py
205,126
Refs #33476 -- Reformatted code with Black.
django
10
Python
83
base.py
def psycopg2_version():
    version = psycopg2.__version__.split(" ", 1)[0]
    return get_version_tuple(version)


PSYCOPG2_VERSION = psycopg2_version()

if PSYCOPG2_VERSION < (2, 8, 4):
    raise ImproperlyConfigured(
        "psycopg2 version 2.8.4 or newer is required; you have %s"
        % psycopg2.__version__
    )

# Some of these import psycopg2, so import them after checking if it's installed.
from .client import DatabaseClient  # NOQA
from .creation import DatabaseCreation  # NOQA
from .features import DatabaseFeatures  # NOQA
from .introspection import DatabaseIntrospection  # NOQA
from .operations import DatabaseOperations  # NOQA
from .schema import DatabaseSchemaEditor  # NOQA

psycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString)
psycopg2.extras.register_uuid()

# Register support for inet[] manually so we don't have to handle the Inet()
# object on load all the time.
INETARRAY_OID = 1041
INETARRAY = psycopg2.extensions.new_array_type(
    (INETARRAY_OID,),
    "INETARRAY",
    psycopg2.extensions.UNICODE,
)
psycopg2.extensions.register_type(INETARRAY)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
24
https://github.com/django/django.git
141
def psycopg2_version(): version = psycopg2.__version__.split(" ", 1)[0] return get_version_tuple(version) PSYCOPG2_VERSION = psycopg2_version() if PSYCOPG2_VERSION < (2, 8, 4): raise ImproperlyConfigured( "psycopg2 version 2.8.4 or newer is required; you have %s" % psycopg2.__version__ ) # Some of these import psycopg2, so import them after checking if it's installed. from .client import DatabaseClient # NOQA from .creation import DatabaseCreation # NOQA from .features import DatabaseFeatures # NOQA from .introspection import DatabaseIntrospection # NOQA from .operations import DatabaseOperations # NOQA from .schema import DatabaseSchemaEditor # NOQA psycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString) psycopg2.extras.register_uuid() # Register sup
31
233
psycopg2_version
23
0
3
6
netbox/utilities/forms/fields.py
264,225
Fixes #8317: Fix CSV import of multi-select custom field values
netbox
11
Python
20
fields.py
def to_python(self, value):
    if not value:
        return []
    if not isinstance(value, str):
        raise forms.ValidationError(f"Invalid value for a multiple choice field: {value}")
    return value.split(',')
7421e5f7d7e579ed1a0acf840c39ae61fd851504
38
https://github.com/netbox-community/netbox.git
65
def to_python(self, value): if not value: return [] if not isinstance(value, str): raise forms.Valid
8
67
to_python
64
0
3
8
keras/distribute/dataset_creator_model_fit_test.py
278,003
resolve line-too-long in distribute
keras
11
Python
44
dataset_creator_model_fit_test.py
def testModelPredict(self, strategy):
    _, predictions = self._model_predict(strategy, steps=3)
    # Check the first (0th index), fourth (3rd index) and the last
    # predictions because the first, fourth and the last input are the same
    # in `model.predict` so there predictions should match.
    self.assertTrue(
        all(predictions[0] == predictions[i] for i in [0, 3, 5])
    )
    self.assertFalse(
        all(predictions[0] == predictions[i] for i in [0, 1, 2, 4])
    )
b1105dca17670dcac229271e63d5073fe445b84c
77
https://github.com/keras-team/keras.git
141
def testModelPredict(self, strategy): _, predictions = self._model_predict(strategy, steps=3) # Check the first (0th index), fourth (3rd index) and the l
11
113
testModelPredict
66
0
7
21
youtube_dl/extractor/neteasemusic.py
106,522
[netease] Get netease music download url through player api (#31235) * remove unplayable song from test * compatible with python 2 * using standard User_Agent, fix imports * use hash instead of long description * fix lint * fix hash
youtube-dl
20
Python
54
neteasemusic.py
def extract_formats(self, info):
    formats = []
    song_id = info['id']
    for song_format in self._FORMATS:
        details = info.get(song_format)
        if not details:
            continue
        bitrate = int_or_none(details.get('bitrate')) or 999000
        data = self._call_player_api(song_id, bitrate)
        for song in try_get(data, lambda x: x['data'], list) or []:
            song_url = try_get(song, lambda x: x['url'])
            if self._is_valid_url(song_url, info['id'], 'song'):
                formats.append({
                    'url': song_url,
                    'ext': details.get('extension'),
                    'abr': float_or_none(song.get('br'), scale=1000),
                    'format_id': song_format,
                    'filesize': int_or_none(song.get('size')),
                    'asr': int_or_none(details.get('sr')),
                })
    return formats
c91cbf60729af93c4677864aa6c8b74b576146ca
176
https://github.com/ytdl-org/youtube-dl.git
369
def extract_formats(self, info): formats = [] song_id = info['id'] for song_format in self._FORMATS: details = info.get(song_format) if not details: continue bitrate = int_or_none(details.get('bitrate')) or 999000 data = self._call_player_api(song_id, bitrate) for song in try_get(data, lambda x: x['data'], list) or []: song_url = try_get(song, lambda x: x['url']) if self._is_valid_url(song_url, info['id'], 'song'): formats.append({ 'url': song_url, 'ext': details.get('extension'), 'abr': float_or_none(song.get('br'), scale=1000),
22
296
extract_formats
280
0
21
89
test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py
266,845
Extend validate-modules to also validate plugins (#71734) * Let validate-modules also validate plugins. * Support 'option' in 'cli'. * Use DOCUMENTABLE_PLUGINS instead of UNDOCUMENTED_PLUGIN_TYPES. * Support 'keyword', clean up error codes. * Call settings.process_errors only once; remove __version__. * Add changelog fragment.
ansible
19
Python
167
main.py
def _check_for_new_args(self, doc):
    if not self.base_branch or self._is_new_module():
        return

    with CaptureStd():
        try:
            existing_doc, dummy_examples, dummy_return, existing_metadata = get_docstring(
                self.base_module, fragment_loader, verbose=True, collection_name=self.collection_name,
                is_module=self.plugin_type == 'module')
            existing_options = existing_doc.get('options', {}) or {}
        except AssertionError:
            fragment = doc['extends_documentation_fragment']
            self.reporter.warning(
                path=self.object_path,
                code='missing-existing-doc-fragment',
                msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment
            )
            return
        except Exception as e:
            self.reporter.warning_trace(
                path=self.object_path,
                tracebk=e
            )
            self.reporter.warning(
                path=self.object_path,
                code='unknown-doc-fragment',
                msg=('Unknown pre-existing DOCUMENTATION error, see TRACE. Submodule refs may need updated')
            )
            return

    try:
        mod_collection_name = existing_doc.get('version_added_collection')
        mod_version_added = self._create_strict_version(
            str(existing_doc.get('version_added', '0.0')),
            collection_name=mod_collection_name)
    except ValueError:
        mod_collection_name = self.collection_name
        mod_version_added = self._create_strict_version('0.0')

    options = doc.get('options', {}) or {}

    should_be = '.'.join(ansible_version.split('.')[:2])
    strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')

    for option, details in options.items():
        try:
            names = [option] + details.get('aliases', [])
        except (TypeError, AttributeError):
            # Reporting of this syntax error will be handled by schema validation.
            continue

        if any(name in existing_options for name in names):
            # The option already existed. Make sure version_added didn't change.
            for name in names:
                existing_collection_name = existing_options.get(name, {}).get('version_added_collection')
                existing_version = existing_options.get(name, {}).get('version_added')
                if existing_version:
                    break
            current_collection_name = details.get('version_added_collection')
            current_version = details.get('version_added')
            if current_collection_name != existing_collection_name:
                self.reporter.error(
                    path=self.object_path,
                    code='option-incorrect-version-added-collection',
                    msg=('version_added for existing option (%s) should '
                         'belong to collection %r. Currently belongs to %r' %
                         (option, current_collection_name, existing_collection_name))
                )
            elif str(current_version) != str(existing_version):
                self.reporter.error(
                    path=self.object_path,
                    code='option-incorrect-version-added',
                    msg=('version_added for existing option (%s) should '
                         'be %r. Currently %r' %
                         (option, existing_version, current_version))
                )
            continue

        try:
            collection_name = details.get('version_added_collection')
            version_added = self._create_strict_version(
                str(details.get('version_added', '0.0')),
                collection_name=collection_name)
        except ValueError as e:
            # already reported during schema validation
            continue

        if collection_name != self.collection_name:
            continue

        if (strict_ansible_version != mod_version_added and
                (version_added < strict_ansible_version or
                 strict_ansible_version < version_added)):
            self.reporter.error(
                path=self.object_path,
                code='option-incorrect-version-added',
                msg=('version_added for new option (%s) should '
                     'be %r. Currently %r' %
                     (option, should_be, version_added))
            )

    return existing_doc
0990c4ca7cb1b239a76e8cdb78af01ca9601731e
522
https://github.com/ansible/ansible.git
1,735
def _check_for_new_args(self, doc): if not self.base_branch or self._is_new_module(): return with CaptureStd(): try: existing_doc, dummy_examples, dummy_return, existing_metadata = get_docstring( self.base_module, fragment_loader, verbose=True, collection_name=self.collection_name, is_module=self.plugin_type == 'module') existing_options = existing_doc.get('options', {}) or {} except AssertionError: fragment = doc['extends_documentation_fragment'] self.reporter.warning( path=self.object_path, code='missing-existing-doc-fragment', msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment ) return except Exception as e: self.reporter.warning_trace( path=self.object_path, tracebk=e ) self.reporter.warning( path=self.object_path, code='unknown-doc-fragment', msg=('Unknown pre-existing DOCUMENTATION error, see TRACE. Submodule refs may need updated') ) return try: mod_collection_name = existing_doc.get('version_added_collection') mod_version_added = self._create_strict_version( str(existing_doc.get('version_added', '0.0')), collection_name=mod_collection_name) except ValueError: mod_collection_name = self.collection_name mod_version_added = self._create_strict_version('0.0') options = doc.get('options', {}) or {} should_be = '.'.join(ansible_version.split('.')[:2]) strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin') for option, details in options.items(): try: names = [option] + details.get('aliases', []) except (TypeError, AttributeError): # Reporting of this syntax error will be handled by schema validation. continue if any(name in existing_options for name in names): # The option already existed. Make sure versio
56
868
_check_for_new_args
33
0
1
20
release/ray_release/tests/test_cluster_manager.py
145,008
[ci/release] Refactor release test e2e into package (#22351) Adds a unit-tested and restructured ray_release package for running release tests. Relevant changes in behavior: Per default, Buildkite will wait for the wheels of the current commit to be available. Alternatively, users can a) specify a different commit hash, b) a wheels URL (which we will also wait for to be available) or c) specify a branch (or user/branch combination), in which case the latest available wheels will be used (e.g. if master is passed, behavior matches old default behavior). The main subpackages are: Cluster manager: Creates cluster envs/computes, starts cluster, terminates cluster Command runner: Runs commands, e.g. as client command or sdk command File manager: Uploads/downloads files to/from session Reporter: Reports results (e.g. to database) Much of the code base is unit tested, but there are probably some pieces missing. Example build (waited for wheels to be built): https://buildkite.com/ray-project/kf-dev/builds/51#_ Wheel build: https://buildkite.com/ray-project/ray-builders-branch/builds/6023
ray
15
Python
30
test_cluster_manager.py
def testFindCreateClusterEnvExisting(self):
    # Find existing env and succeed
    self.cluster_manager.set_cluster_env(self.cluster_env)
    self.assertTrue(self.cluster_manager.cluster_env_name)
    self.assertFalse(self.cluster_manager.cluster_env_id)

    self.sdk.returns["search_cluster_environments"] = APIDict(
        metadata=APIDict(
            next_paging_token=None,
        ),
        results=[
            APIDict(
                name="no_match",
                id="wrong",
            ),
            APIDict(name=self.cluster_manager.cluster_env_name, id="correct"),
        ],
    )

    self.cluster_manager.create_cluster_env()
    self.assertEqual(self.cluster_manager.cluster_env_id, "correct")
    self.assertEqual(self.sdk.call_counter["search_cluster_environments"], 1)
    self.assertEqual(len(self.sdk.call_counter), 1)
331b71ea8dfee20bd71f6529fa372fd9d91c9ff4
138
https://github.com/ray-project/ray.git
244
def testFindCreateClusterEnvExisting(self): # Find existing env and succeed self.cluster_manager.set_cluster_env(self.cluster_env) self.assertTrue(self.cluster_manager.cluster_env_name) self.assertFalse(self.cluster_manager.cluster_env_id) self.sdk.returns["search_cluster_environments"] = APIDict( metadata=APIDict( next_paging_token=None, ), results=[ APIDict( name="no_match", id="wrong", ), APIDict(name=self.cluster_manager.cluster_env_name, id="correct"),
21
222
testFindCreateClusterEnvExisting
117
0
3
25
sympy/algebras/tests/test_quaternion.py
200,602
minor edit
sympy
13
Python
38
test_quaternion.py
def test_to_euler():
    q = Quaternion(w, x, y, z)
    norm_of_q = Quaternion(q.norm())

    # Extrinsic rotations
    for seq_tuple in permutations('xyz'):
        # asymmetric sequences
        seq = ''.join(seq_tuple)
        euler_from_q = q.to_euler(seq)
        q_back = Quaternion.from_euler(euler_from_q, seq)
        q_diff = simplify(q * q_back.conjugate())
        assert q_diff == norm_of_q

        # symmetric sequences
        seq = ''.join([seq_tuple[0], seq_tuple[1], seq_tuple[0]])
        euler_from_q = q.to_euler(seq)
        q_back = Quaternion.from_euler(euler_from_q, seq)
        q_diff = simplify(q * q_back.conjugate())
        assert q_diff == norm_of_q

    # Intrinsic rotations
    for seq_tuple in permutations('XYZ'):
        # asymmetric sequences
        seq = ''.join(seq_tuple)
        euler_from_q = q.to_euler(seq)
        q_back = Quaternion.from_euler(euler_from_q, seq)
        q_diff = simplify(q * q_back.conjugate())
        assert q_diff == norm_of_q

        # symmetric sequences
        seq = ''.join([seq_tuple[0], seq_tuple[1], seq_tuple[0]])
        euler_from_q = q.to_euler(seq)
        q_back = Quaternion.from_euler(euler_from_q, seq)
        q_diff = simplify(q * q_back.conjugate())
        assert q_diff == norm_of_q
6fe28f68866ac6fb1aea564dbde99190cec9c1ff
240
https://github.com/sympy/sympy.git
302
def test_to_euler(): q = Quaternion(w, x, y, z) norm_of_q = Quaternion(q.norm()) # Extrinsic rotations for seq_tuple in permutations('xyz'): # asymmetric sequences seq = ''.join(seq_tuple) euler_from_q = q.to_euler(seq) q_back = Quaternion.from_euler(euler_from_q, seq) q_diff = simplify(q * q_back.conjugate()) assert q_diff == norm_of_q # symmetric sequences seq = ''.join([seq_tup
20
389
test_to_euler
34
0
1
12
tests/handlers/test_auth.py
247,489
Add some type hints to the tests.handlers module. (#12207)
synapse
11
Python
30
test_auth.py
def test_short_term_login_token_gives_user_id(self) -> None:
    token = self.macaroon_generator.generate_short_term_login_token(
        self.user1, "", duration_in_ms=5000
    )
    res = self.get_success(self.auth_handler.validate_short_term_login_token(token))
    self.assertEqual(self.user1, res.user_id)
    self.assertEqual("", res.auth_provider_id)

    # when we advance the clock, the token should be rejected
    self.reactor.advance(6)
    self.get_failure(
        self.auth_handler.validate_short_term_login_token(token),
        AuthError,
    )
e10a2fe0c28ec9206c0e2275df492f61ff5025f2
86
https://github.com/matrix-org/synapse.git
129
def test_short_term_login_token_gives_user_id(self) -> None: token = self.macaroon_generator.generate_short_term_login_token( self.user1, "", duration_in_ms=5000 ) res = self.get_success(self.auth_handler.validate_short_term_login_token(token)) self.assertEqual(self.user1, res.user_id) self.assertEqual("", res.auth_provider_id) # when we advance the clock, the token should be rejected self.reactor.advance(6) self.get_failure( self.auth_handler.validate_short_term_login_token(token), AuthError, )
18
137
test_short_term_login_token_gives_user_id
9
0
2
27
tests/unit/serve/runtimes/worker/test_worker_runtime.py
13,972
fix: list-like args passed as string (#5464) Co-authored-by: Alaeddine Abdessalem <alaeddine-13@live.fr>
jina
8
Python
8
test_worker_runtime.py
async def test_worker_runtime_reflection():
    args = _generate_pod_args()
    cancel_event = multiprocessing.Event()
87912a37ce7ab3c3b63c12b48d6cdfe31f81742c
125
https://github.com/jina-ai/jina.git
14
async def test_worker_runtime_reflection(): args = _generate_pod_args() cancel_event = multiprocessing.E
6
30
test_worker_runtime_reflection
58
0
2
18
nuitka/utils/Download.py
179,000
Windows: Updated MinGW64 compiler to be used
Nuitka
13
Python
50
Download.py
def getCachedDownloadedMinGW64(target_arch, assume_yes_for_downloads):
    # Large URLs, pylint: disable=line-too-long

    if target_arch == "x86_64":
        url = "https://github.com/brechtsanders/winlibs_mingw/releases/download/11.2.0-14.0.0-9.0.0-msvcrt-r7/winlibs-x86_64-posix-seh-gcc-11.2.0-llvm-14.0.0-mingw-w64msvcrt-9.0.0-r7.zip"
        binary = r"mingw64\bin\gcc.exe"
    else:
        url = "https://github.com/brechtsanders/winlibs_mingw/releases/download/11.2.0-14.0.0-9.0.0-msvcrt-r7/winlibs-i686-posix-dwarf-gcc-11.2.0-llvm-14.0.0-mingw-w64msvcrt-9.0.0-r7.zip"
        binary = r"mingw32\bin\gcc.exe"

    gcc_binary = getCachedDownload(
        url=url,
        is_arch_specific=target_arch,
        specificity=url.rsplit("/", 2)[1],
        binary=binary,
        flatten=False,
        message="Nuitka will use gcc from MinGW64 of winlibs to compile on Windows.",
        reject="Only this specific gcc is supported with Nuitka.",
        assume_yes_for_downloads=assume_yes_for_downloads,
    )

    return gcc_binary
c6b19fa56bbd6d14728f152e92b9001dc76dd550
77
https://github.com/Nuitka/Nuitka.git
159
def getCachedDownloadedMinGW64(target_arch, assume_yes_for_downloads): # Large URLs, pylint: disable=line-too-long if target_arch == "x86_64": url = "https://github.com/brechtsanders/winlibs_mingw/releases/download/11.2.0-14.0.0-9.0.0-msvcrt-r7/winlibs-x86_64-posix-seh-gcc-11.2.0-llvm-14.0.0-mingw-w64msvcrt-9.0.0-r7.zip" binary = r"mingw64\bin\gcc.exe" else: url = "https://github.com/brechtsanders/winlibs_mingw/releases/download/11.2.0-14.0.0-9.0.0-msvcrt-r7/winlibs-i686-posix-dwarf-gcc-11.2.0-llvm-14.0.0-mingw-w64msvcrt-9.0.0-r7.zip" binary = r"mingw32\bin\gcc.exe" gcc_binary = getCachedDownload( url=url, is_arch_specific=target_arch,
13
126
getCachedDownloadedMinGW64
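A small aside on the snippet above: the `specificity=url.rsplit("/", 2)[1]` argument extracts the release-tag segment of the download URL, presumably so the cached download is keyed per winlibs release. A quick check of that expression against the x86_64 URL used above:

url = "https://github.com/brechtsanders/winlibs_mingw/releases/download/11.2.0-14.0.0-9.0.0-msvcrt-r7/winlibs-x86_64-posix-seh-gcc-11.2.0-llvm-14.0.0-mingw-w64msvcrt-9.0.0-r7.zip"
# rsplit("/", 2) keeps everything before the last two slashes together,
# so index 1 is the release tag and index 2 is the archive file name
assert url.rsplit("/", 2)[1] == "11.2.0-14.0.0-9.0.0-msvcrt-r7"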
166
0
1
59
tests/sentry/incidents/test_subscription_processor.py
96,421
fix(metric_alerts): Make sure critical triggers resolve properly when no action is set on a warning trigger (#31883) ### Problem If we have an alert set up like: - Warning: 50. Action: None - Critical: 100. Action: Slack Then if we go from critical -> warning state the slack resolve action will fail to fire. ### Cause The reason this happens is related to a previous fix. For an alert like - Warning: 50. Action: Slack - Critical: 100. Action: Slack When going from critical -> warning the critical action would be marked as resolved. This would cause a slack notification with `Resolved` to be sent to the channel. This is misleading, because the alert is still active, just in the warning state. What we want here is to fire a warning notification instead. The initial fix for this was that when we resolved a critical trigger, we’d check and see whether there was an active warning trigger. If so, we’d send a warning trigger fire to our actions, rather than a critical trigger resolve. This works ok for many cases, but fails when the actions on the warning trigger are different to those on the critical trigger. ### Fix Substituting the warning trigger for the critical trigger causes us subtle bugs. So, instead of this, when triggering fires/resolves on our action handlers we will also pass along the incident state change that the trigger/resolve caused the incident to go into. So if a critical trigger resolves, we check what state it would have put the incident in. If there’s a warning trigger, then the state is warning. If no warning trigger, the state is closed. This state is then used to appropriately generate the messages that we send to users via our various actions. So now, If we have an alert set up like: - Warning: 50. Action: None - Critical: 100. Action: Slack If this goes from - critical -> warning OR critical -> resolved we will send `IncidentStatus.WARNING` to any actions related to the critical trigger. - warning -> resolved We do nothing since there are no actions on the warning trigger If we have an alert set up like: - Warning: 50. Action: Slack - Critical: 100. Action: Slack If this goes from: - critical -> warning: critical trigger, `IncidentStatus.Warning` - warning -> resolved: warning trigger, `IncidentStatus.Closed` - critical -> resolved: Since we de-dupe triggers to avoid spamming the user, we will select the warning trigger here, and send `IncidentStatus.closed` If we have an alert set up like: - Warning: 50. Action: Slack - Critical: 100. Action: Pagerduty If this goes from: - critical -> warning: critical trigger, `IncidentStatus.Warning` sent to Pagerduty. Nothing sent to Slack - warning -> resolved: warning trigger, `IncidentStatus.Closed` sent to Slack. Nothing sent to Pagerduty - critical -> resolved: Critical trigger, `IncidentStatus.Warning` sent to Pagerduty. Warning trigger, `IncidentStatus.Closed` sent to Slack. We don’t de-dupe here since the actions are different.
sentry
12
Python
58
test_subscription_processor.py
def test_multiple_triggers(self): rule = self.rule rule.update(threshold_period=1) trigger = self.trigger warning_trigger = create_alert_rule_trigger( self.rule, WARNING_TRIGGER_LABEL, trigger.alert_threshold - 20 ) warning_action = create_alert_rule_trigger_action( warning_trigger, AlertRuleTriggerAction.Type.EMAIL, AlertRuleTriggerAction.TargetType.USER, str(self.user.id), ) processor = self.send_update( rule, warning_trigger.alert_threshold + 1, timedelta(minutes=-10), subscription=self.sub ) self.assert_trigger_counts(processor, warning_trigger, 0, 0) self.assert_trigger_counts(processor, trigger, 0, 0) incident = self.assert_active_incident(rule, self.sub) self.assert_trigger_exists_with_status(incident, warning_trigger, TriggerStatus.ACTIVE) self.assert_trigger_does_not_exist(trigger) self.assert_actions_fired_for_incident( incident, [warning_action], [(warning_trigger.alert_threshold + 1, IncidentStatus.WARNING)], ) processor = self.send_update( rule, trigger.alert_threshold + 1, timedelta(minutes=-9), subscription=self.sub ) self.assert_trigger_counts(processor, trigger, 0, 0) self.assert_trigger_counts(processor, warning_trigger, 0, 0) incident = self.assert_active_incident(rule, self.sub) self.assert_trigger_exists_with_status(incident, warning_trigger, TriggerStatus.ACTIVE) self.assert_trigger_exists_with_status(incident, trigger, TriggerStatus.ACTIVE) self.assert_actions_fired_for_incident( incident, [self.action], [(trigger.alert_threshold + 1, IncidentStatus.CRITICAL)] ) processor = self.send_update( rule, trigger.alert_threshold - 1, timedelta(minutes=-7), subscription=self.sub ) self.assert_trigger_counts(processor, trigger, 0, 0) self.assert_trigger_counts(processor, warning_trigger, 0, 0) incident = self.assert_active_incident(rule, self.sub) self.assert_trigger_exists_with_status(incident, trigger, TriggerStatus.RESOLVED) self.assert_trigger_exists_with_status(incident, warning_trigger, TriggerStatus.ACTIVE) self.assert_actions_resolved_for_incident( incident, [self.action], [(trigger.alert_threshold - 1, IncidentStatus.WARNING)] ) processor = self.send_update( rule, rule.resolve_threshold - 1, timedelta(minutes=-6), subscription=self.sub ) self.assert_trigger_counts(processor, trigger, 0, 0) self.assert_trigger_counts(processor, warning_trigger, 0, 0) self.assert_no_active_incident(rule, self.sub) self.assert_trigger_exists_with_status(incident, trigger, TriggerStatus.RESOLVED) self.assert_trigger_exists_with_status(incident, warning_trigger, TriggerStatus.RESOLVED) self.assert_actions_resolved_for_incident( incident, [warning_action], [(rule.resolve_threshold - 1, IncidentStatus.CLOSED)] )
146fba432a32568be7d0b884dae0c39a6c33a11f
512
https://github.com/getsentry/sentry.git
631
def test_multiple_triggers(self): rule = self.rule rule.update(threshold_period=1) trigger = self.trigger warning_trigger = create_alert_rule_trigger( self.rule, WARNING_TRIGGER_LABEL, trigger.alert_threshold - 20 ) warning_action = create_alert_rule_trigger_action( warning_trigger, AlertRuleTriggerAction.Type.EMAIL, AlertRuleTriggerAction.TargetType.USER, str(self.user.id), ) processor = self.send_update( rule, warning_trigger.alert_threshold + 1, timedelta(minutes=-10), subscription=self.sub ) self.assert_trigger_counts(processor, warning_trigger, 0, 0) self.assert_trigger_counts(processor, trigger, 0, 0) incident = self.assert_active_incident(rule, self.sub) self.assert_trigger_exists_with_status(incident, warning_trigger, TriggerStatus.ACTIVE) self.assert_trigger_does_not_exist(trigger) self.assert_actions_fired_for_incident( incident, [warning_action], [(warning_trigger.alert_threshold + 1, IncidentStatus.WARNING)], ) processor = self.send_update( rule, trigger.alert_threshold + 1, timedelta(minutes=-9), subscription=self.sub ) self.assert_trigger_counts(processor, trigger, 0, 0) self.assert_trigger_counts(processor, warning_trigger, 0, 0) incident = self.assert_active_incident(rule, self.sub) self.assert_trigger_exists_with_status(incident,
43
734
test_multiple_triggers
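The commit message for this record describes the rule precisely: when a trigger fires or resolves, work out which incident state the change leaves the incident in (critical if a critical trigger is still active, warning if only a warning trigger is, closed otherwise) and pass that state to the trigger's action handlers. The sketch below is a minimal illustration of that rule, not Sentry's actual implementation; the function name, argument names, and enum values are made up for the example.

from enum import Enum

class IncidentStatus(Enum):
    CLOSED = 0
    WARNING = 1
    CRITICAL = 2

def incident_status_after_change(critical_active: bool, warning_active: bool) -> IncidentStatus:
    # Derive the incident status implied by whichever triggers remain active.
    if critical_active:
        return IncidentStatus.CRITICAL
    if warning_active:
        return IncidentStatus.WARNING
    return IncidentStatus.CLOSED

# critical -> warning: the critical trigger resolves, but its actions fire with
# IncidentStatus.WARNING because a warning trigger is still active.
assert incident_status_after_change(False, True) is IncidentStatus.WARNING
# warning -> resolved: nothing is active any more, so actions fire with CLOSED.
assert incident_status_after_change(False, False) is IncidentStatus.CLOSED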
24
0
1
7
tests/test_builder.py
105,930
Multiprocessed dataset builder [WIP] (#5107) * multiprocessing-compatible naming scheme and refactor * multiprocessed shard writing for GeneratorBasedBuilder * multiprocessed shard writing for ArrowBasedBuilder * style * multiprocessed dataset loading * compatibility with non-sharded datasets * bugfix * bugfix * removed unused import * fixed bad ordering * less misleading tqdm * fix gen_kwargs distribution + read shards * minor * minor2 * support beam datasets * docstrings + minor * add iflatmap_unordered for parallel write & progress updates * use 1 tqdm bar receiving updates from subprocesses * docs * add test_iflatmap_unordered * style * test arrow_reader.py * fix test_iflatmap_unordered * add Beam test_download_and_prepare_sharded * test gen_kwargs distribution * test download_and_prepare with num_proc * style * improve test * don't close the pool * fix multiprocessing on windows * keep multiprocessing disabled by default * again + docs * more docs * more docs * some var renaming * style * Apply suggestions from code review Co-authored-by: Mario Šaško <mariosasko777@gmail.com> * Apply suggestions from code review Co-authored-by: Mario Šaško <mariosasko777@gmail.com> * added utils/sharding.py * style * style Co-authored-by: Quentin Lhoest <lhoest.q@gmail.com> Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> Co-authored-by: Mario Šaško <mariosasko777@gmail.com>
datasets
11
Python
21
test_builder.py
def test_generator_based_builder_download_and_prepare_as_parquet(tmp_path): builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path) builder.download_and_prepare(file_format="parquet") assert builder.info.splits["train"].num_examples == 100 parquet_path = os.path.join(tmp_path, builder.name, "default", "0.0.0", f"{builder.name}-train.parquet") assert os.path.exists(parquet_path) assert pq.ParquetFile(parquet_path) is not None
2945690ea731f85a356220a71cdc630281c676f4
74
https://github.com/huggingface/datasets.git
41
def test_generator_based_builder_download_and_prepare_as_parquet(tmp_path): builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path) builder.download_and_prepare(file_format="parquet") assert builder.info.splits["train"].num_examples == 100 parquet_path = os.path.join(tmp_path, builder.name, "default", "0.0.0", f"{builder.name}-train.parquet")
18
127
test_generator_based_builder_download_and_prepare_as_parquet
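Several bullets in the commit message for this record ("fix gen_kwargs distribution + read shards", "added utils/sharding.py", "test gen_kwargs distribution") revolve around splitting list-valued `gen_kwargs` across worker processes so each process writes its own shards. The following is only a rough sketch of that idea, not the library's actual `utils/sharding.py`:

def distribute_gen_kwargs(gen_kwargs: dict, num_jobs: int) -> list:
    """Hypothetical sketch: give each job an interleaved slice of every list value."""
    jobs = [dict(gen_kwargs) for _ in range(num_jobs)]
    for key, value in gen_kwargs.items():
        if isinstance(value, list):
            for job_id in range(num_jobs):
                jobs[job_id][key] = value[job_id::num_jobs]
    return jobs

distribute_gen_kwargs({"files": ["a", "b", "c", "d"], "split": "train"}, num_jobs=2)
# -> [{'files': ['a', 'c'], 'split': 'train'}, {'files': ['b', 'd'], 'split': 'train'}]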
12
0
1
4
keras/mixed_precision/autocast_variable.py
274,898
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
9
Python
11
autocast_variable.py
def scatter_sub(self, sparse_delta, use_locking=False, name=None): return self._apply_update( self._variable.scatter_sub, sparse_delta, use_locking, name )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
32
https://github.com/keras-team/keras.git
36
def scatter_sub(self, sparse_delta, use_locking=False, name=None): return self._apply_update( self
7
44
scatter_sub
12
0
4
20
test/test_linalg.py
102,289
Remove random_fullrank_matrix_distinc_singular_value (#68183) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/68183 We do so in favour of `make_fullrank_matrices_with_distinct_singular_values` as this latter one not only has an even longer name, but also generates inputs correctly for them to work with the PR that tests noncontig inputs latter in this stack. We also heavily simplified the generation of samples for the SVD, as it was fairly convoluted and it was not generating the inputs correclty for the noncontiguous test. To do the transition, we also needed to fix the following issue, as it was popping up in the tests: Fixes https://github.com/pytorch/pytorch/issues/66856 cc jianyuh nikitaved pearu mruberry walterddr IvanYashchuk xwang233 Lezcano Test Plan: Imported from OSS Reviewed By: ngimel Differential Revision: D32684853 Pulled By: mruberry fbshipit-source-id: e88189c8b67dbf592eccdabaf2aa6d2e2f7b95a4
Remove random_fullrank_matrix_distinc_singular_value (#68183) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/68183 We do so in favour of `make_fullrank_matrices_with_distinct_singular_values` as this latter one not only has an even longer name, but also generates inputs correctly for them to work with the PR that tests noncontig inputs later in this stack. We also heavily simplified the generation of samples for the SVD, as it was fairly convoluted and it was not generating the inputs correctly for the noncontiguous test. To do the transition, we also needed to fix the following issue, as it was popping up in the tests: Fixes https://github.com/pytorch/pytorch/issues/66856 cc jianyuh nikitaved pearu mruberry walterddr IvanYashchuk xwang233 Lezcano Test Plan: Imported from OSS Reviewed By: ngimel Differential Revision: D32684853 Pulled By: mruberry fbshipit-source-id: e88189c8b67dbf592eccdabaf2aa6d2e2f7b95a4
pytorch
9
Python
11
test_linalg.py
def test_inverse(self, device, dtype): make_fullrank = make_fullrank_matrices_with_distinct_singular_values make_arg = partial(make_fullrank, device=device, dtype=dtype)
baeca11a21e285d66ec3e4103c29dfd0b0245b85
175
https://github.com/pytorch/pytorch.git
25
def test_inverse(self, device, dtype): make_fullrank = make_fullrank_matrices_with_distinct_singular_value
8
38
test_inverse
35
0
1
3
netbox/dcim/models/cables.py
264,783
Migrate CablePath to use two-dimensional array
netbox
9
Python
30
cables.py
def save(self, *args, **kwargs): super().save(*args, **kwargs) # Save the flattened nodes list self._nodes = flatten_path(self.path) # TODO # Record a direct reference to this CablePath on its originating object # model = self.origin._meta.model # model.objects.filter(pk=self.origin.pk).update(_path=self.pk)
82706eb3a68e963d7ac089478788b87892d4ee79
33
https://github.com/netbox-community/netbox.git
83
def save(self, *args, **kwargs): super().save(*args, **kwargs) # Save the flattened nodes list self._nodes = flatten_path(self.path)
8
58
save
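`flatten_path()` is referenced in the snippet above but not shown. Given the commit's premise that `path` is now a two-dimensional array (a list of hops, each hop containing one or more node references), a flattening helper could look roughly like the sketch below; this is an assumption for illustration, not NetBox's actual code.

def flatten_path(path):
    # Collapse [[hop-1 nodes], [hop-2 nodes], ...] into a single flat node list.
    nodes = []
    for hop in path:
        nodes.extend(hop)
    return nodes

flatten_path([["cable-1"], ["frontport-1", "frontport-2"], ["interface-9"]])
# -> ['cable-1', 'frontport-1', 'frontport-2', 'interface-9']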
7
0
1
3
homeassistant/components/wallbox/number.py
313,748
Migrate NumberEntity u-z to native_value (#73488)
core
10
Python
7
number.py
def native_max_value(self) -> float: return cast(float, self._coordinator.data[CHARGER_MAX_AVAILABLE_POWER_KEY])
576de9ac4052c90b8737e41110d05f06f41d000e
22
https://github.com/home-assistant/core.git
21
def native_max_value(self) -> float: return cast(float,
7
36
max_value
34
0
5
11
python/ray/util/collective/collective_group/nccl_util.py
133,014
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
11
Python
27
nccl_util.py
def get_tensor_n_elements(tensor): if isinstance(tensor, cupy.ndarray) or isinstance(tensor, numpy.ndarray): return tensor.size if torch_available(): if isinstance(tensor, torch.Tensor): return torch.numel(tensor) raise ValueError( "Unsupported tensor type. Got: {}. Supported " "GPU tensor types are: torch.Tensor, " "cupy.ndarray.".format(type(tensor)) )
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
66
https://github.com/ray-project/ray.git
95
def get_tensor_n_elements(tensor): if isinstance(tensor, cupy.ndarray) or isinstance(tensor, numpy.ndarray): return tensor.size if torch_available(): if isinstance(tensor, torch.Tensor): return torch.numel(tensor) raise ValueError( "Unsupported tensor type. Got: {}. Supported " "GPU tensor types are: torch.Tensor, "
14
112
get_tensor_n_elements
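For reference, the numpy branch of the helper above in use (the cupy and torch branches behave the same way, returning the total element count):

import numpy as np

# .size is the total number of elements, so a 2x3 array reports 6
assert get_tensor_n_elements(np.zeros((2, 3))) == 6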
497
0
1
5
examples/compose/plot_transformed_target.py
261,655
FEA add PredictionErrorDisplay (#18020) Co-authored-by: jeremie du boisberranger <jeremiedbb@yahoo.fr> Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> Co-authored-by: Christian Lorentzen <lorentzen.ch@gmail.com>
scikit-learn
13
Python
243
plot_transformed_target.py
def compute_score(y_true, y_pred): return { "R2": f"{r2_score(y_true, y_pred):.3f}", "MedAE": f"{median_absolute_error(y_true, y_pred):.3f}", } # %% from sklearn.compose import TransformedTargetRegressor from sklearn.linear_model import RidgeCV from sklearn.metrics import PredictionErrorDisplay f, (ax0, ax1) = plt.subplots(1, 2, sharey=True) ridge_cv = RidgeCV().fit(X_train, y_train) y_pred_ridge = ridge_cv.predict(X_test) ridge_cv_with_trans_target = TransformedTargetRegressor( regressor=RidgeCV(), func=np.log1p, inverse_func=np.expm1 ).fit(X_train, y_train) y_pred_ridge_with_trans_target = ridge_cv_with_trans_target.predict(X_test) PredictionErrorDisplay.from_predictions( y_test, y_pred_ridge, kind="actual_vs_predicted", ax=ax0, scatter_kwargs={"alpha": 0.5}, ) PredictionErrorDisplay.from_predictions( y_test, y_pred_ridge_with_trans_target, kind="actual_vs_predicted", ax=ax1, scatter_kwargs={"alpha": 0.5}, ) # Add the score in the legend of each axis for ax, y_pred in zip([ax0, ax1], [y_pred_ridge, y_pred_ridge_with_trans_target]): for name, score in compute_score(y_test, y_pred).items(): ax.plot([], [], " ", label=f"{name}={score}") ax.legend(loc="upper left") ax0.set_title("Ridge regression \n without target transformation") ax1.set_title("Ridge regression \n with target transformation") f.suptitle("Synthetic data", y=1.05) plt.tight_layout() # %% # Real-world data set ##################### # # In a similar manner, the Ames housing data set is used to show the impact # of transforming the targets before learning a model. In this example, the # target to be predicted is the selling price of each house. from sklearn.datasets import fetch_openml from sklearn.preprocessing import quantile_transform ames = fetch_openml(name="house_prices", as_frame=True, parser="pandas") # Keep only numeric columns X = ames.data.select_dtypes(np.number) # Remove columns with NaN or Inf values X = X.drop(columns=["LotFrontage", "GarageYrBlt", "MasVnrArea"]) # Let the price be in k$ y = ames.target / 1000 y_trans = quantile_transform( y.to_frame(), n_quantiles=900, output_distribution="normal", copy=True ).squeeze() # %% # A :class:`~sklearn.preprocessing.QuantileTransformer` is used to normalize # the target distribution before applying a # :class:`~sklearn.linear_model.RidgeCV` model. f, (ax0, ax1) = plt.subplots(1, 2) ax0.hist(y, bins=100, density=True) ax0.set_ylabel("Probability") ax0.set_xlabel("Target") ax0.set_title("Target distribution") ax1.hist(y_trans, bins=100, density=True) ax1.set_ylabel("Probability") ax1.set_xlabel("Target") ax1.set_title("Transformed target distribution") f.suptitle("Ames housing data: selling price", y=1.05) plt.tight_layout() # %% X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1) # %% # The effect of the transformer is weaker than on the synthetic data. However, # the transformation results in an increase in :math:`R^2` and large decrease # of the MedAE. The residual plot (predicted target - true target vs predicted # target) without target transformation takes on a curved, 'reverse smile' # shape due to residual values that vary depending on the value of predicted # target. With target transformation, the shape is more linear indicating # better model fit. 
from sklearn.preprocessing import QuantileTransformer f, (ax0, ax1) = plt.subplots(2, 2, sharey="row", figsize=(6.5, 8)) ridge_cv = RidgeCV().fit(X_train, y_train) y_pred_ridge = ridge_cv.predict(X_test) ridge_cv_with_trans_target = TransformedTargetRegressor( regressor=RidgeCV(), transformer=QuantileTransformer(n_quantiles=900, output_distribution="normal"), ).fit(X_train, y_train) y_pred_ridge_with_trans_target = ridge_cv_with_trans_target.predict(X_test) # plot the actual vs predicted values PredictionErrorDisplay.from_predictions( y_test, y_pred_ridge, kind="actual_vs_predicted", ax=ax0[0], scatter_kwargs={"alpha": 0.5}, ) PredictionErrorDisplay.from_predictions( y_test, y_pred_ridge_with_trans_target, kind="actual_vs_predicted", ax=ax0[1], scatter_kwargs={"alpha": 0.5}, ) # Add the score in the legend of each axis for ax, y_pred in zip([ax0[0], ax0[1]], [y_pred_ridge, y_pred_ridge_with_trans_target]): for name, score in compute_score(y_test, y_pred).items(): ax.plot([], [], " ", label=f"{name}={score}") ax.legend(loc="upper left") ax0[0].set_title("Ridge regression \n without target transformation") ax0[1].set_title("Ridge regression \n with target transformation") # plot the residuals vs the predicted values PredictionErrorDisplay.from_predictions( y_test, y_pred_ridge, kind="residual_vs_predicted", ax=ax1[0], scatter_kwargs={"alpha": 0.5}, ) PredictionErrorDisplay.from_predictions( y_test, y_pred_ridge_with_trans_target, kind="residual_vs_predicted", ax=ax1[1], scatter_kwargs={"alpha": 0.5}, ) ax1[0].set_title("Ridge regression \n without target transformation") ax1[1].set_title("Ridge regression \n with target transformation") f.suptitle("Ames housing data: selling price", y=1.05) plt.tight_layout() plt.show()
40d7d880eddaf3a9a5e37ba2a8206caf22744926
20
https://github.com/scikit-learn/scikit-learn.git
555
def compute_score(y_true, y_pred): return { "R2": f"{r2_score(y_true, y_pred):.3f}", "MedAE": f"{median_absolute_error(y_true, y_pred):.3f}", } # %% from sklearn.compose import TransformedTargetRegressor from sklearn.linear_model import RidgeCV from sklearn.metrics import PredictionErrorDisplay f, (ax0, ax1) = plt.subplots(1, 2, sharey=True) ridge_cv = RidgeCV().fit(X_train, y_train) y_pred_ridge = ridge_cv.predict(X_test) ridge_cv_with_trans_target = TransformedTargetRegressor( regressor=RidgeCV(), func=np.log1p, inverse_func=np.expm1 ).fit(X_train, y_train) y_pred_ridge_with_trans_target = ridge_cv_with_trans_target.predict(X_test) PredictionErrorDisplay.from_predictions( y_test, y_pred_ridge, kind="actual_vs_predicted", ax=ax0, scatter_kwargs={"alpha": 0.5}, ) PredictionErrorDisplay.from_predictions( y_test, y_pred_ridge_with_trans_target, kind="actual_vs_predicted", ax=ax1, scatter_kwargs={"alpha": 0.5}, ) # Add the score in the legend of each axis for ax, y_pred in zip([ax0, ax1], [y_pred_ridge, y_pred_ridge_with_trans_target]): for name, score in compute_score(y_test, y_pred).items(): ax.plot([], [], " ", label=f"{name}={score}") ax.legend(loc="upper left") ax0.set_title("Ridge regression \n without target transformation") ax1.set_title("Ridge regression \n with target transformation") f.suptitle("Synthetic data", y=1.05) plt.tight_layout() # %% # Real-world data set ##################### # # In a similar manner, the Ames housing data set is used to show the impact # of transforming the targets before learning a model. In this example, the # target to be predicted is the selling price of each house. from sklearn.datasets import fetch_openml from sklearn.preprocessing import quantile_transform ames = fetch_openml(name="house_prices", as_frame=True, parser="pandas") # Keep only numeric columns X = ames.data.select_dtypes(np.number) # Remove columns with NaN or Inf values X = X.drop(columns=["LotFrontage", "GarageYrBlt", "MasVnrArea"]) # Let the price be in k$ y = ames.target / 1000 y_trans = quantile_transform( y.to_frame(), n_quantiles=900, output_distribution="normal", copy=True ).squeeze() # %% # A :class:`~sklearn.preprocessing.QuantileTransformer` is used to normalize # the target distribution before applying a # :class:`~sklearn.linear_model.RidgeCV` model. f, (ax0, ax1) = plt.subplots(1, 2) ax0.hist(y, bins=100, density=True) ax0.set_ylabel("Probability") ax0.set_xlabel("Target") ax0.set_title("Target distribution") ax1.hist(y_trans, bins=100, density=True) ax1.set_ylabel("Probability") ax1.set_xlabel("Targ
81
1,312
compute_score
52
0
7
14
datasets/xtreme/xtreme.py
104,703
Support streaming xtreme dataset for PAWS-X config (#4132) * Support streaming xtreme dataset for PAWS-X config * Align tasks in dataset card
datasets
18
Python
43
xtreme.py
def generate_examples(config=None, filepath=None, filename=None): lang = config.name.split(".")[1] for path, file in filepath: if f"/{lang}/" in path and path.endswith(filename): lines = (line.decode("utf-8") for line in file) data = csv.reader(lines, delimiter="\t") next(data) # skip header for id_, row in enumerate(data): if len(row) == 4: yield id_, { "sentence1": row[1], "sentence2": row[2], "label": row[3], }
8caed0c1e7b9658f08c10c8b90eb203b2cedc8e4
122
https://github.com/huggingface/datasets.git
283
def generate_examples(config=None, filepath=None, filename=None): lang = config.name.split(".")[1] for path, file in filepath: if f"/{lang}/" in path and path.endswith(filename): lines = (line.decode("utf-8") for line in file) data = csv.reader(lines, delimiter="\t") next(data) # skip header for id_, row in enumerate(data): if len(row) == 4: yield id_, { "sentence1": row[1], "sentence2": row[2], "label": row[3], }
22
201
generate_examples
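A self-contained way to exercise the generator above, using an in-memory (path, file) pair in place of the streamed archive; the path layout and the config stand-in are assumptions for the example, not the loading script's real objects:

import io

class _FakeConfig:
    name = "PAWS-X.de"  # the generator takes the language code from the part after the dot

fake_filepath = [
    ("x-final/de/translated_train.tsv",
     io.BytesIO(b"id\tsentence1\tsentence2\tlabel\n0\tHallo\tWelt\t1\n")),
]
list(generate_examples(config=_FakeConfig(), filepath=fake_filepath, filename="translated_train.tsv"))
# -> [(0, {'sentence1': 'Hallo', 'sentence2': 'Welt', 'label': '1'})]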
34
0
6
6
src/transformers/models/donut/feature_extraction_donut.py
32,968
Add Donut (#18488) * First draft * Improve script * Update script * Make conversion work * Add final_layer_norm attribute to Swin's config * Add DonutProcessor * Convert more models * Improve feature extractor and convert base models * Fix bug * Improve integration tests * Improve integration tests and add model to README * Add doc test * Add feature extractor to docs * Fix integration tests * Remove register_buffer * Fix toctree and add missing attribute * Add DonutSwin * Make conversion script work * Improve conversion script * Address comment * Fix bug * Fix another bug * Remove deprecated method from docs * Make Swin and Swinv2 untouched * Fix code examples * Fix processor * Update model_type to donut-swin * Add feature extractor tests, add token2json method, improve feature extractor * Fix failing tests, remove integration test * Add do_thumbnail for consistency * Improve code examples * Add code example for document parsing * Add DonutSwin to MODEL_NAMES_MAPPING * Add model to appropriate place in toctree * Update namespace to appropriate organization Co-authored-by: Niels Rogge <nielsrogge@Nielss-MacBook-Pro.local>
transformers
12
Python
24
feature_extraction_donut.py
def rotate_image(self, image, size): if not isinstance(image, Image.Image): image = self.to_pil_image(image) if (size[1] > size[0] and image.width > image.height) or (size[1] < size[0] and image.width < image.height): image = self.rotate(image, angle=-90, expand=True) return image
2ab790e82d0759b667cd848a4d49e6ad65e15d59
88
https://github.com/huggingface/transformers.git
76
def rotate_image(self, image, size): if not isin
12
131
rotate_image
40
1
1
8
keras/losses_test.py
274,593
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
12
Python
33
losses_test.py
def test_ragged_tensors_3d(self): # shape [2, 1, None] y_true = tf.ragged.constant([[[1, 1]], [[0]]]) # shape [2, 1, None, 2] y_pred = tf.ragged.constant( [[[[0.1, 0.9], [0.1, 0.9]]], [[[0.9, 0.1]]]] ) cce_obj = losses.SparseCategoricalCrossentropy() loss = cce_obj(y_true, y_pred) self.assertAlmostEqual(self.evaluate(loss), 0.1054, 3) @test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
109
https://github.com/keras-team/keras.git
105
def test_ragged_tensors_3d(self): # shape [2, 1, None] y_true = tf.ragged.constant([[[1, 1]], [[0]]]) # shape [2, 1, None, 2] y_pred = tf.ragged.constant( [[[[0.1, 0.9], [0.1, 0.9]]],
17
174
test_ragged_tensors_3d
407
0
7
92
python/ccxt/deribit.py
15,082
1.66.37 [ci skip]
ccxt
19
Python
220
deribit.py
def fetch_markets(self, params={}): currenciesResponse = self.publicGetGetCurrencies(params) # # { # jsonrpc: '2.0', # result: [ # { # withdrawal_priorities: [ # {value: 0.15, name: 'very_low'}, # {value: 1.5, name: 'very_high'}, # ], # withdrawal_fee: 0.0005, # min_withdrawal_fee: 0.0005, # min_confirmations: 1, # fee_precision: 4, # currency_long: 'Bitcoin', # currency: 'BTC', # coin_type: 'BITCOIN' # } # ], # usIn: 1583761588590479, # usOut: 1583761588590544, # usDiff: 65, # testnet: False # } # currenciesResult = self.safe_value(currenciesResponse, 'result', []) result = [] for i in range(0, len(currenciesResult)): currencyId = self.safe_string(currenciesResult[i], 'currency') request = { 'currency': currencyId, } instrumentsResponse = self.publicGetGetInstruments(self.extend(request, params)) # # { # jsonrpc: '2.0', # result: [ # { # tick_size: 0.0005, # taker_commission: 0.0004, # strike: 300, # settlement_period: 'week', # quote_currency: 'USD', # option_type: 'call', # min_trade_amount: 1, # maker_commission: 0.0004, # kind: 'option', # is_active: True, # instrument_name: 'ETH-13MAR20-300-C', # expiration_timestamp: 1584086400000, # creation_timestamp: 1582790403000, # contract_size: 1, # base_currency: 'ETH' # }, # ], # usIn: 1583761889500586, # usOut: 1583761889505066, # usDiff: 4480, # testnet: False # } # instrumentsResult = self.safe_value(instrumentsResponse, 'result', []) for k in range(0, len(instrumentsResult)): market = instrumentsResult[k] id = self.safe_string(market, 'instrument_name') baseId = self.safe_string(market, 'base_currency') quoteId = self.safe_string(market, 'quote_currency') settleId = quoteId base = self.safe_currency_code(baseId) quote = self.safe_currency_code(quoteId) settle = self.safe_currency_code(settleId) kind = self.safe_string(market, 'kind') settlementPeriod = self.safe_value(market, 'settlement_period') swap = (settlementPeriod == 'perpetual') future = not swap and (kind == 'future') option = (kind == 'option') symbol = quote + '/' + base + ':' + settle expiry = self.safe_integer(market, 'expiration_timestamp') strike = None optionType = None type = 'swap' if option or future: symbol = symbol + '-' + self.yymmdd(expiry, '') if option: type = 'option' strike = self.safe_number(market, 'strike') optionType = self.safe_string(market, 'option_type') symbol = symbol + ':' + self.number_to_string(strike) + ':' + optionType else: type = 'future' minTradeAmount = self.safe_number(market, 'min_trade_amount') tickSize = self.safe_number(market, 'tick_size') result.append({ 'id': id, 'symbol': symbol, 'base': base, 'quote': quote, 'settle': settle, 'baseId': baseId, 'quoteId': quoteId, 'settleId': settleId, 'type': type, 'spot': False, 'margin': False, 'swap': swap, 'future': future, 'option': option, 'contract': True, 'linear': False, 'inverse': True, 'taker': self.safe_number(market, 'taker_commission'), 'maker': self.safe_number(market, 'maker_commission'), 'contractSize': self.safe_number(market, 'contract_size'), 'active': self.safe_value(market, 'is_active'), 'expiry': expiry, 'expiryDatetime': self.iso8601(expiry), 'strike': strike, 'optionType': optionType, 'precision': { 'amount': minTradeAmount, 'price': tickSize, }, 'limits': { 'leverage': { 'min': None, 'max': None, }, 'amount': { 'min': minTradeAmount, 'max': None, }, 'price': { 'min': tickSize, 'max': None, }, 'cost': { 'min': None, 'max': None, }, }, 'info': market, }) return result
09b439be4c7b8d1ef31ad1cbb3688f9ac48dcdcd
549
https://github.com/ccxt/ccxt.git
3,131
def fetch_markets(self, params={}): currenciesResponse = self.publicGetGetCurrencies(params) # # { # jsonrpc: '2.0', # result: [ # { # withdrawal_priorities: [ # {value: 0.15, name: 'very_low'}, # {value: 1.5, name: 'very_high'}, # ], # withdrawal_fee: 0.0005, # min_withdrawal_fee: 0.0005, # min_confirmations: 1, # fee_precision: 4, # currency_long: 'Bitcoin', # currency: 'BTC', # coin_type: 'BITCOIN' # } # ], # usIn: 1583761588590479, # usOut: 1583761588590544, # usDiff: 65, # testnet: False # } # currenciesResult = self.safe_value(currenciesResponse, 'result', []) result = [] for i in range(0, len(currenciesResult)): currencyId = self.safe_string(currenciesResult[i], 'currency') request = { 'currency': currencyId, } instrumentsResponse = self.publicGetGetInstruments(self.extend(request, params)) # # { # jsonrpc: '2.0', # result: [ # { # tick_size: 0.0005, # taker_commission: 0.0004, # strike: 300, # settlement_period: 'week', # quote_currency: 'USD', #
46
993
fetch_markets
19
0
4
6
src/sentry/search/events/builder.py
97,723
feat(mep): Validate orderby for mep (#32943) - This validates the orderby for mep queries to check that we aren't ordering by something that cannot be ordered
sentry
12
Python
18
builder.py
def validate_orderby_clause(self) -> None: for orderby in self.orderby: if isinstance(orderby.exp, Column) and orderby.exp.subscriptable == "tags": raise IncompatibleMetricsQuery("Can't orderby tags")
f2e775086eb653cf8c4680a2bdd90ee707e30ae0
38
https://github.com/getsentry/sentry.git
59
def validate_orderby_clause(self) -> None: for orderby in self.orderby: if isinstance(orderby.exp, Column) and orderby.exp.subscriptable == "tags": raise IncompatibleMetricsQuery("Can't orderby tags")
8
65
validate_orderby_clause
23
1
2
4
ludwig/utils/data_utils.py
6,764
Use pandas instead of dask to read excel (#2005) https://github.com/ludwig-ai/ludwig/pull/2005
ludwig
10
Python
23
data_utils.py
def read_spss(data_fp, df_lib): # https://github.com/dask/dask/issues/9055 if df_lib.__name__ == DASK_MODULE_NAME: logger.warning("Falling back to pd.read_spss() since dask backend does not support it") return pd.read_spss(data_fp) @spread
5c3b4475a02aaa340a6e11d4302d29d4b7eccedf
@spread
27
https://github.com/ludwig-ai/ludwig.git
37
def read_spss(data_fp, df_lib): # https://github.com/dask/dask/issues/9055 if df_lib.__name__ == DASK_MODULE_NAME: logger.warning("Falling back to pd.read_spss() since dask backend does not support it") return pd.read_spss(data_fp) @spread
9
50
read_spss
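The commit title refers to Excel, and the `read_spss` snippet above shows the pattern being applied: when the selected dataframe backend is dask, fall back to pandas for readers dask does not provide. Purely as an illustration of that same pattern (not Ludwig's actual `read_excel`, and the value of `DASK_MODULE_NAME` is assumed here):

import logging
import pandas as pd

logger = logging.getLogger(__name__)
DASK_MODULE_NAME = "dask.dataframe"  # assumed to match the constant used above

def read_excel(data_fp, df_lib):
    # dask has no read_excel, so fall back to pandas in that case
    if df_lib.__name__ == DASK_MODULE_NAME:
        logger.warning("Falling back to pd.read_excel() since dask backend does not support it")
        return pd.read_excel(data_fp)
    return df_lib.read_excel(data_fp)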
14
0
3
6
python3.10.4/Lib/ctypes/test/test_loading.py
222,055
add python 3.10.4 for windows
XX-Net
11
Python
14
test_loading.py
def test_find(self): for name in ("c", "m"): lib = find_library(name) if lib: cdll.LoadLibrary(lib) CDLL(lib)
8198943edd73a363c266633e1aa5b2a9e9c9f526
33
https://github.com/XX-net/XX-Net.git
72
def test_find(self): for name
8
57
test_find
34
0
1
13
tests/sentry/incidents/endpoints/test_project_alert_rule_index.py
100,247
ref(tests): Remove `get_valid_response()` (#34822)
sentry
12
Python
30
test_project_alert_rule_index.py
def test_simple_crash_rate_alerts_for_users(self): self.valid_alert_rule.update( { "aggregate": "percentage(users_crashed, users) AS _crash_rate_alert_aggregate", } ) with self.feature(["organizations:incidents", "organizations:performance-view"]): resp = self.get_success_response( self.organization.slug, self.project.slug, status_code=201, **self.valid_alert_rule ) assert "id" in resp.data alert_rule = AlertRule.objects.get(id=resp.data["id"]) assert resp.data == serialize(alert_rule, self.user)
096b5511e244eecd8799b2a0324655207ce8985e
93
https://github.com/getsentry/sentry.git
149
def test_simple_crash_rate_alerts_for_users(self): self.valid_alert_rule.update( { "aggregate": "percentage(users_crashed, users) AS _crash_rate_alert_aggregate", } ) with self.feature(["organizations:incidents", "organizations:performance-view"]): resp = self.get_success_response( self.organization.slug, self.project.slug, status_code=201, **self.valid_alert_rule ) assert "id" in resp.data
19
154
test_simple_crash_rate_alerts_for_users
59
0
1
34
tests/sentry/utils/test_committers.py
91,499
ref: replace self.assertRaises with pytest.raises (#35685) * add flake8 plugin to detect assertRaises * ref: replace self.assertRaises with pytest.raises * non-sed fixes
sentry
17
Python
45
test_committers.py
def test_no_commits(self): event = self.store_event( data={ "timestamp": iso_format(before_now(seconds=1)), "message": "Kaboom!", "stacktrace": { "frames": [ { "function": "handle_set_commits", "abs_path": "/usr/src/sentry/src/sentry/tasks.py", "module": "sentry.tasks", "in_app": True, "lineno": 30, "filename": "sentry/tasks.py", }, { "function": "set_commits", "abs_path": "/usr/src/sentry/src/sentry/models/release.py", "module": "sentry.models.release", "in_app": True, "lineno": 39, "filename": "sentry/models/release.py", }, ] }, "tags": {"sentry:release": self.release.version}, }, project_id=self.project.id, ) GroupRelease.objects.create( group_id=event.group.id, project_id=self.project.id, release_id=self.release.id ) with pytest.raises(Commit.DoesNotExist): get_serialized_event_file_committers(self.project, event)
284e980df0018f8baee659999268bdd4c7d08255
164
https://github.com/getsentry/sentry.git
677
def test_no_commits(self): event = self.store_event( data={ "timestamp": iso_format(before_now(seconds=1)), "message": "Kaboom!", "stacktrace": { "frames": [ { "function": "handle_set_commits", "abs_path": "/usr/src/sentry/src/sentry/tasks.py", "module": "sentry.tasks", "in_app": True, "lineno": 30, "filename": "sentry/tasks.py", }, { "function": "set_commits", "abs_path": "/usr/src/sentry/src/sentry/models/release.py", "module": "sentry.models.release", "in_a
24
288
test_no_commits
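The refactor named in this record's commit message is mechanical; stripped of the test's own fixtures, the shape of the change is roughly this (illustrative stand-in exception and function):

import pytest

class BoomError(Exception):
    pass

def boom():
    raise BoomError("kaboom")

# unittest style, what the commit removes:
#     with self.assertRaises(BoomError):
#         boom()
# pytest style, what the commit (and the test above) uses instead:
with pytest.raises(BoomError):
    boom()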
52
1
3
6
ivy_tests/test_core/test_random.py
213,412
renamed dtype_str arg to dtype for all methods.
ivy
10
Python
43
test_random.py
def test_seed(seed_val, dtype, tensor_fn, dev_str, call): # smoke test ivy.seed(seed_val) # compilation test if call in [helpers.torch_call]: # pytorch scripting does not support functions with None return return if not ivy.wrapped_mode(): helpers.assert_compilable(ivy.seed) # shuffle @pytest.mark.parametrize( "x", [[1, 2, 3], [[1., 4.], [2., 5.], [3., 6.]]]) @pytest.mark.parametrize( "dtype", ['float32']) @pytest.mark.parametrize( "tensor_fn", [ivy.array, helpers.var_fn])
562846b6dce660054181cae7b05bbadd75489795
@pytest.mark.parametrize( "x", [[1, 2, 3], [[1., 4.], [2., 5.], [3., 6.]]]) @pytest.mark.parametrize( "dtype", ['float32']) @pytest.mark.parametrize( "tensor_fn", [ivy.array, helpers.var_fn])
45
https://github.com/unifyai/ivy.git
92
def test_seed(seed_val, dtype, tensor_fn, dev_str, call): # smoke test ivy.seed(seed_val) # compilation test if call in [helpers.torch_call]: # pytorch scripting does not support functions with None return return if not ivy.wrapped_mode(): helpers.assert_compilable(ivy.seed) # shuffl
17
179
test_seed
7
0
1
2
src/transformers/testing_utils.py
37,498
Update all require decorators to use skipUnless when possible (#16999)
transformers
10
Python
7
testing_utils.py
def require_tf(test_case): return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
57e6464ac9a31156f1c93e59107323e6ec01309e
20
https://github.com/huggingface/transformers.git
13
def require_tf(test_case): return unittest.skipUnless(is_tf_available()
5
37
require_tf
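The decorator above already shows the target form. For contrast, a self-contained sketch of the older conditional-skip shape versus the `unittest.skipUnless` shape the commit standardises on, using a made-up `is_foo_available` check:

import unittest

def is_foo_available() -> bool:
    return False  # stand-in for a real availability check such as is_tf_available()

# Older shape: branch inside the decorator and apply unittest.skip manually.
def require_foo_old(test_case):
    if not is_foo_available():
        return unittest.skip("test requires foo")(test_case)
    return test_case

# Shape adopted by the commit: let unittest.skipUnless make the decision.
def require_foo(test_case):
    return unittest.skipUnless(is_foo_available(), "test requires foo")(test_case)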
446
0
1
147
python/ccxt/async_support/vcc.py
17,261
1.71.83 [ci skip]
ccxt
16
Python
259
vcc.py
def describe(self): return self.deep_extend(super(vcc, self).describe(), { 'id': 'vcc', 'name': 'VCC Exchange', 'countries': ['VN'], # Vietnam 'rateLimit': 1000, 'version': 'v3', 'has': { 'CORS': None, 'spot': True, 'margin': False, 'swap': False, 'future': False, 'option': False, 'addMargin': False, 'cancelAllOrders': True, 'cancelOrder': True, 'createOrder': True, 'createReduceOnlyOrder': False, 'editOrder': None, 'fetchBalance': True, 'fetchBorrowRate': False, 'fetchBorrowRateHistories': False, 'fetchBorrowRateHistory': False, 'fetchBorrowRates': False, 'fetchBorrowRatesPerSymbol': False, 'fetchClosedOrders': True, 'fetchCurrencies': True, 'fetchDepositAddress': True, 'fetchDeposits': True, 'fetchFundingHistory': False, 'fetchFundingRate': False, 'fetchFundingRateHistory': False, 'fetchFundingRates': False, 'fetchIndexOHLCV': False, 'fetchIsolatedPositions': False, 'fetchLeverage': False, 'fetchMarkets': True, 'fetchMarkOHLCV': False, 'fetchMyTrades': True, 'fetchOHLCV': True, 'fetchOpenOrders': True, 'fetchOrder': True, 'fetchOrderBook': True, 'fetchOrders': None, 'fetchPosition': False, 'fetchPositions': False, 'fetchPositionsRisk': False, 'fetchPremiumIndexOHLCV': False, 'fetchTicker': 'emulated', 'fetchTickers': True, 'fetchTrades': True, 'fetchTradingFee': True, 'fetchTradingFees': None, 'fetchTransactions': True, 'fetchWithdrawals': True, 'reduceMargin': False, 'setLeverage': False, 'setMarginMode': False, 'setPositionMode': False, }, 'timeframes': { '1m': '60000', '5m': '300000', '15m': '900000', '30m': '1800000', '1h': '3600000', '2h': '7200000', '4h': '14400000', '6h': '21600000', '12h': '43200000', '1d': '86400000', '1w': '604800000', }, 'urls': { 'logo': 'https://user-images.githubusercontent.com/1294454/100545356-8427f500-326c-11eb-9539-7d338242d61b.jpg', 'api': { 'public': 'https://api.vcc.exchange', 'private': 'https://api.vcc.exchange', }, 'www': 'https://vcc.exchange', 'doc': [ 'https://vcc.exchange/api', ], 'fees': 'https://support.vcc.exchange/hc/en-us/articles/360016401754', 'referral': 'https://vcc.exchange?ref=l4xhrH', }, 'api': { 'public': { 'get': [ 'summary', 'exchange_info', 'assets', # Available Currencies 'ticker', # Ticker list for all symbols 'trades/{market_pair}', # Recent trades 'orderbook/{market_pair}', # Orderbook 'chart/bars', # Candles 'tick_sizes', ], }, 'private': { 'get': [ 'user', 'balance', # Get trading balance 'orders/{order_id}', # Get a single order by order_id 'orders/open', # Get open orders 'orders', # Get closed orders 'orders/trades', # Get trades history 'deposit-address', # Generate or get deposit address 'transactions', # Get deposit/withdrawal history ], 'post': [ 'orders', # Create new order ], 'put': [ 'orders/{order_id}/cancel', # Cancel order 'orders/cancel-by-type', 'orders/cancel-all', ], }, }, 'fees': { 'trading': { 'tierBased': False, 'percentage': True, 'maker': self.parse_number('0.002'), 'taker': self.parse_number('0.002'), }, }, 'exceptions': { 'exact': {}, 'broad': { 'limit may not be greater than': BadRequest, # {"message":"The given data was invalid.","errors":{"limit":["The limit may not be greater than 1000."]}} 'Insufficient balance': InsufficientFunds, # {"message":"Insufficient balance."} 'Unauthenticated': AuthenticationError, # {"message":"Unauthenticated."} # wrong api key 'signature is invalid': AuthenticationError, # {"message":"The given data was invalid.","errors":{"signature":["HMAC signature is invalid"]}} 'Timeout': RequestTimeout, # {"code":504,"message":"Gateway Timeout","description":""} 'Too many 
requests': RateLimitExceeded, # {"code":429,"message":"Too many requests","description":"Too many requests"} 'quantity field is required': InvalidOrder, # {"message":"The given data was invalid.","errors":{"quantity":["The quantity field is required when type is market."]}} 'price field is required': InvalidOrder, # {"message":"The given data was invalid.","errors":{"price":["The price field is required when type is limit."]}} 'error_security_level': PermissionDenied, # {"message":"error_security_level"} 'pair is invalid': BadSymbol, # {"message":"The given data was invalid.","errors":{"coin":["Trading pair is invalid","Trading pair is offline"]}} # {"message":"The given data was invalid.","errors":{"type":["The selected type is invalid."]}} # {"message":"The given data was invalid.","errors":{"trade_type":["The selected trade type is invalid."]}} 'type is invalid': InvalidOrder, 'Data not found': OrderNotFound, # {"message":"Data not found"} }, }, })
ff158ebe7e1ed14772139737d13bb5edfd6d9430
523
https://github.com/ccxt/ccxt.git
2,884
def describe(self): return self.deep_extend(super(vcc, self).describe(), { 'id': 'vcc', 'name': 'VCC Exchange', 'countries': ['VN'], # Vietnam 'rateLimit': 1000, 'version': 'v3', 'has': { 'CORS': None, 'spot': True, 'margin': False, 'swap': False, 'future': False, 'option': False, 'addMargin': False, 'cancelAllOrders': True, 'cancelOrder': True, 'createOrder': True, 'createReduceOnlyOrder': False, 'editOrder': None, 'fetchBalance': True, 'fetchBorrowRate': False, 'fetchBorrowRateHistories': False, 'fetchBorrowRateHistory': False, 'fetchBorrowRates': False, 'fetchBorrowRatesPerSymbol': False, 'fetchClosedOrders': True, 'fetchCurrencies': True, 'fetchDepositAddress': True, 'fetchDeposits': True, 'fetchFundingHistory': False, 'fetchFundingRate': False, 'fetchFundingRateHistory': False, 'fetchFundingRates': False, 'fetchIndexOHLCV': False, 'fetchIsolatedPositions': False, 'fetchLeverage': False, 'fetchMarkets': True, 'fetchMarkOHLCV': False, 'fetchMyTrades': True, 'fetchOHLCV': True, 'fetchOpenOrders': True, 'fetchOrder': True, 'fetchOrderBook': True, 'fetchOrders': None, 'fetchPosition': False, 'fetchPositions': False, 'fetchPositionsRisk': False, 'fetchPremiumIndexOHLCV': False, 'fetchTicker': 'emulated', 'fetchTickers': True, 'fetchTrades': True, 'fetchTradingFee': True, 'fetchTradingFees': None, 'fetchTransactions': True, 'fetchWithdrawals': True, 'reduceMargin': False, 'setLeverage': False, 'setMarginMode': False, 'setPositionMode': False, }, 'timeframes': { '1m': '60000', '5m': '300000', '15m': '900000', '30m': '1800000', '1h': '3600000', '2h': '7200000', '4h': '14400000', '6h': '21600000', '12h': '43200000', '1d': '86400000', '1w': '604800000', }, 'urls': { 'logo': 'https://user-images.githubusercontent.com/1294454/100545356-8427f500-326c-11eb-9539-7d338242d61b.jpg', 'api': { 'public': 'https://api.vcc.exchange', 'private': 'https://api.vcc.exchange', }, 'www': 'https://vcc.exchange', 'doc': [ 'https://vcc.exchange/api', ], 'fees': 'https://support.vcc.exchange/hc/en-us/articles/360016401754', 'referral': 'https://vcc.exchange?ref=l4xhrH', }, 'api': { 'public': { 'get': [ 'summary', 'exchange_info', 'assets', # Available Currencies 'ticker', # Ticker list for all symbols 'trades/{market_pair}', # Recent trades 'orderbook/{market_pair}', # Orderbook 'chart/bars', # Candles 'tick_sizes', ], }, 'private': { 'get': [ 'user', 'balance', # Get trading balance 'orders/{order_id}', # Get a single order by order_id 'orders/open', # Get open orders 'orders', # Get closed orders 'orders/trades', # Get trades history 'deposit-address', # Generate or get deposit address 'transactions', # Get deposit/withdrawal history ], 'post': [ 'orders', # Create new order ], 'put': [ 'orders/{order_id}/cancel', # Cancel order 'orders/cancel-by-type', 'orders/cancel-all', ], }, }, 'fees': { 'trading': { 'tierBased': False, 'percentage': True, 'maker': self.parse_number('0.002'), 'taker': self.parse_number('0.002'), }, }, 'exceptions': { 'exact': {}, 'broad': { 'limit may not be greater than': BadRequest, # {"message":"The given data was invalid.","errors":{"limit":["The limit may not be greater than 1000."]}} 'Insufficient balance': InsufficientFunds, # {"message":"Insufficient balance."} 'Unauthenticated': AuthenticationError, # {"message":"Unauthenticated."} # wrong api key 'signature is invalid': AuthenticationError, # {"message":"The given data was invalid.","errors":{"signature":["HMAC signature is invalid"]}} 'Timeout': RequestTimeout, # {"code":504,"message":"Gateway Timeout","description":""} 'Too many 
requests': RateLimitExceeded, # {"code":429,"message":"Too many requests","description":"Too many requests"} 'quantity field is required': InvalidOrder, # {"message":"The given data was invalid.","errors":{"quantity":["The quantity field is required when type is market."]}} 'price field is required': InvalidOrder, # {"message":"The given data was invalid.","errors":{"price":["The price field is required when type is limit."]}} 'error_security_level': PermissionDenied, # {"message":"error_security_level"} 'pair is invalid': BadSymbol, # {"message":"The given data was invalid.","errors":{"coin":["Trading pair is invalid","Trading pa
15
1,000
describe
8
0
1
2
src/documents/tests/test_matchables.py
318,888
Reduces number of warnings from testing from 165 to 128. In doing so, fixes a few minor things in the decrypt and export commands
paperless-ngx
9
Python
8
test_matchables.py
def test_tach_invalid_regex(self): self._test_matching("[", "MATCH_REGEX", [], ["Don't match this"])
85b210ebf61d4525cae3311eaae91012c8986cf7
20
https://github.com/paperless-ngx/paperless-ngx.git
14
def test_tach_invalid_regex(self): self._test_matching("[", "MATCH_REGEX", [], ["Don't match this"])
3
36
test_tach_invalid_regex
7
0
1
3
.venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py
63,231
upd; format
transferlearning
9
Python
7
__init__.py
def ensure_directory(path): dirname = os.path.dirname(path) py31compat.makedirs(dirname, exist_ok=True)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
26
https://github.com/jindongwang/transferlearning.git
16
def ensure_directory(path): dirname = os.path.dirname(path) py31compat.makedirs(dirname, exist_ok=True)
7
44
ensure_directory
13
0
1
5
tests/models/groupvit/test_modeling_groupvit.py
31,764
Adding GroupViT Models (#17313) * add group vit and fixed test (except slow) * passing slow test * addressed some comments * fixed test * fixed style * fixed copy * fixed segmentation output * fixed test * fixed relative path * fixed copy * add ignore non auto configured * fixed docstring, add doc * fixed copies * Apply suggestions from code review merge suggestions Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * resolve comment, renaming model * delete unused attr * use fix copies * resolve comments * fixed attn * remove unused vars * refactor tests * resolve final comments * add demo notebook * fixed inconsitent default * Apply suggestions from code review Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * rename stage->stages * Create single GroupViTEncoderLayer class * Update conversion script * Simplify conversion script * Remove cross-attention class in favor of GroupViTAttention * Convert other model as well, add processor to conversion script * addressing final comment * fixed args * Update src/transformers/models/groupvit/modeling_groupvit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Niels Rogge <nielsrogge@Nielss-MacBook-Pro.local>
transformers
9
Python
12
test_modeling_groupvit.py
def setUp(self): self.model_tester = GroupViTVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=GroupViTVisionConfig, has_text_modality=False, hidden_size=37 )
6c8f4c9a938a09749ea1b19a5fa2a8dd27e99a29
33
https://github.com/huggingface/transformers.git
44
def setUp(self): self.model_tester = GroupViTVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=GroupViTVisionConfig,
10
50
setUp
36
0
1
11
test/mitmproxy/addons/test_dns_resolver.py
251,087
[dns] offline dns_resolve tests at 100% coverage
mitmproxy
12
Python
27
test_dns_resolver.py
async def test_simple(monkeypatch): monkeypatch.setattr(dns_resolver, "resolve_message", lambda _, __: asyncio.sleep(0, "resp")) dr = dns_resolver.DnsResolver() with taddons.context(dr, proxyserver.Proxyserver()) as tctx: f = tflow.tdnsflow() await dr.dns_request(f) assert f.response tctx.options.dns_mode = "reverse:8.8.8.8" f = tflow.tdnsflow() await dr.dns_request(f) assert not f.response
dd61b21ce37c112c3b1e35774396da9ad0d51b76
94
https://github.com/mitmproxy/mitmproxy.git
93
async def test_simple(monkeypatch): monkeypatch.setattr(dns_resolver, "resolve_message", lambda _, __: asyncio.sleep(0, "resp")) dr = dns_resolver.DnsResolver() with taddons.context(dr, proxyserver.Proxyserver()) as tctx: f = tflow.tdnsflow() await dr.dns_request(f) assert f.response tctx.options.dns_mode = "reverse:8.8.8.8" f
22
160
test_simple
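One detail of the test above worth spelling out: `asyncio.sleep(0, "resp")` returns an awaitable that completes immediately with the result `"resp"`, which is why a plain two-argument lambda can stand in for the async `resolve_message`. A self-contained illustration of that trick (the parameter names are placeholders mirroring the `_, __` in the lambda above):

import asyncio

async def main():
    fake_resolve = lambda _msg, _servers: asyncio.sleep(0, "resp")
    # awaiting the stub yields the canned response straight away
    assert await fake_resolve(None, None) == "resp"

asyncio.run(main())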
22
0
2
5
.venv/lib/python3.8/site-packages/pip/_internal/operations/freeze.py
60,924
upd; format
transferlearning
12
Python
19
freeze.py
def __str__(self): # type: () -> str req = self.req if self.editable: req = f'-e {req}' return '\n'.join(list(self.comments) + [str(req)]) + '\n'
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
40
https://github.com/jindongwang/transferlearning.git
60
def __str__(self): # type: () -> str req = self.req if self.editable: req = f'-e {req}' return '\n'.join(list(self.comments)
8
76
__str__
72
0
1
19
tests/components/alexa/test_capabilities.py
294,185
Exclude hidden entities from alexa (#68555)
core
11
Python
57
test_capabilities.py
async def test_api_set_color_rgb(hass): request = get_new_request("Alexa.ColorController", "SetColor", "light#test") # add payload request["directive"]["payload"]["color"] = { "hue": "120", "saturation": "0.612", "brightness": "0.342", } # setup test devices hass.states.async_set( "light.test", "off", {"friendly_name": "Test light", "supported_features": 16} ) call_light = async_mock_service(hass, "light", "turn_on") msg = await smart_home.async_handle_message(hass, get_default_config(hass), request) await hass.async_block_till_done() assert "event" in msg msg = msg["event"] assert len(call_light) == 1 assert call_light[0].data["entity_id"] == "light.test" assert call_light[0].data["rgb_color"] == (33, 87, 33) assert msg["header"]["name"] == "Response"
dc8e87a6f70439f9830d93d03c53d6ff098a4861
150
https://github.com/home-assistant/core.git
151
async def test_api_set_color_rgb(hass): request = get_new_request("Alexa.ColorController", "SetColor", "light#test") # add payload request["directive"]["payload"]["color"] = {
15
276
test_api_set_color_rgb
20
0
1
4
packages/syft/tests/syft/core/tensor/adp/private_method_test.py
1,093
Remove autograd, old Mechanism, continued renaming entities to datasubject
PySyft
11
Python
19
private_method_test.py
def test_string_entity() -> None: x = sy.Tensor(np.array([1, 2, 3, 4], dtype=DEFAULT_INT_NUMPY_TYPE)) out = x.private(min_val=0, max_val=5, data_subjects="bob") assert out.child.entity.name == "bob"
859b728f41b728447b88b54479e1600a4996dc09
59
https://github.com/OpenMined/PySyft.git
28
def test_string_entity() -> None: x = sy.Tensor(np.array([1, 2, 3, 4], dtype=DEFAULT_INT_NUMPY_TYPE)) out = x.private(min_val=0, max_val=5, data_subjects="bob")
16
91
test_string_entity
132
0
12
34
deepspeed/runtime/utils.py
38,977
MoE inference + PR-MoE model support (#1705) Co-authored-by: Reza Yazdani <reyazda@microsoft.com> Co-authored-by: Zhewei Yao <zheweiy@berkeley.edu> Co-authored-by: Ammar Ahmad Awan <ammar.awan@microsoft.com> Co-authored-by: Jeff Rasley <jerasley@microsoft.com> Co-authored-by: Samyam Rajbhandari <samyamr@microsoft.com>
DeepSpeed
18
Python
83
utils.py
def has_overflow(self, params, has_moe_params=None): if has_moe_params is None: has_moe_params = self.has_moe_params overflow = self.has_overflow_serial(params) # Since each model parallel GPU carries only part of the model, # make sure overflow flag is synced across all the model parallel GPUs overflow_gpu = torch.cuda.ByteTensor([overflow]) # torch.distributed.all_reduce(overflow_gpu, # op=torch.distributed.ReduceOp.MAX, # group=mpu.get_model_parallel_group()) if has_moe_params: # All reduce this across expert_parallel_group, so that if an expert # overflows, we detect it here dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=groups.get_max_expert_parallel_group()) if self.zero_reduce_scatter: torch.distributed.all_reduce(overflow_gpu, op=torch.distributed.ReduceOp.MAX, group=torch.distributed.group.WORLD) elif self.mpu is not None: if self.deepspeed is not None: using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce') if (using_pipeline and self.deepspeed.pipeline_enable_backward_allreduce is False ) or (not using_pipeline and self.deepspeed.enable_backward_allreduce is False): torch.distributed.all_reduce( overflow_gpu, op=torch.distributed.ReduceOp.MAX, group=self.mpu.get_data_parallel_group()) torch.distributed.all_reduce(overflow_gpu, op=torch.distributed.ReduceOp.MAX, group=self.mpu.get_model_parallel_group()) elif self.deepspeed is not None and self.deepspeed.enable_backward_allreduce is False: torch.distributed.all_reduce(overflow_gpu, op=torch.distributed.ReduceOp.MAX, group=torch.distributed.group.WORLD) overflow = overflow_gpu[0].item() return bool(overflow) # `x` is a torch.Tensor
e46d808a1b6cb7e04cb2806e38547b1e3e50c25a
266
https://github.com/microsoft/DeepSpeed.git
895
def has_overflow(self, params, has_moe_params=None): if has_moe_params is None: has_moe_params = self.has_moe_params overflow = self.has_overflow_serial(params) # Since each model parallel GPU carries only part of the model, # make sure overflow flag is synced across all the model parallel GPUs overflow_gpu = torch.cuda.ByteTensor([overflow]) # torch.distributed.all_reduce(overflow_gpu, # op=torch.distributed.ReduceOp.MAX, # group=mpu.get_model_parallel_group()) if has_moe_params: # All reduce this across expert_parallel_group, so that if an expert # overflows, we detect it here dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=groups.get_max_expert_parallel_group()) if self.zero
31
413
has_overflow
20
0
2
7
jina/parsers/create.py
11,237
docs: adapt to 3.0 (#4254) * docs: comparing alternatives (#4249) * docs: fix conflict * docs: remove line * docs: add docarray logos * docs: proper link to docarray * docs: change index * docs: change reference types ecosystem * docs: change comparing * docs: update docs/get-started/comparing-to-alternatives.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: update docs/get-started/comparing-to-alternatives.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: update docs/get-started/comparing-to-alternatives.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: update docs/get-started/comparing-to-alternatives.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: update docs/get-started/comparing-to-alternatives.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: update docs/get-started/comparing-to-alternatives.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: update docs/get-started/comparing-to-alternatives.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: update docs/index.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: fix kubernetes docs (#4259) * docs: fix kubernetes docs * docs: add caution * docs: executor documentation refactoring (#4256) * fix: fix link to share executors * docs: adjust install section for 3.0 (#4265) * docs: adjust readme (#4270) * docs: async in executors tuto (#4264) * docs: move things to how-to (#4271) * docs: updating docker-compose docs (#4252) * docs: move docker compose * docs: caution in kubernetes and docker compose (#4272) * docs: update gpu guide for jina 3 (#4255) * docs: move gpu to how-to (#4273) * docs: migration guide to jina 3 (#4263) * docs: change index link to how-ot * docs: move migrate to get-started (#4274) * docs: adapt some kubernetes content (#4275) * docs: add architecture overview (#4280) * docs: add proto back to API reference (#4281) * docs: external executors tutorial (#4267) * docs: move external executor how-to (#4283) * docs: rephrase comparing to alternatives (#4282) * docs: update docs/fundamentals/concepts.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/concepts.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: fix architeceture map legend * docs: update docs/fundamentals/architecture-overview.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/architecture-overview.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/architecture-overview.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/architecture-overview.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/architecture-overview.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/architecture-overview.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/architecture-overview.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update 
docs/fundamentals/architecture-overview.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: index with readme content (#4285) * docs(executor): fix grammatical errors (#4284) * docs: update docs/fundamentals/executor/index.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/index.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/index.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/index.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/index.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs. update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: Update docs/fundamentals/executor/index.md * docs: update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/architecture-overview.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: update docs/fundamentals/architecture-overview.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: add containerize executor section (#4288) * docs: update docs/fundamentals/architecture-overview.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: update docs/how-to/kubernetes.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: update docs/how-to/sandbox.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: update docs/how-to/sandbox.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: update docs/how-to/sandbox.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: update docs/fundamentals/executor/hub/index.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: apply suggestions from code review Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: update docs/how-to/kubernetes.md * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner 
<44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/how-to/sandbox.md * docs: apply suggestions from code review Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: add scale tutorial (#4287) * docs: refactor scale how-to (#4289) * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-in-flow.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update 
docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: Update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: update docs/fundamentals/executor/executor-api.md Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: rewrite flow section (#4266) * docs: refactor flow docs * docs: update flow index * docs: refactor create a flow section * docs: add flow api section * docs: some minor polishing * docs: add more flow info * docs: address comments * docs: small refactor flow docs (#4293) * docs: fix examples (#4294) * docs: small changes to flow (#4297) * chore: remove the eah announcement (#4295) * docs: polish sandbox tutorial (#4286) * docs: add Hub to ecosys (#4300) * docs: minor clean up on 3.0 branch (#4301) * docs: apply suggestions from code review Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: migration attributes (#4299) * docs: use post and not search (#4302) * docs: very small change (#4304) * docs: add section for extending the http api (#4303) * docs: update docs/how-to/sandbox.md Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: restructure content and layout (#4305) * docs: why to use flow (#4308) * docs: unify docarray import (#4310) * docs(readme): polish (#4307) * docs: fix snippets (#4311) * docs: apply suggestions from code review Co-authored-by: CatStark <susana.guzman@jina.ai> * docs: add jina new (#4313) * docs: add jina new * docs: add jina new * style: fix overload and cli autocomplete * docs: rephrase dockerize section (#4298) * docs: fix create flow images (#4314) * docs: restructure 2 (#4315) * docs: keep clean code (#4316) * docs: restructure 2 * docs: restructure 2 * docs: restructure 2 * docs: update docs/fundamentals/flow/index.md * docs: restructure 2 * docs: restructure 2 * docs: restructure 2 * docs: restructure 2 * docs: restructure 2 * docs: add minimum working example (#4321) * docs: create landing page for how-to's (#4312) * docs(how-to): create landing page * docs: add links to executor how-tos * docs: add links to deployment how-tos * docs: shorten scaling-out description * docs: add info box * docs(sandbox): optimize pic (#4324) * docs: fix inconsistent definition for executor and flow (#4322) * docs: fix inconsistent definitions * docs: fix inconsistent definitions * docs: restructure 2 * fix(docs): yaml formating (#4327) * docs: restructure 2 * docs: fix formatting (#4329) * chore: update banner for docs (#4330) * docs: review readme 2 (#4323) * docs(sandbox): optimize sandbox pic (#4331) * docs: remove jinad from install section (#4333) * docs: apply suggestions from code review Co-authored-by: CatStark <susana.guzman@jina.ai> * docs: restructure 2 * docs: restructure 2 * docs: add what is jina (#4332) * docs: add what is jina * docs: remove comparing to alternatives document * docs: update docs/get-started/what-is-jina.md Co-authored-by: cristian <cristianmtr@users.noreply.github.com> * docs: update docs/get-started/what-is-jina.md Co-authored-by: cristian <cristianmtr@users.noreply.github.com> * docs: apply suggestions from code review Co-authored-by: 
cristian <cristianmtr@users.noreply.github.com> * docs: add link to docarray * docs: apply suggestions from code review Co-authored-by: Nan Wang <nan.wang@jina.ai> Co-authored-by: Han Xiao <artex.xh@gmail.com> Co-authored-by: cristian <cristianmtr@users.noreply.github.com> Co-authored-by: Nan Wang <nan.wang@jina.ai> Co-authored-by: Han Xiao <artex.xh@gmail.com> * fix(docs): apply black automatically (#4337) * docs: fix executor api snippet (#4339) Co-authored-by: Sami Jaghouar <sami.jaghouar@jina.ai> * docs: fix quote * fix: blackifiy readme + single quote (#4340) * docs: fix quote * docs: fix quote * docs: fix quote * docs: fix quote * docs: replace png with svg (#4334) * docs: apply suggestions from code review Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> * docs: fix quote * docs: add highlighting and more positive phrasing (#4338) * docs: fix quote * chore: fix typo Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * chore: fix typo Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * chore: fix typo Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * chore: fix typo Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * chore: fix typo Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * chore: fix typo Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * chore: fix typo Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * chore: fix typo Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * chore: fix typo Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * chore: fix typo Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * chore: apply suggestions from code review Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> * docs: apply suggestions from code review Co-authored-by: CatStark <susana.guzman@jina.ai> * docs: fix quote * chore: fix typo Co-authored-by: CatStark <susana.guzman@jina.ai> Co-authored-by: Alex Cureton-Griffiths <alexcg1@users.noreply.github.com> Co-authored-by: AlaeddineAbdessalem <alaeddine-13@live.fr> Co-authored-by: Tobias Jacobowitz <tobias.jacobowitz@posteo.de> Co-authored-by: samsja <55492238+samsja@users.noreply.github.com> Co-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com> Co-authored-by: Johannes Messner <messnerjo@gmail.com> Co-authored-by: Roshan Jossy <roshan.jossy@jina.ai> Co-authored-by: Wang Bo <bo.wang@jina.ai> Co-authored-by: Nan Wang <nan.wang@jina.ai> Co-authored-by: Zhaofeng Miao <522856232@qq.com> Co-authored-by: Han Xiao <han.xiao@jina.ai> Co-authored-by: CatStark <susana.guzman@jina.ai> Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> Co-authored-by: cristian <cristianmtr@users.noreply.github.com> Co-authored-by: Han Xiao <artex.xh@gmail.com> Co-authored-by: Sami Jaghouar <sami.jaghouar@jina.ai>
jina
10
Python
19
create.py
def set_new_project_parser(parser=None):
    if not parser:
        parser = set_base_parser()

    parser.add_argument(
        'name', type=str, help='The name of the project', default='hello-jina'
    )
    return parser
07e2ef0a5cd2baf90a0e30c32e5898d1fdfc4d48
37
https://github.com/jina-ai/jina.git
49
def set_new_project_parser(parser=None): if not parser: parser = set_base_parser() parser.add_argument( 'nam
8
66
set_new_project_parser
39
0
2
11
ludwig/combiners/combiners.py
6,644
fix: Naming scheme cleanup that includes: renaming `ludwig.marshmallow` module to `ludwig.validation` to avoid implicit import errors, and moving `ludwig.utils.schema` into this new module. (#1936) * Rename marshmallow/ folder to marshmallow_schema_utils/, marshmallow_schema_utils.py to utils.py (under folder), update all refs. * Rename marshmallow/ folder to marshmallow_schema_utils/, marshmallow_schema_utils.py to utils.py (under folder), update all refs. * update extract_schema * update generated files. * update manifest * rename using validation/schema_utils naming * update generated files * new naming scheme * fix imports. * rerun extract_schema
ludwig
13
Python
32
combiners.py
def get_combiner_conds():
    combiner_types = sorted(list(combiner_registry.keys()))
    conds = []
    for combiner_type in combiner_types:
        combiner_cls = combiner_registry[combiner_type]
        schema_cls = combiner_cls.get_schema_cls()
        combiner_schema = marshmallow_utils.get_custom_schema_from_marshmallow_class(schema_cls)
        combiner_props = combiner_schema["properties"]
        combiner_cond = marshmallow_utils.create_cond({"type": combiner_type}, combiner_props)
        conds.append(combiner_cond)
    return conds


# super class to house common properties
a95f611d582a724740af772ead1fa439b3713124
76
https://github.com/ludwig-ai/ludwig.git
95
def get_combiner_conds(): combiner_types = sorted(list(combiner_registry.keys())) conds = [] for combiner_type in combiner_types:
18
130
get_combiner_conds
27
0
1
9
keras/saving/experimental/saving_lib_test.py
275,849
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
10
Python
24
saving_lib_test.py
def train_step(self, data):
    tf.print(train_step_message)
    x, y = data
    with tf.GradientTape() as tape:
        y_pred = self(x)
        loss = self.compiled_loss(y, y_pred)
    gradients = tape.gradient(loss, self.trainable_variables)
    self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
    return {}
84afc5193d38057e2e2badf9c889ea87d80d8fbf
73
https://github.com/keras-team/keras.git
90
def train_step(self, data): tf.print(train_step_message) x, y = data with tf.GradientTape() as tape: y_pred = s
19
118
train_step
22
0
1
3
src/transformers/models/longt5/modeling_flax_longt5.py
31,265
Add `LongT5` model (#16792) * Initial commit * Make some fixes * Make PT model full forward pass * Drop TF & Flax implementation, fix copies etc * Add Flax model and update some corresponding stuff * Drop some TF things * Update config and flax local attn * Add encoder_attention_type to config * . * Update docs * Do some cleansing * Fix some issues -> make style; add some docs * Fix position_bias + mask addition + Update tests * Fix repo consistency * Fix model consistency by removing flax operation over attn_mask * [WIP] Add PT TGlobal LongT5 * . * [WIP] Add flax tglobal model * [WIP] Update flax model to use the right attention type in the encoder * Fix flax tglobal model forward pass * Make the use of global_relative_attention_bias * Add test suites for TGlobal model * Fix minor bugs, clean code * Fix pt-flax equivalence though not convinced with correctness * Fix LocalAttn implementation to match the original impl. + update READMEs * Few updates * Update: [Flax] improve large model init and loading #16148 * Add ckpt conversion script accoring to #16853 + handle torch device placement * Minor updates to conversion script. * Typo: AutoModelForSeq2SeqLM -> FlaxAutoModelForSeq2SeqLM * gpu support + dtype fix * Apply some suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * * Remove (de)parallelize stuff * Edit shape comments * Update README.md * make fix-copies * Remove caching logic for local & tglobal attention * Apply another batch of suggestions from code review * Add missing checkpoints * Format converting scripts * Drop (de)parallelize links from longT5 mdx * Fix converting script + revert config file change * Revert "Remove caching logic for local & tglobal attention" This reverts commit 2a619828f6ddc3e65bd9bb1725a12b77fa883a46. * Stash caching logic in Flax model * Make side relative bias used always * Drop caching logic in PT model * Return side bias as it was * Drop all remaining model parallel logic * Remove clamp statements * Move test files to the proper place * Update docs with new version of hf-doc-builder * Fix test imports * Make some minor improvements * Add missing checkpoints to docs * Make TGlobal model compatible with torch.onnx.export * Replace some np.ndarray with jnp.ndarray * Fix TGlobal for ONNX conversion + update docs * fix _make_global_fixed_block_ids and masked neg value * update flax model * style and quality * fix imports * remove load_tf_weights_in_longt5 from init and fix copies * add slow test for TGlobal model * typo fix * Drop obsolete is_parallelizable and one warning * Update __init__ files to fix repo-consistency * fix pipeline test * Fix some device placements * [wip]: Update tests -- need to generate summaries to update expected_summary * Fix quality * Update LongT5 model card * Update (slow) summarization tests * make style * rename checkpoitns * finish * fix flax tests Co-authored-by: phungvanduy <pvduy23@gmail.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> Co-authored-by: patil-suraj <surajp815@gmail.com>
transformers
8
Python
18
modeling_flax_longt5.py
def update_inputs_for_generation(self, model_outputs, model_kwargs):
    model_kwargs["past_key_values"] = model_outputs.past_key_values
    return model_kwargs


FLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING = overwrite_call_docstring(
    FlaxLongT5ForConditionalGeneration, LONGT5_INPUTS_DOCSTRING + FLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING
)
append_replace_return_docstrings(
    FlaxLongT5ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
)
a72f1c9f5b907f96cbb7de3bbb02a1d431d34071
19
https://github.com/huggingface/transformers.git
37
def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values return model_kwargs FLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING = overwrite_call_docstring( FlaxLongT5ForConditionalGeneration, LONGT5_INPUTS_DOCSTRING + FLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING ) append_replace_return_docstrings( FlaxLongT5ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC )
14
66
update_inputs_for_generation
37
0
1
19
tests/test_visibility.py
248,462
Rename storage classes (#12913)
synapse
13
Python
31
test_visibility.py
def _inject_outlier(self) -> EventBase:
    builder = self.event_builder_factory.for_room_version(
        RoomVersions.V1,
        {
            "type": "m.room.member",
            "sender": "@test:user",
            "state_key": "@test:user",
            "room_id": TEST_ROOM_ID,
            "content": {"membership": "join"},
        },
    )

    event = self.get_success(builder.build(prev_event_ids=[], auth_event_ids=[]))
    event.internal_metadata.outlier = True
    self.get_success(
        self._storage_controllers.persistence.persist_event(
            event, EventContext.for_outlier(self._storage_controllers)
        )
    )
    return event
1e453053cb12ff084fdcdc2f75c08ced274dff21
101
https://github.com/matrix-org/synapse.git
230
def _inject_outlier(self) -> EventBase: builder = self.event_builder_factory.for_room_version( RoomVersions.V1, {
21
171
_inject_outlier
22
0
1
6
tests/packaging/test_file_packager.py
56,890
Fix packager flow collisions
prefect
11
Python
18
test_file_packager.py
async def test_file_packager_by_serializer(serializer):
    packager = FilePackager(serializer=serializer)
    manifest = await packager.package(howdy)

    assert isinstance(manifest, FilePackageManifest)
    unpackaged_howdy = await manifest.unpackage()
    assert unpackaged_howdy("bro").result() == "howdy bro"
32d4fb18769d663292fb059eda1e15a8628af689
48
https://github.com/PrefectHQ/prefect.git
36
async def test_file_packager_by_serializer(serializer): packager = FileP
12
84
test_file_packager_by_serializer
9
0
1
3
homeassistant/components/homekit_controller/config_flow.py
303,366
Fix some homekit_controller pylint warnings and (local only) test failures (#76122)
core
10
Python
9
config_flow.py
async def _async_setup_controller(self) -> None:
    self.controller = await async_get_controller(self.hass)
d5695a2d8656d2f9cb4d549c80cad331c914af1f
19
https://github.com/home-assistant/core.git
23
async def _async_setup_controller(self) -> None: self.controller = await async_get_controller(self.hass)
5
35
_async_setup_controller
91
0
1
14
test/test_preprocessor.py
257,028
Change return types of indexing pipeline nodes (#2342) * Change return types of file converters * Change return types of preprocessor * Change return types of crawler * Adapt utils to functions to new return types * Adapt __init__.py to new method names * Prevent circular imports * Update Documentation & Code Style * Let DocStores' run method accept Documents * Adapt tests to new return types * Update Documentation & Code Style * Put "# type: ignore" to right place * Remove id_hash_keys property from Document primitive * Update Documentation & Code Style * Adapt tests to new return types and missing id_hash_keys property * Fix mypy * Fix mypy * Adapt PDFToTextOCRConverter * Remove id_hash_keys from RestAPI tests * Update Documentation & Code Style * Rename tests * Remove redundant setting of content_type="text" * Add DeprecationWarning * Add id_hash_keys to elasticsearch_index_to_document_store * Change document type from dict to Docuemnt in PreProcessor test * Fix file path in Tutorial 5 * Remove added output in Tutorial 5 * Update Documentation & Code Style * Fix file_paths in Tutorial 9 + fix gz files in fetch_archive_from_http * Adapt tutorials to new return types * Adapt tutorial 14 to new return types * Update Documentation & Code Style * Change assertions to HaystackErrors * Import HaystackError correctly Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
haystack
11
Python
47
test_preprocessor.py
def test_remove_substrings():
    document = Document(content="This is a header. Some additional text. wiki. Some emoji ✨ 🪲 Weird whitespace\b\b\b.")

    # check that the file contains the substrings we are about to remove
    assert "This is a header." in document.content
    assert "wiki" in document.content
    assert "🪲" in document.content
    assert "whitespace" in document.content
    assert "✨" in document.content

    preprocessor = PreProcessor(remove_substrings=["This is a header.", "wiki", "🪲"])
    documents = preprocessor.process(document)

    assert "This is a header." not in documents[0].content
    assert "wiki" not in documents[0].content
    assert "🪲" not in documents[0].content
    assert "whitespace" in documents[0].content
    assert "✨" in documents[0].content
834f8c49024063ce17a63e50a9d7cff12f1c4f91
112
https://github.com/deepset-ai/haystack.git
132
def test_remove_substrings(): document = Document(content="This is a header. Some additional text. wiki. Some emoji ✨ 🪲 Weird whitespace\b\b\b.") # check that the file contains the substrings we are about to remove assert "This is a header." in document.content assert "wiki" in document.content assert "🪲" in document.content assert "whitespace" in document.content assert "✨" in document.content preprocessor = PreProcessor(remove_substrings=["This is a header.", "wiki", "🪲"]) documents = preprocessor.process(document) assert "This is a header." not in documents[0].content assert "wiki" not in documents[0].content assert "🪲" n
9
199
test_remove_substrings
78
1
8
28
homeassistant/components/fivem/__init__.py
312,790
Fivem integration (#65089) * Initial fivem integration setup * Use licenseKey for unique ID * Create FiveMServer class * Create FiveMStatusBinarySensor * Fix platform loading * Create sensor platform * Remove config flow tests * Update manifest.json * Use attr_ instead or properties in sensors.py * Use entry_id as unique_id * Move device info to _attr instead of property * Register callback in FiveMEntity * Create config flow tests * Add loggin to fivem * Use FiveM in config_flow * Use update_coordinator instead of dispatcher * Bump fivem-api to 0.1.2 * Remove leftovers * More tests for config flow * Add component files to .coveragerc * Fix simple comments * Add gamename check to config flow * Use entity descriptions for sensors * Move extra attributes to init * Use [] instead of get() for server info * Fix error in gamename test
core
12
Python
60
__init__.py
async def _async_update_data(self) -> dict[str, Any]:
    was_online = self.online

    try:
        server = await self._fivem.get_server()
        self.online = True
    except FiveMServerOfflineError:
        self.online = False

    if was_online and not self.online:
        _LOGGER.warning("Connection to '%s:%s' lost", self.host, self.port)
    elif not was_online and self.online:
        _LOGGER.info("Connection to '%s:%s' (re-)established", self.host, self.port)

    if self.online:
        players_list: list[str] = []
        for player in server.players:
            players_list.append(player.name)
        players_list.sort()

        resources_list = server.resources
        resources_list.sort()

        return {
            NAME_PLAYERS_ONLINE: len(players_list),
            NAME_PLAYERS_MAX: server.max_players,
            NAME_RESOURCES: len(resources_list),
            NAME_STATUS: self.online,
            ATTR_PLAYERS_LIST: players_list,
            ATTR_RESOURCES_LIST: resources_list,
        }

    raise UpdateFailed


@dataclass
0ea82bdbfb0d58b1af273e39da65cbb9e4af1015
@dataclass
170
https://github.com/home-assistant/core.git
370
async def _async_update_data(self) -> dict[str, Any]: was_online = self.online try: server = await self._fivem.get_server() self.online = True except FiveMServerOfflineError: self.online = False if was_online and not self.online: _LOGGER.warning("Connection to '%s:%s' lost", self.host, self.port) elif not was_online and self.online: _LOGGER.info("Connection to '%s:%s' (re-)established", self.host, self.port) if self.online: players_list: list[str] = [] for player in server.players: players_list.append(player.name) players_list.sort() resources_list = server.resources resources_list.sort() return { NAME_PLAYERS_ONLINE: len(players_list), NAME_PLAYERS_MAX: server.max_players, NAME_RESOURCES: len(resources_list), NAME_STATUS: self.online, ATTR_PLAYERS
35
272
_async_update_data
26
0
2
4
modin/pandas/base.py
155,013
REFACTOR-#5092: Fix future warning for `set_axis` function (#5093) Co-authored-by: Vasily Litvinov <fam1ly.n4me@yandex.ru> Signed-off-by: Myachev <anatoly.myachev@intel.com>
modin
9
Python
24
base.py
def swaplevel(self, i=-2, j=-1, axis=0):  # noqa: PR01, RT01, D200
    axis = self._get_axis_number(axis)
    idx = self.index if axis == 0 else self.columns
    return self.set_axis(idx.swaplevel(i, j), axis=axis)
9013f54283eb6776920ee3bf527e208a516d086d
59
https://github.com/modin-project/modin.git
55
def swaplevel(self, i=-2, j=-1, axis=0): # noqa: PR01, RT01, D200
10
91
swaplevel
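The Modin `swaplevel` above simply delegates to `set_axis` with a swapped `MultiIndex`. The plain-pandas sketch below (illustrative only; names and data are made up) shows the behaviour being mirrored.

```python
import pandas as pd

# Two-level index; swaplevel(-2, -1) exchanges the last two levels.
idx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")], names=["outer", "inner"])
df = pd.DataFrame({"v": [1, 2]}, index=idx)

print(list(df.swaplevel(-2, -1, axis=0).index.names))  # ['inner', 'outer']
```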
12
0
1
7
tests/sentry/models/test_projectownership.py
85,610
feat(issues): Store assignee integration in group activity (#38526) - When a user is assigned via slack or ms teams, add the integration to activity data - When assigned via codeowners, add the integration and rule as a string
sentry
10
Python
11
test_projectownership.py
def test_get_autoassign_owners_no_codeowners_or_issueowners(self):
    assert ProjectOwnership.get_autoassign_owners(self.project.id, {}) == (
        False,
        [],
        False,
        None,
    )
f1c3fa1660fa8144b5965f0375f5abec122243bf
31
https://github.com/getsentry/sentry.git
69
def test_get_autoassign_owners_no_codeowners_or_issueowners(self): assert ProjectOw
6
45
test_get_autoassign_owners_no_codeowners_or_issueowners
736
0
4
546
src/sentry/search/events/datasets/metrics.py
88,281
chore(metrics): Remove tag values are strings option (#41092) - This removes the tag value option since we're now fully on using tag values as strings instead of indexed integers - This is needed so we can start on wildcard searching
sentry
28
Python
198
metrics.py
def function_converter(self) -> Mapping[str, fields.MetricsFunction]: resolve_metric_id = { "name": "metric_id", "fn": lambda args: self.resolve_metric(args["column"]), } function_converter = { function.name: function for function in [ # Note while the discover version of apdex, count_miserable, user_misery # accepts arguments, because this is precomputed with tags no parameters # are available fields.MetricsFunction( "apdex", optional_args=[fields.NullableNumberRange("satisfaction", 0, None)], snql_distribution=self._resolve_apdex_function, default_result_type="number", ), fields.MetricsFunction( "avg", required_args=[ fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS, ) ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "avgIf", [ Column("value"), Function( "equals", [ Column("metric_id"), args["metric_id"], ], ), ], alias, ), result_type_fn=self.reflective_result_type(), default_result_type="integer", ), fields.MetricsFunction( "count_miserable", required_args=[ fields.MetricArg( "column", allowed_columns=["user"], allow_custom_measurements=False ) ], optional_args=[fields.NullableNumberRange("satisfaction", 0, None)], calculated_args=[resolve_metric_id], snql_set=self._resolve_count_miserable_function, default_result_type="integer", ), fields.MetricsFunction( "count_unparameterized_transactions", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "equals", [ self.builder.column("transaction"), self.builder.resolve_tag_value("<< unparameterized >>"), ], ), ], ), ], alias, ), # Not yet exposed, need to add far more validation around tag&value private=True, default_result_type="integer", ), fields.MetricsFunction( "count_null_transactions", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "equals", [ self.builder.column("transaction"), "", ], ), ], ), ], alias, ), private=True, ), fields.MetricsFunction( "count_has_transaction_name", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "and", [ Function( "notEquals", [ self.builder.column("transaction"), "", ], ), Function( "notEquals", [ self.builder.column("transaction"), self.builder.resolve_tag_value( "<< unparameterized >>" ), ], ), ], ), ], ), ], alias, ), private=True, default_result_type="integer", ), fields.MetricsFunction( "user_misery", optional_args=[ fields.NullableNumberRange("satisfaction", 0, None), fields.with_default( constants.MISERY_ALPHA, fields.NumberRange("alpha", 0, None) ), fields.with_default( constants.MISERY_BETA, fields.NumberRange("beta", 0, None) ), ], calculated_args=[], snql_set=self._resolve_user_misery_function, default_result_type="number", ), fields.MetricsFunction( "p50", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.5 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p75", optional_args=[ fields.with_default( 
"transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.75 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p90", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.90 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p95", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.95 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p99", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.99 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p100", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile(args, alias, 1), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "max", required_args=[ fields.MetricArg("column"), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "maxIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( "min", required_args=[ fields.MetricArg("column"), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "minIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( "sum", required_args=[ fields.MetricArg("column"), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "sumIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( "sumIf", required_args=[ fields.ColumnTagArg("if_col"), fields.FunctionArg("if_val"), ], calculated_args=[ { "name": "resolved_val", "fn": lambda args: self.builder.resolve_tag_value(args["if_val"]), } ], snql_counter=lambda args, alias: Function( "sumIf", [ Column("value"), Function("equals", [args["if_col"], args["resolved_val"]]), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "percentile", required_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), fields.NumberRange("percentile", 0, 1), ], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_percentile, result_type_fn=self.reflective_result_type(), 
default_result_type="duration", ), fields.MetricsFunction( "count_unique", required_args=[ fields.MetricArg( "column", allowed_columns=["user"], allow_custom_measurements=False ) ], calculated_args=[resolve_metric_id], snql_set=lambda args, alias: Function( "uniqIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "uniq", snql_set=lambda args, alias: Function( "uniq", [Column("value")], alias, ), ), fields.MetricsFunction( "uniqIf", required_args=[ fields.ColumnTagArg("if_col"), fields.FunctionArg("if_val"), ], calculated_args=[ { "name": "resolved_val", "fn": lambda args: self.builder.resolve_tag_value(args["if_val"]), } ], snql_set=lambda args, alias: Function( "uniqIf", [ Column("value"), Function("equals", [args["if_col"], args["resolved_val"]]), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "count", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "count_web_vitals", required_args=[ fields.MetricArg( "column", allowed_columns=[ "measurements.fp", "measurements.fcp", "measurements.lcp", "measurements.fid", "measurements.cls", ], allow_custom_measurements=False, ), fields.SnQLStringArg( "quality", allowed_strings=["good", "meh", "poor", "any"] ), ], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_web_vital_function, default_result_type="integer", ), fields.MetricsFunction( "epm", snql_distribution=lambda args, alias: Function( "divide", [ Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], ), Function("divide", [args["interval"], 60]), ], alias, ), optional_args=[fields.IntervalDefault("interval", 1, None)], default_result_type="number", ), fields.MetricsFunction( "eps", snql_distribution=lambda args, alias: Function( "divide", [ Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], ), args["interval"], ], alias, ), optional_args=[fields.IntervalDefault("interval", 1, None)], default_result_type="number", ), fields.MetricsFunction( "failure_count", snql_distribution=self._resolve_failure_count, default_result_type="integer", ), fields.MetricsFunction( "failure_rate", snql_distribution=lambda args, alias: Function( "divide", [ self._resolve_failure_count(args), Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], ), ], alias, ), default_result_type="percentage", ), fields.MetricsFunction( "histogram", required_args=[fields.MetricArg("column")], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_histogram_function, default_result_type="number", private=True, ), ] } for alias, name in constants.FUNCTION_ALIASES.items(): if name in function_converter: function_converter[alias] = function_converter[name].alias_as(alias) return function_converter
4c9c03f8a9416b53bf74f2d77df43499973ecf89
2,117
https://github.com/getsentry/sentry.git
13,912
def function_converter(self) -> Mapping[str, fields.MetricsFunction]: resolve_metric_id = { "name": "metric_id", "fn": lambda args: self.resolve_metric(args["column"]), } function_converter = { function.name: function for function in [ # Note while the discover version of apdex, count_miserable, user_misery # accepts arguments, because this is precomputed with tags no parameters # are available fields.MetricsFunction( "apdex", optional_args=[fields.NullableNumberRange("satisfaction", 0, None)], snql_distribution=self._resolve_apdex_function, default_result_type="number", ), fields.MetricsFunction( "avg", required_args=[ fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS, ) ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "avgIf", [ Column("value"), Function( "equals", [ Column("metric_id"), args["metric_id"], ], ), ], alias, ), result_type_fn=self.reflective_result_type(), default_result_type="integer", ), fields.MetricsFunction( "count_miserable", required_args=[ fields.MetricArg( "column", allowed_columns=["user"], allow_custom_measurements=False ) ], optional_args=[fields.NullableNumberRange("satisfaction", 0, None)], calculated_args=[resolve_metric_id], snql_set=self._resolve_count_miserable_function, default_result_type="integer", ), fields.MetricsFunction( "count_unparameterized_transactions", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "equals", [ self.builder.column("transaction"), self.builder.resolve_tag_value("<< unparameterized >>"), ], ), ], ), ], alias, ), # Not yet exposed, need to add far more validation around tag&value private=True, default_result_type="integer", ), fields.MetricsFunction( "count_null_transactions", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "equals", [ self.builder.column("transaction"), "", ], ), ], ), ], alias, ), private=True, ), fields.MetricsFunction( "count_has_transaction_name", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "and", [ Function( "notEquals", [ self.builder.column("transaction"), "", ], ), Function( "notEquals", [ self.builder.column("transaction"), self.builder.resolve_tag_value( "<< unparameterized >>" ), ], ), ], ), ], ), ], alias, ), private=True, default_result_type="integer", ), fields.MetricsFunction( "user_misery", optional_args=[ fields.NullableNumberRange("satisfaction", 0, None), fields.with_default( constants.MISERY_ALPHA, fields.NumberRange("alpha", 0, None) ), fields.with_default( constants.MISERY_BETA, fields.NumberRange("beta", 0, None) ), ], calculated_args=[], snql_set=self._resolve_user_misery_function, default_result_type="number", ), fields.MetricsFunction( "p50", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.5 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p75", optional_args=[ fields.with_default( 
"transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._re
52
3,289
function_converter
118
0
5
29
tests/exchange/test_ccxt_compat.py
149,740
Okx - conditional candle-length
freqtrade
14
Python
83
test_ccxt_compat.py
def test_ccxt__async_get_candle_history(self, exchange):
    exchange, exchangename = exchange
    # For some weird reason, this test returns random lengths for bittrex.
    if not exchange._ft_has['ohlcv_has_history'] or exchangename == 'bittrex':
        return
    pair = EXCHANGES[exchangename]['pair']
    timeframe = EXCHANGES[exchangename]['timeframe']
    candle_type = CandleType.SPOT
    timeframe_ms = timeframe_to_msecs(timeframe)
    now = timeframe_to_prev_date(
        timeframe, datetime.now(timezone.utc))
    for offset in (360, 120, 30, 10, 5, 2):
        since = now - timedelta(days=offset)
        since_ms = int(since.timestamp() * 1000)

        res = exchange.loop.run_until_complete(exchange._async_get_candle_history(
            pair=pair,
            timeframe=timeframe,
            since_ms=since_ms,
            candle_type=candle_type
        )
        )
        assert res
        assert res[0] == pair
        assert res[1] == timeframe
        assert res[2] == candle_type

        candles = res[3]
        candle_count = exchange.ohlcv_candle_limit(timeframe, candle_type, since_ms) * 0.9
        candle_count1 = (now.timestamp() * 1000 - since_ms) // timeframe_ms
        assert len(candles) >= min(candle_count, candle_count1)
        assert candles[0][0] == since_ms or (since_ms + timeframe_ms)
111b04c9e65668067646265e614326f81aa1bf1c
225
https://github.com/freqtrade/freqtrade.git
420
def test_ccxt__async_get_candle_history(self, exchange): exchange, exchangename = exchange # For some weired reason, this test returns random lengths for bittrex. if not exchange._ft_has['ohlcv_has_history'] or exchangename == 'bittrex': return pair = EXCHANGES[exchangename]['pair'] timeframe = EXCHANGES[exchangename]['timeframe'] candle_type = CandleType.SPOT timeframe_ms = timeframe_to_msecs(timeframe) now = timeframe_to_prev_date( timeframe, datetime.now(timezone.utc)) for offset in (360, 120, 30, 10, 5, 2): since = now - timedelta(days=offset) since_ms = int(since.timestamp() * 1000) res = exchange.loop.run_until_complete(exchange._async_get_candle_history( pair=pair, timeframe=timeframe, since_ms=since_ms, candle_type=candle_type ) ) assert res assert res[0] == pair assert res[1] == timeframe assert res[2] == candle_type candles = res[3] candle_count = exchange.ohlcv_candle_limit(timeframe, candle_type, since_ms) * 0.9 candle_count1 = (now.timestamp() * 1000 - since_ms) //
35
342
test_ccxt__async_get_candle_history
101
0
5
12
python/ray/_private/runtime_env/_clonevirtualenv.py
144,958
Update license for MLflow's conda utils and virtualenv-clone (#22402) When we vendor third-party code, we should update LICENSE file. Previously we vendored two pieces of code: - conda utilities from MLflow - virtualenv-clone But we only included the attribution in the relevant source files, not in our LICENSE file. This PR adds the necessary info to our LICENSE file.
ray
17
Python
75
_clonevirtualenv.py
def fix_symlink_if_necessary(src_dir, dst_dir):
    # sometimes the source virtual environment has symlinks that point to itself
    # one example is $OLD_VIRTUAL_ENV/local/lib points to $OLD_VIRTUAL_ENV/lib
    # this function makes sure
    # $NEW_VIRTUAL_ENV/local/lib will point to $NEW_VIRTUAL_ENV/lib
    # usually this goes unnoticed unless one tries to upgrade a package through pip, so this bug is hard to find.
    logger.info("scanning for internal symlinks that point to the original virtual env")
    for dirpath, dirnames, filenames in os.walk(dst_dir):
        for a_file in itertools.chain(filenames, dirnames):
            full_file_path = os.path.join(dirpath, a_file)
            if os.path.islink(full_file_path):
                target = os.path.realpath(full_file_path)
                if target.startswith(src_dir):
                    new_target = target.replace(src_dir, dst_dir)
                    logger.debug("fixing symlink in %s" % (full_file_path,))
                    os.remove(full_file_path)
                    os.symlink(new_target, full_file_path)
606e2b2cde89a4869129dbca907bc14a7a9d1197
114
https://github.com/ray-project/ray.git
256
def fix_symlink_if_necessary(src_dir, dst_dir): # sometimes the source virtual environment has symlinks that point to itself # one example is $OLD_VIRTUAL_ENV/local/lib points to $OLD_VIRTUAL_ENV/lib # this function makes sure # $NEW_VIRTUAL_ENV/local/lib will point to $NEW_VIRTUAL_ENV/lib # usually this goes unnoticed unless one tries to upgrade a package though pip, so this bug is hard to find. logger.inf
25
184
fix_symlink_if_necessary
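A minimal sketch of where `fix_symlink_if_necessary` could be called from, assuming the old environment is copied first; the paths are hypothetical, and the real virtualenv-clone flow performs additional fix-ups beyond symlinks.

```python
import shutil

old_env = "/opt/venvs/old-env"   # hypothetical source virtualenv
new_env = "/opt/venvs/new-env"   # hypothetical destination

# Copy the tree while keeping symlinks as symlinks, then retarget any link
# that still resolves into the old environment.
shutil.copytree(old_env, new_env, symlinks=True)
fix_symlink_if_necessary(old_env, new_env)
```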
105
0
1
35
tests/snuba/api/endpoints/test_organization_events.py
87,974
fix(tests): Discover backend test flakes (#41057) - `MetricsQueryBuilder` wasn't sorting environment tags - Consistent timestamps on test_organization_events - Updated `apply_feature_flag_on_cls` to only apply decorator on the run method
sentry
13
Python
65
test_organization_events.py
def test_issue_in_columns(self):
    project1 = self.create_project()
    project2 = self.create_project()
    event1 = self.store_event(
        data={
            "event_id": "a" * 32,
            "transaction": "/example",
            "message": "how to make fast",
            "timestamp": self.ten_mins_ago_iso,
            "fingerprint": ["group_1"],
        },
        project_id=project1.id,
    )
    event2 = self.store_event(
        data={
            "event_id": "b" * 32,
            "transaction": "/example",
            "message": "how to make fast",
            "timestamp": self.ten_mins_ago_iso,
            "fingerprint": ["group_1"],
        },
        project_id=project2.id,
    )

    features = {"organizations:discover-basic": True, "organizations:global-views": True}
    query = {"field": ["id", "issue"], "orderby": ["id"]}
    response = self.do_request(query, features=features)
    assert response.status_code == 200, response.content
    data = response.data["data"]
    assert len(data) == 2
    assert data[0]["id"] == event1.event_id
    assert data[0]["issue.id"] == event1.group_id
    assert data[0]["issue"] == event1.group.qualified_short_id
    assert data[1]["id"] == event2.event_id
    assert data[1]["issue.id"] == event2.group_id
    assert data[1]["issue"] == event2.group.qualified_short_id
618ae63cf2ba419e44e79ce578d88e8b062d7dd9
248
https://github.com/getsentry/sentry.git
446
def test_issue_in_columns(self): project1 = self.create_project() project2 = self.create_project() event1 = self.store_event( data={ "event_id": "a" * 32, "transaction": "/example", "message": "how to ma
23
422
test_issue_in_columns
11
0
1
17
dask/typing.py
156,511
Collection Protocol (#8674) [PEP 544](https://www.python.org/dev/peps/pep-0544/) introduces the `Protocol` class to the `typing` module in Python 3.8 (the soon be the minimum supported version, https://github.com/dask/community/issues/213). Writing new Dask collections for [dask-awkward](https://github.com/ContinuumIO/dask-awkward/) has had me thinking about working on a `DaskCollection` protocol. I imagine the benefits to be: - usage with static type checkers - other activity in this area at - #8295 - #8706 - #8854 - Python supporting IDEs take advantage of typing - self-documenting; some improvements to [the custom collections page](https://docs.dask.org/en/latest/custom-collections.html) of the docs. The protocol docs can be autogenerated and added to that page. - purely opt-in feature The `typing.runtime_checkable` decorator allows use of `isinstance(x, DaskCollection)` in any code base that uses Dask collections; for example: ```python >>> from dask.typing import DaskCollection >>> import dask.array as da >>> x = da.zeros((10, 3)) >>> isinstance(x, DaskCollection) True ``` (though this is an order of magnitude slower than `dask.base.is_dask_collection` which only checks for `x.__dask_graph__() is not None`; static typing checking & built-in interface documentation are the core benefits IMO) Something else that came up in the brief discussion on a call last week was having `{Scheduler,Worker,Nanny}Plugin` protocols in `distributed`; and perhaps those are better places to start introducing protocols to Dask since on the user side typically more folks would write plugins than new collections.
dask
8
Python
11
typing.py
def __dask_graph__(self) -> Mapping:
    raise NotImplementedError("Inheriting class must implement this method.")
1e783d9a714160e968936cb22d54d085959ab09e
13
https://github.com/dask/dask.git
25
def __dask_graph__(self) -> Mapping:
4
26
__dask_graph__
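The commit message above demonstrates `isinstance(x, DaskCollection)`. The sketch below illustrates the underlying `typing.Protocol` / `runtime_checkable` mechanism with made-up names (`SupportsDaskGraph`, `MyCollection`); it is not dask's actual protocol definition.

```python
from typing import Mapping, Protocol, runtime_checkable


@runtime_checkable
class SupportsDaskGraph(Protocol):
    # Structural type: any object defining __dask_graph__ matches at runtime.
    def __dask_graph__(self) -> Mapping:
        ...


class MyCollection:
    def __dask_graph__(self) -> Mapping:
        return {}


print(isinstance(MyCollection(), SupportsDaskGraph))  # True; only method presence is checked
```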
24
0
1
3
mindsdb/integrations/handlers/databend_handler/tests/test_databend_handler.py
116,827
added the unit tests for the handler
mindsdb
9
Python
14
test_databend_handler.py
def test_1_native_query_show_dbs(self):
    result = self.handler.native_query("SHOW DATABASES;")
    assert result.type is not RESPONSE_TYPE.ERROR

# def test_2_wrong_native_query_returns_error(self):
#     result = self.handler.native_query("SHOW DATABASE1S;")
#     assert result.type is RESPONSE_TYPE.ERROR
add8253659f2a16152fa513ae310b4b6b5242e1e
24
https://github.com/mindsdb/mindsdb.git
54
def test_1_native_query_show_dbs(self): result = self.handler.native_query("SHOW DATABASES;") assert result.type is not RESPONSE_TYPE.ERROR # def test_2_wrong_native_query_returns_error(self): # result = self.handler.native_query("SHOW DATABASE1S;") # assert result.type is RESPONSE_TY
8
43
test_1_native_query_show_dbs
32
0
2
15
test/lib/ansible_test/_internal/host_profiles.py
268,743
ansible-test - Improve container management. (#78550) See changelogs/fragments/ansible-test-container-management.yml for details.
ansible
13
Python
30
host_profiles.py
def setup(self) -> None:
    bootstrapper = BootstrapDocker(
        controller=self.controller,
        python_versions=[self.python.version],
        ssh_key=SshKey(self.args),
    )

    setup_sh = bootstrapper.get_script()
    shell = setup_sh.splitlines()[0][2:]

    try:
        docker_exec(self.args, self.container_name, [shell], data=setup_sh, capture=False)
    except SubprocessError:
        display.info(f'Checking container "{self.container_name}" logs...')
        docker_logs(self.args, self.container_name)
        raise
cda16cc5e9aa8703fb4e1ac0a0be6b631d9076cc
104
https://github.com/ansible/ansible.git
158
def setup(self) -> None: bootstrapper = BootstrapDocker( controller=self.controller, python_versions=[self.python.version], ssh_key=SshKey(self.args), ) setup_sh = bootstrapper.get_script()
23
169
setup
24
0
2
7
wagtail/snippets/tests.py
77,878
Use ReportView for Snippets HistoryView and use filterset
wagtail
12
Python
20
tests.py
def get_url(self, snippet, url_name, args=None):
    app_label = snippet._meta.app_label
    model_name = snippet._meta.model_name
    view_name = f"wagtailsnippets_{app_label}_{model_name}:{url_name}"
    if args is None:
        args = [quote(snippet.pk)]
    return reverse(view_name, args=args)
7b9531f9910ec8624ee66772805438e9f3084d3d
55
https://github.com/wagtail/wagtail.git
69
def get_url(self, snippet, url_name, args=None): app_label = snippet._meta.app_label model_name = snippet._meta.model_name view_name = f"wagtailsnippets_{app_label}_{model_name}:{url_name}" if args is None: args = [quote(snippet.pk)] return reverse(view_name, args=args)
12
96
get_url
36
0
1
16
erpnext/accounts/report/cash_flow/custom_cash_flow.py
64,648
refactor: convert raw sql to frappe.qb
erpnext
20
Python
31
custom_cash_flow.py
def get_accounts_in_mappers(mapping_names):
    cfm = frappe.qb.DocType('Cash Flow Mapping')
    cfma = frappe.qb.DocType('Cash Flow Mapping Accounts')
    result = (
        frappe.qb
        .select(
            cfma.name,
            cfm.label,
            cfm.is_working_capital,
            cfm.is_income_tax_liability,
            cfm.is_income_tax_expense,
            cfm.is_finance_cost,
            cfm.is_finance_cost_adjustment,
            cfma.account
        )
        .from_(cfm)
        .join(cfma)
        .on(cfm.name == cfma.parent)
        .where(cfma.parent.isin(mapping_names))
    ).run()

    return result
00bfee97c766e771a1ab0b57d223ba9e87b70e9a
106
https://github.com/frappe/erpnext.git
20
def get_accounts_in_mappers(mapping_names): cfm = frappe.qb.DocType('Cash Flow Mapping') cfma = frappe.qb.DocType('Cash Flow Mapping Accounts') result = ( frappe.qb .select( cfma.name, cfm.label, cfm.is_working_cap
24
164
get_accounts_in_mappers
16
0
2
8
tests/sentry/search/events/test_builder.py
96,791
ref(mep): Some cleanup to the metric query builder (#32139) - This adds metric_id to the search conditions based on the aggregates added so that there's a top level filter instead of just the aggregate -if combinator filters. This should help with query performance - This also removes the combinator&merge from query construction since snuba can handle this for us, which makes the functions a bit cleaner
sentry
14
Python
16
test_builder.py
def _metric_conditions(metrics) -> List[Condition]:
    return [
        Condition(
            Column("metric_id"),
            Op.IN,
            sorted(indexer.resolve(constants.METRICS_MAP[metric]) for metric in metrics),
        )
    ]
5e1cb0e215c061e13ec1262a814450a33d49a398
44
https://github.com/getsentry/sentry.git
68
def _metric_conditions(metrics) -> List[Condition]: return [ Condition( Column("metr
13
67
_metric_conditions
49
1
1
22
tests/test_main.py
14,462
Switching to `pydantic_core` (#4516) * working on core schema generation * adapting main.py * getting tests to run * fix tests * disable pyright, fix mypy * moving to class-based model generation * working on validators * change how models are created * start fixing test_main.py * fixing mypy * SelfType * recursive models working, more tests fixed * fix tests on <3.10 * get docs build to pass * starting to cleanup types.py * starting works on custom types * working on using annotated-types * using annoated types for constraints * lots of cleanup, fixing network tests * network tests passing :tada: * working on types * working on types and cleanup * fixing UUID type, restructing again * more types and newer pydantic-core * working on Iterable * more test_types tests * support newer pydantic-core, fixing more test_types.py * working through more test_types.py * test_types.py at last passing locally :tada: * fixing more tests in test_types.py * fix datetime_parse tests and linting * get tests running again, rename to test_datetime.py * renaming internal modules * working through mypy errors * fixing mypy * refactoring _generate_schema.py * test_main.py passing * uprev deps * fix conftest and linting? * importing Annotated * ltining * import Annotated from typing_extensions * fixing 3.7 compatibility * fixing tests on 3.9 * fix linting * fixing SecretField and 3.9 tests * customising get_type_hints * ignore warnings on 3.11 * spliting repr out of utils * removing unused bits of _repr, fix tests for 3.7 * more cleanup, removing many type aliases * clean up repr * support namedtuples and typeddicts * test is_union * removing errors, uprev pydantic-core * fix tests on 3.8 * fixing private attributes and model_post_init * renaming and cleanup * remove unnecessary PydanticMetadata inheritance * fixing forward refs and mypy tests * fix signatures, change how xfail works * revert mypy tests to 3.7 syntax * correct model title * try to fix tests * fixing ClassVar forward refs * uprev pydantic-core, new error format * add "force" argument to model_rebuild * Apply suggestions from code review Suggestions from @tiangolo and @hramezani :pray: Co-authored-by: Hasan Ramezani <hasan.r67@gmail.com> Co-authored-by: Sebastián Ramírez <tiangolo@gmail.com> * more suggestions from @tiangolo * extra -> json_schema_extra on Field Co-authored-by: Hasan Ramezani <hasan.r67@gmail.com> Co-authored-by: Sebastián Ramírez <tiangolo@gmail.com>
pydantic
11
Python
37
test_main.py
def test_nullable_strings_fails(NoneCheckModel):
    with pytest.raises(ValidationError) as exc_info:
        NoneCheckModel(
            required_str_value=None,
            required_str_none_value=None,
            required_bytes_value=None,
            required_bytes_none_value=None,
        )
    assert exc_info.value.errors() == [
        {
            'type': 'string_type',
            'loc': ('required_str_value',),
            'msg': 'Input should be a valid string',
            'input': None,
        },
        {
            'type': 'bytes_type',
            'loc': ('required_bytes_value',),
            'msg': 'Input should be a valid bytes',
            'input': None,
        },
    ]


@pytest.fixture(name='ParentModel', scope='session')
594effa279668bd955e98f1cd5c036b37d3bbd40
@pytest.fixture(name='ParentModel', scope='session')
89
https://github.com/pydantic/pydantic.git
230
def test_nullable_strings_fails(NoneCheckModel):
    with pytest.raises(ValidationError) as exc_info:
        NoneCheckModel(
            required_str_value=None,
            required_str_none_value=None,
            required_bytes_value=None,
            required_bytes_none_value=None,
        )
    assert exc_info.value.errors() == [
        {
            'type': 'string_type',
            'loc': ('required_str_value',),
            'msg': 'Input should be a valid string',
            'input': None,
        },
        {
            'type': 'bytes_type',
            'loc': ('required_bytes_value',),
            'msg': 'Input should be a valid bytes',
            'input': None,
        },
    ]


@pytest.fixture(name='Pare
15
180
test_nullable_strings_fails
23
1
2
5
test/lib/ansible_test/_internal/host_configs.py
266,778
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
ansible
9
Python
21
host_configs.py
def apply_defaults(self, context, defaults):  # type: (HostContext, CompletionConfig) -> None
    assert isinstance(defaults, PosixCompletionConfig)
    super().apply_defaults(context, defaults)
    self.python = self.python or NativePythonConfig()
    self.python.apply_defaults(context, defaults)


@dataclasses.dataclass
a06fa496d3f837cca3c437ab6e9858525633d147
@dataclasses.dataclass
48
https://github.com/ansible/ansible.git
58
def apply_defaults(self, context, defaults):  # type: (HostContext, CompletionConfig) -> None
    assert isinstance(defaults, PosixCompletionConfig)
    super().apply_defaults(context, defaults)
    self.python = self.python or NativePythonConfig()
    self.python.apply_defaults(context, defaults)


@dataclasses.data
11
84
apply_defaults
12
0
1
7
tests/test_logging.py
53,139
Implement `flush(block: bool ...)`. Previously, this always blocked. The new implementation is non-blocking, but we need to block in tests so the data is present for assertions
prefect
11
Python
12
test_logging.py
def test_flush_event_is_cleared(self, worker):
    worker._flush_event = MagicMock(return_val=False)
    with temporary_settings(PREFECT_LOGGING_ORION_BATCH_INTERVAL="5"):
        worker.start()
        worker.flush(block=True)
    worker._flush_event.wait.assert_called_with(5)
    worker._flush_event.clear.assert_called()
fa64dff0102537b3d249af16c7ea7821982195dd
57
https://github.com/PrefectHQ/prefect.git
61
def test_flush_event_is_cleared(self, worker):
    worker._flush_event = MagicMock(return_val=False)
    with temporary_settings(PREFECT_LOGGING_ORION_BATCH_INTERVAL="5"):
        worker.start()
        worker.flush(block=True)
    worker._flush_event.wait.assert_called_with(5)
    worker._
15
97
test_flush_event_is_cleared
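An illustrative aside on the record above: the commit message notes that `flush()` is now non-blocking while tests opt back into blocking, so the two call styles look roughly like the sketch below; the `worker` object and its prior setup are assumed.

    # Hypothetical usage; `worker` is assumed to be a started log worker like the
    # one exercised in the test above.
    worker.flush()            # returns immediately; sending continues in the background
    worker.flush(block=True)  # waits until queued records are handed off (used in tests)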
54
0
2
35
awx/main/tests/unit/api/test_views.py
82,010
Add new flake8 rules to do some meaningful corrections
awx
10
Python
49
test_views.py
def test_get_endpoints(self, mocker):
    endpoints = [
        'ping',
        'config',
        # 'settings',
        'me',
        'dashboard',
        'organizations',
        'users',
        'projects',
        'teams',
        'credentials',
        'inventory',
        'inventory_sources',
        'groups',
        'hosts',
        'job_templates',
        'jobs',
        'ad_hoc_commands',
        'system_job_templates',
        'system_jobs',
        'schedules',
        'notification_templates',
        'notifications',
        'labels',
        'unified_job_templates',
        'unified_jobs',
        'activity_stream',
        'workflow_job_templates',
        'workflow_jobs',
    ]
    view = ApiVersionRootView()
    ret = view.get(mocker.MagicMock())
    assert ret.status_code == 200
    for endpoint in endpoints:
        assert endpoint in ret.data
d3eb2c197595c29c4a3f7b38cd609ce953009623
99
https://github.com/ansible/awx.git
414
def test_get_endpoints(self, mocker):
    endpoints = [
        'ping',
        'config',
        # 'settings',
        'me',
        'dashboard',
        'organizations',
        'users',
        'projects',
        'teams',
        'credentials',
        'inventory',
        'inventory_sources',
        'groups',
        'hosts',
        'job_templates',
        'jobs',
        'ad_hoc_commands',
        'system_job_templates',
        'system_jobs',
        'schedules',
        'notification_templates',
12
181
test_get_endpoints
28
0
2
10
modin/pandas/test/test_series.py
153,176
FEAT-#4035: Upgrade pandas support to 1.4 (#4036) Co-authored-by: Igoshev, Yaroslav <yaroslav.igoshev@intel.com> Co-authored-by: Alexey Prutskov <alexey.prutskov@intel.com> Co-authored-by: Rehan Durrani <rehan@ponder.io> Co-authored-by: ienkovich <ilya.enkovich@intel.com> Co-authored-by: Vasily Litvinov <vasilij.n.litvinov@intel.com> Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru> Signed-off-by: Devin Petersohn <devin.petersohn@gmail.com>
modin
14
Python
23
test_series.py
def test_var(data, skipna, ddof):
    modin_series, pandas_series = create_test_series(data)
    try:
        pandas_result = pandas_series.var(skipna=skipna, ddof=ddof)
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.var(skipna=skipna, ddof=ddof)
    else:
        modin_result = modin_series.var(skipna=skipna, ddof=ddof)
        df_equals(modin_result, pandas_result)
39fbc57e809c2422b250f0be58d076a22bd45031
83
https://github.com/modin-project/modin.git
78
def test_var(data, skipna, ddof):
    modin_series, pandas_series = create_test_series(data)
    try:
        pandas_result = pandas_series
16
132
test_var
35
0
1
12
keras/regularizers_test.py
275,837
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
15
Python
32
regularizers_test.py
def test_zero_regularization(self):
    # Verifies that training with zero regularization works.
    x, y = np.ones((10, 10)), np.ones((10, 3))
    model = test_utils.get_model_from_layers(
        [
            keras.layers.Dense(
                3, kernel_regularizer=keras.regularizers.l2(0)
            )
        ],
        input_shape=(10,),
    )
    model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
    model.fit(x, y, batch_size=5, epochs=1)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
98
https://github.com/keras-team/keras.git
158
def test_zero_regularization(self):
    # Verifies that training with zero regularization works.
    x, y = np.ones((10, 10)), np.ones((10, 3))
    model = t
22
150
test_zero_regularization
33
0
1
9
tests/trainer/test_trainer.py
241,591
Raise a warning if evaluation is triggered with best ckpt in case of multiple checkpoint callbacks (#11274) Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com>
lightning
11
Python
26
test_trainer.py
def test_best_ckpt_evaluate_raises_warning_with_multiple_ckpt_callbacks():
    ckpt_callback1 = ModelCheckpoint()
    ckpt_callback1.best_model_path = "foo_best_model.ckpt"
    ckpt_callback2 = ModelCheckpoint()
    ckpt_callback2.best_model_path = "bar_best_model.ckpt"
    trainer = Trainer(callbacks=[ckpt_callback1, ckpt_callback2])
    trainer.state.fn = TrainerFn.TESTING
    with pytest.warns(UserWarning, match="best checkpoint path from first checkpoint callback"):
        trainer._Trainer__set_ckpt_path(ckpt_path="best", model_provided=False, model_connected=True)
7eab379da2fdca542849ed4ad313d0851c2271e3
74
https://github.com/Lightning-AI/lightning.git
64
def test_best_ckpt_evaluate_raises_warning_with_multiple_ckpt_callbacks():
    ckpt_callback1 = ModelCheckpoint()
    ckpt_callback1.best_model_path = "foo_best_model.ckpt"
    ckpt_callback2 = ModelCheckpoint()
    ckpt_callback2.best_model_path = "bar_best_model.ckpt"
    train
20
129
test_best_ckpt_evaluate_raises_warning_with_multiple_ckpt_callbacks
14
0
1
7
test/mitmproxy/net/test_server_spec.py
251,820
make it black!
mitmproxy
10
Python
14
test_server_spec.py
def test_parse_with_mode():
    assert server_spec.parse_with_mode("m:example.com") == (
        "m",
        ("https", ("example.com", 443)),
    )
    with pytest.raises(ValueError):
        server_spec.parse_with_mode("moo")
b3587b52b25077f68116b9852b041d33e7fc6601
40
https://github.com/mitmproxy/mitmproxy.git
43
def test_parse_with_mode():
    assert server_spec.parse_with_mode("m:example.com") == (
        "m",
        ("https", ("example.com", 443)),
    )
    with pyte
6
73
test_parse_with_mode
17
1
1
6
tests/components/matter/test_config_flow.py
291,888
Add matter integration BETA (#83064) * Add matter base (#79372) Co-authored-by: Marcel van der Veldt <m.vanderveldt@outlook.com> * Add matter server add-on flow (#82698) * Add matter server add-on flow * Fix stale error argument * Clean docstrings * Use localhost as default address * Add matter websocket api foundation (#82848) * Add matter config entry add-on management (#82865) * Use matter refactored server/client library (#83003) Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Bump python-matter-server to 1.0.6 (#83059) * Extend matter websocket api (#82948) * Extend matter websocket api * Finish docstring * Fix pin type * Adjust api after new client * Adjust api to frontend for now Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
core
11
Python
17
test_config_flow.py
def setup_entry_fixture() -> Generator[AsyncMock, None, None]:
    with patch(
        "homeassistant.components.matter.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        yield mock_setup_entry


@pytest.fixture(name="client_connect", autouse=True)
e2308fd15cec4dfdd25d843b72cd3071657fd5b8
@pytest.fixture(name="client_connect", autouse=True)
28
https://github.com/home-assistant/core.git
39
def setup_entry_fixture() -> Generator[AsyncMock, None, None]:
    with patch(
        "homeassistant.components.matter.async_setu
10
72
setup_entry_fixture
9
0
1
6
lib/matplotlib/backends/backend_wx.py
110,659
Separately track modifier keys for mouse events. Whether the event modifiers are directly available on enter/leave events depends on the backend, but all are handled here (except possibly for macos, which I haven't checked).
matplotlib
12
Python
9
backend_wx.py
def _on_motion(self, event):
    event.Skip()
    MouseEvent("motion_notify_event", self,
               *self._mpl_coords(event),
               modifiers=self._mpl_modifiers(event),
               guiEvent=event)._process()
b4e9e3131cdd7f1ad33ea06e21e7d3e51762af91
44
https://github.com/matplotlib/matplotlib.git
84
def _on_motion(self, event):
    event.Skip()
    MouseEvent("motion_notify_event", self,
               *self._mpl_coords(event),
               modifiers=self._mpl_modifiers(event),
               gui
10
72
_on_motion