Dataset columns (name: dtype, observed range or string length):
  n_words: int64, 3 to 1.95k
  n_ast_errors: int64, 0 to 2
  complexity: int64, 1 to 151
  nloc: int64, 2 to 546
  path: string, lengths 8 to 125
  id: int64, 280 to 339k
  commit_message: string, lengths 3 to 18.1k
  repo: string, lengths 3 to 28
  ast_levels: int64, 4 to 28
  language: string, 1 distinct class (Python)
  vocab_size: int64, 3 to 677
  file_name: string, lengths 5 to 67
  code: string, lengths 101 to 24k
  commit_id: string, length fixed at 40
  ast_errors: string, lengths 0 to 2.76k
  token_counts: int64, 7 to 3.77k
  url: string, lengths 31 to 61
  n_whitespaces: int64, 4 to 13.9k
  random_cut: string, lengths 21 to 13.9k
  n_identifiers: int64, 1 to 157
  n_ast_nodes: int64, 10 to 3.6k
  fun_name: string, lengths 3 to 72
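The rows below repeat these 22 fields in the same order, one value per line; in this slice the ast_errors value is printed only when n_ast_errors is nonzero. As a reading aid only (not part of the dataset), here is a minimal sketch of the first row below reassembled into a Python mapping in schema order, with the long string fields shortened to "...":

# Reading aid: the first row below, reassembled in schema order.
# Long string values are abbreviated; this is not a dataset record itself.
row = {
    "n_words": 57,
    "n_ast_errors": 0,
    "complexity": 2,
    "nloc": 19,
    "path": "tests/sentry/rules/history/test_preview.py",
    "id": 89_412,
    "commit_message": "feat(alert-preview): last triggered (#42098) ...",
    "repo": "sentry",
    "ast_levels": 16,
    "language": "Python",
    "vocab_size": 39,
    "file_name": "test_preview.py",
    "code": "def test_frequency_condition_alone(self): ...",
    "commit_id": "583a7ec15744b2ca8a9c56df484516111dbf783d",
    "ast_errors": "",  # line omitted in the dump because n_ast_errors == 0
    "token_counts": 129,
    "url": "https://github.com/getsentry/sentry.git",
    "n_whitespaces": 230,
    "random_cut": "def test_frequency_condition_alone(self): ...",
    "n_identifiers": 20,
    "n_ast_nodes": 207,
    "fun_name": "test_frequency_condition_alone",
}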
57
0
2
19
tests/sentry/rules/history/test_preview.py
89,412
feat(alert-preview): last triggered (#42098) Attaches `last_triggered` to group info. `preview` now returns a mapping of group_ids to triggers, updated tests to reflect that.
sentry
16
Python
39
test_preview.py
def test_frequency_condition_alone(self): prev_hour = timezone.now() - timedelta(hours=1) group = None for i in range(5): group = self.store_event( project_id=self.project.id, data={"timestamp": iso_format(prev_hour)} ).group conditions = [ { "id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition", "value": 4, "interval": "5m", } ] result = preview(self.project, conditions, [], *MATCH_ARGS) assert group.id in result conditions[0]["value"] = 5 result = preview(self.project, conditions, [], *MATCH_ARGS) assert group.id not in result
583a7ec15744b2ca8a9c56df484516111dbf783d
129
https://github.com/getsentry/sentry.git
230
def test_frequency_condition_alone(self): prev_hour = timezone.now() - timedelta(hours=1) group = None for i in range(5): group = self.store_event( project_id=self.project.id, data={"timestamp": iso_format(prev_hour)} ).group conditions = [ {
20
207
test_frequency_condition_alone
15
1
1
4
modin/pandas/test/test_series.py
153,032
TEST-#3655: Check that Modin is defaulting to Pandas. (#3656) Co-authored-by: Dmitry Chigarev <62142979+dchigarev@users.noreply.github.com> Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com> Signed-off-by: mvashishtha <mahesh@ponder.io>
modin
9
Python
15
test_series.py
def test_expanding(data): modin_series, _ = create_test_series(data) # noqa: F841 with warns_that_defaulting_to_pandas(): modin_series.expanding() @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
be2716f393fddd2f669f26616f80e051fc7ceee6
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
23
https://github.com/modin-project/modin.git
27
def test_expanding(data): modin_series, _ = create_
13
67
test_expanding
18
0
1
4
pandas/tests/frame/test_query_eval.py
164,047
TST: Remove unused fixtures (#45692) * TST: Remove unused fixtures * Undo a removed fixture * Add back other fixtures * Undo a file * Try undoing this? * Revert "Try undoing this?" This reverts commit 0e56cb04f5e8cb1f7b2ac4c5e6191485bb2fe1ab.
pandas
11
Python
16
test_query_eval.py
def setup_method(self): self.df = DataFrame({"A": [1, 2, 3]}) self.expected1 = self.df[self.df.A > 0] self.expected2 = self.df.A + 1
f46df091df3afea25a273f491d1f6b2c7d20b32c
50
https://github.com/pandas-dev/pandas.git
38
def setup_method(self): self.df = DataFrame({"A": [1, 2, 3]}) self.expected1 = self.df[self.df.A > 0] self.expected2 = self.df.A + 1
7
78
setup_method
103
0
5
33
t/integration/test_canvas.py
208,362
Fixed bug when chaining a chord with a group (#7919) * Reproduced Bug from Issue #5958 * Fixed Issue #5958 * Added unit test: test_chord__or__group_of_single_task() * Added unit test: test_chord_upgrade_on_chaining() * Added unit test: test_chain_of_chord__or__group_of_single_task() * Added unit test: test_chain_of_chord_upgrade_on_chaining()
celery
16
Python
80
test_canvas.py
def test_chaining_upgraded_chords_mixed_canvas(self, manager, subtests): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') redis_connection = get_redis_connection() redis_key = 'echo_chamber' c = chain( chord(group([redis_echo.si('1', redis_key=redis_key), redis_echo.si('2', redis_key=redis_key), redis_echo.si('3', redis_key=redis_key)]), group([redis_echo.si('4', redis_key=redis_key), redis_echo.si('5', redis_key=redis_key), redis_echo.si('6', redis_key=redis_key)])), redis_echo.si('7', redis_key=redis_key), group( redis_echo.si('8', redis_key=redis_key), ), redis_echo.si('9', redis_key=redis_key), redis_echo.si('Done', redis_key='Done'), ) with subtests.test(msg='Run the chain and wait for completion'): redis_connection.delete(redis_key, 'Done') c.delay().get(timeout=TIMEOUT) await_redis_list_message_length(1, redis_key='Done', timeout=10) with subtests.test(msg='All tasks are executed once'): actual = [sig.decode('utf-8') for sig in redis_connection.lrange(redis_key, 0, -1)] expected = [str(i) for i in range(1, 10)] with subtests.test(msg='All tasks are executed once'): assert sorted(actual) == sorted(expected) # Cleanup redis_connection.delete(redis_key, 'Done')
87613c780ccd92c8b2694becfb50511a6052e8f1
321
https://github.com/celery/celery.git
495
def test_chaining_upgraded_chords_mixed_canvas(self, manager, subtests): try: manager.app.backend.ensure_chords_allowed() except NotImplementedError as e: raise pytest.skip(e.args[0]) if not manager.app.conf.result_backend.startswith('redis'): raise pytest.skip('Requires redis result backend.') redis_connection = get_redis_connection() redis_key = 'echo_chamber' c = chain( chord(group([redis_echo.si('1', redis_key=redis_key), redis_echo.si('2', redis_key=redis_key), redis_echo.si('3', redis_key=redis_key)]), group([redis_echo.si('4', redis_key=redis_key), redis_echo.si('5', redis_key=redis_key), redis_echo.si('6', redis_key=redis_key)])), redis_echo.si('7', redis_key=redis_key), group( redis_echo.si('8', redis_key=redis_key), ), redis_echo.si('9', redis_key=redis_key), redis_echo.si('Done', redis_key='Done'), ) with subtests.test(msg='Run the chain and wait for completion'): redis_connection.delete(redis_key, 'Done') c.delay().get(timeout=TIMEOUT) await_redis_list_message_length(1, redis_key='Done', timeout=10) with subtests.test(msg='All tasks are executed once'): actual = [sig.decode('utf-8') for sig
41
532
test_chaining_upgraded_chords_mixed_canvas
54
0
2
9
jax/experimental/jax2tf/impl_no_xla.py
121,216
[jax2tf] Fix conv1d padding; it's already normalized before the _pad_spatial_dims call. Enable non-XLA tests of conv1d. PiperOrigin-RevId: 461556553
jax
11
Python
40
impl_no_xla.py
def _pad_spatial_dims(x, x_shape, padding): # Add empty padding for batch and feature dimensions. no_pad = ((0, 0),) padding = tuple(padding) padding = no_pad + padding + no_pad x = tf.pad(x, padding) assert len(x.shape) == len(padding) x_shape = tuple(p0 + xs + p1 for xs, (p0, p1) in zip(x_shape, padding)) jax2tf._assert_matching_abstract_shape(x, x_shape) return x, x_shape
ae4aee762a6ab18b17d61b68d8ee32d2c4e3b957
92
https://github.com/google/jax.git
64
def _pad_spatial_dims(x, x_shape, padding): # Add empty padding for batch and feature dimensions. no_pad = ((0, 0),) padding = tuple(padding) padding = no_pad + padding + no_pad x = tf.pad(x, padding) assert len(x.shape) == len(p
16
141
_pad_spatial_dims
16
0
3
5
.venv/lib/python3.8/site-packages/pip/_vendor/urllib3/_collections.py
63,913
upd; format
transferlearning
12
Python
13
_collections.py
def iteritems(self): for key in self: vals = self._container[key.lower()] for val in vals[1:]: yield vals[0], val
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
39
https://github.com/jindongwang/transferlearning.git
67
def iteritems(self): fo
7
63
iteritems
11
0
2
5
tests/components/sleepiq/conftest.py
292,026
Refactor sleepiq as async with config flow (#64850) Co-authored-by: J. Nick Koston <nick@koston.org>
core
13
Python
11
conftest.py
def mock_sleepers(): return [ Sleeper(sleeper) for sleeper in json.loads(load_fixture("sleeper.json", "sleepiq"))["sleepers"] ]
0bd0b4766e8221584a74bffc7c2f0430c23169df
29
https://github.com/home-assistant/core.git
34
def mock_sleepers(): return [ Sleeper(sleeper) for sleeper in json.loads(load_fixture("sleeper.json", "sleepiq"))["sleepers
6
53
mock_sleepers
82
0
6
21
homeassistant/components/plugwise/coordinator.py
297,951
String formatting and max line length - Part 4 (#84445) Co-authored-by: jjlawren <jjlawren@users.noreply.github.com>
core
12
Python
59
coordinator.py
async def _async_update_data(self) -> PlugwiseData: try: if not self._connected: await self._connect() data = await self.api.async_update() except InvalidAuthentication as err: raise ConfigEntryError("Invalid username or Smile ID") from err except (InvalidXMLError, ResponseError) as err: raise UpdateFailed( "Invalid XML data, or error indication received for the Plugwise" " Adam/Smile/Stretch" ) from err except UnsupportedDeviceError as err: raise ConfigEntryError("Device with unsupported firmware") from err except ConnectionFailedError as err: raise UpdateFailed("Failed to connect to the Plugwise Smile") from err return PlugwiseData( gateway=cast(GatewayData, data[0]), devices=cast(dict[str, DeviceData], data[1]), )
94755a5773f8197153ab9bffe83b9711f3a76d9d
118
https://github.com/home-assistant/core.git
282
async def _async_update_data(self) -> PlugwiseData: try: if not self._connected: await self._connect() data = await self.api.async_update() except InvalidAuthentication as err: raise ConfigEntryError("Invalid username or Smile ID") from err except (InvalidXMLError, ResponseError) as err: raise UpdateFailed( "Invalid XML data, or error indication received for the Plugwise" " Adam/Smile/Stretch" ) from err
23
197
_async_update_data
97
0
7
13
jax/_src/lax/lax.py
121,136
[dynamic-shapes] revive basic bounded int machinery, add tests
jax
14
Python
77
lax.py
def _iota_abstract_eval(*, dtype, shape, dimension): _check_shapelike("iota", "shape", shape) if not any(dtypes.issubdtype(dtype, t) for t in _num): msg = 'iota does not accept dtype {}. Accepted dtypes are subtypes of {}.' typename = str(np.dtype(dtype).name) accepted_typenames = (t.__name__ for t in _num) raise TypeError(msg.format(typename, ', '.join(accepted_typenames))) if not 0 <= dimension < len(shape): raise ValueError("iota dimension must be between 0 and len(shape), got " f"dimension={dimension} for shape {shape}") if not any(isinstance(d, core.BInt) for d in shape): return ShapedArray(shape, dtype) # TODO(mattjj): unify DShapedArray with ShapedArray, and remove this code return core.DShapedArray(shape, dtype, False) iota_p = Primitive('iota') iota_p.def_impl(partial(xla.apply_primitive, iota_p)) iota_p.def_abstract_eval(_iota_abstract_eval)
98e71fe31de8f6ea26be76488d41fb471fef56eb
135
https://github.com/google/jax.git
137
def _iota_abstract_eval(*, dtype, shape, dimension): _check_shapelike("iota", "shape", shape) if not any(dtypes.issubdtype(dtype, t) for t in _num): msg = 'iota does not accept dtyp
35
264
_iota_abstract_eval
14
0
2
4
thumbor/filters/redeye.py
190,898
Feature/optional opencv (#1400) * Removed opencv dependency Now OpenCV is optional and detectors are smart to skip if cv2 could not be imported. Also refactored face detector a bit to make it more maintainable. Now thumbor can be installed with pip install thumbor pip install thumbor[all] pip install thumbor[opencv] pip install thumbor[tests]
thumbor
11
Python
14
redeye.py
def cascade(self) -> None: if not hasattr(self, "_cascade"): setattr(self, "_cascade", cv2.CascadeClassifier(CASCADE_FILE_PATH)) return getattr(self, "_cascade")
d34fd16034e307b545c3e3adfa4d9d472a582cc6
36
https://github.com/thumbor/thumbor.git
38
def cascade(self) -> None: if not hasattr(self, "_cascade"): setattr(self, "_cascade", cv2.CascadeClassifier(CASCADE_FILE_PATH)) return getattr(self, "_cas
8
61
cascade
25
0
3
8
django/db/models/query.py
205,744
Refs #33476 -- Reformatted code with Black.
django
13
Python
21
query.py
def __deepcopy__(self, memo): obj = self.__class__() for k, v in self.__dict__.items(): if k == "_result_cache": obj.__dict__[k] = None else: obj.__dict__[k] = copy.deepcopy(v, memo) return obj
9c19aff7c7561e3a82978a272ecdaad40dda5c00
60
https://github.com/django/django.git
105
def __deepcopy__(self, memo): obj = self.__class__() for k, v in self.
11
98
__deepcopy__
14
0
1
10
python/ray/data/_internal/lazy_block_list.py
125,417
Make execution plan/blocklist aware of the memory ownership and who runs the plan (#26650) Having the indicator about who's running the stage and who created a blocklist will enable the eager memory releasing. This is an alternative with better abstraction to https://github.com/ray-project/ray/pull/26196. Note: this doesn't work for Dataset.split() yet, will do in a followup PR.
ray
11
Python
14
lazy_block_list.py
def copy(self) -> "LazyBlockList": return LazyBlockList( self._tasks.copy(), block_partition_refs=self._block_partition_refs.copy(), block_partition_meta_refs=self._block_partition_meta_refs.copy(), cached_metadata=self._cached_metadata, ray_remote_args=self._remote_args.copy(), owned_by_consumer=self._owned_by_consumer, stats_uuid=self._stats_uuid, )
8553df49bba654a9edd6befce198be90d6524fca
67
https://github.com/ray-project/ray.git
104
def copy(self) -> "LazyBlockList": return LazyBlockList( self._tasks.copy(), block_partition_refs=self._block_partition_refs.copy(), block_partition_meta_refs=self._block_partition_meta_refs.copy(),
16
102
copy
43
0
4
19
mindsdb/integrations/handlers/couchbase_handler/couchbase_handler.py
115,589
Initial commit for the couchbase handler
mindsdb
12
Python
31
couchbase_handler.py
def get_tables(self) -> Response: cluster = self.connect() bucket = cluster.bucket(self.bucket_name) collections = [] for _scope in bucket.collections().get_all_scopes(): for __collections in _scope.collections: collections.append(__collections.name) collections_ar = [ [i] for i in collections ] df = pd.DataFrame(collections_ar, columns=['TABLE_NAME']) response = Response( RESPONSE_TYPE.TABLE, df ) return response
41f58415fbd45c9ce0fb47962949e40e488424c6
94
https://github.com/mindsdb/mindsdb.git
212
def get_tables(self) -> Response: cluster = self.connect() bucket = cluster.bucket(self.bucket_name) collections = [] for _scope in bucket.collections().get_all_scopes(): for __collections in _scope.collections: collections.append(__collections.name) collections_ar = [ [i] for i in collections ] df = pd.DataFrame(collections_ar, col
22
152
get_tables
63
0
2
20
datasets/quickdraw/quickdraw.py
105,091
Add QuickDraw dataset (#3592) * Add QuickDraw dataset * Style * Add infos file, dummy data, improve script * Add info and dummy data * Test readme * Finish readme * Delete generate_dummy.py * Remove whitespace
datasets
13
Python
49
quickdraw.py
def process_struct(fileobj): (key_id,) = struct.unpack("Q", fileobj.read(8)) (country_code,) = struct.unpack("2s", fileobj.read(2)) (recognized,) = struct.unpack("b", fileobj.read(1)) (timestamp,) = struct.unpack("I", fileobj.read(4)) (n_strokes,) = struct.unpack("H", fileobj.read(2)) drawing = [] for _ in range(n_strokes): (n_points,) = struct.unpack("H", fileobj.read(2)) fmt = str(n_points) + "B" x = struct.unpack(fmt, fileobj.read(n_points)) y = struct.unpack(fmt, fileobj.read(n_points)) drawing.append({"x": list(x), "y": list(y)}) return { "key_id": str(key_id), "recognized": recognized, "timestamp": datetime.fromtimestamp(timestamp), "countrycode": country_code.decode("utf-8"), "drawing": drawing, }
1c1eaf96d5ef4623e36c9124d49e88ab476dd655
220
https://github.com/huggingface/datasets.git
163
def process_struct(fileobj): (key_id,) = struct.unpack("Q", fileobj.read(8)) (country_code,) = struct.unpack("2s", fileobj.read(2)) (recognized,) = struct.unpack("b", fileobj.read(1)) (timestamp,) = struct.unpack("I", fileobj.read(4)) (n_strokes,) = struct.unpack("H", fileobj.read(2)) drawing = [] for _ in range(n_str
23
365
process_struct
17
0
3
6
py/visdom/__init__.py
106,840
apply black py to all python files
visdom
10
Python
15
__init__.py
def check_connection(self, timeout_seconds=0): while not self._has_connection() and timeout_seconds > 0: time.sleep(0.1) timeout_seconds -= 0.1 print("waiting") return self._has_connection()
5b8b7f267cfaf76a2a39a727ef31a62b3909a093
45
https://github.com/fossasia/visdom.git
71
def check_connection(self, timeout_seconds=0):
7
72
check_connection
10
2
1
28
python/ray/tests/test_namespace.py
131,615
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
8
Python
9
test_namespace.py
def test_namespace_client(): cluster = Cluster() cluster.add_node(num_cpus=4, ray_client_server_port=8080) cluster.wait_for_nodes(1) template =
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
template = """ import ray ray.util.connect("{address}", namespace="{namespace}")@ray.remote
104
https://github.com/ray-project/ray.git
22
def test_namespace_client(): cluster = Cluster() cluster.add_node(num_cpus=4, ray_client_server_port=8080) cluster.wait_for_nodes(1) template =
10
57
test_namespace_client
9
0
1
6
tests/backends/base/test_operations.py
201,689
Refs #33476 -- Reformatted code with Black.
django
10
Python
9
test_operations.py
def test_adapt_unknown_value_decimal(self): value = decimal.Decimal("3.14") self.assertEqual( self.ops.adapt_unknown_value(value), self.ops.adapt_decimalfield_value(value), )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
36
https://github.com/django/django.git
51
def test_adapt_unknown_value_decimal(self): value = decimal.Decimal("3.14") self.assertEqual(
9
59
test_adapt_unknown_value_decimal
35
1
1
6
tests/pylint/test_enforce_type_hints.py
311,059
Adjust pylint plugin to enforce device_tracker type hints (#64903) * Adjust pylint plugin to enforce device_tracker type hints * Use a constant for the type hint matchers * Add tests * Add x_of_y match * Adjust bluetooth_tracker * Adjust mysensors * Adjust tile Co-authored-by: epenet <epenet@users.noreply.github.com>
core
11
Python
28
test_enforce_type_hints.py
def test_regex_x_of_y_comma_z(string, expected_x, expected_y, expected_z): assert (match := _TYPE_HINT_MATCHERS["x_of_y_comma_z"].match(string)) assert match.group(0) == string assert match.group(1) == expected_x assert match.group(2) == expected_y assert match.group(3) == expected_z @pytest.mark.parametrize( ("string", "expected_a", "expected_b"), [("DiscoveryInfoType | None", "DiscoveryInfoType", "None")], )
367521e369839e6504989603b1282c2ba31dad49
@pytest.mark.parametrize( ("string", "expected_a", "expected_b"), [("DiscoveryInfoType | None", "DiscoveryInfoType", "None")], )
62
https://github.com/home-assistant/core.git
57
def test_regex_x_of_y_comma_z(string, expected_x, expected_y, expected_z):
11
145
test_regex_x_of_y_comma_z
68
0
7
17
keras/engine/base_layer_v1.py
278,599
fix the rest
keras
12
Python
54
base_layer_v1.py
def add_update(self, updates): call_context = base_layer_utils.call_context() if ( tf.distribute.has_strategy() and tf.distribute.in_cross_replica_context() # When saving the model, the distribution strategy context should be # ignored, following the default path for adding updates. and not call_context.saving ): # Updates don't need to be run in a cross-replica context. return updates = generic_utils.to_list(updates) if call_context.in_call: relevant_inputs = call_context.inputs else: inbound_nodes = getattr(self, "_inbound_nodes", []) relevant_inputs = [node.input_tensors for node in inbound_nodes]
5cf72f4934f3104ac2378c8b9b3638afea38ba1e
104
https://github.com/keras-team/keras.git
227
def add_update(self, updates): call_context = base_layer_utils.call_context() if ( tf.distribute.has_strategy() and tf.distribute.in_cross_replica_context()
19
138
add_update
60
0
5
18
ppdet/modeling/transformers/detr_transformer.py
211,590
[dev] fix export model bug in DETR (#7120)
PaddleDetection
14
Python
26
detr_transformer.py
def forward(self, src, src_mask=None, pos_embed=None): residual = src if self.normalize_before: src = self.norm1(src) q = k = self.with_pos_embed(src, pos_embed) src = self.self_attn(q, k, value=src, attn_mask=src_mask) src = residual + self.dropout1(src) if not self.normalize_before: src = self.norm1(src) residual = src if self.normalize_before: src = self.norm2(src) src = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = residual + self.dropout2(src) if not self.normalize_before: src = self.norm2(src) return src
fa67fb9f88ff7b03ca24a4f80e0fde2ef6d80384
160
https://github.com/PaddlePaddle/PaddleDetection.git
187
def forward(self, src, src_mask=None, pos_embed=None): residual = src if self.normalize_before: src = self.norm1(src) q = k = self.with_pos_embed(src, pos_embed) src = self.self_attn(q, k, value=src, attn_mask=src_mask) src = residual + self.dropout1(src) if not self.normalize_before: src = self.norm1(src) residual = src if self.normalize_before: src = self.norm2(src) src = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = residual + self.dropout2(src)
21
234
forward
88
0
7
26
django/db/models/base.py
205,412
Refs #33476 -- Reformatted code with Black.
django
18
Python
53
base.py
def _check_m2m_through_same_relationship(cls): errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = ( f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields, ) if signature in seen_intermediary_signatures: errors.append( checks.Error( "The model has two identical many-to-many relations " "through the intermediate model '%s'." % f.remote_field.through._meta.label, obj=cls, id="models.E003", ) ) else: seen_intermediary_signatures.append(signature) return errors
9c19aff7c7561e3a82978a272ecdaad40dda5c00
136
https://github.com/django/django.git
460
def _check_m2m_through_same_relationship(cls): errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = ( f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields, ) if signature in seen_intermediary_signatures:
21
215
_check_m2m_through_same_relationship
91
0
1
15
sympy/physics/mechanics/tests/test_rigidbody.py
199,472
Add optional frame argument to parallel axis method
sympy
14
Python
60
test_rigidbody.py
def test_parallel_axis(): N = ReferenceFrame('N') m, Ix, Iy, Iz, a, b = symbols('m, I_x, I_y, I_z, a, b') Io = inertia(N, Ix, Iy, Iz) o = Point('o') p = o.locatenew('p', a * N.x + b * N.y) R = RigidBody('R', o, N, m, (Io, o)) Ip = R.parallel_axis(p) Ip_expected = inertia(N, Ix + m * b**2, Iy + m * a**2, Iz + m * (a**2 + b**2), ixy=-m * a * b) assert Ip == Ip_expected A = ReferenceFrame('A') A.orient_axis(N, N.z, 1) assert (R.parallel_axis(p, A).to_matrix(A) - Ip_expected.to_matrix(A)).simplify() == zeros(3, 3)
801e149d69d5f88919a735f8b55b6024f97c6950
191
https://github.com/sympy/sympy.git
162
def test_parallel_axis(): N = Refe
30
293
test_parallel_axis
8
0
1
3
ivy_tests/test_core/test_general.py
213,825
renamed dev_str arg to dev for all methods.
ivy
7
Python
8
test_general.py
def test_set_framework(fw_str, dev, call): ivy.set_framework(fw_str) ivy.unset_framework() # use_framework
d743336b1f3654cd0315f380f43eed4116997c1d
20
https://github.com/unifyai/ivy.git
12
def test_set_framework(fw_str, dev, call): ivy.set_framework(fw_str) ivy.unset_framework() # use_framework
7
33
test_set_framework
22
0
1
3
keras/layers/rnn/gru_test.py
273,929
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
9
Python
21
gru_test.py
def test_recurrent_dropout_with_implementation_restriction(self): layer = keras.layers.GRU(2, recurrent_dropout=0.1, implementation=2) # The implementation is force to 1 due to the limit of recurrent_dropout. self.assertEqual(layer.implementation, 1)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
35
https://github.com/keras-team/keras.git
42
def test_recurrent_dropout_with_implementation_restriction(self): laye
9
51
test_recurrent_dropout_with_implementation_restriction
81
0
2
21
ppocr/losses/rec_vl_loss.py
24,351
add vl
PaddleOCR
13
Python
54
rec_vl_loss.py
def forward(self, predicts, batch): text_pre = predicts[0] target = batch[1].astype('int64') label_flatten, length = self.flatten_label(target) text_pre = self._flatten(text_pre, length) if self.mode == 'LF_1': loss = self.loss_func(text_pre, label_flatten) else: text_rem = predicts[1] text_mas = predicts[2] target_res = batch[2].astype('int64') target_sub = batch[3].astype('int64') label_flatten_res, length_res = self.flatten_label(target_res) label_flatten_sub, length_sub = self.flatten_label(target_sub) text_rem = self._flatten(text_rem, length_res) text_mas = self._flatten(text_mas, length_sub) loss_ori = self.loss_func(text_pre, label_flatten) loss_res = self.loss_func(text_rem, label_flatten_res) loss_mas = self.loss_func(text_mas, label_flatten_sub) loss = loss_ori + loss_res * self.weight_res + loss_mas * self.weight_mas return {'loss': loss}
a3a095150e8e1f56dd03d88ac71db6ad6262611a
190
https://github.com/PaddlePaddle/PaddleOCR.git
272
def forward(self, predicts, batch): text_pre = predicts[0] target = batch[1].astype('int64') label_flatten, length = self.flatten_label(target) text_pre = self._flatten(text_pre, length) if self.mode == 'LF_1': loss = self.loss_func(text_pre, label_flatten) else: text_rem = predicts[1] text_mas = predicts[2]
27
301
forward
26
0
1
6
seaborn/tests/_core/test_moves.py
41,173
Add move concept, with Dodge and Jitter, and ordered GroupBy
seaborn
10
Python
21
test_moves.py
def test_height(self, df, groupby): df["height"] = df["width"] height = .4 res = Jitter(height=height)(df, groupby, "y") self.check_same(res, df, "y", "grp2", "width") self.check_pos(res, df, "x", height * df["height"])
430cb8fe332a752b79fb74bd618038ac51e82df8
68
https://github.com/mwaskom/seaborn.git
60
def test_height(self, df, groupby): df["height"] = df["width"] height = .4 res = Jitter(height=height)(df, groupby, "y") self.check_same(res, df, "y", "grp2", "width") self.check_pos(res, df, "x", height *
9
110
test_height
30
0
4
8
jax/interpreters/batching.py
122,451
add a basic prototype of piles, behind jax_dynamic_shapes Co-authored-by: Adam Paszke <apaszke@google.com> Co-authored-by: Dougal Maclaurin <dougalm@google.com>
jax
14
Python
25
batching.py
def _pile_flatten(pile): lengths = [] new_shape = [lengths.append(d.lengths) or d.replace(lengths=len(lengths)) if type(d) is IndexedAxisSize else d for d in pile.aval.elt_ty.shape] elt_ty = pile.aval.elt_ty.update(shape=tuple(new_shape)) aval = pile.aval.replace(elt_ty=elt_ty) return (lengths, pile.data), aval
f2f2faa4fa166f40a4a93bc966379cf1ebb720d1
91
https://github.com/google/jax.git
62
def _pile_flatten(pile): lengths = [] new_shape = [lengths.append(d.lengths) or d.replace(lengths=len(lengths)) if type(d)
16
141
_pile_flatten
22
0
3
8
awx/main/tasks/receptor.py
82,163
Fix fallout from turning off work signing in docker-compose
awx
12
Python
19
receptor.py
def get_receptor_ctl(config_data=None): if config_data is None: config_data = read_receptor_config() receptor_sockfile = get_receptor_sockfile(config_data) try: return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(config_data, True)) except RuntimeError: return ReceptorControl(receptor_sockfile)
84f2b91105c959c4d89a63063cca441f3d67fc0f
51
https://github.com/ansible/awx.git
54
def get_receptor_ctl(config_data=None): if config_data is None: config_data = read_receptor_config() receptor_sockfile = get_receptor_sockfile(config_data) try: return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(config_data, True)) except RuntimeError: r
11
81
get_receptor_ctl
83
0
1
29
tests/snuba/api/endpoints/test_organization_events_mep.py
93,164
chore(discover): Cleanup events tests (#36797) - Delete the deprecated eventsv2 tests - Move MEP tests to its own file
sentry
13
Python
30
test_organization_events_mep.py
def test_failed_dry_run_does_not_error(self, mock_builder): with self.feature("organizations:performance-dry-run-mep"): mock_builder.side_effect = InvalidSearchQuery("Something bad") query = { "field": ["count()"], "project": [self.project.id], } response = self.do_request(query) assert response.status_code == 200, response.content assert len(mock_builder.mock_calls) == 1 assert mock_builder.call_args.kwargs["dry_run"] mock_builder.side_effect = IncompatibleMetricsQuery("Something bad") query = { "field": ["count()"], "project": [self.project.id], } response = self.do_request(query) assert response.status_code == 200, response.content assert len(mock_builder.mock_calls) == 2 assert mock_builder.call_args.kwargs["dry_run"] mock_builder.side_effect = InvalidConditionError("Something bad") query = { "field": ["count()"], "project": [self.project.id], } response = self.do_request(query) assert response.status_code == 200, response.content assert len(mock_builder.mock_calls) == 3 assert mock_builder.call_args.kwargs["dry_run"]
d3b8c9dd7bef6bccb5e70d2ccf3cda8463444a34
207
https://github.com/getsentry/sentry.git
410
def test_failed_dry_run_does_not_error(self, mock_builder): with self.feature("organizations:performance-dry-run-mep"): mock_builder.side_effect = InvalidSearchQuery("Something bad") query = { "field": ["count()"], "project": [self.project.id], } response = self.do_request(query) assert response.status_code == 200, response.content assert len(mock_builder.mock_calls) == 1 assert mock_builder.call_args.kwargs["dry_run"] mock_builder.side_effect = IncompatibleMetricsQuery("Something bad") query = { "field": ["count()"], "project": [self.project.id], } response = self.do_request(query) assert response.status_code == 200, response.content assert len(mock_builder.mock_calls) == 2 assert mock_builder.call_args.kwargs["dry_run"] mock_builder.side_effect = InvalidConditionError("Something bad") query = { "field": ["count()"], "project": [self.project.id]
19
346
test_failed_dry_run_does_not_error
91
0
14
29
pipenv/utils/resolver.py
19,824
Improve documentation around index restrictions (#5029) * Improve documentation around index restrictions * Update docs/advanced.rst * Refine index documentation updates. Factor out and re-use method before closing down other PR. * Fully remove the --extra-index-url argument Co-authored-by: Yusuke Nishioka <yusuke.nishioka.0713@gmail.com>
pipenv
17
Python
60
resolver.py
def collect_hashes(self, ireq): link = ireq.link # Handle VCS and file links first if link and (link.is_vcs or (link.is_file and link.is_existing_dir())): return set() if not is_pinned_requirement(ireq): return set() sources = self.sources # Enforce index restrictions if ireq.name in self.index_lookup: sources = list( filter(lambda s: s.get("name") == self.index_lookup[ireq.name], sources) ) if any(is_pypi_url(source["url"]) for source in sources): hashes = self._get_hashes_from_pypi(ireq) if hashes: return hashes applicable_candidates = self.ignore_compatibility_finder.find_best_candidate( ireq.name, ireq.specifier ).iter_applicable() applicable_candidates = list(applicable_candidates) if applicable_candidates: return { self._get_hash_from_link(candidate.link) for candidate in applicable_candidates } if link: return {self._get_hash_from_link(link)} if ireq.original_link: return {self._get_hash_from_link(ireq.original_link)} return set()
99cf729dd52100efba406b9c6af585425de0788c
195
https://github.com/pypa/pipenv.git
364
def collect_hashes(self, ireq): link = ireq.link # Handle VCS and file links first if link and (link.is_vcs or (link.is_file and link.is_existing_dir())): return set() if not is_pinned_requirement(ireq): return set() sources = self.sources # Enforc
29
313
collect_hashes
109
0
1
29
tests/integration_tests/test_api.py
5,908
Rename fc_size to output_size (#1641) * Rename fc_size to output_size * Responding to comments
ludwig
13
Python
68
test_api.py
def test_api_callbacks(csv_filename): mock_callback = mock.Mock() epochs = 2 batch_size = 8 num_examples = 32 with tempfile.TemporaryDirectory() as output_dir: input_features = [sequence_feature(reduce_output="sum")] output_features = [category_feature(vocab_size=5, reduce_input="sum")] config = { "input_features": input_features, "output_features": output_features, "combiner": {"type": "concat", "output_size": 14}, "training": {"epochs": epochs, "batch_size": batch_size}, } model = LudwigModel(config, callbacks=[mock_callback]) data_csv = generate_data( input_features, output_features, os.path.join(output_dir, csv_filename), num_examples=num_examples ) val_csv = shutil.copyfile(data_csv, os.path.join(output_dir, "validation.csv")) test_csv = shutil.copyfile(data_csv, os.path.join(output_dir, "test.csv")) model.train(training_set=data_csv, validation_set=val_csv, test_set=test_csv) assert mock_callback.on_epoch_start.call_count == epochs assert mock_callback.on_epoch_end.call_count == epochs assert mock_callback.on_validation_start.call_count == epochs assert mock_callback.on_validation_end.call_count == epochs assert mock_callback.on_test_start.call_count == epochs assert mock_callback.on_test_end.call_count == epochs assert mock_callback.on_batch_start.call_count == epochs * (num_examples / batch_size) assert mock_callback.on_batch_end.call_count == epochs * (num_examples / batch_size)
69604268c2ddc06a4ee0b3dce0e05a8fb73b5d16
255
https://github.com/ludwig-ai/ludwig.git
272
def test_api_callbacks(csv_filename): mock_callback = mock.Mock() epochs = 2 batch_size = 8 num_examples = 32 with tempfile.TemporaryDirectory() as output_dir: input_features = [sequence_feature(reduce_output="sum")] output_features = [category_feature(vocab_size=5, reduce_input="sum")] config = { "input_features": input_features, "output_features": output_features, "combiner": {"type": "concat", "output_size": 14}, "training": {"epochs": epochs, "batch_size": batch_size}, } model = LudwigModel(config, callbacks=[mock_callback]) data_csv = generate_data( input_features, output_features, os.path.join(output_dir, csv_filename), num_examples=num_examples ) val_csv = shutil.copyfile(data_csv, os.path.join(output_dir, "validation.csv")) test_csv = shutil.copyfile(data_csv, os.path.join(output_dir, "test.csv")) model.
44
409
test_api_callbacks
70
0
4
15
TTS/tts/datasets/formatters.py
262,547
Implement bucketed weighted sampling for VITS (#1871)
TTS
15
Python
56
formatters.py
def mls(root_path, meta_files=None, ignored_speakers=None): items = [] with open(os.path.join(root_path, meta_files), "r", encoding="utf-8") as meta: for line in meta: file, text = line.split("\t") text = text[:-1] speaker, book, *_ = file.split("_") wav_file = os.path.join(root_path, os.path.dirname(meta_files), "audio", speaker, book, file + ".wav") # ignore speakers if isinstance(ignored_speakers, list): if speaker in ignored_speakers: continue items.append( {"text": text, "audio_file": wav_file, "speaker_name": "MLS_" + speaker, "root_path": root_path} ) return items # ======================================== VOX CELEB ===========================================
bfc63829ac869f479bf9e8bf0fb75a2fb6d04959
146
https://github.com/coqui-ai/TTS.git
225
def mls(root_path, meta_files=None, ignored_speakers=None): items = [] with open(os.path.join(root_path, meta_files), "r", encoding="utf-8") as meta: for line in meta: file, text = line.split("\t") text = text[:-1] speaker, book, *_ = file.split("_") wav_file = os.path.join(root_path, os.path.dirname(meta_files), "audio", speaker, book, file + ".wav") # ignore speakers if isinstance(ignored_speakers, list): if speaker in ignored_speakers: continue items.append( {"text": text, "audio_
23
246
mls
28
0
1
10
pandas/tests/io/xml/test_xml_dtypes.py
163,734
ENH: Add dtypes/converters arguments for pandas.read_xml (#45411)
pandas
15
Python
27
test_xml_dtypes.py
def test_dtype_float(parser): df_result = read_xml(xml_types, dtype={"degrees": "float"}, parser=parser) df_expected = DataFrame( { "shape": ["square", "circle", "triangle"], "degrees": Series([360, 360, 180]).astype("float"), "sides": [4.0, float("nan"), 3.0], } ) tm.assert_frame_equal(df_result, df_expected)
d2d7ffb56f0f12c412c36c0c867ab3bb240d04ca
83
https://github.com/pandas-dev/pandas.git
86
def test_dtype_float(parser): df_resul
13
133
test_dtype_float
6
0
5
19
python/ray/tests/test_object_manager.py
131,627
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
7
Python
6
test_object_manager.py
def test_actor_broadcast(ray_start_cluster_with_resource): cluster, num_nodes = ray_start_cluster_with_resource
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
147
https://github.com/ray-project/ray.git
8
def test_actor_broadcast(ray_start_cluster_with_resource): cluster, num_nodes = ray_start_cluster_with_resource
4
17
test_actor_broadcast
432
0
34
57
keras/utils/dataset_utils.py
269,240
adds mnist dataset test case
keras
14
Python
146
dataset_utils.py
def _rescale_dataset_split_sizes(left_size,right_size,total_length): left_size_type = type(left_size) right_size_type = type(right_size) # check both left_size and right_size are integers or floats if ((left_size is not None and left_size_type not in [int,float]) and (right_size is not None and right_size_type not in [int,float])): raise TypeError('Invalid `left_size` and `right_size` Types. Expected: ' 'integer or float or None, Received: type(left_size)=' f'{left_size_type} and type(right_size)={right_size_type}') # check left_size is a integer or float if left_size is not None and left_size_type not in [int,float]: raise TypeError('Invalid `left_size` Type.Expected: int or float or None, ' f'Received: type(left_size)={left_size_type}. ') # check right_size is a integer or float if right_size is not None and right_size_type not in [int,float]: raise TypeError(f'Invalid `right_size` Type.Expected: int or float or None,' f'Received: type(right_size)={right_size_type}. ') # check left_size and right_size are non-zero if left_size == 0 and right_size == 0: raise ValueError('Both `left_size` and `right_size` are zero. ' 'Atleast one of the split sizes must be non-zero.') # check left_size is non-negative and less than 1 and less than total_length if (left_size_type == int and (left_size <= 0 or left_size>= total_length) or left_size_type == float and (left_size <= 0 or left_size>= 1) ): raise ValueError('`left_size` should be either a positive integer ' f'and smaller than {total_length} or a float ' 'within the range `[0, 1]`. Received: left_size=' f'{left_size}') # check right_size is non-negative and less than 1 and less than total_length if (right_size_type == int and (right_size <= 0 or right_size>= total_length) or right_size_type == float and (right_size <= 0 or right_size>= 1)): raise ValueError('`right_size` should be either a positive integer ' f'and smaller than {total_length} or a float ' 'within the range `[0, 1]`. Received: right_size=' f'{right_size}') # check sum of left_size and right_size is less than or equal to total_length if right_size_type == left_size_type == float and right_size + left_size > 1: raise ValueError('The sum of `left_size` and `right_size` is greater ' 'than 1. It must be less than or equal to 1.') if left_size_type == float: left_size = round(left_size*total_length) elif left_size_type == int: left_size = float(left_size) if right_size_type == float: right_size = round(right_size*total_length) elif right_size_type == int: right_size = float(right_size) if left_size is None: left_size = total_length - right_size elif right_size is None: right_size = total_length - left_size if left_size + right_size > total_length: raise ValueError('The sum of `left_size` and `right_size` should ' 'be smaller than the {total_length}. ' f'Received: left_size + right_size = {left_size+right_size}' f'and total_length = {total_length}') for split,side in [(left_size,'left'),(right_size,'right')]: if split == 0: raise ValueError(f'With `dataset` of length={total_length}, `left_size`=' '{left_size} and `right_size`={right_size}.' f'Resulting {side} side dataset split will be empty. ' 'Adjust any of the aforementioned parameters') left_size,right_size = int(left_size) ,int(right_size) return left_size,right_size
3337f8716967b9b5c9c575e73c66cef0a17e891f
362
https://github.com/keras-team/keras.git
882
def _rescale_dataset_split_sizes(left_size,right_size,total_length): left_size_type = type(left_size) right_size_type = type(right_size) # check both left_size and right_size are integers or floats if ((left_size is not None and left_size_type not in [int,float]) and (right_size is not None and right_size_type not in [int,float])): raise TypeError('Invalid `left_size` and `right_size` Types. Expected: ' 'integer or float or None, Received: type(left_size)=' f'{left_size_type} and type(right_size)={right_size_type}') # check left_size is a integer or float if left_size is not None and left_size_type not in [int,float]: raise TypeError('Invalid `left_size` Type.Expected: int or float or None,
14
651
_rescale_dataset_split_sizes
8
1
1
2
keras/backend.py
269,616
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
9
Python
8
backend.py
def flatten(x): return tf.reshape(x, [-1]) @keras_export("keras.backend.batch_flatten") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.backend.batch_flatten") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
18
https://github.com/keras-team/keras.git
11
def flatten(x): return tf.reshape(x, [-1]) @keras_export("keras.backend.batch_flatten") @tf.__internal__.dispatch.add_
10
60
flatten
14
0
1
3
saleor/graphql/discount/schema.py
26,959
Stricter signatures for resolvers and mutations (#9649)
saleor
8
Python
14
schema.py
def resolve_voucher(_root, _info, *, id, channel=None): _, id = from_global_id_or_error(id, Voucher) return resolve_voucher(id, channel)
513fc80bc698c177b87774b3aff3da7b9aedbe06
32
https://github.com/saleor/saleor.git
27
def resolve_voucher(_root, _info, *, id, channel=None): _, id = from_global_id_or_error(id, Voucher)
8
46
resolve_voucher
18
0
1
5
wagtail/api/v2/tests/test_pages.py
72,798
Reformat with black
wagtail
11
Python
16
test_pages.py
def test_descendant_of_filter(self): response = self.get_response(descendant_of=6) content = json.loads(response.content.decode("UTF-8")) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [10, 15, 17, 21, 22, 23])
d10f15e55806c6944827d801cd9c2d53f5da4186
58
https://github.com/wagtail/wagtail.git
45
def test_descendant_of_filter(self): response = self.get_response(descendant_of=6) content = json.loads(response.content.decode("UTF-8")) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [10, 15, 17, 21, 22,
12
89
test_descendant_of_filter
150
0
7
15
jax/_src/third_party/numpy/linalg.py
122,411
Call _check_arraylike for jnp.linalg & jnp.fft functions
jax
17
Python
92
linalg.py
def _multi_dot_matrix_chain_order(arrays, return_costs=False): n = len(arrays) # p stores the dimensions of the matrices # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50] p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]] # m is a matrix of costs of the subproblems # m[i,j]: min number of scalar multiplications needed to compute A_{i..j} m = np.zeros((n, n), dtype=np.double) # s is the actual ordering # s[i, j] is the value of k at which we split the product A_i..A_j s = np.empty((n, n), dtype=np.intp) for l in range(1, n): for i in range(n - l): j = i + l m[i, j] = jnp.inf for k in range(i, j): q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1] if q < m[i, j]: m[i, j] = q s[i, j] = k # Note that Cormen uses 1-based index return (s, m) if return_costs else s
2416d154355f19e77b5c1ddf1de1f8552e4a98ad
196
https://github.com/google/jax.git
214
def _multi_dot_matrix_chain_order(arrays, return_costs=False): n = len(arrays) # p stores the dimensions of the matrices
24
290
_multi_dot_matrix_chain_order
24
0
1
7
Lib/test/test_posix.py
175,833
bpo-46426: Improve tests for the dir_fd argument (GH-30668) Ensure that directory file descriptors refer to directories different from the current directory, and that src_dir_fd and dst_dir_fd refer to different directories. Add context manager open_dir_fd() in test.support.os_helper.
cpython
11
Python
22
test_posix.py
def test_chmod_dir_fd(self): with self.prepare_file() as (dir_fd, name, fullname): posix.chmod(fullname, stat.S_IRUSR) posix.chmod(name, stat.S_IRUSR | stat.S_IWUSR, dir_fd=dir_fd) s = posix.stat(fullname) self.assertEqual(s.st_mode & stat.S_IRWXU, stat.S_IRUSR | stat.S_IWUSR)
54610bb448a9cf5be77d53b66169fca4c11be6cb
76
https://github.com/python/cpython.git
102
def test_chmod_dir_fd(self): with self.prepare_file() as (dir_fd, name, fullname): posix.chmod(fullname, stat.S_I
15
118
test_chmod_dir_fd
9
0
1
2
wagtail/admin/views/pages/revisions.py
77,926
Extract generic RevisionsCompareView from page revisions_compare view
wagtail
9
Python
8
revisions.py
def dispatch(self, request, *args, **kwargs): return super().dispatch(request, *args, **kwargs)
2664a4c1fc7df471225d3e71355802401217889a
28
https://github.com/wagtail/wagtail.git
15
def dispatch(self, request, *args, **kwargs): return super().dispatch(request, *args, **kwargs)
6
42
dispatch
39
0
1
10
tests/components/homekit/test_util.py
308,607
Add hardware revision support to homekit (#63336)
core
9
Python
24
test_util.py
async def test_format_version(): assert format_version("soho+3.6.8+soho-release-rt120+10") == "3.6.8" assert format_version("undefined-undefined-1.6.8") == "1.6.8" assert format_version("56.0-76060") == "56.0.76060" assert format_version(3.6) == "3.6" assert format_version("AK001-ZJ100") == "001.100" assert format_version("HF-LPB100-") == "100" assert format_version("AK001-ZJ2149") == "001.2149" assert format_version("0.1") == "0.1" assert format_version("unknown") is None
5c8271552a3023808e272125f71ba79f3a1e97d8
70
https://github.com/home-assistant/core.git
69
async def test_format_version(): assert format_version("soho+3.6.8+soho-release-rt120+10") == "3.6.8" assert format_version("undefined-undefined-1.6.8") == "1.6.8" assert format_version("56.0-76060") == "56.0.76060" assert format_version(3.6) == "3.6" assert format_version("AK001-ZJ100") == "001.100" assert format_version("HF-LPB100-") == "100" assert
2
144
test_format_version
55
0
4
22
tools/alignments/jobs.py
101,659
Bugfix: Alignments tool - don't error on from-faces job
faceswap
13
Python
45
jobs.py
def _set_skip_list(self) -> Optional[List[int]]: skip_num = self._arguments.extract_every_n if skip_num == 1: logger.debug("Not skipping any frames") return None skip_list = [] for idx, item in enumerate(self._frames.file_list_sorted): if idx % skip_num != 0: logger.trace("Adding image '%s' to skip list due to " # type:ignore "extract_every_n = %s", item["frame_fullname"], skip_num) skip_list.append(idx) logger.debug("Adding skip list: %s", skip_list) return skip_list
892d8626ed4e7f834ac5607af59f14f5476d5997
89
https://github.com/deepfakes/faceswap.git
196
def _set_skip_list(self) -> Optional[List[int]]: skip_num = self._arguments.extract_every_n if skip_num == 1: logger.debug("Not skipping any frames") return None skip_list = [] for idx, item in enumerate(s
18
151
_set_skip_list
32
0
3
16
saleor/graphql/order/mutations/draft_order_create.py
29,400
Simple (flat rate) taxes API (#9784) * Add empty tax module * Add tax models (#9839) * Add tax API queries (#9856) * Add MANAGE_TAXES permission * Add tax configuration queries * Create tax configuration when channel is created * Drop sorters for now * Add TaxConfigurationPerCountry type * Update migration * Add metadata to TaxConfiguration type * Add tests for tax configuration queries * Add TaxClass types * Improve tests * Add queries for tax configuration per country * Fix query in tests * Update query cost map * Add tax API mutations (#9934) * Add taxConfigurationUpdate mutation * Update schema * Add tax class CRUD mutations * Add mutations to update/delete tax class rates per country * Review fixes * Add taxClass field to ProductType type (#9999) * Add taxClass field to ProductType type * Add taxClass field to Product type * Add taxClass field to shipping method type * Add displayGrossPrices to ProductPricingInfo (#10008) * Add displayGrossPrices to ProductPricingInfo * Add displayGrossPrices to Checkout * Add displayGrossPrices to Order * Add tests * Add ADDED_IN_35 label to new fields' descriptions * Use new display_gross_prices flag (#10121) * Use new display_gross_prices flag * Update tests * Add tests * Review fixes * Drop Vatlayer (#10335) * Add migration from Vatlayer to simple taxes * Review fixes * Review fixes * Drop usages of global include_taxes_in_prices flag (#10406) * Drop `include_taxes_in_prices` function from site settings * Adjust tests * Review fixes * Drop the `charge_taxes_on_shipping` flag from site settings. (#10466) * Include migrating Avatax tax codes in tax class migration * Drop `charge_taxes_on_shipping` function * Add tax_class to ShippingMethodData * Review fixes * Always calculate shipping tax with Avalara * Add default country rate (#10497) * Allow setting default tax rate for a country (without providing a tax class) * Add validation to allow settings only one default rate at once * Code review fixes * Add taxCalculationStrategy field * Add tests * CR fixes * Adjust resolver to use new tax configuration (#10533) * CR fixes * Add database router to fix false positives on relation mismatch. (#10524) * Add database router to fix false positives on relation mismatch. * The db router should have only 'allow_relation' implemented. * The 'db_for_write' part should stay. * Subscription for sync tax webooks (#10433) * Add proposed changes to schema * Add base implementation for sync tax subscription * Add test for empty order * Add clean up and missing part for tests * Use subscription for tax webhooks. Add more tests * Improve descriptions for tax objects * Adjust resolver to use new tax configuration (#10533) * Add taxCalculationStrategy field (#10532) * Add taxCalculationStrategy field * Add tests * CR fixes * CR fixes * Add datamigration to populate taxCalculationStrategy * Migrate Product.charge_taxes to new tax configuration (#10585) * Migrate Product.charge_taxes field to new tax configuration * Rename function * Fix tests * Change assign_tax_code_to_object_meta function to support tax classes * Update tax class fixtures * Improve dataloader * CR fixes * CR fixes * Add deprecation notice to dataloader * Allow removing country rates in the `taxCountryConfigurationUpdate` mutation. 
(#10647) * Allow deleting rates in taxCountryConfigurationUpdate mutation * Change tax rates ordering to keep default rates first (with null tax classes) * Update existing migration * Remove TaxClass.is_default field (#10660) * Change tax rates ordering to keep default rates first (with null tax classes) * Update existing migration * Drop is_default field from TaxClass model * Drop extra Avalara config (#10673) * Drop extra Avatax config options * Adjust tests * Use flat rates in tax calculations (#10747) * WIP Use new tax configuration in tax calculations * Use new tax calculations for checkout * Adjust tests * Add flat rates calculations for checkout and order * Calculate flat rates in product pricing objects * Adjust tests * Add tests for order calculations * Add tests for product queries tax calculations * Add tests for order calculations * Use base calculations to get default checkout shipping price * Add tests for using tax_class from product_type * Add tests for get_order_country * Adjust tests * Code review fixes * Drop update_taxes_for_order_lines (#11000) * Fix calls to Avalara not validating order (#11012) * Add validation to disallow creating negative rates (#11010) * Add missing recalculation of order.undiscounted_total (#11039) * Optimize getting tax class country rates (#11040) * Tax API adjustments for dashboard (#11042) * Ignore null rates in taxCountryConfigurationUpdate mutation * Allow to pass null rates in taxClassUpdate mutation * Improve tests * Update saleor/graphql/tax/mutations/tax_class_update.py Co-authored-by: Krzysztof Waliczek <krzysztof.waliczek@saleor.io> * Update schema Co-authored-by: Krzysztof Waliczek <krzysztof.waliczek@saleor.io> * Cleanup before release (#11049) * Update ADDED_IN labels * Fix skippeded test * Regenerate migrations * Deprecate CountryDisplay.vat field * Add changelog * Update order.undiscounted_total calculation to not include taxes (#11068) * Fix assigning rates to tax classes (#11105) * Allow all staff users and apps to query tax-related data (#11113) * Bump dependencies for origin/SALEOR-6391-simple-taxes (#11127) Bumps: - cryptography to 38.0.3 - pillow to 9.3.0 * Fix using tax code from product and product type's tax class (#11111) * Fix using tax code from product and product type's tax class * Extract function * Replace synchronous load_site with promise (#11165) * Denormalize tax class for order lines and orders (#11172) * WIP Denormalize tax class for order lines and orders * Add denormalized fields in GraphQL types * Add tests for denormalized API fields * Return 0 rate in API when rate is null * Add preview/version notes in new field descriptions * Update changelog Co-authored-by: Dominik Kozaczko <dominik@kozaczko.info> Co-authored-by: Maciej Korycinski <maciej@mirumee.com> Co-authored-by: Krzysztof Waliczek <krzysztof.waliczek@saleor.io> Co-authored-by: Mika <6186720+NyanKiyoshi@users.noreply.github.com> Co-authored-by: Krzysztof Kwaśniak <mr.brzys@gmail.com>
saleor
12
Python
28
draft_order_create.py
def _save_lines(info, instance, lines_data, app, manager): if lines_data: lines = [] for line_data in lines_data: new_line = create_order_line( instance, line_data, manager, ) lines.append(new_line) # New event events.order_added_products_event( order=instance, user=info.context.user, app=app, order_lines=lines, )
67df28935c555fdd673f17e8c9183e24dde7c51f
67
https://github.com/saleor/saleor.git
255
def _save_lines(info, instance, lines_data, app, manager): if lines_data: lines = [] for line_data in lines_data: new_line = create_order_line( instance, line_data, manager, ) lines.append(new_line) # New event events.order_added_products_event( order=instance, user=info.context.user, app=app, order_lines=lines,
17
97
_save_lines
69
0
1
20
wagtail/documents/tests/test_admin_views.py
74,800
Reformat with black
wagtail
12
Python
58
test_admin_views.py
def test_reupload_different_file_size_and_file_hash(self): # Build a fake file, and create it through the admin view # since self.document doesn't have a file_size set. fake_file = SimpleUploadedFile("some_file.txt", b"this is the content") post_data = { "title": "My doc", "file": fake_file, } self.client.post(reverse("wagtaildocs:add"), post_data) document = models.Document.objects.get(title="My doc") old_file_size, old_file_hash = document.file_size, document.file_hash new_file = SimpleUploadedFile(document.filename, b"less content") self.client.post( reverse("wagtaildocs:edit", args=(document.pk,)), { "title": document.title, "file": new_file, }, ) document.refresh_from_db() self.assertNotEqual(document.file_size, old_file_size) self.assertNotEqual(document.file_hash, old_file_hash)
d10f15e55806c6944827d801cd9c2d53f5da4186
135
https://github.com/wagtail/wagtail.git
259
def test_reupload_different_file_size_and_file_hash(self): # Build a fake file, and create it through the admin view # since self.document doesn't have a file_size set. fake_file = SimpleUploaded
24
227
test_reupload_different_file_size_and_file_hash
45
0
1
15
python/ray/util/rpdb.py
142,705
[api] Annotate as public / move ray-core APIs to _private and add enforcement rule (#25695) Enable checking of the ray core module, excluding serve, workflows, and tune, in ./ci/lint/check_api_annotations.py. This required moving many files to ray._private and associated fixes.
ray
15
Python
35
rpdb.py
def do_remote(self, arg): # Tell the next task to drop into the debugger. ray._private.worker.global_worker.debugger_breakpoint = self._breakpoint_uuid # Tell the debug loop to connect to the next task. data = json.dumps( { "job_id": ray.get_runtime_context().job_id.hex(), } ) _internal_kv_put( "RAY_PDB_CONTINUE_{}".format(self._breakpoint_uuid), data, namespace=ray_constants.KV_NAMESPACE_PDB, ) self.__restore() self.handle.connection.close() return Pdb.do_continue(self, arg)
43aa2299e6623c8f8c7c4a1b80133459d0aa68b0
87
https://github.com/ray-project/ray.git
192
def do_remote(self, arg): # Tell the next task to drop into the debugger. ray._private.worker.global_worker.debugger_breakpoint = self._breakpoint_uuid # Tell the debug loop to connect to the next task. data = json.dumps( { "job_id": ray.get_runtime_context().job_id.hex(), } ) _internal_kv_put( "RAY_PDB_CONTINUE_{}".format(self._breakpoint_uuid), data, namespace=ray_constants.KV_NAMESPACE_PDB, ) self.__restore() self.handle.connection.close() return Pdb.do_continue(self, arg)
26
144
do_remote
19
0
1
7
mkdocs/tests/config/base_tests.py
225,158
Rework ConfigOption schemas as class-based This is NOT a breaking change; the old style keeps working. Now developers can make a subclass of Config, declare the schema of the config as fields of the class, and instances of this class will hold the processed config. This better represents the relationship between a config definition and a config instance: now you think of config definitions as classes and parsed configs as instances. We can also write these fields as descriptors and enable safe attribute-based access. Static analysis will be able to see when a missing field is accessed. And in follow-up changes I plan to add type annotations which will make even type checking fully sound.
mkdocs
12
Python
17
base_tests.py
def test_missing_required(self): conf = defaults.MkDocsConfig() errors, warnings = conf.validate() self.assertEqual( errors, [('site_name', ValidationError('Required configuration not provided.'))] ) self.assertEqual(warnings, [])
73e8fef5068d47ab7bdc4c49bc4abcc74434b57e
47
https://github.com/mkdocs/mkdocs.git
64
def test_missing_required(self): conf = defa
10
79
test_missing_required
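The mkdocs commit message in the record above describes the class-based config idea: schema fields declared as descriptors on a Config subclass, with a validated instance holding the parsed values and attribute access that static analysis can check. The following is a minimal generic sketch of that pattern, not MkDocs' actual implementation; the `Option`, `Config`, and `SiteConfig` names are illustrative assumptions.

```python
# A generic sketch (assumed names, not MkDocs' API) of a class-based config
# schema: options are descriptors on the class, a parsed instance holds the
# values, and attribute access is safe and visible to static analysis.
class Option:
    def __init__(self, default=None, required=False):
        self.default = default
        self.required = required

    def __set_name__(self, owner, name):
        self.name = name

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        return obj._data.get(self.name, self.default)


class Config:
    def __init__(self, **raw):
        self._data = dict(raw)

    def validate(self):
        # Collect (name, message) pairs for required options that are missing.
        errors = []
        for name, opt in vars(type(self)).items():
            if isinstance(opt, Option) and opt.required and name not in self._data:
                errors.append((name, "Required configuration not provided."))
        return errors


class SiteConfig(Config):
    site_name = Option(required=True)
    docs_dir = Option(default="docs")


conf = SiteConfig()
print(conf.validate())  # [('site_name', 'Required configuration not provided.')]
print(conf.docs_dir)    # 'docs' -- attribute access instead of conf['docs_dir']
```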
6
0
1
3
tests/openbb_terminal/cryptocurrency/test_cryptocurrency_helpers.py
284,453
Refactored Crypto Tests (#1743) * Refactored tests * Removed unused command * Added tests * Tests : remove cassettes files + add fixture * Black * Tests : skip tests Co-authored-by: didierlopes.eth <dro.lopes@campus.fct.unl.pt> Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com> Co-authored-by: Chavithra PARANA <chavithra@gmail.com>
OpenBBTerminal
9
Python
6
test_cryptocurrency_helpers.py
def test_read_data_file(recorder): file = read_data_file("coinbase_gecko_map.json") recorder.capture(file)
9068ad01249c1e1adaca3ef9a704d70da7e3a17b
17
https://github.com/OpenBB-finance/OpenBBTerminal.git
11
def test_read_data_file(recorder): file = read_data_file("coinbase_gecko_map.json") recorder.captur
5
31
test_read_data_file
32
1
1
11
tests/components/anthemav/conftest.py
318,057
Refactor and improve anthemav (#75852)
core
8
Python
22
conftest.py
def mock_anthemav() -> AsyncMock: avr = AsyncMock() avr.protocol.macaddress = "000000000001" avr.protocol.model = "MRX 520" avr.reconnect = AsyncMock() avr.close = MagicMock() avr.protocol.input_list = [] avr.protocol.audio_listening_mode_list = [] avr.protocol.power = False return avr @pytest.fixture
bbd7041a73572547be49ead53b183aa1e55a6d75
@pytest.fixture
65
https://github.com/home-assistant/core.git
61
def mock_anthemav() -> AsyncMock: avr = AsyncMock() avr.protocol.macaddress = "000000000001" avr.protocol.model = "MRX 520" avr.reconnect = AsyncMock() avr.close = MagicMock() avr.protocol.input_list = [] avr.protocol.audio_listening_mode_list = [] avr.protocol.power = False return avr @pytest.fixture
14
121
mock_anthemav
94
0
8
17
dask/bag/tests/test_random.py
156,163
Bag: add implementation for reservoir sampling (#7068) (#7636) - Implement the [L algorithm](https://en.wikipedia.org/wiki/Reservoir_sampling#An_optimal_algorithm) for reservoir sampling without replacement. - Use the **k** reservoirs of size 1 strategy (see [reference](http://utopia.duth.gr/~pefraimi/research/data/2007EncOfAlg.pdf)) for sampling **k** items with replacement
dask
11
Python
56
test_random.py
def test_reservoir_sample_with_replacement_map_partitions_correctness(): N, k = 20, 10 seq = list(range(N)) distribution = [0 for _ in range(N)] expected_distribution = [0 for _ in range(N)] reps = 2000 for _ in range(reps): picks, _ = random._sample_with_replacement_map_partitions(seq, k) for pick in picks: distribution[pick] += 1 for pick in rnd.choices(seq, k=k): expected_distribution[pick] += 1 # convert to probabilities distribution = [c / (reps * k) for c in distribution] expected_distribution = [c / (reps * k) for c in expected_distribution] # use bhattacharyya distance to asses the similarity of distributions assert math.isclose( 0.0, bhattacharyya(distribution, expected_distribution), abs_tol=1e-2 )
4e5dfe7463028a39a90e026c7fb9220969093ab3
150
https://github.com/dask/dask.git
179
def test_reservoir_sample_with_replacement_map_partitions_correctness(): N, k = 20, 10 seq = list(range(N)) distribution = [0 for _ in range(N)] expected_distribution = [0 for _ in range(N)] reps = 2000 for _ in range(reps): picks, _ = random._sample_with_replacement_map_partitions(seq, k) for pick in picks: distribution[pick] += 1 for pick in rnd.choices(seq, k=k): expected_distribution[
21
221
test_reservoir_sample_with_replacement_map_partitions_correctness
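The dask commit message in this record cites Algorithm L for reservoir sampling without replacement. Below is a small self-contained sketch of Algorithm L itself, for readers unfamiliar with it; it is not dask's implementation, which runs a reservoir per partition and then merges the partial reservoirs.

```python
import math
import random

def reservoir_sample_l(stream, k, rng=random):
    """Algorithm L: uniform sample of k items from a stream of unknown length.

    Instead of deciding per element, it draws a geometric "skip" length, so the
    expected number of random draws is O(k * (1 + log(n / k))) rather than O(n).
    """
    it = iter(stream)
    reservoir = []
    for _ in range(k):
        try:
            reservoir.append(next(it))
        except StopIteration:
            return reservoir  # fewer than k items in total

    w = math.exp(math.log(rng.random()) / k)
    while True:
        # Number of elements to skip before the next replacement happens.
        skip = math.floor(math.log(rng.random()) / math.log(1.0 - w))
        try:
            for _ in range(skip):
                next(it)
            item = next(it)
        except StopIteration:
            return reservoir
        reservoir[rng.randrange(k)] = item  # replace a uniformly chosen slot
        w *= math.exp(math.log(rng.random()) / k)

print(reservoir_sample_l(range(10_000), 5))
```

The geometric skip is what keeps the cost low on long streams: most elements are passed over without touching the random number generator.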
13
0
1
6
pandas/io/formats/latex.py
169,035
TYP: Autotyping (#48191) * annotate-magics * annotate-imprecise-magics * none-return * scalar-return * pyi files * ignore vendored file * manual changes * ignore pyright in pickle_compat (these errors would be legit if the current __new__ methods were called but I think these pickle tests call older __new__ methods which allowed providing multiple positional arguments) * run autotyping in pre-commit * remove final and expand safe (and add annotate-imprecise-magics)
pandas
13
Python
13
latex.py
def _empty_info_line(self) -> str: return ( f"Empty {type(self.frame).__name__}\n" f"Columns: {self.frame.columns}\n" f"Index: {self.frame.index}" )
54347fe684e0f7844bf407b1fb958a5269646825
16
https://github.com/pandas-dev/pandas.git
59
def _empty_info_line(self) -> str: return ( f"Empty {type(self.frame).__name__}\n" f"Columns: {self.frame.columns}\n" f"Index: {self.frame.index
8
64
_empty_info_line
22
0
1
9
sympy/physics/units/tests/test_quantities.py
197,626
feat(physics.units): add `is_prefixed` property to `Quantity`
sympy
7
Python
12
test_quantities.py
def test_prefixed_property(): assert not meter.is_prefixed assert not joule.is_prefixed assert not day.is_prefixed assert not second.is_prefixed assert centimeter.is_prefixed assert kilometer.is_prefixed assert kilogram.is_prefixed assert pebibyte.is_prefixed
40a89803dbe877edc8ab6672819715f959273e60
40
https://github.com/sympy/sympy.git
45
def test_prefixed_property(): assert not meter.is_prefixed assert not joule.is_prefixed assert not day.is_prefixed assert not second.is_prefixed assert centimeter.is_prefixed assert kilometer.is_prefixed
10
64
test_prefixed_property
29
0
1
8
python3.10.4/Lib/ctypes/test/test_python_api.py
222,147
add python 3.10.4 for windows
XX-Net
10
Python
23
test_python_api.py
def test_PyObj_FromPtr(self): s = "abc def ghi jkl" ref = grc(s) # id(python-object) is the address pyobj = PyObj_FromPtr(id(s)) self.assertIs(s, pyobj) self.assertEqual(grc(s), ref + 1) del pyobj self.assertEqual(grc(s), ref)
8198943edd73a363c266633e1aa5b2a9e9c9f526
57
https://github.com/XX-net/XX-Net.git
84
def test_PyObj_FromPtr(self): s = "abc def ghi jkl" ref = grc(s) # id(python-object) is the address pyobj = PyObj_FromPtr(id(s)) self.assertIs(s, pyobj) self.assertEqual(grc(s), ref + 1) del pyobj self.assertEqual(grc(s), ref)
10
94
test_PyObj_FromPtr
63
0
3
20
keras/integration_test/preprocessing_applied_in_model_test.py
278,142
resolve line-too-long in integration_test
keras
12
Python
57
preprocessing_applied_in_model_test.py
def testDistributedModelFit(self, strategy): if not tf.__internal__.tf2.enabled() and isinstance( strategy, tf.distribute.experimental.ParameterServerStrategy ): self.skipTest( "Parameter Server strategy with dataset creator need to be run " "when eager execution is enabled." ) with strategy.scope(): preprocessing_model = utils.make_preprocessing_model( self.get_temp_dir() ) training_model = utils.make_training_model() # Merge the two separate models into a single model for training. inputs = preprocessing_model.inputs outputs = training_model(preprocessing_model(inputs)) merged_model = tf.keras.Model(inputs, outputs) merged_model.compile(optimizer="sgd", loss="binary_crossentropy")
4f1d333ded256b0315cf02eee067d6fa902b748d
135
https://github.com/keras-team/keras.git
249
def testDistributedModelFit(self, strategy): if not tf.__internal__.tf2.enabled() and isinstance( strategy, tf.distribute.experimental.ParameterServerStrategy ): self.skipTest( "Parameter Server strategy with dataset creator need to be run " "when eager execution is enabled." )
27
176
testDistributedModelFit
33
0
1
14
saleor/order/tests/test_order_utils.py
29,518
Simple (flat rate) taxes API (#9784) * Add empty tax module * Add tax models (#9839) * Add tax API queries (#9856) * Add MANAGE_TAXES permission * Add tax configuration queries * Create tax configuration when channel is created * Drop sorters for now * Add TaxConfigurationPerCountry type * Update migration * Add metadata to TaxConfiguration type * Add tests for tax configuration queries * Add TaxClass types * Improve tests * Add queries for tax configuration per country * Fix query in tests * Update query cost map * Add tax API mutations (#9934) * Add taxConfigurationUpdate mutation * Update schema * Add tax class CRUD mutations * Add mutations to update/delete tax class rates per country * Review fixes * Add taxClass field to ProductType type (#9999) * Add taxClass field to ProductType type * Add taxClass field to Product type * Add taxClass field to shipping method type * Add displayGrossPrices to ProductPricingInfo (#10008) * Add displayGrossPrices to ProductPricingInfo * Add displayGrossPrices to Checkout * Add displayGrossPrices to Order * Add tests * Add ADDED_IN_35 label to new fields' descriptions * Use new display_gross_prices flag (#10121) * Use new display_gross_prices flag * Update tests * Add tests * Review fixes * Drop Vatlayer (#10335) * Add migration from Vatlayer to simple taxes * Review fixes * Review fixes * Drop usages of global include_taxes_in_prices flag (#10406) * Drop `include_taxes_in_prices` function from site settings * Adjust tests * Review fixes * Drop the `charge_taxes_on_shipping` flag from site settings. (#10466) * Include migrating Avatax tax codes in tax class migration * Drop `charge_taxes_on_shipping` function * Add tax_class to ShippingMethodData * Review fixes * Always calculate shipping tax with Avalara * Add default country rate (#10497) * Allow setting default tax rate for a country (without providing a tax class) * Add validation to allow settings only one default rate at once * Code review fixes * Add taxCalculationStrategy field * Add tests * CR fixes * Adjust resolver to use new tax configuration (#10533) * CR fixes * Add database router to fix false positives on relation mismatch. (#10524) * Add database router to fix false positives on relation mismatch. * The db router should have only 'allow_relation' implemented. * The 'db_for_write' part should stay. * Subscription for sync tax webooks (#10433) * Add proposed changes to schema * Add base implementation for sync tax subscription * Add test for empty order * Add clean up and missing part for tests * Use subscription for tax webhooks. Add more tests * Improve descriptions for tax objects * Adjust resolver to use new tax configuration (#10533) * Add taxCalculationStrategy field (#10532) * Add taxCalculationStrategy field * Add tests * CR fixes * CR fixes * Add datamigration to populate taxCalculationStrategy * Migrate Product.charge_taxes to new tax configuration (#10585) * Migrate Product.charge_taxes field to new tax configuration * Rename function * Fix tests * Change assign_tax_code_to_object_meta function to support tax classes * Update tax class fixtures * Improve dataloader * CR fixes * CR fixes * Add deprecation notice to dataloader * Allow removing country rates in the `taxCountryConfigurationUpdate` mutation. 
(#10647) * Allow deleting rates in taxCountryConfigurationUpdate mutation * Change tax rates ordering to keep default rates first (with null tax classes) * Update existing migration * Remove TaxClass.is_default field (#10660) * Change tax rates ordering to keep default rates first (with null tax classes) * Update existing migration * Drop is_default field from TaxClass model * Drop extra Avalara config (#10673) * Drop extra Avatax config options * Adjust tests * Use flat rates in tax calculations (#10747) * WIP Use new tax configuration in tax calculations * Use new tax calculations for checkout * Adjust tests * Add flat rates calculations for checkout and order * Calculate flat rates in product pricing objects * Adjust tests * Add tests for order calculations * Add tests for product queries tax calculations * Add tests for order calculations * Use base calculations to get default checkout shipping price * Add tests for using tax_class from product_type * Add tests for get_order_country * Adjust tests * Code review fixes * Drop update_taxes_for_order_lines (#11000) * Fix calls to Avalara not validating order (#11012) * Add validation to disallow creating negative rates (#11010) * Add missing recalculation of order.undiscounted_total (#11039) * Optimize getting tax class country rates (#11040) * Tax API adjustments for dashboard (#11042) * Ignore null rates in taxCountryConfigurationUpdate mutation * Allow to pass null rates in taxClassUpdate mutation * Improve tests * Update saleor/graphql/tax/mutations/tax_class_update.py Co-authored-by: Krzysztof Waliczek <krzysztof.waliczek@saleor.io> * Update schema Co-authored-by: Krzysztof Waliczek <krzysztof.waliczek@saleor.io> * Cleanup before release (#11049) * Update ADDED_IN labels * Fix skippeded test * Regenerate migrations * Deprecate CountryDisplay.vat field * Add changelog * Update order.undiscounted_total calculation to not include taxes (#11068) * Fix assigning rates to tax classes (#11105) * Allow all staff users and apps to query tax-related data (#11113) * Bump dependencies for origin/SALEOR-6391-simple-taxes (#11127) Bumps: - cryptography to 38.0.3 - pillow to 9.3.0 * Fix using tax code from product and product type's tax class (#11111) * Fix using tax code from product and product type's tax class * Extract function * Replace synchronous load_site with promise (#11165) * Denormalize tax class for order lines and orders (#11172) * WIP Denormalize tax class for order lines and orders * Add denormalized fields in GraphQL types * Add tests for denormalized API fields * Return 0 rate in API when rate is null * Add preview/version notes in new field descriptions * Update changelog Co-authored-by: Dominik Kozaczko <dominik@kozaczko.info> Co-authored-by: Maciej Korycinski <maciej@mirumee.com> Co-authored-by: Krzysztof Waliczek <krzysztof.waliczek@saleor.io> Co-authored-by: Mika <6186720+NyanKiyoshi@users.noreply.github.com> Co-authored-by: Krzysztof Kwaśniak <mr.brzys@gmail.com>
saleor
10
Python
24
test_order_utils.py
def test_update_order_display_gross_prices_use_country_specific_tax_settings(order): # given country_code = "PT" tax_config = order.channel.tax_configuration tax_config.display_gross_prices = False tax_config.save() tax_config.country_exceptions.create( country=country_code, display_gross_prices=True ) order.display_gross_prices = False order.save(update_fields=["display_gross_prices"]) order.shipping_address.country = country_code order.shipping_address.save() # when update_order_display_gross_prices(order) # then assert order.display_gross_prices
67df28935c555fdd673f17e8c9183e24dde7c51f
76
https://github.com/saleor/saleor.git
84
def test_update_order_display_gross_prices_use_country_specific_tax_settings(order): # given country_code = "PT" tax_config = order.channel.tax_configuration tax_config.display_gross_prices = False tax_config.save() tax_config.country_exceptions.create( country=country_code, display_gross_prices=True ) order.display_gross_prices = False order.save(update_fields=["display_gross_prices"]) order.shipping_address.country = c
14
130
test_update_order_display_gross_prices_use_country_specific_tax_settings
9
0
1
5
modin/pandas/test/test_io.py
154,353
FEAT-#4766: Support fsspec URLs in `read_csv` and `read_csv_glob` (#4898) Signed-off-by: Karthik Velayutham <vkarthik@ponder.io>
modin
9
Python
9
test_io.py
def test_read_csv_google_cloud_storage(self): eval_io( fn_name="read_csv", # read_csv kwargs filepath_or_buffer="gs://modin-testing/testing/multiple_csv/test_data0.csv", )
c5107e5be29089720528c6c0ec4f96bc2a6a1eb3
16
https://github.com/modin-project/modin.git
55
def test_read_csv_google_cloud_storage(self): eval_io( fn_name="read_csv", # read_csv kwargs filepath_or_buffer="gs://modin-testing/testing/multiple_csv/tes
5
30
test_read_csv_google_cloud_storage
87
1
1
17
dask/array/tests/test_creation.py
155,850
increased performance of k-diagonal extraction in da.diag() and da.diagonal() (#8689) * added support for extracting k-diagonals from a 2d-array * included heterogeneous chunks in test_diag() * fixed linting errors in test_diag() * improved efficiency of diagonal extractor a bit * stole @TAdeJong's simple padding solution for diag(v, k) when v is 1d * reduced complexity of `diagonal()` from O(N**2) to O(N) diag() now calls diagonal() * fixed linting errors in diagonal() * reorganized tests and ensured coverage of diag() & diagonal() as per @jcrist's advice * catered for cupy type input arrays to diagonal()
dask
11
Python
38
test_creation.py
def test_diag_2d_array_creation(k): # when input 1d-array is a numpy array: v = np.arange(11) assert_eq(da.diag(v, k), np.diag(v, k)) # when input 1d-array is a dask array: v = da.arange(11, chunks=3) darr = da.diag(v, k) nparr = np.diag(v, k) assert_eq(darr, nparr) assert sorted(da.diag(v, k).dask) == sorted(da.diag(v, k).dask) v = v + v + 3 darr = da.diag(v, k) nparr = np.diag(v, k) assert_eq(darr, nparr) v = da.arange(11, chunks=11) darr = da.diag(v, k) nparr = np.diag(v, k) assert_eq(darr, nparr) assert sorted(da.diag(v, k).dask) == sorted(da.diag(v, k).dask) @pytest.mark.parametrize("k", [0, 3, -3, 8])
e3b3259419c21d0d412b9d5f12531ebe5ad6967a
@pytest.mark.parametrize("k", [0, 3, -3, 8])
198
https://github.com/dask/dask.git
139
def test_diag_2d_array_creation(k): # when input 1d-array is a numpy array: v = np.arange(11) assert_eq(da.diag(v, k), np.diag(v, k)) # when input 1d-array is a dask array: v = da.arange(11, chunks=3) darr = da.diag(v, k) nparr = np.diag(v, k) assert_eq(darr, nparr) assert sorted(da.diag(v, k).dask) == sorted(da.diag(v, k).dask) v = v + v + 3 darr = da.diag(v, k) nparr = np.diag(v, k) assert_eq(darr, nparr) v = da.arange(11, chunks=11) darr = da.diag(v, k) nparr
16
331
test_diag_2d_array_creation
16
0
1
10
datasets/imagenet_sketch/imagenet_sketch.py
104,925
Add ImageNet-Sketch dataset (#4301) * :sparkles: Add ImageNet-Sketch dataset * :memo: add data splits to dataset card * Update datasets/imagenet_sketch/README.md * :sparkles: labels->label and use HF hosted download link * Apply suggestions from code review Co-authored-by: Mario Šaško <mariosasko777@gmail.com> * :memo: update imagenet_sketch README.md * Use dataset repo data url Co-authored-by: Mario Šaško <mariosasko777@gmail.com>
datasets
14
Python
16
imagenet_sketch.py
def _split_generators(self, dl_manager): data_files = dl_manager.download_and_extract(_URL) return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={ "files": dl_manager.iter_files([data_files]), }, ), ]
78941675d6f39c269f9d445121718c6c27c511dc
48
https://github.com/huggingface/datasets.git
122
def _split_generators(self, dl_manager): data_files = dl_manager.download_and_extract(_URL) return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={ "files": dl_manager.iter_files([data_files]), }, ), ]
13
74
_split_generators
23
0
1
5
torchvision/datasets/utils.py
192,538
support confirming no virus scan on GDrive download (#5645) * support confirming no virus scan on GDrive download * put gen_bar_updater back * Update torchvision/datasets/utils.py Co-authored-by: Nicolas Hug <contact@nicolas-hug.com> Co-authored-by: Nicolas Hug <contact@nicolas-hug.com>
vision
9
Python
23
utils.py
def gen_bar_updater() -> Callable[[int, int, int], None]: warnings.warn("The function `gen_bar_update` is deprecated since 0.13 and will be removed in 0.15.") pbar = tqdm(total=None)
96f2c0d47f00371dd066c84f69c34fde07e876c3
35
https://github.com/pytorch/vision.git
28
def gen_bar_updater() -> Callable[[int, int, int], None]: warnings.warn("The function `gen_bar_update`
8
49
gen_bar_updater
17
0
1
9
wagtail/admin/tests/api/test_pages.py
71,307
Reformat with black
wagtail
12
Python
16
test_pages.py
def test_revert_to_page_revision(self): self.assertEqual(self.events_page.title, "Evenements") response = self.get_response( self.events_page.id, {"revision_id": self.first_revision.id} ) self.assertEqual(response.status_code, 200) self.events_page.get_latest_revision().publish() self.events_page.refresh_from_db() self.assertEqual(self.events_page.title, "Events")
d10f15e55806c6944827d801cd9c2d53f5da4186
79
https://github.com/wagtail/wagtail.git
76
def test_revert_to_page_revision(self): self.assertEqual(self.events_page.title, "Evenements") response = self.get_response( self.events_page.id, {"revision_id": self.first_revision.id} ) self.assertEqual(response.status_code, 200) self.events_page.get_latest_
13
130
test_revert_to_page_revision
95
0
4
43
freqtrade/optimize/backtesting.py
148,554
Merge index and mark rates as part of dataload
freqtrade
16
Python
63
backtesting.py
def load_bt_data_detail(self) -> None: if self.timeframe_detail: self.detail_data = history.load_data( datadir=self.config['datadir'], pairs=self.pairlists.whitelist, timeframe=self.timeframe_detail, timerange=self.timerange, startup_candles=0, fail_without_data=True, data_format=self.config.get('dataformat_ohlcv', 'json'), candle_type=self.config.get('candle_type_def', CandleType.SPOT) ) else: self.detail_data = {} if self.trading_mode == TradingMode.FUTURES: # Load additional futures data. funding_rates_dict = history.load_data( datadir=self.config['datadir'], pairs=self.pairlists.whitelist, timeframe=self.exchange._ft_has['mark_ohlcv_timeframe'], timerange=self.timerange, startup_candles=0, fail_without_data=True, data_format=self.config.get('dataformat_ohlcv', 'json'), candle_type=CandleType.FUNDING_RATE ) # For simplicity, assign to CandleType.Mark (might contian index candles!) mark_rates_dict = history.load_data( datadir=self.config['datadir'], pairs=self.pairlists.whitelist, timeframe=self.exchange._ft_has['mark_ohlcv_timeframe'], timerange=self.timerange, startup_candles=0, fail_without_data=True, data_format=self.config.get('dataformat_ohlcv', 'json'), candle_type=CandleType.from_string(self.exchange._ft_has["mark_ohlcv_price"]) ) # Combine data to avoid combining the data per trade. for pair in self.pairlists.whitelist: self.futures_data[pair] = funding_rates_dict[pair].merge( mark_rates_dict[pair], on='date', how="inner", suffixes=["_fund", "_mark"]) else: self.futures_data = {}
f26cd191466b792123f3d0b1a18b3b117a23a638
299
https://github.com/freqtrade/freqtrade.git
656
def load_bt_data_detail(self) -> None: if self.timeframe_detail: self.detail_data = history.load_data( datadir=self.config['datadir'], pairs=self.pairlists.whitelist, timeframe=self.timeframe_detail, timerange=self.timerange, startup_candles=0, fail_without_data=True, data_format=self.config.get('dataformat_ohlcv', 'json'), candle_type=self.config.get('candle_type_def', CandleType.SPOT) ) else: self.detail_data = {} if self.trading_mode == TradingMode.FUTURES: # Load additional futures data. funding_rates_dict = history.load_data( datadir=self.config['datadir'], pairs=self.pairlists.whitelist, timeframe=self.exchange._ft_has['mark_ohlcv_timeframe'], timerange=self.timerange, startup_candles=0, fail_without_data=True, data_format=self.config.get('dataformat_ohlcv', 'json'), candle_type=CandleType.FUNDING_RATE ) # For simplicity, assign to CandleType.Mark (might contian index candles!) mark_rates_dict = history.load_data( datadir=self.config['datadir'], pairs=self.pairlists.whitelist, timeframe=self.exchange._ft_has['mark_ohlcv_timeframe'], timerange=self.timerange, startup_candles=0, fail_without_data=True, data_format=self.config.get('dataformat_ohlcv', 'json'), candle_type=CandleType.from_string(se
35
467
load_bt_data_detail
29
0
1
8
homeassistant/components/bond/light.py
297,803
String formatting and max line length - Part 1 (#84390) Co-authored-by: Erik Montnemery <erik@montnemery.com>
core
11
Python
28
light.py
async def async_stop(self) -> None: _LOGGER.warning( "The bond.stop service is deprecated and has been replaced with a button;" " Call the button.press service instead" ) self._async_has_action_or_raise(Action.STOP) await self._hub.bond.action(self._device.device_id, Action(Action.STOP))
b0cee0bc46cbd7efe0e6421da18d91595c7a25ad
45
https://github.com/home-assistant/core.git
86
async def async_stop(self) -> None: _LOGGER.warning( "The bond.stop service is deprecated and has been replaced with a button;" " Call the button.press service instead" ) self._async_has_a
12
80
async_stop
30
0
2
5
homeassistant/components/zwave_js/sensor.py
301,321
Clean zwave_js platform typing (#72439) * Fix binary sensor * Fix climate * Fix cover * Fix fan * Fix light * Fix lock * Fix number * Fix select * Fix sensor * Add back type ignore until library bump
core
11
Python
28
sensor.py
def extra_state_attributes(self) -> dict[str, str] | None: if (value := self.info.primary_value.value) is None: return None # add the value's int value as property for multi-value (list) items return {ATTR_VALUE: value}
6cac1dadeba6cb81285960db1ab6ec6239547cd9
38
https://github.com/home-assistant/core.git
69
def extra_state_attributes(self) -> dict[str, str] | None: if (value := se
8
61
extra_state_attributes
15
0
2
5
homeassistant/components/smartthings/cover.py
314,506
Adjust smartthings cover type hints (#73948)
core
8
Python
14
cover.py
def current_cover_position(self) -> int | None: if not self._attr_supported_features & CoverEntityFeature.SET_POSITION: return None return self._device.status.level
3743d42ade80528325d36357ca6f9629d4970eaa
30
https://github.com/home-assistant/core.git
47
def current_cover_position(self) -> int | None:
9
50
current_cover_position
57
1
2
8
ivy_tests/test_nn/test_functional/test_activations.py
213,207
created backends sub-folder for all backend implementations.
ivy
11
Python
42
test_activations.py
def test_softmax(x, dtype_str, tensor_fn, dev_str, call): # smoke test x = tensor_fn(x, dtype_str, dev_str) ret = ivy.softmax(x) # type test assert ivy.is_array(ret) # cardinality test assert ret.shape == x.shape # value test assert np.allclose(call(ivy.softmax, x), ivy.backends.numpy.softmax(ivy.to_numpy(x))) # compilation test if not ivy.wrapped_mode(): helpers.assert_compilable(ivy.softmax) # softplus @pytest.mark.parametrize( "x", [[[-1., 1., 2.]]]) @pytest.mark.parametrize( "dtype_str", ['float32']) @pytest.mark.parametrize( "tensor_fn", [ivy.array, helpers.var_fn])
b50046a631badcf15ee25b6355a2d2052f6f5bf9
@pytest.mark.parametrize( "x", [[[-1., 1., 2.]]]) @pytest.mark.parametrize( "dtype_str", ['float32']) @pytest.mark.parametrize( "tensor_fn", [ivy.array, helpers.var_fn])
92
https://github.com/unifyai/ivy.git
101
def test_softmax(x, dtype_str, tensor_fn, dev_str, call): # smoke test x = tensor_fn(x, dtype_str, dev_str) ret = ivy.softmax(x) # type test assert ivy.is_array(ret) # cardinality test assert ret.shape == x.shape # value test assert np.allclose(call(ivy.softmax, x), ivy.backends.numpy.softmax(ivy.to_numpy(x))) # compilation test if not ivy.wrapped_mode(): helpers.assert_compilable(ivy.soft
24
231
test_softmax
75
0
3
6
jax/_src/lax/convolution.py
120,722
Fix batching rule for convolution for batch dimensions of size 0.
jax
11
Python
57
convolution.py
def _reshape_axis_into(src, dst, x): # NB: `dst` is the number of the dimension that we should reshape into # *after* `src` is removed from `x`'s list of dimensions. For example, if # `src` is an added batch dimension, `dst` might name a target dimension in # the unbatched list of dimensions. perm = [i for i in range(x.ndim) if i != src] perm.insert(dst, src) new_shape = list(np.delete(x.shape, src)) new_shape[dst] *= x.shape[src] return lax.reshape(x, new_shape, perm)
ece9b999fb5f85eee6570e5f987ad6704c130503
73
https://github.com/google/jax.git
83
def _reshape_axis_into(src, dst, x): # NB: `dst` is the number of the dimension that we should reshape into # *after* `src` is removed from `x`'s list of dimensions. For example, if # `src` is an added batch dimension, `dst` might name a target dimension in # the
16
110
_reshape_axis_into
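The jax helper in this record folds batch axis `src` into dimension `dst` (where `dst` is numbered after `src` has been removed), and the commit fixes this for size-0 batch axes. Here is a NumPy-only illustration of the same transpose-and-reshape, to make the shape arithmetic concrete; it mirrors the helper's logic rather than calling jax.

```python
import numpy as np

def reshape_axis_into(src, dst, x):
    # NumPy analogue of the jax helper above: drop axis `src` from the shape
    # and fold its extent into axis `dst` (dst is numbered after src's removal).
    perm = [i for i in range(x.ndim) if i != src]
    perm.insert(dst, src)
    new_shape = list(np.delete(x.shape, src))
    new_shape[dst] *= x.shape[src]
    return x.transpose(perm).reshape(new_shape)

x = np.arange(2 * 3 * 4).reshape(2, 3, 4)      # leading batch axis of size 2
print(reshape_axis_into(0, 0, x).shape)         # (6, 4): batch folded into axis 0

# The commit fixes the size-0 batch case, which still reshapes cleanly here:
z = np.zeros((0, 3, 4))
print(reshape_axis_into(0, 1, z).shape)         # (3, 0)
```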
12
0
3
4
bootloader/waflib/TaskGen.py
263,221
Bootloader: Building: Unpack waf's lib archive. Doing so makes it easier to modify. This is a temporary measure until the next waf version is released (although I'm tempted to keep it since it's much more IDE completion friendly).
pyinstaller
13
Python
12
TaskGen.py
def force_permissions(self): if getattr(self.generator, 'chmod', None): for x in self.outputs: os.chmod(x.abspath(), self.generator.chmod)
64ccb7aea824fbec57f7ed1bbe483ec486183c13
40
https://github.com/pyinstaller/pyinstaller.git
44
def force_permissions(self): if getattr(self.generator, 'chm
9
63
force_permissions
10
0
1
3
tests/components/generic/test_diagnostics.py
317,791
Fix diagnostics export for generic camera (#75665) Fix url redaction and add tests Co-authored-by: Dave T <davet2001@users.noreply.github.com>
core
8
Python
9
test_diagnostics.py
def test_redact_url(url_in, url_out_expected): url_out = redact_url(url_in) assert url_out == url_out_expected
7075032bf743f8702d942410c0c41214c90c212b
18
https://github.com/home-assistant/core.git
19
def test_redact_url(url_in, url_out_expected): url_out = redact_url(
5
31
test_redact_url
36
0
2
8
numpy/lib/tests/test_io.py
159,774
Port over tests from npreadtext test suite - Add test for parsing scientific notation. - Add multiple-char comment test. - Port over tests for structured dtypes. - Add tests for exceptions on skiprows/max_rows. - port over ndmin tests. - Make structured data reusable, add unpack tests. - Port over delimiter tests. - Port over maxrows test w/ various dtypes. - Port over test of exception msg on parse failure. - Port over test for converters w/neg indices. - Port over usecols tests - Port over unicode tests. - Port over more converter tests. - Port over test for large rows. - Port over test for string-len discovery. - Port over float conversion accuracy test. - Port over bool test. - Add test for implicit float->int conversion. - Port over complex parsing tests. - Port over tests for reading from generator. - Port over object cleanup test. - Port over bytes incompat test. - Port over converters tests. Co-authored-by: Warren Weckesser <warren.weckesser@gmail.com> Co-authored-by: Sebastian Berg <sebastian@sipsolutions.net>
numpy
12
Python
33
test_io.py
def test_loadtxt_converters_negative_indices(): txt = TextIO('1.5,2.5\n3.0,XXX\n5.5,6.0') conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)} expected = np.array([[1.5, 2.5], [3.0, np.nan], [5.5, 6.0]]) res = np.loadtxt( txt, dtype=np.float64, delimiter=",", converters=conv, encoding=None ) assert_equal(res, expected)
66a61b03658f3c9f312505dcf7eab07e4cf91ac6
102
https://github.com/numpy/numpy.git
60
def test_loadtxt_converters_negative_indices(): txt = TextIO('1.5,2.5\n3.0,XXX\n5.5,6.0') conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)} expected = np.array([[1.5, 2
18
143
test_loadtxt_converters_negative_indices
228
0
3
56
python/ray/tests/test_autoscaler_yaml.py
139,341
[Autoscaler][Local Node Provider] Log a warning if max_workers < len(worker_ips) (#24635) Logs a warning when a user sets max_workers for the local node provider to less than the number of available worker IPs. Also removes defaults of 0 for min_workers and max_workers from example configs to help prevent users inadvertently setting max_workers=0 again.
ray
12
Python
138
test_autoscaler_yaml.py
def testValidateLocal(self): local_config_path = os.path.join( RAY_PATH, "autoscaler/local/example-minimal-manual.yaml" ) base_config = yaml.safe_load(open(local_config_path).read()) base_config["provider"]["head_ip"] = "xxx.yyy" base_config["provider"]["worker_ips"] = ["aaa.bbb", "ccc.ddd", "eee.fff"] base_config["auth"]["ssh_user"] = "user" base_config["auth"]["ssh_private_key"] = "~/.ssh/id_rsa" test_prepare_config = copy.deepcopy(base_config) prepared_config = prepare_config(test_prepare_config) try: validate_config(prepared_config) except Exception: self.fail("Failed to validate local/example-minimal-manual.yaml") expected_prepared = yaml.safe_load(EXPECTED_LOCAL_CONFIG_STR) assert prepared_config == expected_prepared no_worker_config = copy.deepcopy(base_config) del no_worker_config["provider"]["worker_ips"] with pytest.raises(ClickException): prepare_config(no_worker_config) no_head_config = copy.deepcopy(base_config) del no_head_config["provider"]["head_ip"] with pytest.raises(ClickException): prepare_config(no_head_config) for field in "head_node", "worker_nodes", "available_node_types": faulty_config = copy.deepcopy(base_config) faulty_config[field] = "This field shouldn't be in here." with pytest.raises(ClickException): prepare_config(faulty_config) too_many_workers_config = copy.deepcopy(base_config) # More workers requested than the three available ips. too_many_workers_config["max_workers"] = 10 too_many_workers_config["min_workers"] = 10 prepared_config = prepare_config(too_many_workers_config) # Check that worker config numbers were clipped to 3. assert prepared_config == expected_prepared not_enough_workers_config = copy.deepcopy(base_config) # Max workers is less than than the three available ips. # The user is probably has probably made an error. Make sure we log a warning. not_enough_workers_config["max_workers"] = 0 not_enough_workers_config["min_workers"] = 0 with mock.patch( "ray.autoscaler._private.local.config.cli_logger.warning" ) as warning: prepared_config = prepare_config(not_enough_workers_config) warning.assert_called_with( "The value of `max_workers` supplied (0) is less" " than the number of available worker ips (3)." " At most 0 Ray worker nodes will connect to the cluster." ) expected_prepared = yaml.safe_load(EXPECTED_LOCAL_CONFIG_STR) # We logged a warning. # However, prepare_config does not repair the strange config setting: expected_prepared["max_workers"] = 0 expected_prepared["available_node_types"]["local.cluster.node"][ "max_workers" ] = 0 expected_prepared["available_node_types"]["local.cluster.node"][ "min_workers" ] = 0 assert prepared_config == expected_prepared
29eebdfef2acb7d278042f38247a7d82473c3fd6
323
https://github.com/ray-project/ray.git
750
def testValidateLocal(self): local_config_path = os.path.join( RAY_PATH, "autoscaler/local/example-minimal-manual.yaml" ) base_config = yaml.safe_load(open(local_config_path).read()) base_config["provider"]["head_ip"] = "xxx.yyy" base_config["provider"]["worker_ips"] = ["aaa.bbb", "ccc.ddd", "eee.fff"] base_config["
35
597
testValidateLocal
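The test above asserts the behaviour described in this record's commit message: with the local node provider, a `max_workers` larger than the number of worker IPs is silently clipped, while a smaller value is kept but logged as a probable mistake. A simplified, hypothetical sketch of that reconciliation step follows; `reconcile_local_max_workers` and the config shape are assumptions for illustration, not Ray's actual `prepare_config` internals.

```python
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

def reconcile_local_max_workers(config):
    """Simplified illustration of the behaviour the test above asserts.

    Assumes a local-provider style config: an explicit `worker_ips` list under
    `provider` plus a top-level `max_workers`. Not Ray's actual code.
    """
    worker_ips = config["provider"]["worker_ips"]
    requested = config.get("max_workers", len(worker_ips))
    if requested > len(worker_ips):
        # More workers requested than addresses available: clip quietly.
        config["max_workers"] = len(worker_ips)
    elif requested < len(worker_ips):
        # Fewer workers than addresses is probably a user error: keep it, warn.
        logger.warning(
            "The value of `max_workers` supplied (%d) is less than the number "
            "of available worker ips (%d). At most %d Ray worker nodes will "
            "connect to the cluster.",
            requested, len(worker_ips), requested,
        )
    return config

cfg = {"provider": {"worker_ips": ["aaa.bbb", "ccc.ddd", "eee.fff"]}, "max_workers": 0}
reconcile_local_max_workers(cfg)  # logs the warning, leaves max_workers at 0
```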
15
0
1
5
wagtail/contrib/sitemaps/tests.py
73,564
Reformat with black
wagtail
10
Python
13
tests.py
def get_request_and_django_site(self, url): request = RequestFactory().get(url) request.META["HTTP_HOST"] = self.site.hostname request.META["SERVER_PORT"] = self.site.port return request, get_current_site(request)
d10f15e55806c6944827d801cd9c2d53f5da4186
48
https://github.com/wagtail/wagtail.git
42
def get_request_and_django_site(self, url): request = RequestFactory().get(url) request.META["HTTP_HOST"] = self.site.hostname request.META["SERVER_PORT"] = self.site.port return request, get_current_site(request)
11
79
get_request_and_django_site
37
0
4
6
jax/_src/ad_checkpoint.py
122,094
Fix lax imports
jax
10
Python
32
ad_checkpoint.py
def dot_with_no_batch_dims(prim, *_, **params) -> bool: # This is a useful heuristic for transformers. if prim is lax_internal.dot_general_p: (_, _), (lhs_b, rhs_b) = params['dimension_numbers'] if not lhs_b and not rhs_b: return True return False name_p = core.Primitive('name')
1d895b2c85e17b9f563cd41d9a340528179d29aa
47
https://github.com/google/jax.git
49
def dot_with_no_batch_dims(prim, *_, **params) -> bool: # This is a useful heuristic for transformers. if prim is lax_inter
12
89
dot_with_no_batch_dims
26
0
2
7
python/ray/data/impl/block_list.py
139,015
[Datasets] Provide more efficient + intuitive block clearing semantics for different execution modes (#24127) **TL;DR:** Don't clear for eager, clear all but non-lazy input blocks if lazy, clear everything if pipelining. This PR provides more efficient and intuitive block clearing semantics for eager mode, lazy mode, and pipelining, while still supporting multiple operations applied to the same base dataset, i.e. fan-out. For example, two different map operations are applied to the same base `ds` in this example: ```python ds = ray.data.range(10).map(lambda x: x+1) ds1 = ds.map(lambda x: 2*x) ds2 = ds.map(lambda x: 3*x) ``` If we naively clear the blocks when executing the map that produces `ds1`, the map producing `ds2` will fail. ### Desired Semantics - **Eager mode** - don’t clear input blocks, thereby supporting fan-out from cached data at any point in the stage chain without triggering unexpected recomputation. - **Lazy mode** - if lazy datasource, clear the input blocks for every stage, relying on recomputing via stage lineage if fan-out occurs; if non-lazy datasource, do not clear the source blocks of the execution plan when executing the first stage, but do clear input blocks for every subsequent stage. - **Pipelines** - Same as lazy mode, although the only fan-out that can occur is from the pipeline source blocks when repeating a dataset/pipeline, so unintended intermediate recomputation will never happen.
ray
11
Python
26
block_list.py
def _check_if_cleared(self) -> None: if self.is_cleared(): raise ValueError( "This Dataset's blocks have been moved, which means that you " "can no longer use this Dataset." )
f72555262afbbfc1aabb87c9e40839aaaee3ba0b
21
https://github.com/ray-project/ray.git
92
def _check_if_cleared(self) -> None: if self.is_cleared(): raise ValueError(
4
42
_check_if_cleared
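The commit message in this record spells out the desired block-clearing policy per execution mode. The sketch below paraphrases that policy as a standalone decision function; the `Mode` enum, function name, and signature are illustrative assumptions, not Ray's internal API.

```python
from enum import Enum, auto

class Mode(Enum):
    EAGER = auto()
    LAZY = auto()
    PIPELINE = auto()

def should_clear_input_blocks(mode, stage_index, datasource_is_lazy):
    """Paraphrase of the clearing policy described in the commit message above.

    Returns True if the input blocks of the stage at `stage_index` may be
    dropped after it executes. All names here are illustrative assumptions.
    """
    if mode is Mode.EAGER:
        # Never clear: fan-out from cached data must not trigger recomputation.
        return False
    if mode is Mode.LAZY:
        # Clear everything except non-lazy input blocks feeding the first stage;
        # fan-out is handled by recomputing through stage lineage.
        return datasource_is_lazy or stage_index > 0
    # Pipelining: clear everything; the only fan-out is at the pipeline source,
    # which is read again when the pipeline repeats.
    return True

assert should_clear_input_blocks(Mode.EAGER, 0, True) is False
assert should_clear_input_blocks(Mode.LAZY, 0, False) is False
assert should_clear_input_blocks(Mode.LAZY, 1, False) is True
```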
78
1
1
23
dask/dataframe/tests/test_groupby.py
156,539
Implement {Series,DataFrame}GroupBy `fillna` methods (#8869) Co-authored-by: Ian Rose <ian.r.rose@gmail.com>
dask
13
Python
54
test_groupby.py
def test_bfill(): df = pd.DataFrame( { "A": [1, 1, 2, 2], "B": [3, 4, 3, 4], "C": [np.nan, 3, np.nan, np.nan], "D": [np.nan, 4, np.nan, 5], "E": [np.nan, 6, np.nan, 7], } ) ddf = dd.from_pandas(df, npartitions=2) assert_eq( df.groupby("A").bfill(), ddf.groupby("A").bfill(), ) assert_eq( df.groupby("A").B.bfill(), ddf.groupby("A").B.bfill(), ) assert_eq( df.groupby(["A", "B"]).bfill(), ddf.groupby(["A", "B"]).bfill(), ) @pytest.mark.parametrize( "grouper", [ lambda df: ["a"], lambda df: ["a", "b"], lambda df: df["a"], lambda df: [df["a"], df["b"]], lambda df: [df["a"] > 2, df["b"] > 1], ], )
5fbda77cfc5bc1b8f1453a2dbb034b048fc10726
@pytest.mark.parametrize( "grouper", [ lambda df: ["a"], lambda df: ["a", "b"], lambda df: df["a"], lambda df: [df["a"], df["b"]], lambda df: [df["a"] > 2, df["b"] > 1], ], )
186
https://github.com/dask/dask.git
257
def test_bfill(): df = pd.DataFrame( { "A": [1, 1, 2, 2], "B": [3, 4, 3, 4], "C": [np.nan, 3, np.nan, np.nan], "D": [np.nan, 4, np.nan, 5], "E": [np.nan, 6, np.nan, 7], } ) ddf = dd.from_pandas(df, npartit
17
411
test_bfill
42
0
1
20
example/components/plot_line.py
106,625
test: split demo.py into separate files and functions
visdom
14
Python
41
plot_line.py
def plot_line_stackedarea(viz, env): Y = np.linspace(0, 4, 200) return viz.line( Y=np.column_stack((np.sqrt(Y), np.sqrt(Y) + 2)), X=np.column_stack((Y, Y)), opts=dict( fillarea=True, showlegend=False, width=800, height=800, xlabel='Time', ylabel='Volume', ytype='log', title='Stacked area plot', marginleft=30, marginright=30, marginbottom=80, margintop=30, ), ) # Assure that the stacked area plot isn't giant
b4115c0337b1bacc876bef1ece97e8fa8b3e2834
117
https://github.com/fossasia/visdom.git
209
def plot_line_stackedarea(viz, env): Y = np.linspace(0, 4, 200)
24
171
plot_line_stackedarea
11
0
1
2
.venv/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/provider.py
61,123
upd; format
transferlearning
6
Python
11
provider.py
def identify(self, requirement_or_candidate): # type: (Union[Requirement, Candidate]) -> str return requirement_or_candidate.name
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
11
https://github.com/jindongwang/transferlearning.git
24
def identify(self, requirement_or_candidate): # type: (Union[Requirement, Candidate]) -> str return requirement_or_candidate.na
4
18
identify
25
0
3
5
code/deep/BJMMD/caffe/scripts/cpp_lint.py
60,419
Balanced joint maximum mean discrepancy for deep transfer learning
transferlearning
12
Python
23
cpp_lint.py
def CleanseComments(line): commentpos = line.find('//') if commentpos != -1 and not IsCppString(line[:commentpos]): line = line[:commentpos].rstrip() # get rid of /* ... */ return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
cc4d0564756ca067516f71718a3d135996525909
50
https://github.com/jindongwang/transferlearning.git
33
def CleanseComments(line): comment
8
88
CleanseComments
22
0
1
4
.venv/lib/python3.8/site-packages/pip/_internal/metadata/pkg_resources.py
60,801
upd; format
transferlearning
11
Python
22
pkg_resources.py
def from_wheel(cls, path, name): # type: (str, str) -> Distribution with zipfile.ZipFile(path, allowZip64=True) as zf: dist = pkg_resources_distribution_for_wheel(zf, name, path) return cls(dist)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
38
https://github.com/jindongwang/transferlearning.git
53
def from_wheel(cls, path, name): # type: (str, str) -> Distribution with zipfile.ZipFile(path, allowZip64=True) as zf: d
10
60
from_wheel
17
0
3
6
airflow/utils/sqlalchemy.py
43,497
Have consistent types between the ORM and the migration files (#24044) We currently don't compare column types between the ORM and the migration files. Some columns in the migration files have different types from the same columns in the ORM. Here, I made an effort to match the types in the migration files with the types in the ORM, using the migration files as the source of truth in most cases. I couldn't convert the MySQL VARCHAR collation in db(utf8_bin) to use the one in ORM(utf8mb3_bin). It seems it's not possible to convert a collation of an already existing column in MySQL.
airflow
11
Python
13
sqlalchemy.py
def load_dialect_impl(self, dialect): if dialect.name == 'mssql': return mssql.DATETIME2(precision=6) elif dialect.name == 'mysql': return mysql.TIMESTAMP(fsp=6) return super().load_dialect_impl(dialect)
25537acfa28eebc82a90274840e0e6fb5c91e271
48
https://github.com/apache/airflow.git
59
def load_dialect_impl(self, dialect): if dialect.name == 'mssq
11
80
load_dialect_impl
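The `load_dialect_impl` method in this record belongs to a SQLAlchemy `TypeDecorator`, the mechanism that lets one ORM-level type compile to a different column type per backend, which is how the ORM and the migration files can stay in sync. A minimal hedged sketch of how such a type is declared and attached to a column follows; `TimestampType` and the table are made up for illustration (Airflow's real class has a different name).

```python
from sqlalchemy import Column, Integer, MetaData, Table
from sqlalchemy.dialects import mssql, mysql
from sqlalchemy.types import DateTime, TypeDecorator

class TimestampType(TypeDecorator):
    """Illustrative cross-dialect timestamp type (name is made up).

    `load_dialect_impl` maps one ORM-level type to a concrete column type per
    backend, mirroring the method body shown in the record above.
    """
    impl = DateTime(timezone=True)
    cache_ok = True

    def load_dialect_impl(self, dialect):
        if dialect.name == "mssql":
            return mssql.DATETIME2(precision=6)
        elif dialect.name == "mysql":
            return mysql.TIMESTAMP(fsp=6)
        return super().load_dialect_impl(dialect)

metadata = MetaData()
events = Table(
    "events",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("created_at", TimestampType()),  # compiled per-dialect at DDL time
)
```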
67
0
1
23
tests/sentry/api/endpoints/test_organization_release_details.py
90,873
ref(models): `ActivityType` (#34978) ## Objective: We want to separate enum logic from Model logic. This breaks a lot of circular dependencies.
sentry
16
Python
53
test_organization_release_details.py
def test_activity_generation_long_release(self): user = self.create_user(is_staff=False, is_superuser=False) org = self.organization org.flags.allow_joinleave = False org.save() team = self.create_team(organization=org) project = self.create_project(teams=[team], organization=org) release = Release.objects.create(organization_id=org.id, version="x" * 65) release.add_project(project) self.create_member(teams=[team], user=user, organization=org) self.login_as(user=user) url = reverse( "sentry-api-0-organization-release-details", kwargs={"organization_slug": org.slug, "version": release.version}, ) response = self.client.put(url, data={"dateReleased": datetime.utcnow().isoformat() + "Z"}) assert response.status_code == 200, (response.status_code, response.content) release = Release.objects.get(id=release.id) assert release.date_released activity = Activity.objects.filter( type=ActivityType.RELEASE.value, project=project, ident=release.version[:64] ) assert activity.exists()
b9f5a910dc841b85f58d46266ec049ae5a7fd305
235
https://github.com/getsentry/sentry.git
232
def test_activity_generation_long_release(self): user = self.create_user(is_staff=False, is_superuser=False) org = self.organization org.flags.allow_joinleave = False org.save() team = self.create_team(organization=org) project = self.create_project(teams=[team], organization=org) release = Release.objects.create(organization_id=org.id, version="x" * 65) release.add_project(project) self.create_member(teams=[team], user=user, organization=org) self.login_as(user=user) url = reverse( "sentry-api-0-organization-release-details", kwargs={"organization_slug": org.slug, "version": release.version}, ) response = self.client.put(url, data={"dateRel
50
370
test_activity_generation_long_release
15
0
1
3
fastai/data_block.py
190,250
Upgrading to support latest Pytorch version
DeOldify
8
Python
14
data_block.py
def split_by_list(self, train, valid): "Split the data between `train` and `valid`." return self._split(self.path, train, valid)
4fc3616712edb19179b17dd270ad6cf63abf99c2
23
https://github.com/jantic/DeOldify.git
28
def split_by_list(self, train, valid): "Split the data between `train` and `val
6
35
split_by_list
19
0
4
40
tests/integration/gateway_clients/test_clients_gateways.py
12,744
fix: fix endpoint discovery tries (#5014)
jina
8
Python
15
test_clients_gateways.py
def test_grpc_gateway_runtime_lazy_request_access(linear_graph_dict, monkeypatch): call_counts = multiprocessing.Queue() monkeypatch.setattr( networking.GrpcConnectionPool, 'send_requests_once', DummyNoDocAccessMockConnectionPool.send_requests_once, ) monkeypatch.setattr( networking.GrpcConnectionPool, 'send_discover_endpoint', DummyMockConnectionPool.send_discover_endpoint, ) port = random_port()
6f5b3f2a9b13c2eae78b746531132cbfcdc8c2da
183
https://github.com/jina-ai/jina.git
78
def test_grpc_gateway_runtime_lazy_request_access(linear_graph_dict, monkeypatch): call_counts = multiprocessing.Queue() monkeypatch.setattr( networking.GrpcConnectionPool, 'send_requests_once', DummyNoDocAccessMockConnectionPool.send_requests_once, ) monkeypatch.setattr( networking.GrpcConnectionPool, 'send
15
78
test_grpc_gateway_runtime_lazy_request_access
197
0
4
29
ludwig/utils/defaults.py
6,983
Comprehensive configs for trainer and combiner. (#2118)
ludwig
11
Python
78
defaults.py
def _perform_sanity_checks(config): assert "input_features" in config, "config does not define any input features" assert "output_features" in config, "config does not define any output features" assert isinstance(config["input_features"], list), ( "Ludwig expects input features in a list. Check your model " "config format" ) assert isinstance(config["output_features"], list), ( "Ludwig expects output features in a list. Check your model " "config format" ) assert len(config["input_features"]) > 0, "config needs to have at least one input feature" assert len(config["output_features"]) > 0, "config needs to have at least one output feature" if TRAINER in config: assert isinstance(config[TRAINER], dict), ( "There is an issue while reading the training section of the " "config. The parameters are expected to be" "read as a dictionary. Please check your config format." ) if "preprocessing" in config: assert isinstance(config["preprocessing"], dict), ( "There is an issue while reading the preprocessing section of the " "config. The parameters are expected to be read" "as a dictionary. Please check your config format." ) if COMBINER in config: assert isinstance(config[COMBINER], dict), ( "There is an issue while reading the combiner section of the " "config. The parameters are expected to be read" "as a dictionary. Please check your config format." )
ae25cc4c5a229bbc44339249e1f94bf256f18317
134
https://github.com/ludwig-ai/ludwig.git
384
def _perform_sanity_checks(config): assert "input_features" in config, "config does not define any input features" assert "output_features" in config, "config does not define any output features" assert isinstance(config["input_features"], list), ( "Ludwig expects input features in a list. Check your model " "config format" ) assert isinstance(config["output_features"], list), ( "Ludwig expects output features in a list. Check your model " "config format" ) assert len(config["input_fe
8
241
_perform_sanity_checks
9
0
1
3
test/units/plugins/callback/test_callback.py
266,388
Avoid deprecated TestCase functions in unit tests. (#76678) * Avoid deprecated TestCase functions in unit tests. * Add assertRaisesRegex for Python 2.7. * Fix indentation.
ansible
12
Python
9
test_callback.py
def test_host_label(self): result = TaskResult(host=Host('host1'), task=mock_task, return_data={}) self.assertEqual(CallbackBase.host_label(result), 'host1')
97104f1221b64ef36cf42cb90c5a0eff263a2adb
38
https://github.com/ansible/ansible.git
22
def test_host_label(self):
12
63
test_host_label
105
0
2
31
dask/dataframe/io/tests/test_parquet.py
156,242
Remove pyarrow-legacy engine from parquet API (#8835) * remove pyarrow-legacy * Small fixup * Small fixup for pyarrow < 5 Co-authored-by: Jim Crist-Harif <jcristharif@gmail.com>
dask
14
Python
84
test_parquet.py
def test_writing_parquet_with_kwargs(tmpdir, engine): fn = str(tmpdir) path1 = os.path.join(fn, "normal") path2 = os.path.join(fn, "partitioned") df = pd.DataFrame( { "a": np.random.choice(["A", "B", "C"], size=100), "b": np.random.random(size=100), "c": np.random.randint(1, 5, size=100), } ) df.index.name = "index" ddf = dd.from_pandas(df, npartitions=3) engine_kwargs = { "pyarrow": { "compression": "snappy", "coerce_timestamps": None, "use_dictionary": True, }, "fastparquet": {"compression": "snappy", "times": "int64", "fixed_text": None}, } ddf.to_parquet(path1, engine=engine, **engine_kwargs[engine]) out = dd.read_parquet(path1, engine=engine) assert_eq(out, ddf, check_index=(engine != "fastparquet")) # Avoid race condition in pyarrow 0.8.0 on writing partitioned datasets with dask.config.set(scheduler="sync"): ddf.to_parquet( path2, engine=engine, partition_on=["a"], **engine_kwargs[engine] ) out = dd.read_parquet(path2, engine=engine).compute() for val in df.a.unique(): assert set(df.b[df.a == val]) == set(out.b[out.a == val])
0b36d7fcaf54ee9a78fff4b07f124cb0c8741cdf
284
https://github.com/dask/dask.git
285
def test_writing_parquet_with_kwargs(tmpdir, engine): fn = str(tmpdir) path1 = os.path.join(fn, "normal") path2 = os.path.join(fn, "partitioned") df = pd.DataFrame( { "a": np.random.choice(["A", "B", "C"], size=100), "b": np.random.random(size=100), "c": np.random.randint(1, 5, size=100), } ) df.index.name = "index" ddf = dd.from_pandas(df, npartitions=3) engine_kwargs = { "pyarrow": { "compression": "snappy", "coerce_timestamps": None, "use_dictionary": True, }, "fastparquet": {"compression": "snappy", "times": "int64", "fixed_text": None}, } ddf.to_parquet(path1, engine=engine, **engine_kwargs[engine
40
465
test_writing_parquet_with_kwargs
8
1
1
4
saleor/plugins/webhook/tests/subscription_webhooks/fixtures.py
28,271
GraphQL subscription support for synchronous webhook events (#9763) * WIP add sync webhooks subscription payload handling * add tests, fix minor things * update schema * remove unneeded code * add fix for circular field resolve * fix-filter-shipping-methods-payload * added_in added to description * add missing types * revert refactor, precommit issues * fixes after review * cosmetic fixes post-review * subscription types description fixes * remove unneeded description from PaymentBase * add validation for creating webhook with two top level fields, add tests for shippingListMethodsForCheckout * add docstring, refactor prevent_sync_event_circular_query wrapper * fix docstring of prevent_sync_event_circular_query * fix linters
saleor
8
Python
8
fixtures.py
def subscription_order_fulfilled_webhook(subscription_webhook): return subscription_webhook( queries.ORDER_FULFILLED, WebhookEventAsyncType.ORDER_FULFILLED ) @pytest.fixture
8201efcde2d7aacccf3512c544cceea6780a0598
@pytest.fixture
16
https://github.com/saleor/saleor.git
19
def subscription_order_fulfilled_webhook(subscription_webhook): return subscription_webhook( queries.ORDER_FULFILLED, WebhookEventAsyncType.ORDER_FULFILLED ) @pytest.fixture
7
32
subscription_order_fulfilled_webhook
83
0
1
13
packages/syft/src/syft/core/tensor/autodp/phi_tensor.py
1,804
add data subject and data subject shape serialization to GammaTensor
PySyft
11
Python
63
phi_tensor.py
def _object2bytes(self) -> bytes: schema = get_capnp_schema(schema_file="phi_tensor.capnp") pt_struct: CapnpModule = schema.PT # type: ignore pt_msg = pt_struct.new_message() # this is how we dispatch correct deserialization of bytes pt_msg.magicHeader = serde_magic_header(type(self)) # We always have FPT as the child of an PT in the tensor chain. chunk_bytes(serialize(self.child, to_bytes=True), "child", pt_msg) # type: ignore pt_msg.minVals = serialize(self.min_vals, to_bytes=True) pt_msg.maxVals = serialize(self.max_vals, to_bytes=True) pt_msg.dataSubjects = serialize( dslarraytonumpyutf8(self.data_subjects), to_bytes=True ) pt_msg.dataSubjectsShape = serialize(self.data_subjects.shape, to_bytes=True) # to pack or not to pack? # to_bytes = pt_msg.to_bytes() return pt_msg.to_bytes_packed()
a81b66ea18721dc36c77aefac733dd224f48cc87
124
https://github.com/OpenMined/PySyft.git
200
def _object2bytes(self) -> bytes: schema = get_capnp_schema(schema_file="phi_tensor.capnp") pt_struct: CapnpModule = schema.PT # type: ignore pt_msg = pt_struct.new_message() # this is how we dispatch correct deserialization of bytes pt_msg.magicHeader = serde_magic_header(type(self)) # We always have FPT as the child of an PT in the tensor chain. chunk_bytes(serialize(self.child, to_bytes=True), "child", pt_msg) # type: ignore pt_msg.minVals = serialize(self.min_vals, to_bytes=True) pt_msg.maxVals = serialize(self.max_vals, to_bytes=True) pt_msg.dataSubjects = serialize( dslarraytonumpyutf8(self.da
28
202
_object2bytes
29
0
5
12
homeassistant/components/tplink/light.py
298,501
Use ColorMode enum in tplink (#70542)
core
10
Python
21
light.py
def supported_color_modes(self) -> set[ColorMode | str] | None: modes: set[ColorMode | str] = set() if self.device.is_variable_color_temp: modes.add(ColorMode.COLOR_TEMP) if self.device.is_color: modes.add(ColorMode.HS) if self.device.is_dimmable: modes.add(ColorMode.BRIGHTNESS) if not modes: modes.add(ColorMode.ONOFF) return modes
121d2008c2e98c94775f0379ccd4eedc15476d7d
86
https://github.com/home-assistant/core.git
122
def supported_color_modes(self) -> set[ColorMode | str] | None: modes: set[ColorMode | str] = set() if self.device.is_variable_color_temp: modes.add(ColorMode.COLOR_TEMP) if self.device.is_color: m
15
140
supported_color_modes
52
0
1
6
tests/test_patching.py
105,154
Support DataLoader with num_workers > 0 in streaming mode (#4375) * make TorchIterableDataset work in parallel - make it picklable - parallelize over the shards when num_workers is passed * start writing some tests * fix streaming extension and fsspec issues in subprocesses * fix some tests * fix more tests * fix import * fix and add tests * fix patch (handle successive patches and builtins) * revert unnecessary change to enriched_web_blg * style * use open locally to fix win permission errors * keep file opened in read_csv * fix compression for read_csv * consistency of read_csv: don't infer compression for file-like objects * stringify Path objects * comments + raise error if sharding is ambiguous * minor * Update src/datasets/iterable_dataset.py Co-authored-by: Mario Šaško <mariosasko777@gmail.com> Co-authored-by: Mario Šaško <mariosasko777@gmail.com>
datasets
10
Python
39
test_patching.py
def test_patch_submodule_missing_builtin(): # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point mock = "__test_patch_submodule_missing_builtin_mock__" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching, "len", None) is None with patch_submodule(_test_patching, "len", mock): assert _test_patching.len is mock assert _test_patching.len is len
ab7d3045ac9154e9c1c2602d0869130defdc6dc7
40
https://github.com/huggingface/datasets.git
79
def test_patch_submodule_missing_builtin(): # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point mock = "__test
6
71
test_patch_submodule_missing_builtin
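The commit message in this record (the Hugging Face `datasets` streaming change) says the Torch iterable wrapper was made picklable and parallelized over shards when `num_workers > 0`. Below is a generic sketch of that worker-sharding pattern using `torch.utils.data.get_worker_info()`; `ShardedIterable` is an illustrative stand-in, not the library's actual class.

```python
from torch.utils.data import DataLoader, IterableDataset, get_worker_info

class ShardedIterable(IterableDataset):
    """Generic sketch of sharding an iterable dataset over DataLoader workers.

    Worker i reads only shards i, i + num_workers, i + 2 * num_workers, ...,
    so with num_workers > 0 no example is yielded twice. `shards` is just a
    list of lists here; the class name is an illustrative stand-in.
    """
    def __init__(self, shards):
        self.shards = shards

    def __iter__(self):
        info = get_worker_info()
        if info is None:  # num_workers == 0: the single process reads everything
            my_shards = self.shards
        else:
            my_shards = self.shards[info.id :: info.num_workers]
        for shard in my_shards:
            yield from shard

if __name__ == "__main__":
    shards = [[0, 1], [2, 3], [4, 5], [6, 7]]
    loader = DataLoader(ShardedIterable(shards), num_workers=2, batch_size=None)
    print(sorted(int(x) for x in loader))  # [0, 1, 2, 3, 4, 5, 6, 7]
```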
33
1
1
8
tests/openbb_terminal/stocks/fundamental_analysis/test_market_watch_view.py
283,567
Updating some names (#1575) * quick econ fix * black * keys and feature flags * terminal name :eyes: * some more replacements * some more replacements * edit pyproject * gst -> openbb * add example portfolios back to git * Update api from gst * sorry. skipping some tests * another round of names * another round of test edits * Missed some .gst refs and update timezone * water mark stuff * Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS * fix more GST to OpenBB Terminal * Logging : merge conflicts with main * Revert wrong files Co-authored-by: Andrew <andrew.kenreich@gmail.com> Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt> Co-authored-by: Chavithra PARANA <chavithra@gmail.com>
OpenBBTerminal
11
Python
28
test_market_watch_view.py
def test_call_func_no_parser(func, mocker):
    mocker.patch(
        "openbb_terminal.stocks.fundamental_analysis.market_watch_view.parse_known_args_and_warn",
        return_value=None,
    )

    func_result = getattr(market_watch_view, func)(other_args=list(), ticker="TSLA")
    assert func_result is None
    getattr(market_watch_view, "parse_known_args_and_warn").assert_called_once()


@pytest.mark.vcr
@pytest.mark.record_stdout
@pytest.mark.parametrize(
    "func",
    [
        "income",
        "balance",
        "cash",
    ],
)
@pytest.mark.parametrize(
    "use_color",
    [True, False],
)
b71abcfbf4d7e8ac1855522aff0378e13c8b5362
@pytest.mark.vcr @pytest.mark.record_stdout @pytest.mark.parametrize( "func", [ "income", "balance", "cash", ], ) @pytest.mark.parametrize( "use_color", [True, False], )
51
https://github.com/OpenBB-finance/OpenBBTerminal.git
91
def test_call_func_no_parser(func, mocker): mocker.patch( "openbb_terminal.stocks.fundamental_analysis.market_watch_view.parse_known_args_and_warn", return_value=None, ) func_result = getattr(market_watch_view, func)(other_args=list(), ticker="TSLA") assert func_result is None getattr(market_watch_view, "parse_known_args_and_warn").assert_called_once() @pytest.mark.vcr @pytest.mark.record_stdout @pytest.mark.parametrize(
17
160
test_call_func_no_parser
24
0
2
10
wagtail/admin/tests/api/test_pages.py
71,347
Reformat with black
wagtail
15
Python
22
test_pages.py
def test_all_nested_fields(self):
    response = self.get_response(
        type="demosite.BlogEntryPage", fields="feed_image(*)"
    )
    content = json.loads(response.content.decode("UTF-8"))

    for page in content["items"]:
        self.assertEqual(
            set(page["feed_image"].keys()),
            {"id", "meta", "title", "width", "height", "thumbnail"},
        )
d10f15e55806c6944827d801cd9c2d53f5da4186
73
https://github.com/wagtail/wagtail.git
114
def test_all_nested_fields(self): response = self.get_response( type="demosite.BlogEntryPage", fields="feed_image(*)" ) content = json.loads(response.content.decode("UTF-8")) for page in content["items"]: self.assertEqual( set(page["feed_image"].keys()),
14
129
test_all_nested_fields
71
1
1
23
tests/snuba/sessions/test_sessions_v2.py
95,790
fix(sessions): Order results by timestamp and log error if snuba limit exceeded (#31214) As described in https://getsentry.atlassian.net/browse/ISSUE-1372, gaps occur in sessions_v2 time series when the number of releases is large. This seems to be caused by the fact that snuba applies a default limit of 1000. The sessions API queries these series without an orderBy constraint, so a random subset of entries default to zero. This PR logs an error if this limit is actually reached. Furthermore, we add an order by clause to the snuba query, such that at least the most recent part of the time series is complete.
sentry
14
Python
56
test_sessions_v2.py
def test_massage_simple_timeseries():
    query = _make_query("statsPeriod=1d&interval=6h&field=sum(session)")
    result_totals = [{"sessions": 4}]
    # snuba returns the datetimes as strings for now
    result_timeseries = [
        {"sessions": 2, "bucketed_started": "2020-12-18T06:00:00+00:00"},
        {"sessions": 2, "bucketed_started": "2020-12-17T12:00:00+00:00"},
    ]

    expected_result = {
        "start": "2020-12-17T12:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": [
            "2020-12-17T12:00:00Z",
            "2020-12-17T18:00:00Z",
            "2020-12-18T00:00:00Z",
            "2020-12-18T06:00:00Z",
        ],
        "groups": [
            {"by": {}, "series": {"sum(session)": [2, 0, 0, 2]}, "totals": {"sum(session)": 4}}
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result


@freeze_time("2020-12-18T11:14:17.105Z")
7fbf708470ba13992a5d53b088be2388a8ed93df
@freeze_time("2020-12-18T11:14:17.105Z")
125
https://github.com/getsentry/sentry.git
218
def test_massage_simple_timeseries(): query = _make_query("statsPeriod=1d&interval=6h&field=sum(session)") result_totals = [{"sessions": 4}] # snuba returns the datetimes as strings for now result_timeseries = [ {"sessions": 2, "bucketed_started": "2020-12-18T06:00:00+00:00"}, {"sessions": 2, "bucketed_started": "2020-12-17T12:00:00+00:00"}, ] expected_result = { "start": "2020-12-17T12:00:00Z", "end": "2020-12-18T11:15:00Z", "query": "", "intervals": [ "2020-12-17T12:00:00Z", "2020-12-1
10
240
test_massage_simple_timeseries
235
0
1
41
sklearn/metrics/tests/test_pairwise_distances_reduction.py
260,984
FEA Fused sparse-dense support for `PairwiseDistancesReduction` (#23585) Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> Co-authored-by: Christian Lorentzen <lorentzen.ch@gmail.com> Co-authored-by: Jérémie du Boisberranger <jeremiedbb@users.noreply.github.com> Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> Co-authored-by: Meekail Zain <Micky774@users.noreply.github.com>
scikit-learn
11
Python
114
test_pairwise_distances_reduction.py
def test_pairwise_distances_reduction_is_usable_for():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 10)
    Y = rng.rand(100, 10)
    X_csr = csr_matrix(X)
    Y_csr = csr_matrix(Y)
    metric = "manhattan"

    # Must be usable for all possible pair of {dense, sparse} datasets
    assert BaseDistanceReductionDispatcher.is_usable_for(X, Y, metric)
    assert BaseDistanceReductionDispatcher.is_usable_for(X_csr, Y_csr, metric)
    assert BaseDistanceReductionDispatcher.is_usable_for(X_csr, Y, metric)
    assert BaseDistanceReductionDispatcher.is_usable_for(X, Y_csr, metric)

    assert BaseDistanceReductionDispatcher.is_usable_for(
        X.astype(np.float64), Y.astype(np.float64), metric
    )

    assert BaseDistanceReductionDispatcher.is_usable_for(
        X.astype(np.float32), Y.astype(np.float32), metric
    )

    assert not BaseDistanceReductionDispatcher.is_usable_for(
        X.astype(np.int64), Y.astype(np.int64), metric
    )

    assert not BaseDistanceReductionDispatcher.is_usable_for(X, Y, metric="pyfunc")
    assert not BaseDistanceReductionDispatcher.is_usable_for(
        X.astype(np.float32), Y, metric
    )
    assert not BaseDistanceReductionDispatcher.is_usable_for(
        X, Y.astype(np.int32), metric
    )

    # F-ordered arrays are not supported
    assert not BaseDistanceReductionDispatcher.is_usable_for(
        np.asfortranarray(X), Y, metric
    )

    # We prefer not to use those implementations for fused sparse-dense when
    # metric="(sq)euclidean" because it's not yet the most efficient one on
    # all configurations of datasets.
    # See: https://github.com/scikit-learn/scikit-learn/pull/23585#issuecomment-1247996669  # noqa
    # TODO: implement specialisation for (sq)euclidean on fused sparse-dense
    # using sparse-dense routines for matrix-vector multiplications.
    assert not BaseDistanceReductionDispatcher.is_usable_for(
        X_csr, Y, metric="euclidean"
    )
    assert not BaseDistanceReductionDispatcher.is_usable_for(
        X_csr, Y_csr, metric="sqeuclidean"
    )

    # CSR matrices without non-zeros elements aren't currently supported
    # TODO: support CSR matrices without non-zeros elements
    X_csr_0_nnz = csr_matrix(X * 0)
    assert not BaseDistanceReductionDispatcher.is_usable_for(X_csr_0_nnz, Y, metric)

    # CSR matrices with int64 indices and indptr (e.g. large nnz, or large n_features)
    # aren't supported as of now.
    # See: https://github.com/scikit-learn/scikit-learn/issues/23653
    # TODO: support CSR matrices with int64 indices and indptr
    X_csr_int64 = csr_matrix(X)
    X_csr_int64.indices = X_csr_int64.indices.astype(np.int64)
    assert not BaseDistanceReductionDispatcher.is_usable_for(X_csr_int64, Y, metric)
60cc5b596f38d0d236dab34e02c05d98b5a72bad
318
https://github.com/scikit-learn/scikit-learn.git
429
def test_pairwise_distances_reduction_is_usable_for(): rng = np.random.RandomState(0) X = rng.rand(100, 10) Y = rng.rand(100, 10) X_csr = csr_matrix(X) Y_csr = csr_matrix(Y) metric = "manhattan" # Must be usable for all possible pair of {dense, sparse} datasets assert BaseDistanceReductionDispatcher.is_usable_for(X, Y, metric) assert BaseDistanceReductionDispatcher.is_usable_for(X_csr, Y_csr, metric) assert BaseDistanceReductionDispatcher.is_usable_for(X_csr, Y, metric) assert BaseDistanceReductionDispatcher.is_usable_for(X, Y_csr, metric) assert BaseDistanceReductionDispatcher.is_usable_for( X.astype(np.float64), Y.astype(np.float64), metric ) assert BaseDistanceReductionDispatcher.is_usable_for( X.astype(np.float32), Y.astype(np.float32), metric ) assert not BaseDistanceReductionDispatcher.is_usable_for( X.astype(np.int64), Y.astype(np.int64), metric ) assert not BaseDistanceReductionDispatcher.is_usable_for(X, Y, metric="pyfunc") assert not BaseDistanceReductionDispatcher.is_usable_for( X.astype(np.float32), Y, metric ) assert not BaseDistanceReductionDispatcher.is_usable_for( X, Y.astype(np.int32), metric ) # F-ordered arrays are not supported assert not BaseDistanceReductionDispatcher.is_usable_for( np.asfortranarray(X), Y, metric ) # We prefer not to use those implementations for fused sparse-dense when # metric="(sq)euclidean" because it's not yet the most efficient o
23
494
test_pairwise_distances_reduction_is_usable_for
17
0
2
7
airflow/providers/arangodb/hooks/arangodb.py
46,738
Adding ArangoDB Provider (#22548) * Adding ArangoDB Provider
airflow
11
Python
16
arangodb.py
def create_database(self, name):
    if not self.db_conn.has_database(name):
        self.db_conn.create_database(name)
        return True
    else:
        self.log.info('Database already exists: %s', name)
        return False
c758c76ac336c054fd17d4b878378aa893b7a979
42
https://github.com/apache/airflow.git
74
def create_database(self, name): if not self.db_conn.has_database(name): self.db_conn.create_database(name)
7
69
create_database
34
0
3
15
erpnext/manufacturing/doctype/bom_update_tool/bom_update_tool.py
64,563
refactor: Add exception handling in background job within BOM Update Tool
erpnext
14
Python
27
bom_update_tool.py
def replace_bom(args):
    try:
        frappe.db.auto_commit_on_many_writes = 1
        args = frappe._dict(args)

        doc = frappe.get_doc("BOM Update Tool")
        doc.current_bom = args.current_bom
        doc.new_bom = args.new_bom
        doc.replace_bom()
    except Exception:
        frappe.log_error(
            msg=frappe.get_traceback(), title=_("BOM Update Tool Error")
        )
    finally:
        frappe.db.auto_commit_on_many_writes = 0
f57725f8fa016b9826e8fdf2f14dbf1a3d9991f7
80
https://github.com/frappe/erpnext.git
19
def replace_bom(args): try: frappe.db.auto_commit_on_many_writes = 1 args = frappe._dict(args) doc = frappe.get_doc("BOM Update Tool") doc.current_bom
16
135
replace_bom
47
0
3
11
mmdet/models/dense_heads/ddod_head.py
244,274
[Feature] Support DDOD: Disentangle Your Dense Object Detector(ACM MM2021 oral) (#7279) * add ddod feature * add ddod feature * modify new * [Feature] modify ddod code0225 * [Feature] modify ddod code0226 * [Feature] modify ddod code0228 * [Feature] modify ddod code0228#7279 * [Feature] modify ddod code0301 * [Feature] modify ddod code0301 test draft * [Feature] modify ddod code0301 test * [Feature] modify ddod code0301 extra * [Feature] modify ddod code0301 delete src/mmtrack * [Feature] modify ddod code0302 * [Feature] modify ddod code0302(2) * [Feature] modify ddod code0303 * [Feature] modify ddod code0303(2) * [Feature] modify ddod code0303(3) * [Feature] modify ddod code0305 * [Feature] modify ddod code0305(2) delete diou * [Feature] modify ddod code0305(3) * modify ddod code0306 * [Feature] modify ddod code0307 * [Feature] modify ddod code0311 * [Feature] modify ddod code0311(2) * [Feature] modify ddod code0313 * update * [Feature] modify ddod code0319 * fix * fix lint * [Feature] modify ddod code0321 * update readme * [0502] compute common vars at once for get_target * [0504] update ddod conflicts * [0518] seperate reg and cls loss and get_target compute * [0518] merge ATSSCostAssigner to ATSSAssigner * [0518] refine ATSSAssigner * [0518] refine ATSSAssigner 2 * [0518] refine ATSSAssigner 2 * [0518] refine ATSSAssigner 3 * [0519] fix bugs * update * fix lr * update weight Co-authored-by: hha <1286304229@qq.com>
mmdetection
12
Python
33
ddod_head.py
def forward_single(self, x, scale):
    cls_feat = x
    reg_feat = x
    for cls_conv in self.cls_convs:
        cls_feat = cls_conv(cls_feat)
    for reg_conv in self.reg_convs:
        reg_feat = reg_conv(reg_feat)
    cls_score = self.atss_cls(cls_feat)
    # we just follow atss, not apply exp in bbox_pred
    bbox_pred = scale(self.atss_reg(reg_feat)).float()
    iou_pred = self.atss_iou(reg_feat)
    return cls_score, bbox_pred, iou_pred
151a803ed0119560f59dbe7b73824dbdcae08fc6
79
https://github.com/open-mmlab/mmdetection.git
139
def forward_single(self, x, scale): cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.atss_cls(cls_feat) # we just follow atss, not apply exp in bbox_pred bbox_pred = scale(self.atss_reg(reg_feat)).float() iou_pred = self.atss_iou(reg_f
17
130
forward_single
7
0
1
3
erpnext/patches/v14_0/update_batch_valuation_flag.py
64,440
refactor: use qb for patching flag
erpnext
12
Python
7
update_batch_valuation_flag.py
def execute():
    batch = frappe.qb.DocType("Batch")
    frappe.qb.update(batch).set(batch.use_batchwise_valuation, 0).run()
312db429e4605d6d0ce47d1034662fdf0ec053b7
36
https://github.com/frappe/erpnext.git
4
def execute(): batch = frappe.qb.D
9
63
execute
104
0
3
14
PyInstaller/utils/hooks/tcl_tk.py
264,062
hookutils: tcl/tk: port to PyInstaller.isolated framework
pyinstaller
10
Python
76
tcl_tk.py
def _get_tcl_tk_info():
    try:
        import tkinter
        from _tkinter import TCL_VERSION, TK_VERSION
    except ImportError:
        # tkinter unavailable
        return None, None, None, False

    tcl = tkinter.Tcl()

    # Query the location of Tcl library/data directory.
    tcl_dir = tcl.eval("info library")

    # Check if Tcl/Tk is built with multi-threaded support (built with --enable-threads), as indicated by the presence
    # of optional `threaded` member in `tcl_platform` array.
    try:
        tcl.getvar("tcl_platform(threaded)")  # Ignore the actual value.
        tcl_threaded = True
    except tkinter.TclError:
        tcl_threaded = False

    return tcl_dir, TCL_VERSION, TK_VERSION, tcl_threaded


# Populate the variables. If `tkinter` is unavailable, the values are set to `None` or `False`.
(
    tcl_dir,
    tcl_version,
    tk_version,
    tcl_threaded,
) = _get_tcl_tk_info()
2b2559af1c7790596e7b2040f48e56baef608f9d
68
https://github.com/pyinstaller/pyinstaller.git
196
def _get_tcl_tk_info(): try: import tkinter from _tkinter import TCL_VERSION, TK_VERSION except ImportError: # tkinter unavailable return None, None, None, False tcl = tkinter.Tcl() # Query the location of Tcl library/data directory. t
15
141
_get_tcl_tk_info
6
0
1
2
tests/sentry/utils/locking/backends/test_redis.py
92,287
ref(locks): Make the post_process locks backend configurable (#36328)
sentry
11
Python
6
test_redis.py
def test_cluster_as_str(self):
    assert RedisLockBackend(cluster="default").cluster == self.cluster
5cf12753665512f60b32a99dd8fd9aa27d0a4a3a
18
https://github.com/getsentry/sentry.git
12
def test_cluster_as_str(self): assert RedisLockBackend(cluster="defau
4
31
test_cluster_as_str
33
0
1
22
datasets/crd3/crd3.py
104,781
Fix yield for crd3 (#4240) * yielding example per chunk id * changing data type for turns * removing unused variable * Update crd3.py Co-authored-by: Shanya Sharma - s0s0cr3 <Shanya.Sharma@walmartlabs.com>
datasets
20
Python
27
crd3.py
def _info(self):
    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=datasets.Features(
            {
                "chunk": datasets.Value("string"),
                "chunk_id": datasets.Value("int32"),
                "turn_start": datasets.Value("int32"),
                "turn_end": datasets.Value("int32"),
                "alignment_score": datasets.Value("float32"),
                "turns": [
                    {
                        "names": datasets.features.Sequence(datasets.Value("string")),
                        "utterances": datasets.features.Sequence(datasets.Value("string")),
                        "number": datasets.Value("int32"),
                    }
                ],
            }
        ),
        homepage="https://github.com/RevanthRameshkumar/CRD3",
        citation=_CITATION,
    )
23efe55f5547c640f9efdcb2bc678fb7b76e663e
126
https://github.com/huggingface/datasets.git
391
def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "chunk": datasets.Value("string"), "chunk_id": datasets.Value("int32"), "turn_start": datasets.Value("int32"), "turn_end": datasets.Value("int32"), "alignment_score": datasets.Value("float32"), "turns": [ { "names": datasets.features.Sequence(datas
13
221
_info