Dataset schema (22 columns). For string columns, min/max are string lengths; for int64 columns, min/max are values:

| column | dtype | min | max |
|---|---|---|---|
| commit_message | string | 3 | 18.1k |
| n_identifiers | int64 | 1 | 157 |
| ast_levels | int64 | 4 | 28 |
| random_cut | string | 21 | 13.9k |
| n_ast_nodes | int64 | 10 | 3.6k |
| file_name | string | 5 | 67 |
| n_whitespaces | int64 | 4 | 13.9k |
| vocab_size | int64 | 3 | 677 |
| code | string | 101 | 24k |
| fun_name | string | 3 | 72 |
| nloc | int64 | 2 | 546 |
| n_words | int64 | 3 | 1.95k |
| url | string | 31 | 61 |
| token_counts | int64 | 7 | 3.77k |
| repo | string | 3 | 28 |
| language | stringclasses (1 value) | — | — |
| n_ast_errors | int64 | 0 | 2 |
| complexity | int64 | 1 | 151 |
| ast_errors | string | 0 | 2.76k |
| commit_id | string | 40 | 40 |
| path | string | 8 | 125 |
| id | int64 | 280 | 339k |

In each record below, `random_cut` is a truncated prefix of `code`, and `ast_errors` is omitted when empty (`n_ast_errors` = 0).
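The column statistics above follow the Hugging Face `datasets` schema convention. As a minimal sketch of how such a dump is usually inspected — assuming it is published as a Hugging Face dataset; the repo id `user/python-commits` is a placeholder, since the source never names the dataset:

```python
from datasets import load_dataset  # pip install datasets

# Placeholder repo id -- the source does not name the dataset.
ds = load_dataset("user/python-commits", split="train")

print(ds.features)            # column names and dtypes, matching the table above
row = ds[0]
print(row["commit_message"])  # e.g. the sentry record below
print(row["code"])            # full function source; `random_cut` is a truncated prefix of it
```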
---
commit_message: feat(alert-preview): last triggered (#42098) Attaches `last_triggered` to group info. `preview` now returns a mapping of group_ids to triggers, updated tests to reflect that.
n_identifiers: 20
ast_levels: 16
random_cut: |
  def test_frequency_condition_alone(self):
      prev_hour = timezone.now() - timedelta(hours=1)
      group = None
      for i in range(5):
          group = self.store_event(
              project_id=self.project.id, data={"timestamp": iso_format(prev_hour)}
          ).group
      conditions = [
          {
n_ast_nodes: 207
file_name: test_preview.py
n_whitespaces: 230
vocab_size: 39
code: |
  def test_frequency_condition_alone(self):
      prev_hour = timezone.now() - timedelta(hours=1)
      group = None
      for i in range(5):
          group = self.store_event(
              project_id=self.project.id, data={"timestamp": iso_format(prev_hour)}
          ).group
      conditions = [
          {
              "id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition",
              "value": 4,
              "interval": "5m",
          }
      ]
      result = preview(self.project, conditions, [], *MATCH_ARGS)
      assert group.id in result
      conditions[0]["value"] = 5
      result = preview(self.project, conditions, [], *MATCH_ARGS)
      assert group.id not in result
fun_name: test_frequency_condition_alone
nloc: 19
n_words: 57
url: https://github.com/getsentry/sentry.git
token_counts: 129
repo: sentry
language: Python
n_ast_errors: 0
complexity: 2
commit_id: 583a7ec15744b2ca8a9c56df484516111dbf783d
path: tests/sentry/rules/history/test_preview.py
id: 89,412
---
commit_message: TEST-#3655: Check that Modin is defaulting to Pandas. (#3656) Co-authored-by: Dmitry Chigarev <62142979+dchigarev@users.noreply.github.com> Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com> Signed-off-by: mvashishtha <mahesh@ponder.io>
n_identifiers: 13
ast_levels: 9
random_cut: |
  def test_expanding(data):
      modin_series, _ = create_
n_ast_nodes: 67
file_name: test_series.py
n_whitespaces: 27
vocab_size: 15
code: |
  def test_expanding(data):
      modin_series, _ = create_test_series(data)  # noqa: F841
      with warns_that_defaulting_to_pandas():
          modin_series.expanding()

  @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
fun_name: test_expanding
nloc: 4
n_words: 15
url: https://github.com/modin-project/modin.git
token_counts: 23
repo: modin
language: Python
n_ast_errors: 1
complexity: 1
ast_errors: |
  @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
commit_id: be2716f393fddd2f669f26616f80e051fc7ceee6
path: modin/pandas/test/test_series.py
id: 153,032
---
commit_message: TST: Remove unused fixtures (#45692) * TST: Remove unused fixtures * Undo a removed fixture * Add back other fixtures * Undo a file * Try undoing this? * Revert "Try undoing this?" This reverts commit 0e56cb04f5e8cb1f7b2ac4c5e6191485bb2fe1ab.
n_identifiers: 7
ast_levels: 11
random_cut: |
  def setup_method(self):
      self.df = DataFrame({"A": [1, 2, 3]})
      self.expected1 = self.df[self.df.A > 0]
      self.expected2 = self.df.A + 1
n_ast_nodes: 78
file_name: test_query_eval.py
n_whitespaces: 38
vocab_size: 16
code: |
  def setup_method(self):
      self.df = DataFrame({"A": [1, 2, 3]})
      self.expected1 = self.df[self.df.A > 0]
      self.expected2 = self.df.A + 1
fun_name: setup_method
nloc: 4
n_words: 18
url: https://github.com/pandas-dev/pandas.git
token_counts: 50
repo: pandas
language: Python
n_ast_errors: 0
complexity: 1
commit_id: f46df091df3afea25a273f491d1f6b2c7d20b32c
path: pandas/tests/frame/test_query_eval.py
id: 164,047
---
commit_message: Fixed bug when chaining a chord with a group (#7919) * Reproduced Bug from Issue #5958 * Fixed Issue #5958 * Added unit test: test_chord__or__group_of_single_task() * Added unit test: test_chord_upgrade_on_chaining() * Added unit test: test_chain_of_chord__or__group_of_single_task() * Added unit test: test_chain_of_chord_upgrade_on_chaining()
n_identifiers: 41
ast_levels: 16
random_cut: |
  def test_chaining_upgraded_chords_mixed_canvas(self, manager, subtests):
      try:
          manager.app.backend.ensure_chords_allowed()
      except NotImplementedError as e:
          raise pytest.skip(e.args[0])
      if not manager.app.conf.result_backend.startswith('redis'):
          raise pytest.skip('Requires redis result backend.')
      redis_connection = get_redis_connection()
      redis_key = 'echo_chamber'
      c = chain(
          chord(group([redis_echo.si('1', redis_key=redis_key),
                       redis_echo.si('2', redis_key=redis_key),
                       redis_echo.si('3', redis_key=redis_key)]),
                group([redis_echo.si('4', redis_key=redis_key),
                       redis_echo.si('5', redis_key=redis_key),
                       redis_echo.si('6', redis_key=redis_key)])),
          redis_echo.si('7', redis_key=redis_key),
          group(
              redis_echo.si('8', redis_key=redis_key),
          ),
          redis_echo.si('9', redis_key=redis_key),
          redis_echo.si('Done', redis_key='Done'),
      )
      with subtests.test(msg='Run the chain and wait for completion'):
          redis_connection.delete(redis_key, 'Done')
          c.delay().get(timeout=TIMEOUT)
          await_redis_list_message_length(1, redis_key='Done', timeout=10)
      with subtests.test(msg='All tasks are executed once'):
          actual = [sig.decode('utf-8') for sig
n_ast_nodes: 532
file_name: test_canvas.py
n_whitespaces: 495
vocab_size: 80
code: |
  def test_chaining_upgraded_chords_mixed_canvas(self, manager, subtests):
      try:
          manager.app.backend.ensure_chords_allowed()
      except NotImplementedError as e:
          raise pytest.skip(e.args[0])
      if not manager.app.conf.result_backend.startswith('redis'):
          raise pytest.skip('Requires redis result backend.')
      redis_connection = get_redis_connection()
      redis_key = 'echo_chamber'
      c = chain(
          chord(group([redis_echo.si('1', redis_key=redis_key),
                       redis_echo.si('2', redis_key=redis_key),
                       redis_echo.si('3', redis_key=redis_key)]),
                group([redis_echo.si('4', redis_key=redis_key),
                       redis_echo.si('5', redis_key=redis_key),
                       redis_echo.si('6', redis_key=redis_key)])),
          redis_echo.si('7', redis_key=redis_key),
          group(
              redis_echo.si('8', redis_key=redis_key),
          ),
          redis_echo.si('9', redis_key=redis_key),
          redis_echo.si('Done', redis_key='Done'),
      )
      with subtests.test(msg='Run the chain and wait for completion'):
          redis_connection.delete(redis_key, 'Done')
          c.delay().get(timeout=TIMEOUT)
          await_redis_list_message_length(1, redis_key='Done', timeout=10)
      with subtests.test(msg='All tasks are executed once'):
          actual = [sig.decode('utf-8') for sig in redis_connection.lrange(redis_key, 0, -1)]
          expected = [str(i) for i in range(1, 10)]
          with subtests.test(msg='All tasks are executed once'):
              assert sorted(actual) == sorted(expected)
      # Cleanup
      redis_connection.delete(redis_key, 'Done')
fun_name: test_chaining_upgraded_chords_mixed_canvas
nloc: 33
n_words: 103
url: https://github.com/celery/celery.git
token_counts: 321
repo: celery
language: Python
n_ast_errors: 0
complexity: 5
commit_id: 87613c780ccd92c8b2694becfb50511a6052e8f1
path: t/integration/test_canvas.py
id: 208,362
---
commit_message: [jax2tf] Fix conv1d padding; it's already normalized before the _pad_spatial_dims call. Enable non-XLA tests of conv1d. PiperOrigin-RevId: 461556553
n_identifiers: 16
ast_levels: 11
random_cut: |
  def _pad_spatial_dims(x, x_shape, padding):
      # Add empty padding for batch and feature dimensions.
      no_pad = ((0, 0),)
      padding = tuple(padding)
      padding = no_pad + padding + no_pad
      x = tf.pad(x, padding)
      assert len(x.shape) == len(p
n_ast_nodes: 141
file_name: impl_no_xla.py
n_whitespaces: 64
vocab_size: 40
code: |
  def _pad_spatial_dims(x, x_shape, padding):
      # Add empty padding for batch and feature dimensions.
      no_pad = ((0, 0),)
      padding = tuple(padding)
      padding = no_pad + padding + no_pad
      x = tf.pad(x, padding)
      assert len(x.shape) == len(padding)
      x_shape = tuple(p0 + xs + p1 for xs, (p0, p1) in zip(x_shape, padding))
      jax2tf._assert_matching_abstract_shape(x, x_shape)
      return x, x_shape
fun_name: _pad_spatial_dims
nloc: 9
n_words: 54
url: https://github.com/google/jax.git
token_counts: 92
repo: jax
language: Python
n_ast_errors: 0
complexity: 2
commit_id: ae4aee762a6ab18b17d61b68d8ee32d2c4e3b957
path: jax/experimental/jax2tf/impl_no_xla.py
id: 121,216
---
commit_message: upd; format
n_identifiers: 7
ast_levels: 12
random_cut: |
  def iteritems(self):
      fo
n_ast_nodes: 63
file_name: _collections.py
n_whitespaces: 67
vocab_size: 13
code: |
  def iteritems(self):
      for key in self:
          vals = self._container[key.lower()]
          for val in vals[1:]:
              yield vals[0], val
fun_name: iteritems
nloc: 5
n_words: 16
url: https://github.com/jindongwang/transferlearning.git
token_counts: 39
repo: transferlearning
language: Python
n_ast_errors: 0
complexity: 3
commit_id: f638f5d0e6c8ebed0e69a6584bc7f003ec646580
path: .venv/lib/python3.8/site-packages/pip/_vendor/urllib3/_collections.py
id: 63,913
---
commit_message: Refactor sleepiq as async with config flow (#64850) Co-authored-by: J. Nick Koston <nick@koston.org>
n_identifiers: 6
ast_levels: 13
random_cut: |
  def mock_sleepers():
      return [
          Sleeper(sleeper)
          for sleeper in json.loads(load_fixture("sleeper.json", "sleepiq"))["sleepers
n_ast_nodes: 53
file_name: conftest.py
n_whitespaces: 34
vocab_size: 11
code: |
  def mock_sleepers():
      return [
          Sleeper(sleeper)
          for sleeper in json.loads(load_fixture("sleeper.json", "sleepiq"))["sleepers"]
      ]
fun_name: mock_sleepers
nloc: 5
n_words: 11
url: https://github.com/home-assistant/core.git
token_counts: 29
repo: core
language: Python
n_ast_errors: 0
complexity: 2
commit_id: 0bd0b4766e8221584a74bffc7c2f0430c23169df
path: tests/components/sleepiq/conftest.py
id: 292,026
---
commit_message: String formatting and max line length - Part 4 (#84445) Co-authored-by: jjlawren <jjlawren@users.noreply.github.com>
n_identifiers: 23
ast_levels: 12
random_cut: |
  async def _async_update_data(self) -> PlugwiseData:
      try:
          if not self._connected:
              await self._connect()
          data = await self.api.async_update()
      except InvalidAuthentication as err:
          raise ConfigEntryError("Invalid username or Smile ID") from err
      except (InvalidXMLError, ResponseError) as err:
          raise UpdateFailed(
              "Invalid XML data, or error indication received for the Plugwise"
              " Adam/Smile/Stretch"
          ) from err
n_ast_nodes: 197
file_name: coordinator.py
n_whitespaces: 282
vocab_size: 59
code: |
  async def _async_update_data(self) -> PlugwiseData:
      try:
          if not self._connected:
              await self._connect()
          data = await self.api.async_update()
      except InvalidAuthentication as err:
          raise ConfigEntryError("Invalid username or Smile ID") from err
      except (InvalidXMLError, ResponseError) as err:
          raise UpdateFailed(
              "Invalid XML data, or error indication received for the Plugwise"
              " Adam/Smile/Stretch"
          ) from err
      except UnsupportedDeviceError as err:
          raise ConfigEntryError("Device with unsupported firmware") from err
      except ConnectionFailedError as err:
          raise UpdateFailed("Failed to connect to the Plugwise Smile") from err
      return PlugwiseData(
          gateway=cast(GatewayData, data[0]),
          devices=cast(dict[str, DeviceData], data[1]),
      )
fun_name: _async_update_data
nloc: 21
n_words: 82
url: https://github.com/home-assistant/core.git
token_counts: 118
repo: core
language: Python
n_ast_errors: 0
complexity: 6
commit_id: 94755a5773f8197153ab9bffe83b9711f3a76d9d
path: homeassistant/components/plugwise/coordinator.py
id: 297,951
---
commit_message: [dynamic-shapes] revive basic bounded int machinery, add tests
n_identifiers: 35
ast_levels: 14
random_cut: |
  def _iota_abstract_eval(*, dtype, shape, dimension):
      _check_shapelike("iota", "shape", shape)
      if not any(dtypes.issubdtype(dtype, t) for t in _num):
          msg = 'iota does not accept dtyp
n_ast_nodes: 264
file_name: lax.py
n_whitespaces: 137
vocab_size: 77
code: |
  def _iota_abstract_eval(*, dtype, shape, dimension):
      _check_shapelike("iota", "shape", shape)
      if not any(dtypes.issubdtype(dtype, t) for t in _num):
          msg = 'iota does not accept dtype {}. Accepted dtypes are subtypes of {}.'
          typename = str(np.dtype(dtype).name)
          accepted_typenames = (t.__name__ for t in _num)
          raise TypeError(msg.format(typename, ', '.join(accepted_typenames)))
      if not 0 <= dimension < len(shape):
          raise ValueError("iota dimension must be between 0 and len(shape), got "
                           f"dimension={dimension} for shape {shape}")
      if not any(isinstance(d, core.BInt) for d in shape):
          return ShapedArray(shape, dtype)
      # TODO(mattjj): unify DShapedArray with ShapedArray, and remove this code
      return core.DShapedArray(shape, dtype, False)

  iota_p = Primitive('iota')
  iota_p.def_impl(partial(xla.apply_primitive, iota_p))
  iota_p.def_abstract_eval(_iota_abstract_eval)
fun_name: _iota_abstract_eval
nloc: 13
n_words: 97
url: https://github.com/google/jax.git
token_counts: 135
repo: jax
language: Python
n_ast_errors: 0
complexity: 7
commit_id: 98e71fe31de8f6ea26be76488d41fb471fef56eb
path: jax/_src/lax/lax.py
id: 121,136
---
commit_message: Feature/optional opencv (#1400) * Removed opencv dependency Now OpenCV is optional and detectors are smart to skip if cv2 could not be imported. Also refactored face detector a bit to make it more maintainable. Now thumbor can be installed with pip install thumbor pip install thumbor[all] pip install thumbor[opencv] pip install thumbor[tests]
n_identifiers: 8
ast_levels: 11
random_cut: |
  def cascade(self) -> None:
      if not hasattr(self, "_cascade"):
          setattr(self, "_cascade", cv2.CascadeClassifier(CASCADE_FILE_PATH))
      return getattr(self, "_cas
n_ast_nodes: 61
file_name: redeye.py
n_whitespaces: 38
vocab_size: 14
code: |
  def cascade(self) -> None:
      if not hasattr(self, "_cascade"):
          setattr(self, "_cascade", cv2.CascadeClassifier(CASCADE_FILE_PATH))
      return getattr(self, "_cascade")
fun_name: cascade
nloc: 4
n_words: 14
url: https://github.com/thumbor/thumbor.git
token_counts: 36
repo: thumbor
language: Python
n_ast_errors: 0
complexity: 2
commit_id: d34fd16034e307b545c3e3adfa4d9d472a582cc6
path: thumbor/filters/redeye.py
id: 190,898
---
commit_message: Refs #33476 -- Reformatted code with Black.
n_identifiers: 11
ast_levels: 13
random_cut: |
  def __deepcopy__(self, memo):
      obj = self.__class__()
      for k, v in self.
n_ast_nodes: 98
file_name: query.py
n_whitespaces: 105
vocab_size: 21
code: |
  def __deepcopy__(self, memo):
      obj = self.__class__()
      for k, v in self.__dict__.items():
          if k == "_result_cache":
              obj.__dict__[k] = None
          else:
              obj.__dict__[k] = copy.deepcopy(v, memo)
      return obj
fun_name: __deepcopy__
nloc: 8
n_words: 25
url: https://github.com/django/django.git
token_counts: 60
repo: django
language: Python
n_ast_errors: 0
complexity: 3
commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00
path: django/db/models/query.py
id: 205,744
---
commit_message: Make execution plan/blocklist aware of the memory ownership and who runs the plan (#26650) Having the indicator about who's running the stage and who created a blocklist will enable the eager memory releasing. This is an alternative with better abstraction to https://github.com/ray-project/ray/pull/26196. Note: this doesn't work for Dataset.split() yet, will do in a followup PR.
n_identifiers: 16
ast_levels: 11
random_cut: |
  def copy(self) -> "LazyBlockList":
      return LazyBlockList(
          self._tasks.copy(),
          block_partition_refs=self._block_partition_refs.copy(),
          block_partition_meta_refs=self._block_partition_meta_refs.copy(),
n_ast_nodes: 102
file_name: lazy_block_list.py
n_whitespaces: 104
vocab_size: 14
code: |
  def copy(self) -> "LazyBlockList":
      return LazyBlockList(
          self._tasks.copy(),
          block_partition_refs=self._block_partition_refs.copy(),
          block_partition_meta_refs=self._block_partition_meta_refs.copy(),
          cached_metadata=self._cached_metadata,
          ray_remote_args=self._remote_args.copy(),
          owned_by_consumer=self._owned_by_consumer,
          stats_uuid=self._stats_uuid,
      )
fun_name: copy
nloc: 10
n_words: 14
url: https://github.com/ray-project/ray.git
token_counts: 67
repo: ray
language: Python
n_ast_errors: 0
complexity: 1
commit_id: 8553df49bba654a9edd6befce198be90d6524fca
path: python/ray/data/_internal/lazy_block_list.py
id: 125,417
---
commit_message: Initial commit for the couchbase handler
n_identifiers: 22
ast_levels: 12
random_cut: |
  def get_tables(self) -> Response:
      cluster = self.connect()
      bucket = cluster.bucket(self.bucket_name)
      collections = []
      for _scope in bucket.collections().get_all_scopes():
          for __collections in _scope.collections:
              collections.append(__collections.name)
      collections_ar = [
          [i] for i in collections
      ]
      df = pd.DataFrame(collections_ar, col
n_ast_nodes: 152
file_name: couchbase_handler.py
n_whitespaces: 212
vocab_size: 31
code: |
  def get_tables(self) -> Response:
      cluster = self.connect()
      bucket = cluster.bucket(self.bucket_name)
      collections = []
      for _scope in bucket.collections().get_all_scopes():
          for __collections in _scope.collections:
              collections.append(__collections.name)
      collections_ar = [
          [i] for i in collections
      ]
      df = pd.DataFrame(collections_ar, columns=['TABLE_NAME'])
      response = Response(
          RESPONSE_TYPE.TABLE,
          df
      )
      return response
fun_name: get_tables
nloc: 19
n_words: 43
url: https://github.com/mindsdb/mindsdb.git
token_counts: 94
repo: mindsdb
language: Python
n_ast_errors: 0
complexity: 4
commit_id: 41f58415fbd45c9ce0fb47962949e40e488424c6
path: mindsdb/integrations/handlers/couchbase_handler/couchbase_handler.py
id: 115,589
---
commit_message: Add QuickDraw dataset (#3592) * Add QuickDraw dataset * Style * Add infos file, dummy data, improve script * Add info and dummy data * Test readme * Finish readme * Delete generate_dummy.py * Remove whitespace
n_identifiers: 23
ast_levels: 13
random_cut: |
  def process_struct(fileobj):
      (key_id,) = struct.unpack("Q", fileobj.read(8))
      (country_code,) = struct.unpack("2s", fileobj.read(2))
      (recognized,) = struct.unpack("b", fileobj.read(1))
      (timestamp,) = struct.unpack("I", fileobj.read(4))
      (n_strokes,) = struct.unpack("H", fileobj.read(2))
      drawing = []
      for _ in range(n_str
n_ast_nodes: 365
file_name: quickdraw.py
n_whitespaces: 163
vocab_size: 49
code: |
  def process_struct(fileobj):
      (key_id,) = struct.unpack("Q", fileobj.read(8))
      (country_code,) = struct.unpack("2s", fileobj.read(2))
      (recognized,) = struct.unpack("b", fileobj.read(1))
      (timestamp,) = struct.unpack("I", fileobj.read(4))
      (n_strokes,) = struct.unpack("H", fileobj.read(2))
      drawing = []
      for _ in range(n_strokes):
          (n_points,) = struct.unpack("H", fileobj.read(2))
          fmt = str(n_points) + "B"
          x = struct.unpack(fmt, fileobj.read(n_points))
          y = struct.unpack(fmt, fileobj.read(n_points))
          drawing.append({"x": list(x), "y": list(y)})
      return {
          "key_id": str(key_id),
          "recognized": recognized,
          "timestamp": datetime.fromtimestamp(timestamp),
          "countrycode": country_code.decode("utf-8"),
          "drawing": drawing,
      }
fun_name: process_struct
nloc: 20
n_words: 63
url: https://github.com/huggingface/datasets.git
token_counts: 220
repo: datasets
language: Python
n_ast_errors: 0
complexity: 2
commit_id: 1c1eaf96d5ef4623e36c9124d49e88ab476dd655
path: datasets/quickdraw/quickdraw.py
id: 105,091
---
commit_message: apply black py to all python files
n_identifiers: 7
ast_levels: 10
random_cut: |
  def check_connection(self, timeout_seconds=0):
n_ast_nodes: 72
file_name: __init__.py
n_whitespaces: 71
vocab_size: 15
code: |
  def check_connection(self, timeout_seconds=0):
      while not self._has_connection() and timeout_seconds > 0:
          time.sleep(0.1)
          timeout_seconds -= 0.1
          print("waiting")
      return self._has_connection()
fun_name: check_connection
nloc: 6
n_words: 17
url: https://github.com/fossasia/visdom.git
token_counts: 45
repo: visdom
language: Python
n_ast_errors: 0
complexity: 3
commit_id: 5b8b7f267cfaf76a2a39a727ef31a62b3909a093
path: py/visdom/__init__.py
id: 106,840
---
commit_message: [CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
n_identifiers: 10
ast_levels: 8
random_cut: |
  def test_namespace_client():
      cluster = Cluster()
      cluster.add_node(num_cpus=4, ray_client_server_port=8080)
      cluster.wait_for_nodes(1)
      template =
n_ast_nodes: 57
file_name: test_namespace.py
n_whitespaces: 22
vocab_size: 9
code: |
  def test_namespace_client():
      cluster = Cluster()
      cluster.add_node(num_cpus=4, ray_client_server_port=8080)
      cluster.wait_for_nodes(1)
      template =
fun_name: test_namespace_client
nloc: 28
n_words: 10
url: https://github.com/ray-project/ray.git
token_counts: 104
repo: ray
language: Python
n_ast_errors: 2
complexity: 1
ast_errors: template = """ import ray ray.util.connect("{address}", namespace="{namespace}")@ray.remote
commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
path: python/ray/tests/test_namespace.py
id: 131,615
---
commit_message: Refs #33476 -- Reformatted code with Black.
n_identifiers: 9
ast_levels: 10
random_cut: |
  def test_adapt_unknown_value_decimal(self):
      value = decimal.Decimal("3.14")
      self.assertEqual(
n_ast_nodes: 59
file_name: test_operations.py
n_whitespaces: 51
vocab_size: 9
code: |
  def test_adapt_unknown_value_decimal(self):
      value = decimal.Decimal("3.14")
      self.assertEqual(
          self.ops.adapt_unknown_value(value),
          self.ops.adapt_decimalfield_value(value),
      )
fun_name: test_adapt_unknown_value_decimal
nloc: 6
n_words: 9
url: https://github.com/django/django.git
token_counts: 36
repo: django
language: Python
n_ast_errors: 0
complexity: 1
commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00
path: tests/backends/base/test_operations.py
id: 201,689
---
commit_message: Adjust pylint plugin to enforce device_tracker type hints (#64903) * Adjust pylint plugin to enforce device_tracker type hints * Use a constant for the type hint matchers * Add tests * Add x_of_y match * Adjust bluetooth_tracker * Adjust mysensors * Adjust tile Co-authored-by: epenet <epenet@users.noreply.github.com>
n_identifiers: 11
ast_levels: 11
random_cut: |
  def test_regex_x_of_y_comma_z(string, expected_x, expected_y, expected_z):
n_ast_nodes: 145
file_name: test_enforce_type_hints.py
n_whitespaces: 57
vocab_size: 28
code: |
  def test_regex_x_of_y_comma_z(string, expected_x, expected_y, expected_z):
      assert (match := _TYPE_HINT_MATCHERS["x_of_y_comma_z"].match(string))
      assert match.group(0) == string
      assert match.group(1) == expected_x
      assert match.group(2) == expected_y
      assert match.group(3) == expected_z

  @pytest.mark.parametrize(
      ("string", "expected_a", "expected_b"),
      [("DiscoveryInfoType | None", "DiscoveryInfoType", "None")],
  )
fun_name: test_regex_x_of_y_comma_z
nloc: 6
n_words: 35
url: https://github.com/home-assistant/core.git
token_counts: 62
repo: core
language: Python
n_ast_errors: 1
complexity: 1
ast_errors: |
  @pytest.mark.parametrize(
      ("string", "expected_a", "expected_b"),
      [("DiscoveryInfoType | None", "DiscoveryInfoType", "None")],
  )
commit_id: 367521e369839e6504989603b1282c2ba31dad49
path: tests/pylint/test_enforce_type_hints.py
id: 311,059
---
commit_message: fix the rest
n_identifiers: 19
ast_levels: 12
random_cut: |
  def add_update(self, updates):
      call_context = base_layer_utils.call_context()
      if (
          tf.distribute.has_strategy()
          and tf.distribute.in_cross_replica_context()
n_ast_nodes: 138
file_name: base_layer_v1.py
n_whitespaces: 227
vocab_size: 54
code: |
  def add_update(self, updates):
      call_context = base_layer_utils.call_context()
      if (
          tf.distribute.has_strategy()
          and tf.distribute.in_cross_replica_context()
          # When saving the model, the distribution strategy context should be
          # ignored, following the default path for adding updates.
          and not call_context.saving
      ):
          # Updates don't need to be run in a cross-replica context.
          return
      updates = generic_utils.to_list(updates)
      if call_context.in_call:
          relevant_inputs = call_context.inputs
      else:
          inbound_nodes = getattr(self, "_inbound_nodes", [])
          relevant_inputs = [node.input_tensors for node in inbound_nodes]
fun_name: add_update
nloc: 17
n_words: 68
url: https://github.com/keras-team/keras.git
token_counts: 104
repo: keras
language: Python
n_ast_errors: 0
complexity: 7
commit_id: 5cf72f4934f3104ac2378c8b9b3638afea38ba1e
path: keras/engine/base_layer_v1.py
id: 278,599
---
commit_message: [dev] fix export model bug in DETR (#7120)
n_identifiers: 21
ast_levels: 14
random_cut: |
  def forward(self, src, src_mask=None, pos_embed=None):
      residual = src
      if self.normalize_before:
          src = self.norm1(src)
      q = k = self.with_pos_embed(src, pos_embed)
      src = self.self_attn(q, k, value=src, attn_mask=src_mask)
      src = residual + self.dropout1(src)
      if not self.normalize_before:
          src = self.norm1(src)
      residual = src
      if self.normalize_before:
          src = self.norm2(src)
      src = self.linear2(self.dropout(self.activation(self.linear1(src))))
      src = residual + self.dropout2(src)
n_ast_nodes: 234
file_name: detr_transformer.py
n_whitespaces: 187
vocab_size: 26
code: |
  def forward(self, src, src_mask=None, pos_embed=None):
      residual = src
      if self.normalize_before:
          src = self.norm1(src)
      q = k = self.with_pos_embed(src, pos_embed)
      src = self.self_attn(q, k, value=src, attn_mask=src_mask)
      src = residual + self.dropout1(src)
      if not self.normalize_before:
          src = self.norm1(src)
      residual = src
      if self.normalize_before:
          src = self.norm2(src)
      src = self.linear2(self.dropout(self.activation(self.linear1(src))))
      src = residual + self.dropout2(src)
      if not self.normalize_before:
          src = self.norm2(src)
      return src
fun_name: forward
nloc: 18
n_words: 60
url: https://github.com/PaddlePaddle/PaddleDetection.git
token_counts: 160
repo: PaddleDetection
language: Python
n_ast_errors: 0
complexity: 5
commit_id: fa67fb9f88ff7b03ca24a4f80e0fde2ef6d80384
path: ppdet/modeling/transformers/detr_transformer.py
id: 211,590
---
commit_message: Refs #33476 -- Reformatted code with Black.
n_identifiers: 21
ast_levels: 18
random_cut: |
  def _check_m2m_through_same_relationship(cls):
      errors = []
      seen_intermediary_signatures = []
      fields = cls._meta.local_many_to_many
      # Skip when the target model wasn't found.
      fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))
      # Skip when the relationship model wasn't found.
      fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))
      for f in fields:
          signature = (
              f.remote_field.model,
              cls,
              f.remote_field.through,
              f.remote_field.through_fields,
          )
          if signature in seen_intermediary_signatures:
n_ast_nodes: 215
file_name: base.py
n_whitespaces: 460
vocab_size: 53
code: |
  def _check_m2m_through_same_relationship(cls):
      errors = []
      seen_intermediary_signatures = []
      fields = cls._meta.local_many_to_many
      # Skip when the target model wasn't found.
      fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))
      # Skip when the relationship model wasn't found.
      fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))
      for f in fields:
          signature = (
              f.remote_field.model,
              cls,
              f.remote_field.through,
              f.remote_field.through_fields,
          )
          if signature in seen_intermediary_signatures:
              errors.append(
                  checks.Error(
                      "The model has two identical many-to-many relations "
                      "through the intermediate model '%s'."
                      % f.remote_field.through._meta.label,
                      obj=cls,
                      id="models.E003",
                  )
              )
          else:
              seen_intermediary_signatures.append(signature)
      return errors
fun_name: _check_m2m_through_same_relationship
nloc: 26
n_words: 88
url: https://github.com/django/django.git
token_counts: 136
repo: django
language: Python
n_ast_errors: 0
complexity: 7
commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00
path: django/db/models/base.py
id: 205,412
---
commit_message: Add optional frame argument to parallel axis method
n_identifiers: 30
ast_levels: 14
random_cut: |
  def test_parallel_axis():
      N = Refe
n_ast_nodes: 293
file_name: test_rigidbody.py
n_whitespaces: 162
vocab_size: 60
code: |
  def test_parallel_axis():
      N = ReferenceFrame('N')
      m, Ix, Iy, Iz, a, b = symbols('m, I_x, I_y, I_z, a, b')
      Io = inertia(N, Ix, Iy, Iz)
      o = Point('o')
      p = o.locatenew('p', a * N.x + b * N.y)
      R = RigidBody('R', o, N, m, (Io, o))
      Ip = R.parallel_axis(p)
      Ip_expected = inertia(N, Ix + m * b**2, Iy + m * a**2,
                            Iz + m * (a**2 + b**2), ixy=-m * a * b)
      assert Ip == Ip_expected
      A = ReferenceFrame('A')
      A.orient_axis(N, N.z, 1)
      assert (R.parallel_axis(p, A).to_matrix(A) -
              Ip_expected.to_matrix(A)).simplify() == zeros(3, 3)
fun_name: test_parallel_axis
nloc: 15
n_words: 91
url: https://github.com/sympy/sympy.git
token_counts: 191
repo: sympy
language: Python
n_ast_errors: 0
complexity: 1
commit_id: 801e149d69d5f88919a735f8b55b6024f97c6950
path: sympy/physics/mechanics/tests/test_rigidbody.py
id: 199,472
---
commit_message: renamed dev_str arg to dev for all methods.
n_identifiers: 7
ast_levels: 7
random_cut: |
  def test_set_framework(fw_str, dev, call):
      ivy.set_framework(fw_str)
      ivy.unset_framework()

  # use_framework
n_ast_nodes: 33
file_name: test_general.py
n_whitespaces: 12
vocab_size: 8
code: |
  def test_set_framework(fw_str, dev, call):
      ivy.set_framework(fw_str)
      ivy.unset_framework()

  # use_framework
fun_name: test_set_framework
nloc: 3
n_words: 8
url: https://github.com/unifyai/ivy.git
token_counts: 20
repo: ivy
language: Python
n_ast_errors: 0
complexity: 1
commit_id: d743336b1f3654cd0315f380f43eed4116997c1d
path: ivy_tests/test_core/test_general.py
id: 213,825
---
commit_message: Reformatting the codebase with black. PiperOrigin-RevId: 450093126
n_identifiers: 9
ast_levels: 9
random_cut: |
  def test_recurrent_dropout_with_implementation_restriction(self):
      laye
n_ast_nodes: 51
file_name: gru_test.py
n_whitespaces: 42
vocab_size: 21
code: |
  def test_recurrent_dropout_with_implementation_restriction(self):
      layer = keras.layers.GRU(2, recurrent_dropout=0.1, implementation=2)
      # The implementation is force to 1 due to the limit of recurrent_dropout.
      self.assertEqual(layer.implementation, 1)
fun_name: test_recurrent_dropout_with_implementation_restriction
nloc: 3
n_words: 22
url: https://github.com/keras-team/keras.git
token_counts: 35
repo: keras
language: Python
n_ast_errors: 0
complexity: 1
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf
path: keras/layers/rnn/gru_test.py
id: 273,929
---
commit_message: add vl
n_identifiers: 27
ast_levels: 13
random_cut: |
  def forward(self, predicts, batch):
      text_pre = predicts[0]
      target = batch[1].astype('int64')
      label_flatten, length = self.flatten_label(target)
      text_pre = self._flatten(text_pre, length)
      if self.mode == 'LF_1':
          loss = self.loss_func(text_pre, label_flatten)
      else:
          text_rem = predicts[1]
          text_mas = predicts[2]
n_ast_nodes: 301
file_name: rec_vl_loss.py
n_whitespaces: 272
vocab_size: 54
code: |
  def forward(self, predicts, batch):
      text_pre = predicts[0]
      target = batch[1].astype('int64')
      label_flatten, length = self.flatten_label(target)
      text_pre = self._flatten(text_pre, length)
      if self.mode == 'LF_1':
          loss = self.loss_func(text_pre, label_flatten)
      else:
          text_rem = predicts[1]
          text_mas = predicts[2]
          target_res = batch[2].astype('int64')
          target_sub = batch[3].astype('int64')
          label_flatten_res, length_res = self.flatten_label(target_res)
          label_flatten_sub, length_sub = self.flatten_label(target_sub)
          text_rem = self._flatten(text_rem, length_res)
          text_mas = self._flatten(text_mas, length_sub)
          loss_ori = self.loss_func(text_pre, label_flatten)
          loss_res = self.loss_func(text_rem, label_flatten_res)
          loss_mas = self.loss_func(text_mas, label_flatten_sub)
          loss = loss_ori + loss_res * self.weight_res + loss_mas * self.weight_mas
      return {'loss': loss}
fun_name: forward
nloc: 21
n_words: 81
url: https://github.com/PaddlePaddle/PaddleOCR.git
token_counts: 190
repo: PaddleOCR
language: Python
n_ast_errors: 0
complexity: 2
commit_id: a3a095150e8e1f56dd03d88ac71db6ad6262611a
path: ppocr/losses/rec_vl_loss.py
id: 24,351
---
commit_message: Add move concept, with Dodge and Jitter, and ordered GroupBy
n_identifiers: 9
ast_levels: 10
random_cut: |
  def test_height(self, df, groupby):
      df["height"] = df["width"]
      height = .4
      res = Jitter(height=height)(df, groupby, "y")
      self.check_same(res, df, "y", "grp2", "width")
      self.check_pos(res, df, "x", height *
n_ast_nodes: 110
file_name: test_moves.py
n_whitespaces: 60
vocab_size: 21
code: |
  def test_height(self, df, groupby):
      df["height"] = df["width"]
      height = .4
      res = Jitter(height=height)(df, groupby, "y")
      self.check_same(res, df, "y", "grp2", "width")
      self.check_pos(res, df, "x", height * df["height"])
fun_name: test_height
nloc: 6
n_words: 26
url: https://github.com/mwaskom/seaborn.git
token_counts: 68
repo: seaborn
language: Python
n_ast_errors: 0
complexity: 1
commit_id: 430cb8fe332a752b79fb74bd618038ac51e82df8
path: seaborn/tests/_core/test_moves.py
id: 41,173
---
commit_message: add a basic prototype of piles, behind jax_dynamic_shapes Co-authored-by: Adam Paszke <apaszke@google.com> Co-authored-by: Dougal Maclaurin <dougalm@google.com>
n_identifiers: 16
ast_levels: 14
random_cut: |
  def _pile_flatten(pile):
      lengths = []
      new_shape = [lengths.append(d.lengths) or d.replace(lengths=len(lengths))
                   if type(d)
n_ast_nodes: 141
file_name: batching.py
n_whitespaces: 62
vocab_size: 25
code: |
  def _pile_flatten(pile):
      lengths = []
      new_shape = [lengths.append(d.lengths) or d.replace(lengths=len(lengths))
                   if type(d) is IndexedAxisSize else d
                   for d in pile.aval.elt_ty.shape]
      elt_ty = pile.aval.elt_ty.update(shape=tuple(new_shape))
      aval = pile.aval.replace(elt_ty=elt_ty)
      return (lengths, pile.data), aval
fun_name: _pile_flatten
nloc: 8
n_words: 30
url: https://github.com/google/jax.git
token_counts: 91
repo: jax
language: Python
n_ast_errors: 0
complexity: 4
commit_id: f2f2faa4fa166f40a4a93bc966379cf1ebb720d1
path: jax/interpreters/batching.py
id: 122,451
---
commit_message: Fix fallout from turning off work signing in docker-compose
n_identifiers: 11
ast_levels: 12
random_cut: |
  def get_receptor_ctl(config_data=None):
      if config_data is None:
          config_data = read_receptor_config()
      receptor_sockfile = get_receptor_sockfile(config_data)
      try:
          return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(config_data, True))
      except RuntimeError:
          r
n_ast_nodes: 81
file_name: receptor.py
n_whitespaces: 54
vocab_size: 19
code: |
  def get_receptor_ctl(config_data=None):
      if config_data is None:
          config_data = read_receptor_config()
      receptor_sockfile = get_receptor_sockfile(config_data)
      try:
          return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(config_data, True))
      except RuntimeError:
          return ReceptorControl(receptor_sockfile)
fun_name: get_receptor_ctl
nloc: 8
n_words: 22
url: https://github.com/ansible/awx.git
token_counts: 51
repo: awx
language: Python
n_ast_errors: 0
complexity: 3
commit_id: 84f2b91105c959c4d89a63063cca441f3d67fc0f
path: awx/main/tasks/receptor.py
id: 82,163
---
commit_message: chore(discover): Cleanup events tests (#36797) - Delete the deprecated eventsv2 tests - Move MEP tests to its own file
n_identifiers: 19
ast_levels: 13
random_cut: |
  def test_failed_dry_run_does_not_error(self, mock_builder):
      with self.feature("organizations:performance-dry-run-mep"):
          mock_builder.side_effect = InvalidSearchQuery("Something bad")
          query = {
              "field": ["count()"],
              "project": [self.project.id],
          }
          response = self.do_request(query)
          assert response.status_code == 200, response.content
          assert len(mock_builder.mock_calls) == 1
          assert mock_builder.call_args.kwargs["dry_run"]
          mock_builder.side_effect = IncompatibleMetricsQuery("Something bad")
          query = {
              "field": ["count()"],
              "project": [self.project.id],
          }
          response = self.do_request(query)
          assert response.status_code == 200, response.content
          assert len(mock_builder.mock_calls) == 2
          assert mock_builder.call_args.kwargs["dry_run"]
          mock_builder.side_effect = InvalidConditionError("Something bad")
          query = {
              "field": ["count()"],
              "project": [self.project.id]
n_ast_nodes: 346
file_name: test_organization_events_mep.py
n_whitespaces: 410
vocab_size: 30
code: |
  def test_failed_dry_run_does_not_error(self, mock_builder):
      with self.feature("organizations:performance-dry-run-mep"):
          mock_builder.side_effect = InvalidSearchQuery("Something bad")
          query = {
              "field": ["count()"],
              "project": [self.project.id],
          }
          response = self.do_request(query)
          assert response.status_code == 200, response.content
          assert len(mock_builder.mock_calls) == 1
          assert mock_builder.call_args.kwargs["dry_run"]
          mock_builder.side_effect = IncompatibleMetricsQuery("Something bad")
          query = {
              "field": ["count()"],
              "project": [self.project.id],
          }
          response = self.do_request(query)
          assert response.status_code == 200, response.content
          assert len(mock_builder.mock_calls) == 2
          assert mock_builder.call_args.kwargs["dry_run"]
          mock_builder.side_effect = InvalidConditionError("Something bad")
          query = {
              "field": ["count()"],
              "project": [self.project.id],
          }
          response = self.do_request(query)
          assert response.status_code == 200, response.content
          assert len(mock_builder.mock_calls) == 3
          assert mock_builder.call_args.kwargs["dry_run"]
fun_name: test_failed_dry_run_does_not_error
nloc: 29
n_words: 83
url: https://github.com/getsentry/sentry.git
token_counts: 207
repo: sentry
language: Python
n_ast_errors: 0
complexity: 1
commit_id: d3b8c9dd7bef6bccb5e70d2ccf3cda8463444a34
path: tests/snuba/api/endpoints/test_organization_events_mep.py
id: 93,164
---
commit_message: Improve documentation around index restrictions (#5029) * Improve documentation around index restrictions * Update docs/advanced.rst * Refine index documentation updates. Factor out and re-use method before closing down other PR. * Fully remove the --extra-index-url argument Co-authored-by: Yusuke Nishioka <yusuke.nishioka.0713@gmail.com>
n_identifiers: 29
ast_levels: 17
random_cut: |
  def collect_hashes(self, ireq):
      link = ireq.link
      # Handle VCS and file links first
      if link and (link.is_vcs or (link.is_file and link.is_existing_dir())):
          return set()
      if not is_pinned_requirement(ireq):
          return set()
      sources = self.sources
      # Enforc
n_ast_nodes: 313
file_name: resolver.py
n_whitespaces: 364
vocab_size: 60
code: |
  def collect_hashes(self, ireq):
      link = ireq.link
      # Handle VCS and file links first
      if link and (link.is_vcs or (link.is_file and link.is_existing_dir())):
          return set()
      if not is_pinned_requirement(ireq):
          return set()
      sources = self.sources
      # Enforce index restrictions
      if ireq.name in self.index_lookup:
          sources = list(
              filter(lambda s: s.get("name") == self.index_lookup[ireq.name], sources)
          )
      if any(is_pypi_url(source["url"]) for source in sources):
          hashes = self._get_hashes_from_pypi(ireq)
          if hashes:
              return hashes
      applicable_candidates = self.ignore_compatibility_finder.find_best_candidate(
          ireq.name, ireq.specifier
      ).iter_applicable()
      applicable_candidates = list(applicable_candidates)
      if applicable_candidates:
          return {
              self._get_hash_from_link(candidate.link)
              for candidate in applicable_candidates
          }
      if link:
          return {self._get_hash_from_link(link)}
      if ireq.original_link:
          return {self._get_hash_from_link(ireq.original_link)}
      return set()
fun_name: collect_hashes
nloc: 29
n_words: 91
url: https://github.com/pypa/pipenv.git
token_counts: 195
repo: pipenv
language: Python
n_ast_errors: 0
complexity: 14
commit_id: 99cf729dd52100efba406b9c6af585425de0788c
path: pipenv/utils/resolver.py
id: 19,824
---
commit_message: Rename fc_size to output_size (#1641) * Rename fc_size to output_size * Responding to comments
n_identifiers: 44
ast_levels: 13
random_cut: |
  def test_api_callbacks(csv_filename):
      mock_callback = mock.Mock()
      epochs = 2
      batch_size = 8
      num_examples = 32
      with tempfile.TemporaryDirectory() as output_dir:
          input_features = [sequence_feature(reduce_output="sum")]
          output_features = [category_feature(vocab_size=5, reduce_input="sum")]
          config = {
              "input_features": input_features,
              "output_features": output_features,
              "combiner": {"type": "concat", "output_size": 14},
              "training": {"epochs": epochs, "batch_size": batch_size},
          }
          model = LudwigModel(config, callbacks=[mock_callback])
          data_csv = generate_data(
              input_features, output_features, os.path.join(output_dir, csv_filename), num_examples=num_examples
          )
          val_csv = shutil.copyfile(data_csv, os.path.join(output_dir, "validation.csv"))
          test_csv = shutil.copyfile(data_csv, os.path.join(output_dir, "test.csv"))
          model.
n_ast_nodes: 409
file_name: test_api.py
n_whitespaces: 272
vocab_size: 68
code: |
  def test_api_callbacks(csv_filename):
      mock_callback = mock.Mock()
      epochs = 2
      batch_size = 8
      num_examples = 32
      with tempfile.TemporaryDirectory() as output_dir:
          input_features = [sequence_feature(reduce_output="sum")]
          output_features = [category_feature(vocab_size=5, reduce_input="sum")]
          config = {
              "input_features": input_features,
              "output_features": output_features,
              "combiner": {"type": "concat", "output_size": 14},
              "training": {"epochs": epochs, "batch_size": batch_size},
          }
          model = LudwigModel(config, callbacks=[mock_callback])
          data_csv = generate_data(
              input_features, output_features, os.path.join(output_dir, csv_filename), num_examples=num_examples
          )
          val_csv = shutil.copyfile(data_csv, os.path.join(output_dir, "validation.csv"))
          test_csv = shutil.copyfile(data_csv, os.path.join(output_dir, "test.csv"))
          model.train(training_set=data_csv, validation_set=val_csv, test_set=test_csv)
      assert mock_callback.on_epoch_start.call_count == epochs
      assert mock_callback.on_epoch_end.call_count == epochs
      assert mock_callback.on_validation_start.call_count == epochs
      assert mock_callback.on_validation_end.call_count == epochs
      assert mock_callback.on_test_start.call_count == epochs
      assert mock_callback.on_test_end.call_count == epochs
      assert mock_callback.on_batch_start.call_count == epochs * (num_examples / batch_size)
      assert mock_callback.on_batch_end.call_count == epochs * (num_examples / batch_size)
fun_name: test_api_callbacks
nloc: 29
n_words: 109
url: https://github.com/ludwig-ai/ludwig.git
token_counts: 255
repo: ludwig
language: Python
n_ast_errors: 0
complexity: 1
commit_id: 69604268c2ddc06a4ee0b3dce0e05a8fb73b5d16
path: tests/integration_tests/test_api.py
id: 5,908
---
commit_message: Implement bucketed weighted sampling for VITS (#1871)
n_identifiers: 23
ast_levels: 15
random_cut: |
  def mls(root_path, meta_files=None, ignored_speakers=None):
      items = []
      with open(os.path.join(root_path, meta_files), "r", encoding="utf-8") as meta:
          for line in meta:
              file, text = line.split("\t")
              text = text[:-1]
              speaker, book, *_ = file.split("_")
              wav_file = os.path.join(root_path, os.path.dirname(meta_files), "audio", speaker, book, file + ".wav")
              # ignore speakers
              if isinstance(ignored_speakers, list):
                  if speaker in ignored_speakers:
                      continue
              items.append(
                  {"text": text, "audio_
n_ast_nodes: 246
file_name: formatters.py
n_whitespaces: 225
vocab_size: 56
code: |
  def mls(root_path, meta_files=None, ignored_speakers=None):
      items = []
      with open(os.path.join(root_path, meta_files), "r", encoding="utf-8") as meta:
          for line in meta:
              file, text = line.split("\t")
              text = text[:-1]
              speaker, book, *_ = file.split("_")
              wav_file = os.path.join(root_path, os.path.dirname(meta_files), "audio", speaker, book, file + ".wav")
              # ignore speakers
              if isinstance(ignored_speakers, list):
                  if speaker in ignored_speakers:
                      continue
              items.append(
                  {"text": text, "audio_file": wav_file, "speaker_name": "MLS_" + speaker, "root_path": root_path}
              )
      return items

  # ======================================== VOX CELEB ===========================================
fun_name: mls
nloc: 15
n_words: 70
url: https://github.com/coqui-ai/TTS.git
token_counts: 146
repo: TTS
language: Python
n_ast_errors: 0
complexity: 4
commit_id: bfc63829ac869f479bf9e8bf0fb75a2fb6d04959
path: TTS/tts/datasets/formatters.py
id: 262,547
---
commit_message: ENH: Add dtypes/converters arguments for pandas.read_xml (#45411)
n_identifiers: 13
ast_levels: 15
random_cut: |
  def test_dtype_float(parser):
      df_resul
n_ast_nodes: 133
file_name: test_xml_dtypes.py
n_whitespaces: 86
vocab_size: 27
code: |
  def test_dtype_float(parser):
      df_result = read_xml(xml_types, dtype={"degrees": "float"}, parser=parser)
      df_expected = DataFrame(
          {
              "shape": ["square", "circle", "triangle"],
              "degrees": Series([360, 360, 180]).astype("float"),
              "sides": [4.0, float("nan"), 3.0],
          }
      )
      tm.assert_frame_equal(df_result, df_expected)
fun_name: test_dtype_float
nloc: 10
n_words: 28
url: https://github.com/pandas-dev/pandas.git
token_counts: 83
repo: pandas
language: Python
n_ast_errors: 0
complexity: 1
commit_id: d2d7ffb56f0f12c412c36c0c867ab3bb240d04ca
path: pandas/tests/io/xml/test_xml_dtypes.py
id: 163,734
---
commit_message: [CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
n_identifiers: 4
ast_levels: 7
random_cut: |
  def test_actor_broadcast(ray_start_cluster_with_resource):
      cluster, num_nodes = ray_start_cluster_with_resource
n_ast_nodes: 17
file_name: test_object_manager.py
n_whitespaces: 8
vocab_size: 6
code: |
  def test_actor_broadcast(ray_start_cluster_with_resource):
      cluster, num_nodes = ray_start_cluster_with_resource
fun_name: test_actor_broadcast
nloc: 19
n_words: 6
url: https://github.com/ray-project/ray.git
token_counts: 147
repo: ray
language: Python
n_ast_errors: 0
complexity: 5
commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
path: python/ray/tests/test_object_manager.py
id: 131,627
---
commit_message: adds mnist dataset test case
n_identifiers: 14
ast_levels: 14
random_cut: |
  def _rescale_dataset_split_sizes(left_size,right_size,total_length):
      left_size_type = type(left_size)
      right_size_type = type(right_size)
      # check both left_size and right_size are integers or floats
      if ((left_size is not None and left_size_type not in [int,float]) and
          (right_size is not None and right_size_type not in [int,float])):
          raise TypeError('Invalid `left_size` and `right_size` Types. Expected: '
                          'integer or float or None, Received: type(left_size)='
                          f'{left_size_type} and type(right_size)={right_size_type}')
      # check left_size is a integer or float
      if left_size is not None and left_size_type not in [int,float]:
          raise TypeError('Invalid `left_size` Type.Expected: int or float or None,
n_ast_nodes: 651
file_name: dataset_utils.py
n_whitespaces: 882
vocab_size: 146
code: |
  def _rescale_dataset_split_sizes(left_size,right_size,total_length):
      left_size_type = type(left_size)
      right_size_type = type(right_size)
      # check both left_size and right_size are integers or floats
      if ((left_size is not None and left_size_type not in [int,float]) and
          (right_size is not None and right_size_type not in [int,float])):
          raise TypeError('Invalid `left_size` and `right_size` Types. Expected: '
                          'integer or float or None, Received: type(left_size)='
                          f'{left_size_type} and type(right_size)={right_size_type}')
      # check left_size is a integer or float
      if left_size is not None and left_size_type not in [int,float]:
          raise TypeError('Invalid `left_size` Type.Expected: int or float or None, '
                          f'Received: type(left_size)={left_size_type}. ')
      # check right_size is a integer or float
      if right_size is not None and right_size_type not in [int,float]:
          raise TypeError(f'Invalid `right_size` Type.Expected: int or float or None,'
                          f'Received: type(right_size)={right_size_type}. ')
      # check left_size and right_size are non-zero
      if left_size == 0 and right_size == 0:
          raise ValueError('Both `left_size` and `right_size` are zero. '
                           'Atleast one of the split sizes must be non-zero.')
      # check left_size is non-negative and less than 1 and less than total_length
      if (left_size_type == int and (left_size <= 0 or left_size>= total_length)
          or left_size_type == float and (left_size <= 0 or left_size>= 1) ):
          raise ValueError('`left_size` should be either a positive integer '
                           f'and smaller than {total_length} or a float '
                           'within the range `[0, 1]`. Received: left_size='
                           f'{left_size}')
      # check right_size is non-negative and less than 1 and less than total_length
      if (right_size_type == int and (right_size <= 0 or right_size>= total_length)
          or right_size_type == float and (right_size <= 0 or right_size>= 1)):
          raise ValueError('`right_size` should be either a positive integer '
                           f'and smaller than {total_length} or a float '
                           'within the range `[0, 1]`. Received: right_size='
                           f'{right_size}')
      # check sum of left_size and right_size is less than or equal to total_length
      if right_size_type == left_size_type == float and right_size + left_size > 1:
          raise ValueError('The sum of `left_size` and `right_size` is greater '
                           'than 1. It must be less than or equal to 1.')
      if left_size_type == float:
          left_size = round(left_size*total_length)
      elif left_size_type == int:
          left_size = float(left_size)
      if right_size_type == float:
          right_size = round(right_size*total_length)
      elif right_size_type == int:
          right_size = float(right_size)
      if left_size is None:
          left_size = total_length - right_size
      elif right_size is None:
          right_size = total_length - left_size
      if left_size + right_size > total_length:
          raise ValueError('The sum of `left_size` and `right_size` should '
                           'be smaller than the {total_length}. '
                           f'Received: left_size + right_size = {left_size+right_size}'
                           f'and total_length = {total_length}')
      for split,side in [(left_size,'left'),(right_size,'right')]:
          if split == 0:
              raise ValueError(f'With `dataset` of length={total_length}, `left_size`='
                               '{left_size} and `right_size`={right_size}.'
                               f'Resulting {side} side dataset split will be empty. '
                               'Adjust any of the aforementioned parameters')
      left_size,right_size = int(left_size) ,int(right_size)
      return left_size,right_size
fun_name: _rescale_dataset_split_sizes
nloc: 57
n_words: 432
url: https://github.com/keras-team/keras.git
token_counts: 362
repo: keras
language: Python
n_ast_errors: 0
complexity: 34
commit_id: 3337f8716967b9b5c9c575e73c66cef0a17e891f
path: keras/utils/dataset_utils.py
id: 269,240
---
commit_message: Reformatting the codebase with black. PiperOrigin-RevId: 450093126
n_identifiers: 10
ast_levels: 9
random_cut: |
  def flatten(x):
      return tf.reshape(x, [-1])

  @keras_export("keras.backend.batch_flatten")
  @tf.__internal__.dispatch.add_
n_ast_nodes: 60
file_name: backend.py
n_whitespaces: 11
vocab_size: 8
code: |
  def flatten(x):
      return tf.reshape(x, [-1])

  @keras_export("keras.backend.batch_flatten")
  @tf.__internal__.dispatch.add_dispatch_support
  @doc_controls.do_not_generate_docs
fun_name: flatten
nloc: 2
n_words: 8
url: https://github.com/keras-team/keras.git
token_counts: 18
repo: keras
language: Python
n_ast_errors: 1
complexity: 1
ast_errors: |
  @keras_export("keras.backend.batch_flatten")
  @tf.__internal__.dispatch.add_dispatch_support
  @doc_controls.do_not_generate_docs
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf
path: keras/backend.py
id: 269,616
---
commit_message: Stricter signatures for resolvers and mutations (#9649)
n_identifiers: 8
ast_levels: 8
random_cut: |
  def resolve_voucher(_root, _info, *, id, channel=None):
      _, id = from_global_id_or_error(id, Voucher)
n_ast_nodes: 46
file_name: schema.py
n_whitespaces: 27
vocab_size: 14
code: |
  def resolve_voucher(_root, _info, *, id, channel=None):
      _, id = from_global_id_or_error(id, Voucher)
      return resolve_voucher(id, channel)
fun_name: resolve_voucher
nloc: 3
n_words: 14
url: https://github.com/saleor/saleor.git
token_counts: 32
repo: saleor
language: Python
n_ast_errors: 0
complexity: 1
commit_id: 513fc80bc698c177b87774b3aff3da7b9aedbe06
path: saleor/graphql/discount/schema.py
id: 26,959
---
commit_message: Reformat with black
n_identifiers: 12
ast_levels: 11
random_cut: |
  def test_descendant_of_filter(self):
      response = self.get_response(descendant_of=6)
      content = json.loads(response.content.decode("UTF-8"))
      page_id_list = self.get_page_id_list(content)
      self.assertEqual(page_id_list, [10, 15, 17, 21, 22,
n_ast_nodes: 89
file_name: test_pages.py
n_whitespaces: 45
vocab_size: 16
code: |
  def test_descendant_of_filter(self):
      response = self.get_response(descendant_of=6)
      content = json.loads(response.content.decode("UTF-8"))
      page_id_list = self.get_page_id_list(content)
      self.assertEqual(page_id_list, [10, 15, 17, 21, 22, 23])
fun_name: test_descendant_of_filter
nloc: 5
n_words: 18
url: https://github.com/wagtail/wagtail.git
token_counts: 58
repo: wagtail
language: Python
n_ast_errors: 0
complexity: 1
commit_id: d10f15e55806c6944827d801cd9c2d53f5da4186
path: wagtail/api/v2/tests/test_pages.py
id: 72,798
---
commit_message: Call _check_arraylike for jnp.linalg & jnp.fft functions
n_identifiers: 24
ast_levels: 17
random_cut: |
  def _multi_dot_matrix_chain_order(arrays, return_costs=False):
      n = len(arrays)
      # p stores the dimensions of the matrices
n_ast_nodes: 290
file_name: linalg.py
n_whitespaces: 214
vocab_size: 92
code: |
  def _multi_dot_matrix_chain_order(arrays, return_costs=False):
      n = len(arrays)
      # p stores the dimensions of the matrices
      # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
      p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
      # m is a matrix of costs of the subproblems
      # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
      m = np.zeros((n, n), dtype=np.double)
      # s is the actual ordering
      # s[i, j] is the value of k at which we split the product A_i..A_j
      s = np.empty((n, n), dtype=np.intp)
      for l in range(1, n):
          for i in range(n - l):
              j = i + l
              m[i, j] = jnp.inf
              for k in range(i, j):
                  q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
                  if q < m[i, j]:
                      m[i, j] = q
                      s[i, j] = k  # Note that Cormen uses 1-based index
      return (s, m) if return_costs else s
fun_name: _multi_dot_matrix_chain_order
nloc: 15
n_words: 150
url: https://github.com/google/jax.git
token_counts: 196
repo: jax
language: Python
n_ast_errors: 0
complexity: 7
commit_id: 2416d154355f19e77b5c1ddf1de1f8552e4a98ad
path: jax/_src/third_party/numpy/linalg.py
id: 122,411
---
commit_message: bpo-46426: Improve tests for the dir_fd argument (GH-30668) Ensure that directory file descriptors refer to directories different from the current directory, and that src_dir_fd and dst_dir_fd refer to different directories. Add context manager open_dir_fd() in test.support.os_helper.
n_identifiers: 15
ast_levels: 11
random_cut: |
  def test_chmod_dir_fd(self):
      with self.prepare_file() as (dir_fd, name, fullname):
          posix.chmod(fullname, stat.S_I
n_ast_nodes: 118
file_name: test_posix.py
n_whitespaces: 102
vocab_size: 22
code: |
  def test_chmod_dir_fd(self):
      with self.prepare_file() as (dir_fd, name, fullname):
          posix.chmod(fullname, stat.S_IRUSR)
          posix.chmod(name, stat.S_IRUSR | stat.S_IWUSR, dir_fd=dir_fd)
          s = posix.stat(fullname)
          self.assertEqual(s.st_mode & stat.S_IRWXU,
                           stat.S_IRUSR | stat.S_IWUSR)
fun_name: test_chmod_dir_fd
nloc: 7
n_words: 24
url: https://github.com/python/cpython.git
token_counts: 76
repo: cpython
language: Python
n_ast_errors: 0
complexity: 1
commit_id: 54610bb448a9cf5be77d53b66169fca4c11be6cb
path: Lib/test/test_posix.py
id: 175,833
---
commit_message: Extract generic RevisionsCompareView from page revisions_compare view
n_identifiers: 6
ast_levels: 9
random_cut: |
  def dispatch(self, request, *args, **kwargs):
      return super().dispatch(request, *args, **kwargs)
n_ast_nodes: 42
file_name: revisions.py
n_whitespaces: 15
vocab_size: 8
code: |
  def dispatch(self, request, *args, **kwargs):
      return super().dispatch(request, *args, **kwargs)
fun_name: dispatch
nloc: 2
n_words: 9
url: https://github.com/wagtail/wagtail.git
token_counts: 28
repo: wagtail
language: Python
n_ast_errors: 0
complexity: 1
commit_id: 2664a4c1fc7df471225d3e71355802401217889a
path: wagtail/admin/views/pages/revisions.py
id: 77,926
---
commit_message: Add hardware revision support to homekit (#63336)
n_identifiers: 2
ast_levels: 9
random_cut: |
  async def test_format_version():
      assert format_version("soho+3.6.8+soho-release-rt120+10") == "3.6.8"
      assert format_version("undefined-undefined-1.6.8") == "1.6.8"
      assert format_version("56.0-76060") == "56.0.76060"
      assert format_version(3.6) == "3.6"
      assert format_version("AK001-ZJ100") == "001.100"
      assert format_version("HF-LPB100-") == "100"
      assert
n_ast_nodes: 144
file_name: test_util.py
n_whitespaces: 69
vocab_size: 24
code: |
  async def test_format_version():
      assert format_version("soho+3.6.8+soho-release-rt120+10") == "3.6.8"
      assert format_version("undefined-undefined-1.6.8") == "1.6.8"
      assert format_version("56.0-76060") == "56.0.76060"
      assert format_version(3.6) == "3.6"
      assert format_version("AK001-ZJ100") == "001.100"
      assert format_version("HF-LPB100-") == "100"
      assert format_version("AK001-ZJ2149") == "001.2149"
      assert format_version("0.1") == "0.1"
      assert format_version("unknown") is None
fun_name: test_format_version
nloc: 10
n_words: 39
url: https://github.com/home-assistant/core.git
token_counts: 70
repo: core
language: Python
n_ast_errors: 0
complexity: 1
commit_id: 5c8271552a3023808e272125f71ba79f3a1e97d8
path: tests/components/homekit/test_util.py
id: 308,607
---
commit_message: Bugfix: Alignments tool - don't error on from-faces job
n_identifiers: 18
ast_levels: 13
random_cut: |
  def _set_skip_list(self) -> Optional[List[int]]:
      skip_num = self._arguments.extract_every_n
      if skip_num == 1:
          logger.debug("Not skipping any frames")
          return None
      skip_list = []
      for idx, item in enumerate(s
n_ast_nodes: 151
file_name: jobs.py
n_whitespaces: 196
vocab_size: 45
code: |
  def _set_skip_list(self) -> Optional[List[int]]:
      skip_num = self._arguments.extract_every_n
      if skip_num == 1:
          logger.debug("Not skipping any frames")
          return None
      skip_list = []
      for idx, item in enumerate(self._frames.file_list_sorted):
          if idx % skip_num != 0:
              logger.trace("Adding image '%s' to skip list due to "  # type:ignore
                           "extract_every_n = %s", item["frame_fullname"], skip_num)
              skip_list.append(idx)
      logger.debug("Adding skip list: %s", skip_list)
      return skip_list
fun_name: _set_skip_list
nloc: 22
n_words: 55
url: https://github.com/deepfakes/faceswap.git
token_counts: 89
repo: faceswap
language: Python
n_ast_errors: 0
complexity: 4
commit_id: 892d8626ed4e7f834ac5607af59f14f5476d5997
path: tools/alignments/jobs.py
id: 101,659
---
commit_message:
Simple (flat rate) taxes API (#9784) * Add empty tax module * Add tax models (#9839) * Add tax API queries (#9856) * Add MANAGE_TAXES permission * Add tax configuration queries * Create tax configuration when channel is created * Drop sorters for now * Add TaxConfigurationPerCountry type * Update migration * Add metadata to TaxConfiguration type * Add tests for tax configuration queries * Add TaxClass types * Improve tests * Add queries for tax configuration per country * Fix query in tests * Update query cost map * Add tax API mutations (#9934) * Add taxConfigurationUpdate mutation * Update schema * Add tax class CRUD mutations * Add mutations to update/delete tax class rates per country * Review fixes * Add taxClass field to ProductType type (#9999) * Add taxClass field to ProductType type * Add taxClass field to Product type * Add taxClass field to shipping method type * Add displayGrossPrices to ProductPricingInfo (#10008) * Add displayGrossPrices to ProductPricingInfo * Add displayGrossPrices to Checkout * Add displayGrossPrices to Order * Add tests * Add ADDED_IN_35 label to new fields' descriptions * Use new display_gross_prices flag (#10121) * Use new display_gross_prices flag * Update tests * Add tests * Review fixes * Drop Vatlayer (#10335) * Add migration from Vatlayer to simple taxes * Review fixes * Review fixes * Drop usages of global include_taxes_in_prices flag (#10406) * Drop `include_taxes_in_prices` function from site settings * Adjust tests * Review fixes * Drop the `charge_taxes_on_shipping` flag from site settings. (#10466) * Include migrating Avatax tax codes in tax class migration * Drop `charge_taxes_on_shipping` function * Add tax_class to ShippingMethodData * Review fixes * Always calculate shipping tax with Avalara * Add default country rate (#10497) * Allow setting default tax rate for a country (without providing a tax class) * Add validation to allow settings only one default rate at once * Code review fixes * Add taxCalculationStrategy field * Add tests * CR fixes * Adjust resolver to use new tax configuration (#10533) * CR fixes * Add database router to fix false positives on relation mismatch. (#10524) * Add database router to fix false positives on relation mismatch. * The db router should have only 'allow_relation' implemented. * The 'db_for_write' part should stay. * Subscription for sync tax webooks (#10433) * Add proposed changes to schema * Add base implementation for sync tax subscription * Add test for empty order * Add clean up and missing part for tests * Use subscription for tax webhooks. Add more tests * Improve descriptions for tax objects * Adjust resolver to use new tax configuration (#10533) * Add taxCalculationStrategy field (#10532) * Add taxCalculationStrategy field * Add tests * CR fixes * CR fixes * Add datamigration to populate taxCalculationStrategy * Migrate Product.charge_taxes to new tax configuration (#10585) * Migrate Product.charge_taxes field to new tax configuration * Rename function * Fix tests * Change assign_tax_code_to_object_meta function to support tax classes * Update tax class fixtures * Improve dataloader * CR fixes * CR fixes * Add deprecation notice to dataloader * Allow removing country rates in the `taxCountryConfigurationUpdate` mutation. 
(#10647) * Allow deleting rates in taxCountryConfigurationUpdate mutation * Change tax rates ordering to keep default rates first (with null tax classes) * Update existing migration * Remove TaxClass.is_default field (#10660) * Change tax rates ordering to keep default rates first (with null tax classes) * Update existing migration * Drop is_default field from TaxClass model * Drop extra Avalara config (#10673) * Drop extra Avatax config options * Adjust tests * Use flat rates in tax calculations (#10747) * WIP Use new tax configuration in tax calculations * Use new tax calculations for checkout * Adjust tests * Add flat rates calculations for checkout and order * Calculate flat rates in product pricing objects * Adjust tests * Add tests for order calculations * Add tests for product queries tax calculations * Add tests for order calculations * Use base calculations to get default checkout shipping price * Add tests for using tax_class from product_type * Add tests for get_order_country * Adjust tests * Code review fixes * Drop update_taxes_for_order_lines (#11000) * Fix calls to Avalara not validating order (#11012) * Add validation to disallow creating negative rates (#11010) * Add missing recalculation of order.undiscounted_total (#11039) * Optimize getting tax class country rates (#11040) * Tax API adjustments for dashboard (#11042) * Ignore null rates in taxCountryConfigurationUpdate mutation * Allow to pass null rates in taxClassUpdate mutation * Improve tests * Update saleor/graphql/tax/mutations/tax_class_update.py Co-authored-by: Krzysztof Waliczek <krzysztof.waliczek@saleor.io> * Update schema Co-authored-by: Krzysztof Waliczek <krzysztof.waliczek@saleor.io> * Cleanup before release (#11049) * Update ADDED_IN labels * Fix skippeded test * Regenerate migrations * Deprecate CountryDisplay.vat field * Add changelog * Update order.undiscounted_total calculation to not include taxes (#11068) * Fix assigning rates to tax classes (#11105) * Allow all staff users and apps to query tax-related data (#11113) * Bump dependencies for origin/SALEOR-6391-simple-taxes (#11127) Bumps: - cryptography to 38.0.3 - pillow to 9.3.0 * Fix using tax code from product and product type's tax class (#11111) * Fix using tax code from product and product type's tax class * Extract function * Replace synchronous load_site with promise (#11165) * Denormalize tax class for order lines and orders (#11172) * WIP Denormalize tax class for order lines and orders * Add denormalized fields in GraphQL types * Add tests for denormalized API fields * Return 0 rate in API when rate is null * Add preview/version notes in new field descriptions * Update changelog Co-authored-by: Dominik Kozaczko <dominik@kozaczko.info> Co-authored-by: Maciej Korycinski <maciej@mirumee.com> Co-authored-by: Krzysztof Waliczek <krzysztof.waliczek@saleor.io> Co-authored-by: Mika <6186720+NyanKiyoshi@users.noreply.github.com> Co-authored-by: Krzysztof Kwaśniak <mr.brzys@gmail.com>
n_identifiers: 17
ast_levels: 12
random_cut: |
  def _save_lines(info, instance, lines_data, app, manager):
      if lines_data:
          lines = []
          for line_data in lines_data:
              new_line = create_order_line(
                  instance,
                  line_data,
                  manager,
              )
              lines.append(new_line)
          # New event
          events.order_added_products_event(
              order=instance,
              user=info.context.user,
              app=app,
              order_lines=lines,
n_ast_nodes: 97
file_name: draft_order_create.py
n_whitespaces: 255
vocab_size: 28
code: |
  def _save_lines(info, instance, lines_data, app, manager):
      if lines_data:
          lines = []
          for line_data in lines_data:
              new_line = create_order_line(
                  instance,
                  line_data,
                  manager,
              )
              lines.append(new_line)
          # New event
          events.order_added_products_event(
              order=instance,
              user=info.context.user,
              app=app,
              order_lines=lines,
          )
fun_name: _save_lines
nloc: 16
n_words: 32
url: https://github.com/saleor/saleor.git
token_counts: 67
repo: saleor
language: Python
n_ast_errors: 0
complexity: 3
commit_id: 67df28935c555fdd673f17e8c9183e24dde7c51f
path: saleor/graphql/order/mutations/draft_order_create.py
id: 29,400
---
commit_message: Reformat with black
n_identifiers: 24
ast_levels: 12
random_cut: |
  def test_reupload_different_file_size_and_file_hash(self):
      # Build a fake file, and create it through the admin view
      # since self.document doesn't have a file_size set.
      fake_file = SimpleUploaded
n_ast_nodes: 227
file_name: test_admin_views.py
n_whitespaces: 259
vocab_size: 58
code: |
  def test_reupload_different_file_size_and_file_hash(self):
      # Build a fake file, and create it through the admin view
      # since self.document doesn't have a file_size set.
      fake_file = SimpleUploadedFile("some_file.txt", b"this is the content")
      post_data = {
          "title": "My doc",
          "file": fake_file,
      }
      self.client.post(reverse("wagtaildocs:add"), post_data)
      document = models.Document.objects.get(title="My doc")
      old_file_size, old_file_hash = document.file_size, document.file_hash
      new_file = SimpleUploadedFile(document.filename, b"less content")
      self.client.post(
          reverse("wagtaildocs:edit", args=(document.pk,)),
          {
              "title": document.title,
              "file": new_file,
          },
      )
      document.refresh_from_db()
      self.assertNotEqual(document.file_size, old_file_size)
      self.assertNotEqual(document.file_hash, old_file_hash)
fun_name: test_reupload_different_file_size_and_file_hash
nloc: 20
n_words: 69
url: https://github.com/wagtail/wagtail.git
token_counts: 135
repo: wagtail
language: Python
n_ast_errors: 0
complexity: 1
commit_id: d10f15e55806c6944827d801cd9c2d53f5da4186
path: wagtail/documents/tests/test_admin_views.py
id: 74,800
---
commit_message: [api] Annotate as public / move ray-core APIs to _private and add enforcement rule (#25695) Enable checking of the ray core module, excluding serve, workflows, and tune, in ./ci/lint/check_api_annotations.py. This required moving many files to ray._private and associated fixes.
n_identifiers: 26
ast_levels: 15
random_cut: |
  def do_remote(self, arg):
      # Tell the next task to drop into the debugger.
      ray._private.worker.global_worker.debugger_breakpoint = self._breakpoint_uuid
      # Tell the debug loop to connect to the next task.
      data = json.dumps(
          {
              "job_id": ray.get_runtime_context().job_id.hex(),
          }
      )
      _internal_kv_put(
          "RAY_PDB_CONTINUE_{}".format(self._breakpoint_uuid),
          data,
          namespace=ray_constants.KV_NAMESPACE_PDB,
      )
      self.__restore()
      self.handle.connection.close()
      return Pdb.do_continue(self, arg)
n_ast_nodes: 144
file_name: rpdb.py
n_whitespaces: 192
vocab_size: 35
code: |
  def do_remote(self, arg):
      # Tell the next task to drop into the debugger.
      ray._private.worker.global_worker.debugger_breakpoint = self._breakpoint_uuid
      # Tell the debug loop to connect to the next task.
      data = json.dumps(
          {
              "job_id": ray.get_runtime_context().job_id.hex(),
          }
      )
      _internal_kv_put(
          "RAY_PDB_CONTINUE_{}".format(self._breakpoint_uuid),
          data,
          namespace=ray_constants.KV_NAMESPACE_PDB,
      )
      self.__restore()
      self.handle.connection.close()
      return Pdb.do_continue(self, arg)
fun_name: do_remote
nloc: 15
n_words: 45
url: https://github.com/ray-project/ray.git
token_counts: 87
repo: ray
language: Python
n_ast_errors: 0
complexity: 1
commit_id: 43aa2299e6623c8f8c7c4a1b80133459d0aa68b0
path: python/ray/util/rpdb.py
id: 142,705
Rework ConfigOption schemas as class-based This is NOT a breaking change, the old style keeps working. Now developers can make a subclass of Config, declare the schema of the config as fields of the class, and instances of this class will hold the processed config. This better represents the relationship between what a config definition and a config instance is, now you think of configs definitions as classes and parsed configs as instances. We also can write these fields as descriptors and enable safe attribute-based access. Static analysis will be able to see when a missing fields is accessed. And in followup changes I plan to add type annotations which will make even type checking fully sound.
10
12
def test_missing_required(self):
    conf = defa
79
base_tests.py
64
17
def test_missing_required(self):
    conf = defaults.MkDocsConfig()
    errors, warnings = conf.validate()
    self.assertEqual(
        errors,
        [('site_name', ValidationError('Required configuration not provided.'))]
    )
    self.assertEqual(warnings, [])
test_missing_required
7
19
https://github.com/mkdocs/mkdocs.git
47
mkdocs
Python
0
1
73e8fef5068d47ab7bdc4c49bc4abcc74434b57e
mkdocs/tests/config/base_tests.py
225,158
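The commit message above describes the new class-based config style. A minimal sketch of what that style looks like, assuming the post-commit `mkdocs.config` API; the class name and fields here are illustrative, not the real `MkDocsConfig` schema:

```python
from mkdocs.config import base, config_options

class DemoConfig(base.Config):
    # Schema declared as class fields; instances hold the parsed values.
    site_name = config_options.Type(str)  # no default, so validate() flags it
    strict = config_options.Type(bool, default=False)

conf = DemoConfig()
conf.load_dict({})            # deliberately leave site_name unset
errors, warnings = conf.validate()
print(errors)                 # [('site_name', ValidationError(...))]
print(conf.strict)            # attribute-based access to the parsed value
```

The test above relies on exactly this behavior: a required field that was never provided surfaces from `validate()` as a `('site_name', ValidationError(...))` pair.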
Refactored Crypto Tests (#1743) * Refactored tests * Removed unused command * Added tests * Tests : remove cassettes files + add fixture * Black * Tests : skip tests Co-authored-by: didierlopes.eth <dro.lopes@campus.fct.unl.pt> Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com> Co-authored-by: Chavithra PARANA <chavithra@gmail.com>
5
9
def test_read_data_file(recorder):
    file = read_data_file("coinbase_gecko_map.json")
    recorder.captur
31
test_cryptocurrency_helpers.py
11
6
def test_read_data_file(recorder):
    file = read_data_file("coinbase_gecko_map.json")
    recorder.capture(file)
test_read_data_file
3
6
https://github.com/OpenBB-finance/OpenBBTerminal.git
17
OpenBBTerminal
Python
0
1
9068ad01249c1e1adaca3ef9a704d70da7e3a17b
tests/openbb_terminal/cryptocurrency/test_cryptocurrency_helpers.py
284,453
Refactor and improve anthemav (#75852)
14
8
def mock_anthemav() -> AsyncMock:
    avr = AsyncMock()
    avr.protocol.macaddress = "000000000001"
    avr.protocol.model = "MRX 520"
    avr.reconnect = AsyncMock()
    avr.close = MagicMock()
    avr.protocol.input_list = []
    avr.protocol.audio_listening_mode_list = []
    avr.protocol.power = False
    return avr


@pytest.fixture
121
conftest.py
61
22
def mock_anthemav() -> AsyncMock:
    avr = AsyncMock()
    avr.protocol.macaddress = "000000000001"
    avr.protocol.model = "MRX 520"
    avr.reconnect = AsyncMock()
    avr.close = MagicMock()
    avr.protocol.input_list = []
    avr.protocol.audio_listening_mode_list = []
    avr.protocol.power = False
    return avr


@pytest.fixture
mock_anthemav
11
32
https://github.com/home-assistant/core.git
65
core
Python
1
1
@pytest.fixture
bbd7041a73572547be49ead53b183aa1e55a6d75
tests/components/anthemav/conftest.py
318,057
Bag: add implementation for reservoir sampling (#7068) (#7636) - Implement the [L algorithm](https://en.wikipedia.org/wiki/Reservoir_sampling#An_optimal_algorithm) for reservoir sampling without replacement. - Use the **k** reservoir of size 1 strategy for sampling with replacement (see [reference](http://utopia.duth.gr/~pefraimi/research/data/2007EncOfAlg.pdf)) of **k** items
21
11
def test_reservoir_sample_with_replacement_map_partitions_correctness():
    N, k = 20, 10
    seq = list(range(N))
    distribution = [0 for _ in range(N)]
    expected_distribution = [0 for _ in range(N)]
    reps = 2000
    for _ in range(reps):
        picks, _ = random._sample_with_replacement_map_partitions(seq, k)
        for pick in picks:
            distribution[pick] += 1
        for pick in rnd.choices(seq, k=k):
            expected_distribution[
221
test_random.py
179
56
def test_reservoir_sample_with_replacement_map_partitions_correctness():
    N, k = 20, 10
    seq = list(range(N))
    distribution = [0 for _ in range(N)]
    expected_distribution = [0 for _ in range(N)]
    reps = 2000
    for _ in range(reps):
        picks, _ = random._sample_with_replacement_map_partitions(seq, k)
        for pick in picks:
            distribution[pick] += 1
        for pick in rnd.choices(seq, k=k):
            expected_distribution[pick] += 1

    # convert to probabilities
    distribution = [c / (reps * k) for c in distribution]
    expected_distribution = [c / (reps * k) for c in expected_distribution]

    # use bhattacharyya distance to assess the similarity of distributions
    assert math.isclose(
        0.0, bhattacharyya(distribution, expected_distribution), abs_tol=1e-2
    )
test_reservoir_sample_with_replacement_map_partitions_correctness
17
94
https://github.com/dask/dask.git
150
dask
Python
0
8
4e5dfe7463028a39a90e026c7fb9220969093ab3
dask/bag/tests/test_random.py
156,163
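For reference, a self-contained sketch of "Algorithm L" named in the commit message; this illustrates the technique, it is not dask's `_sample_map_partitions` code:

```python
import math
import random as rnd

def reservoir_sample_l(stream, k):
    """Uniform sample of k items without replacement from a one-pass stream."""
    it = iter(stream)
    reservoir = [x for _, x in zip(range(k), it)]
    if len(reservoir) < k:
        return reservoir                       # stream had fewer than k items
    w = math.exp(math.log(rnd.random()) / k)   # running acceptance weight
    while True:
        # Geometric skip: jump straight to the next item that enters the sample.
        skip = math.floor(math.log(rnd.random()) / math.log(1 - w))
        try:
            for _ in range(skip):
                next(it)
            reservoir[rnd.randrange(k)] = next(it)
        except StopIteration:
            return reservoir
        w *= math.exp(math.log(rnd.random()) / k)
```

The payoff is that the expected number of random draws is O(k log(n/k)) rather than one per element, which is why the test above only has to check the output distribution, not the scan strategy.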
TYP: Autotyping (#48191) * annotate-magics * annotate-imprecise-magics * none-return * scalar-return * pyi files * ignore vendored file * manual changes * ignore pyright in pickle_compat (these errors would be legit if the current __new__ methods were called but I think these pickle tests call older __new__ methods which allowed providing multiple positional arguments) * run autotyping in pre-commit * remove final and expand safe (and add annotate-imprecise-magics)
8
13
def _empty_info_line(self) -> str:
    return (
        f"Empty {type(self.frame).__name__}\n"
        f"Columns: {self.frame.columns}\n"
        f"Index: {self.frame.index
64
latex.py
59
13
def _empty_info_line(self) -> str:
    return (
        f"Empty {type(self.frame).__name__}\n"
        f"Columns: {self.frame.columns}\n"
        f"Index: {self.frame.index}"
    )
_empty_info_line
6
13
https://github.com/pandas-dev/pandas.git
16
pandas
Python
0
1
54347fe684e0f7844bf407b1fb958a5269646825
pandas/io/formats/latex.py
169,035
feat(physics.units): add `is_prefixed` property to `Quantity`
10
7
def test_prefixed_property():
    assert not meter.is_prefixed
    assert not joule.is_prefixed
    assert not day.is_prefixed
    assert not second.is_prefixed
    assert centimeter.is_prefixed
    assert kilometer.is_prefixed
64
test_quantities.py
45
12
def test_prefixed_property():
    assert not meter.is_prefixed
    assert not joule.is_prefixed
    assert not day.is_prefixed
    assert not second.is_prefixed
    assert centimeter.is_prefixed
    assert kilometer.is_prefixed
    assert kilogram.is_prefixed
    assert pebibyte.is_prefixed
test_prefixed_property
9
22
https://github.com/sympy/sympy.git
40
sympy
Python
0
1
40a89803dbe877edc8ab6672819715f959273e60
sympy/physics/units/tests/test_quantities.py
197,626
add python 3.10.4 for windows
10
10
def test_PyObj_FromPtr(self):
    s = "abc def ghi jkl"
    ref = grc(s)
    # id(python-object) is the address
    pyobj = PyObj_FromPtr(id(s))
    self.assertIs(s, pyobj)
    self.assertEqual(grc(s), ref + 1)
    del pyobj
    self.assertEqual(grc(s), ref)
94
test_python_api.py
84
23
def test_PyObj_FromPtr(self):
    s = "abc def ghi jkl"
    ref = grc(s)
    # id(python-object) is the address
    pyobj = PyObj_FromPtr(id(s))
    self.assertIs(s, pyobj)
    self.assertEqual(grc(s), ref + 1)
    del pyobj
    self.assertEqual(grc(s), ref)
test_PyObj_FromPtr
8
29
https://github.com/XX-net/XX-Net.git
57
XX-Net
Python
0
1
8198943edd73a363c266633e1aa5b2a9e9c9f526
python3.10.4/Lib/ctypes/test/test_python_api.py
222,147
resolve line-too-long in integration_test
27
12
def testDistributedModelFit(self, strategy):
    if not tf.__internal__.tf2.enabled() and isinstance(
        strategy, tf.distribute.experimental.ParameterServerStrategy
    ):
        self.skipTest(
            "Parameter Server strategy with dataset creator need to be run "
            "when eager execution is enabled."
        )
176
preprocessing_applied_in_model_test.py
249
57
def testDistributedModelFit(self, strategy):
    if not tf.__internal__.tf2.enabled() and isinstance(
        strategy, tf.distribute.experimental.ParameterServerStrategy
    ):
        self.skipTest(
            "Parameter Server strategy with dataset creator need to be run "
            "when eager execution is enabled."
        )
    with strategy.scope():
        preprocessing_model = utils.make_preprocessing_model(
            self.get_temp_dir()
        )
        training_model = utils.make_training_model()
        # Merge the two separate models into a single model for training.
        inputs = preprocessing_model.inputs
        outputs = training_model(preprocessing_model(inputs))
        merged_model = tf.keras.Model(inputs, outputs)
        merged_model.compile(optimizer="sgd", loss="binary_crossentropy")
testDistributedModelFit
20
63
https://github.com/keras-team/keras.git
135
keras
Python
0
3
4f1d333ded256b0315cf02eee067d6fa902b748d
keras/integration_test/preprocessing_applied_in_model_test.py
278,142
Simple (flat rate) taxes API (#9784) * Add empty tax module * Add tax models (#9839) * Add tax API queries (#9856) * Add MANAGE_TAXES permission * Add tax configuration queries * Create tax configuration when channel is created * Drop sorters for now * Add TaxConfigurationPerCountry type * Update migration * Add metadata to TaxConfiguration type * Add tests for tax configuration queries * Add TaxClass types * Improve tests * Add queries for tax configuration per country * Fix query in tests * Update query cost map * Add tax API mutations (#9934) * Add taxConfigurationUpdate mutation * Update schema * Add tax class CRUD mutations * Add mutations to update/delete tax class rates per country * Review fixes * Add taxClass field to ProductType type (#9999) * Add taxClass field to ProductType type * Add taxClass field to Product type * Add taxClass field to shipping method type * Add displayGrossPrices to ProductPricingInfo (#10008) * Add displayGrossPrices to ProductPricingInfo * Add displayGrossPrices to Checkout * Add displayGrossPrices to Order * Add tests * Add ADDED_IN_35 label to new fields' descriptions * Use new display_gross_prices flag (#10121) * Use new display_gross_prices flag * Update tests * Add tests * Review fixes * Drop Vatlayer (#10335) * Add migration from Vatlayer to simple taxes * Review fixes * Review fixes * Drop usages of global include_taxes_in_prices flag (#10406) * Drop `include_taxes_in_prices` function from site settings * Adjust tests * Review fixes * Drop the `charge_taxes_on_shipping` flag from site settings. (#10466) * Include migrating Avatax tax codes in tax class migration * Drop `charge_taxes_on_shipping` function * Add tax_class to ShippingMethodData * Review fixes * Always calculate shipping tax with Avalara * Add default country rate (#10497) * Allow setting default tax rate for a country (without providing a tax class) * Add validation to allow settings only one default rate at once * Code review fixes * Add taxCalculationStrategy field * Add tests * CR fixes * Adjust resolver to use new tax configuration (#10533) * CR fixes * Add database router to fix false positives on relation mismatch. (#10524) * Add database router to fix false positives on relation mismatch. * The db router should have only 'allow_relation' implemented. * The 'db_for_write' part should stay. * Subscription for sync tax webooks (#10433) * Add proposed changes to schema * Add base implementation for sync tax subscription * Add test for empty order * Add clean up and missing part for tests * Use subscription for tax webhooks. Add more tests * Improve descriptions for tax objects * Adjust resolver to use new tax configuration (#10533) * Add taxCalculationStrategy field (#10532) * Add taxCalculationStrategy field * Add tests * CR fixes * CR fixes * Add datamigration to populate taxCalculationStrategy * Migrate Product.charge_taxes to new tax configuration (#10585) * Migrate Product.charge_taxes field to new tax configuration * Rename function * Fix tests * Change assign_tax_code_to_object_meta function to support tax classes * Update tax class fixtures * Improve dataloader * CR fixes * CR fixes * Add deprecation notice to dataloader * Allow removing country rates in the `taxCountryConfigurationUpdate` mutation. 
(#10647) * Allow deleting rates in taxCountryConfigurationUpdate mutation * Change tax rates ordering to keep default rates first (with null tax classes) * Update existing migration * Remove TaxClass.is_default field (#10660) * Change tax rates ordering to keep default rates first (with null tax classes) * Update existing migration * Drop is_default field from TaxClass model * Drop extra Avalara config (#10673) * Drop extra Avatax config options * Adjust tests * Use flat rates in tax calculations (#10747) * WIP Use new tax configuration in tax calculations * Use new tax calculations for checkout * Adjust tests * Add flat rates calculations for checkout and order * Calculate flat rates in product pricing objects * Adjust tests * Add tests for order calculations * Add tests for product queries tax calculations * Add tests for order calculations * Use base calculations to get default checkout shipping price * Add tests for using tax_class from product_type * Add tests for get_order_country * Adjust tests * Code review fixes * Drop update_taxes_for_order_lines (#11000) * Fix calls to Avalara not validating order (#11012) * Add validation to disallow creating negative rates (#11010) * Add missing recalculation of order.undiscounted_total (#11039) * Optimize getting tax class country rates (#11040) * Tax API adjustments for dashboard (#11042) * Ignore null rates in taxCountryConfigurationUpdate mutation * Allow to pass null rates in taxClassUpdate mutation * Improve tests * Update saleor/graphql/tax/mutations/tax_class_update.py Co-authored-by: Krzysztof Waliczek <krzysztof.waliczek@saleor.io> * Update schema Co-authored-by: Krzysztof Waliczek <krzysztof.waliczek@saleor.io> * Cleanup before release (#11049) * Update ADDED_IN labels * Fix skippeded test * Regenerate migrations * Deprecate CountryDisplay.vat field * Add changelog * Update order.undiscounted_total calculation to not include taxes (#11068) * Fix assigning rates to tax classes (#11105) * Allow all staff users and apps to query tax-related data (#11113) * Bump dependencies for origin/SALEOR-6391-simple-taxes (#11127) Bumps: - cryptography to 38.0.3 - pillow to 9.3.0 * Fix using tax code from product and product type's tax class (#11111) * Fix using tax code from product and product type's tax class * Extract function * Replace synchronous load_site with promise (#11165) * Denormalize tax class for order lines and orders (#11172) * WIP Denormalize tax class for order lines and orders * Add denormalized fields in GraphQL types * Add tests for denormalized API fields * Return 0 rate in API when rate is null * Add preview/version notes in new field descriptions * Update changelog Co-authored-by: Dominik Kozaczko <dominik@kozaczko.info> Co-authored-by: Maciej Korycinski <maciej@mirumee.com> Co-authored-by: Krzysztof Waliczek <krzysztof.waliczek@saleor.io> Co-authored-by: Mika <6186720+NyanKiyoshi@users.noreply.github.com> Co-authored-by: Krzysztof Kwaśniak <mr.brzys@gmail.com>
14
10
def test_update_order_display_gross_prices_use_country_specific_tax_settings(order):
    # given
    country_code = "PT"
    tax_config = order.channel.tax_configuration
    tax_config.display_gross_prices = False
    tax_config.save()
    tax_config.country_exceptions.create(
        country=country_code, display_gross_prices=True
    )

    order.display_gross_prices = False
    order.save(update_fields=["display_gross_prices"])
    order.shipping_address.country = c
130
test_order_utils.py
84
24
def test_update_order_display_gross_prices_use_country_specific_tax_settings(order):
    # given
    country_code = "PT"
    tax_config = order.channel.tax_configuration
    tax_config.display_gross_prices = False
    tax_config.save()
    tax_config.country_exceptions.create(
        country=country_code, display_gross_prices=True
    )

    order.display_gross_prices = False
    order.save(update_fields=["display_gross_prices"])
    order.shipping_address.country = country_code
    order.shipping_address.save()

    # when
    update_order_display_gross_prices(order)

    # then
    assert order.display_gross_prices
test_update_order_display_gross_prices_use_country_specific_tax_settings
14
33
https://github.com/saleor/saleor.git
76
saleor
Python
0
1
67df28935c555fdd673f17e8c9183e24dde7c51f
saleor/order/tests/test_order_utils.py
29,518
FEAT-#4766: Support fsspec URLs in `read_csv` and `read_csv_glob` (#4898) Signed-off-by: Karthik Velayutham <vkarthik@ponder.io>
5
9
def test_read_csv_google_cloud_storage(self):
    eval_io(
        fn_name="read_csv",
        # read_csv kwargs
        filepath_or_buffer="gs://modin-testing/testing/multiple_csv/tes
30
test_io.py
55
9
def test_read_csv_google_cloud_storage(self):
    eval_io(
        fn_name="read_csv",
        # read_csv kwargs
        filepath_or_buffer="gs://modin-testing/testing/multiple_csv/test_data0.csv",
    )
test_read_csv_google_cloud_storage
5
9
https://github.com/modin-project/modin.git
16
modin
Python
0
1
c5107e5be29089720528c6c0ec4f96bc2a6a1eb3
modin/pandas/test/test_io.py
154,353
increased performance of k-diagonal extraction in da.diag() and da.diagonal() (#8689) * added support for extracting k-diagonals from a 2d-array * included heterogeneous chunks in test_diag() * fixed linting errors in test_diag() * improved efficiency of diagonal extractor a bit * stole @TAdeJong's simple padding solution for diag(v, k) when v is 1d * reduced complexity of `diagonal()` from O(N**2) to O(N) diag() now calls diagonal() * fixed linting errors in diagonal() * reorganized tests and ensured coverage of diag() & diagonal() as per @jcrist's advice * catered for cupy type input arrays to diagonal()
16
11
def test_diag_2d_array_creation(k):
    # when input 1d-array is a numpy array:
    v = np.arange(11)
    assert_eq(da.diag(v, k), np.diag(v, k))

    # when input 1d-array is a dask array:
    v = da.arange(11, chunks=3)
    darr = da.diag(v, k)
    nparr = np.diag(v, k)
    assert_eq(darr, nparr)
    assert sorted(da.diag(v, k).dask) == sorted(da.diag(v, k).dask)

    v = v + v + 3
    darr = da.diag(v, k)
    nparr = np.diag(v, k)
    assert_eq(darr, nparr)

    v = da.arange(11, chunks=11)
    darr = da.diag(v, k)
    nparr
331
test_creation.py
139
38
def test_diag_2d_array_creation(k):
    # when input 1d-array is a numpy array:
    v = np.arange(11)
    assert_eq(da.diag(v, k), np.diag(v, k))

    # when input 1d-array is a dask array:
    v = da.arange(11, chunks=3)
    darr = da.diag(v, k)
    nparr = np.diag(v, k)
    assert_eq(darr, nparr)
    assert sorted(da.diag(v, k).dask) == sorted(da.diag(v, k).dask)

    v = v + v + 3
    darr = da.diag(v, k)
    nparr = np.diag(v, k)
    assert_eq(darr, nparr)

    v = da.arange(11, chunks=11)
    darr = da.diag(v, k)
    nparr = np.diag(v, k)
    assert_eq(darr, nparr)
    assert sorted(da.diag(v, k).dask) == sorted(da.diag(v, k).dask)


@pytest.mark.parametrize("k", [0, 3, -3, 8])
test_diag_2d_array_creation
17
87
https://github.com/dask/dask.git
198
dask
Python
1
1
@pytest.mark.parametrize("k", [0, 3, -3, 8])
e3b3259419c21d0d412b9d5f12531ebe5ad6967a
dask/array/tests/test_creation.py
155,850
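The test above is parametrized over `k`; for readers unfamiliar with the convention, `k` selects which diagonal `diag` reads or writes (positive above the main diagonal, negative below). A quick NumPy illustration:

```python
import numpy as np

v = np.arange(1, 4)
np.diag(v, k=1)          # 4x4 matrix with v on the first superdiagonal
A = np.arange(16).reshape(4, 4)
np.diag(A, k=-1)         # array([ 4,  9, 14]): the first subdiagonal of A
```

`da.diag` mirrors these semantics chunk by chunk, which is what the paired `np.diag`/`da.diag` assertions check.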
Add ImageNet-Sketch dataset (#4301) * :sparkles: Add ImageNet-Sketch dataset * :memo: add data splits to dataset card * Update datasets/imagenet_sketch/README.md * :sparkles: labels->label and use HF hosted download link * Apply suggestions from code review Co-authored-by: Mario Šaško <mariosasko777@gmail.com> * :memo: update imagenet_sketch README.md * Use dataset repo data url Co-authored-by: Mario Šaško <mariosasko777@gmail.com>
13
14
def _split_generators(self, dl_manager):
    data_files = dl_manager.download_and_extract(_URL)
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "files": dl_manager.iter_files([data_files]),
            },
        ),
    ]
74
imagenet_sketch.py
122
16
def _split_generators(self, dl_manager):
    data_files = dl_manager.download_and_extract(_URL)
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "files": dl_manager.iter_files([data_files]),
            },
        ),
    ]
_split_generators
10
16
https://github.com/huggingface/datasets.git
48
datasets
Python
0
1
78941675d6f39c269f9d445121718c6c27c511dc
datasets/imagenet_sketch/imagenet_sketch.py
104,925
support confirming no virus scan on GDrive download (#5645) * support confirming no virus scan on GDrive download * put gen_bar_updater back * Update torchvision/datasets/utils.py Co-authored-by: Nicolas Hug <contact@nicolas-hug.com> Co-authored-by: Nicolas Hug <contact@nicolas-hug.com>
8
9
def gen_bar_updater() -> Callable[[int, int, int], None]:
    warnings.warn("The function `gen_bar_update`
49
utils.py
28
23
def gen_bar_updater() -> Callable[[int, int, int], None]:
    warnings.warn("The function `gen_bar_update` is deprecated since 0.13 and will be removed in 0.15.")
    pbar = tqdm(total=None)
gen_bar_updater
5
23
https://github.com/pytorch/vision.git
35
vision
Python
0
1
96f2c0d47f00371dd066c84f69c34fde07e876c3
torchvision/datasets/utils.py
192,538
Reformat with black
13
12
def test_revert_to_page_revision(self):
    self.assertEqual(self.events_page.title, "Evenements")

    response = self.get_response(
        self.events_page.id, {"revision_id": self.first_revision.id}
    )
    self.assertEqual(response.status_code, 200)

    self.events_page.get_latest_
130
test_pages.py
76
16
def test_revert_to_page_revision(self):
    self.assertEqual(self.events_page.title, "Evenements")

    response = self.get_response(
        self.events_page.id, {"revision_id": self.first_revision.id}
    )
    self.assertEqual(response.status_code, 200)

    self.events_page.get_latest_revision().publish()
    self.events_page.refresh_from_db()
    self.assertEqual(self.events_page.title, "Events")
test_revert_to_page_revision
9
17
https://github.com/wagtail/wagtail.git
79
wagtail
Python
0
1
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail/admin/tests/api/test_pages.py
71,307
Merge index and mark rates as part of dataload
35
16
def load_bt_data_detail(self) -> None:
    if self.timeframe_detail:
        self.detail_data = history.load_data(
            datadir=self.config['datadir'],
            pairs=self.pairlists.whitelist,
            timeframe=self.timeframe_detail,
            timerange=self.timerange,
            startup_candles=0,
            fail_without_data=True,
            data_format=self.config.get('dataformat_ohlcv', 'json'),
            candle_type=self.config.get('candle_type_def', CandleType.SPOT)
        )
    else:
        self.detail_data = {}
    if self.trading_mode == TradingMode.FUTURES:
        # Load additional futures data.
        funding_rates_dict = history.load_data(
            datadir=self.config['datadir'],
            pairs=self.pairlists.whitelist,
            timeframe=self.exchange._ft_has['mark_ohlcv_timeframe'],
            timerange=self.timerange,
            startup_candles=0,
            fail_without_data=True,
            data_format=self.config.get('dataformat_ohlcv', 'json'),
            candle_type=CandleType.FUNDING_RATE
        )

        # For simplicity, assign to CandleType.Mark (might contain index candles!)
        mark_rates_dict = history.load_data(
            datadir=self.config['datadir'],
            pairs=self.pairlists.whitelist,
            timeframe=self.exchange._ft_has['mark_ohlcv_timeframe'],
            timerange=self.timerange,
            startup_candles=0,
            fail_without_data=True,
            data_format=self.config.get('dataformat_ohlcv', 'json'),
            candle_type=CandleType.from_string(se
467
backtesting.py
656
63
def load_bt_data_detail(self) -> None:
    if self.timeframe_detail:
        self.detail_data = history.load_data(
            datadir=self.config['datadir'],
            pairs=self.pairlists.whitelist,
            timeframe=self.timeframe_detail,
            timerange=self.timerange,
            startup_candles=0,
            fail_without_data=True,
            data_format=self.config.get('dataformat_ohlcv', 'json'),
            candle_type=self.config.get('candle_type_def', CandleType.SPOT)
        )
    else:
        self.detail_data = {}
    if self.trading_mode == TradingMode.FUTURES:
        # Load additional futures data.
        funding_rates_dict = history.load_data(
            datadir=self.config['datadir'],
            pairs=self.pairlists.whitelist,
            timeframe=self.exchange._ft_has['mark_ohlcv_timeframe'],
            timerange=self.timerange,
            startup_candles=0,
            fail_without_data=True,
            data_format=self.config.get('dataformat_ohlcv', 'json'),
            candle_type=CandleType.FUNDING_RATE
        )

        # For simplicity, assign to CandleType.Mark (might contain index candles!)
        mark_rates_dict = history.load_data(
            datadir=self.config['datadir'],
            pairs=self.pairlists.whitelist,
            timeframe=self.exchange._ft_has['mark_ohlcv_timeframe'],
            timerange=self.timerange,
            startup_candles=0,
            fail_without_data=True,
            data_format=self.config.get('dataformat_ohlcv', 'json'),
            candle_type=CandleType.from_string(self.exchange._ft_has["mark_ohlcv_price"])
        )
        # Combine data to avoid combining the data per trade.
        for pair in self.pairlists.whitelist:
            self.futures_data[pair] = funding_rates_dict[pair].merge(
                mark_rates_dict[pair], on='date', how="inner", suffixes=["_fund", "_mark"])
    else:
        self.futures_data = {}
load_bt_data_detail
43
95
https://github.com/freqtrade/freqtrade.git
299
freqtrade
Python
0
4
f26cd191466b792123f3d0b1a18b3b117a23a638
freqtrade/optimize/backtesting.py
148,554
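A toy illustration of the per-pair merge performed at the end of `load_bt_data_detail` above (synthetic frames, not freqtrade data): overlapping column names pick up the `_fund`/`_mark` suffixes while the join key stays unsuffixed.

```python
import pandas as pd

dates = pd.date_range("2021-01-01", periods=3, freq="8h")
funding = pd.DataFrame({"date": dates, "open": [0.01, 0.02, 0.03]})
mark = pd.DataFrame({"date": dates, "open": [100.0, 101.0, 102.0]})

merged = funding.merge(mark, on="date", how="inner", suffixes=["_fund", "_mark"])
print(list(merged.columns))  # ['date', 'open_fund', 'open_mark']
```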
String formatting and max line length - Part 1 (#84390) Co-authored-by: Erik Montnemery <erik@montnemery.com>
12
11
async def async_stop(self) -> None:
    _LOGGER.warning(
        "The bond.stop service is deprecated and has been replaced with a button;"
        " Call the button.press service instead"
    )
    self._async_has_a
80
light.py
86
28
async def async_stop(self) -> None:
    _LOGGER.warning(
        "The bond.stop service is deprecated and has been replaced with a button;"
        " Call the button.press service instead"
    )
    self._async_has_action_or_raise(Action.STOP)
    await self._hub.bond.action(self._device.device_id, Action(Action.STOP))
async_stop
8
29
https://github.com/home-assistant/core.git
45
core
Python
0
1
b0cee0bc46cbd7efe0e6421da18d91595c7a25ad
homeassistant/components/bond/light.py
297,803
Clean zwave_js platform typing (#72439) * Fix binary sensor * Fix climate * Fix cover * Fix fan * Fix light * Fix lock * Fix number * Fix select * Fix sensor * Add back type ignore until library bump
8
11
def extra_state_attributes(self) -> dict[str, str] | None:
    if (value := se
61
sensor.py
69
28
def extra_state_attributes(self) -> dict[str, str] | None:
    if (value := self.info.primary_value.value) is None:
        return None
    # add the value's int value as property for multi-value (list) items
    return {ATTR_VALUE: value}
extra_state_attributes
5
30
https://github.com/home-assistant/core.git
38
core
Python
0
2
6cac1dadeba6cb81285960db1ab6ec6239547cd9
homeassistant/components/zwave_js/sensor.py
301,321
Adjust smartthings cover type hints (#73948)
9
8
def current_cover_position(self) -> int | None:
50
cover.py
47
14
def current_cover_position(self) -> int | None:
    if not self._attr_supported_features & CoverEntityFeature.SET_POSITION:
        return None
    return self._device.status.level
current_cover_position
5
15
https://github.com/home-assistant/core.git
30
core
Python
0
2
3743d42ade80528325d36357ca6f9629d4970eaa
homeassistant/components/smartthings/cover.py
314,506
created backends sub-folder for all backend implementations.
24
11
def test_softmax(x, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    x = tensor_fn(x, dtype_str, dev_str)
    ret = ivy.softmax(x)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.softmax, x), ivy.backends.numpy.softmax(ivy.to_numpy(x)))
    # compilation test
    if not ivy.wrapped_mode():
        helpers.assert_compilable(ivy.soft
231
test_activations.py
101
42
def test_softmax(x, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    x = tensor_fn(x, dtype_str, dev_str)
    ret = ivy.softmax(x)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.softmax, x), ivy.backends.numpy.softmax(ivy.to_numpy(x)))
    # compilation test
    if not ivy.wrapped_mode():
        helpers.assert_compilable(ivy.softmax)


# softplus
@pytest.mark.parametrize(
    "x", [[[-1., 1., 2.]]])
@pytest.mark.parametrize(
    "dtype_str", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
test_softmax
8
57
https://github.com/unifyai/ivy.git
92
ivy
Python
1
2
@pytest.mark.parametrize(
    "x", [[[-1., 1., 2.]]])
@pytest.mark.parametrize(
    "dtype_str", ['float32'])
@pytest.mark.parametrize(
    "tensor_fn", [ivy.array, helpers.var_fn])
b50046a631badcf15ee25b6355a2d2052f6f5bf9
ivy_tests/test_nn/test_functional/test_activations.py
213,207
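The value test above compares `ivy.softmax` against a NumPy reference. For orientation, a typical numerically stable NumPy softmax looks like this (an illustration of the expected math, not necessarily `ivy.backends.numpy.softmax` verbatim):

```python
import numpy as np

def softmax(x, axis=-1):
    z = x - x.max(axis=axis, keepdims=True)   # shift for numerical stability
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)

softmax(np.array([[-1.0, 1.0, 2.0]]))  # rows sum to 1, shape preserved
```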
Fix batching rule for convolution for batch dimensions of size 0.
16
11
def _reshape_axis_into(src, dst, x):
    # NB: `dst` is the number of the dimension that we should reshape into
    # *after* `src` is removed from `x`'s list of dimensions. For example, if
    # `src` is an added batch dimension, `dst` might name a target dimension in
    # the
110
convolution.py
83
57
def _reshape_axis_into(src, dst, x):
    # NB: `dst` is the number of the dimension that we should reshape into
    # *after* `src` is removed from `x`'s list of dimensions. For example, if
    # `src` is an added batch dimension, `dst` might name a target dimension in
    # the unbatched list of dimensions.
    perm = [i for i in range(x.ndim) if i != src]
    perm.insert(dst, src)
    new_shape = list(np.delete(x.shape, src))
    new_shape[dst] *= x.shape[src]
    return lax.reshape(x, new_shape, perm)
_reshape_axis_into
6
75
https://github.com/google/jax.git
73
jax
Python
0
3
ece9b999fb5f85eee6570e5f987ad6704c130503
jax/_src/lax/convolution.py
120,722
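In NumPy terms, `_reshape_axis_into(src, dst, x)` transposes `src` next to `dst` and folds it in, so the batch-size-0 case from the commit message reduces to an ordinary zero-sized reshape. A hedged equivalent for `src=0, dst=1` (illustration only, using NumPy rather than `lax`):

```python
import numpy as np

x = np.arange(2 * 3 * 5).reshape(2, 3, 5)            # src=0 is a batch of 2
merged = x.transpose(1, 0, 2).reshape(3, 2 * 5)      # fold src into dst=1
print(merged.shape)                                  # (3, 10)

empty = np.empty((0, 3, 5))                          # the bug's trigger case
print(empty.transpose(1, 0, 2).reshape(3, 0).shape)  # (3, 0), still valid
```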
Bootloader: Building: Unpack waf's lib archive. Doing so makes it easier to modify. This is a temporary measure until the next waf version is released (although I'm tempted to keep it since it's much more IDE completion friendly).
9
13
def force_permissions(self):
    if getattr(self.generator, 'chm
63
TaskGen.py
44
12
def force_permissions(self):
    if getattr(self.generator, 'chmod', None):
        for x in self.outputs:
            os.chmod(x.abspath(), self.generator.chmod)
force_permissions
4
12
https://github.com/pyinstaller/pyinstaller.git
40
pyinstaller
Python
0
3
64ccb7aea824fbec57f7ed1bbe483ec486183c13
bootloader/waflib/TaskGen.py
263,221
Fix diagnostics export for generic camera (#75665) Fix url redaction and add tests Co-authored-by: Dave T <davet2001@users.noreply.github.com>
5
8
def test_redact_url(url_in, url_out_expected):
    url_out = redact_url(
31
test_diagnostics.py
19
9
def test_redact_url(url_in, url_out_expected):
    url_out = redact_url(url_in)
    assert url_out == url_out_expected
test_redact_url
3
10
https://github.com/home-assistant/core.git
18
core
Python
0
1
7075032bf743f8702d942410c0c41214c90c212b
tests/components/generic/test_diagnostics.py
317,791
Port over tests from npreadtext test suite - Add test for parsing scientific notation. - Add multiple-char comment test. - Port over tests for structured dtypes. - Add tests for exceptions on skiprows/max_rows. - port over ndmin tests. - Make structured data reusable, add unpack tests. - Port over delimiter tests. - Port over maxrows test w/ various dtypes. - Port over test of exception msg on parse failure. - Port over test for converters w/neg indices. - Port over usecols tests - Port over unicode tests. - Port over more converter tests. - Port over test for large rows. - Port over test for string-len discovery. - Port over float conversion accuracy test. - Port over bool test. - Add test for implicit float->int conversion. - Port over complex parsing tests. - Port over tests for reading from generator. - Port over object cleanup test. - Port over bytes incompat test. - Port over converters tests. Co-authored-by: Warren Weckesser <warren.weckesser@gmail.com> Co-authored-by: Sebastian Berg <sebastian@sipsolutions.net>
18
12
def test_loadtxt_converters_negative_indices():
    txt = TextIO('1.5,2.5\n3.0,XXX\n5.5,6.0')
    conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)}
    expected = np.array([[1.5, 2
143
test_io.py
60
33
def test_loadtxt_converters_negative_indices():
    txt = TextIO('1.5,2.5\n3.0,XXX\n5.5,6.0')
    conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)}
    expected = np.array([[1.5, 2.5], [3.0, np.nan], [5.5, 6.0]])
    res = np.loadtxt(
        txt, dtype=np.float64, delimiter=",", converters=conv, encoding=None
    )
    assert_equal(res, expected)
test_loadtxt_converters_negative_indices
8
36
https://github.com/numpy/numpy.git
102
numpy
Python
0
2
66a61b03658f3c9f312505dcf7eab07e4cf91ac6
numpy/lib/tests/test_io.py
159,774
[Autoscaler][Local Node Provider] Log a warning if max_workers < len(worker_ips) (#24635) Logs a warning when a user sets max_workers for local node provider less than the number of available ips. Also removes defaults of 0 for min_workers and max_workers from example configs to help prevent users inadvertantly setting max_workers=0 again.
35
12
def testValidateLocal(self):
    local_config_path = os.path.join(
        RAY_PATH, "autoscaler/local/example-minimal-manual.yaml"
    )
    base_config = yaml.safe_load(open(local_config_path).read())
    base_config["provider"]["head_ip"] = "xxx.yyy"
    base_config["provider"]["worker_ips"] = ["aaa.bbb", "ccc.ddd", "eee.fff"]
    base_config["
597
test_autoscaler_yaml.py
750
138
def testValidateLocal(self):
    local_config_path = os.path.join(
        RAY_PATH, "autoscaler/local/example-minimal-manual.yaml"
    )
    base_config = yaml.safe_load(open(local_config_path).read())
    base_config["provider"]["head_ip"] = "xxx.yyy"
    base_config["provider"]["worker_ips"] = ["aaa.bbb", "ccc.ddd", "eee.fff"]
    base_config["auth"]["ssh_user"] = "user"
    base_config["auth"]["ssh_private_key"] = "~/.ssh/id_rsa"

    test_prepare_config = copy.deepcopy(base_config)
    prepared_config = prepare_config(test_prepare_config)
    try:
        validate_config(prepared_config)
    except Exception:
        self.fail("Failed to validate local/example-minimal-manual.yaml")
    expected_prepared = yaml.safe_load(EXPECTED_LOCAL_CONFIG_STR)
    assert prepared_config == expected_prepared

    no_worker_config = copy.deepcopy(base_config)
    del no_worker_config["provider"]["worker_ips"]
    with pytest.raises(ClickException):
        prepare_config(no_worker_config)
    no_head_config = copy.deepcopy(base_config)
    del no_head_config["provider"]["head_ip"]
    with pytest.raises(ClickException):
        prepare_config(no_head_config)

    for field in "head_node", "worker_nodes", "available_node_types":
        faulty_config = copy.deepcopy(base_config)
        faulty_config[field] = "This field shouldn't be in here."
        with pytest.raises(ClickException):
            prepare_config(faulty_config)

    too_many_workers_config = copy.deepcopy(base_config)

    # More workers requested than the three available ips.
    too_many_workers_config["max_workers"] = 10
    too_many_workers_config["min_workers"] = 10
    prepared_config = prepare_config(too_many_workers_config)

    # Check that worker config numbers were clipped to 3.
    assert prepared_config == expected_prepared

    not_enough_workers_config = copy.deepcopy(base_config)

    # Max workers is less than the three available ips.
    # The user has probably made an error. Make sure we log a warning.
    not_enough_workers_config["max_workers"] = 0
    not_enough_workers_config["min_workers"] = 0
    with mock.patch(
        "ray.autoscaler._private.local.config.cli_logger.warning"
    ) as warning:
        prepared_config = prepare_config(not_enough_workers_config)
        warning.assert_called_with(
            "The value of `max_workers` supplied (0) is less"
            " than the number of available worker ips (3)."
            " At most 0 Ray worker nodes will connect to the cluster."
        )
    expected_prepared = yaml.safe_load(EXPECTED_LOCAL_CONFIG_STR)

    # We logged a warning.
    # However, prepare_config does not repair the strange config setting:
    expected_prepared["max_workers"] = 0
    expected_prepared["available_node_types"]["local.cluster.node"][
        "max_workers"
    ] = 0
    expected_prepared["available_node_types"]["local.cluster.node"][
        "min_workers"
    ] = 0
    assert prepared_config == expected_prepared
testValidateLocal
56
228
https://github.com/ray-project/ray.git
323
ray
Python
0
3
29eebdfef2acb7d278042f38247a7d82473c3fd6
python/ray/tests/test_autoscaler_yaml.py
139,341
Reformat with black
11
10
def get_request_and_django_site(self, url):
    request = RequestFactory().get(url)
    request.META["HTTP_HOST"] = self.site.hostname
    request.META["SERVER_PORT"] = self.site.port
    return request, get_current_site(request)
79
tests.py
42
13
def get_request_and_django_site(self, url):
    request = RequestFactory().get(url)
    request.META["HTTP_HOST"] = self.site.hostname
    request.META["SERVER_PORT"] = self.site.port
    return request, get_current_site(request)
get_request_and_django_site
5
15
https://github.com/wagtail/wagtail.git
48
wagtail
Python
0
1
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail/contrib/sitemaps/tests.py
73,564
Fix lax imports
12
10
def dot_with_no_batch_dims(prim, *_, **params) -> bool:
    # This is a useful heuristic for transformers.
    if prim is lax_inter
89
ad_checkpoint.py
49
32
def dot_with_no_batch_dims(prim, *_, **params) -> bool:
    # This is a useful heuristic for transformers.
    if prim is lax_internal.dot_general_p:
        (_, _), (lhs_b, rhs_b) = params['dimension_numbers']
        if not lhs_b and not rhs_b:
            return True
    return False


name_p = core.Primitive('name')
dot_with_no_batch_dims
6
37
https://github.com/google/jax.git
47
jax
Python
0
4
1d895b2c85e17b9f563cd41d9a340528179d29aa
jax/_src/ad_checkpoint.py
122,094
[Datasets] Provide more efficient + intuitive block clearing semantics for different execution modes (#24127) **TL;DR:** Don't clear for eager, clear all but non-lazy input blocks if lazy, clear everything if pipelining. This PR provides more efficient and intuitive block clearing semantics for eager mode, lazy mode, and pipelining, while still supporting multiple operations applied to the same base dataset, i.e. fan-out. For example, two different map operations are applied to the same base `ds` in this example: ```python ds = ray.data.range(10).map(lambda x: x+1) ds1 = ds.map(lambda x: 2*x) ds2 = ds.map(lambda x: 3*x) ``` If naively clear the blocks when executing the map to produce `ds1`, the map producing `ds2` will fail. ### Desired Semantics - **Eager mode** - don’t clear input blocks, thereby supporting fan-out from cached data at any point in the stage chain without triggering unexpected recomputation. - **Lazy mode** - if lazy datasource, clear the input blocks for every stage, relying on recomputing via stage lineage if fan-out occurs; if non-lazy datasource, do not clear source blocks for execution plan when executing first stage, but do clear input blocks for every subsequent stage. - **Pipelines** - Same as lazy mode, although the only fan-out that can occur is from the pipeline source blocks when repeating a dataset/pipeline, so unintended intermediate recomputation will never happen.
4
11
def _check_if_cleared(self) -> None:
    if self.is_cleared():
        raise ValueError(
42
block_list.py
92
26
def _check_if_cleared(self) -> None:
    if self.is_cleared():
        raise ValueError(
            "This Dataset's blocks have been moved, which means that you "
            "can no longer use this Dataset."
        )
_check_if_cleared
7
26
https://github.com/ray-project/ray.git
21
ray
Python
0
2
f72555262afbbfc1aabb87c9e40839aaaee3ba0b
python/ray/data/impl/block_list.py
139,015
Implement {Series,DataFrame}GroupBy `fillna` methods (#8869) Co-authored-by: Ian Rose <ian.r.rose@gmail.com>
17
13
def test_bfill():
    df = pd.DataFrame(
        {
            "A": [1, 1, 2, 2],
            "B": [3, 4, 3, 4],
            "C": [np.nan, 3, np.nan, np.nan],
            "D": [np.nan, 4, np.nan, 5],
            "E": [np.nan, 6, np.nan, 7],
        }
    )
    ddf = dd.from_pandas(df, npartit
411
test_groupby.py
257
54
def test_bfill():
    df = pd.DataFrame(
        {
            "A": [1, 1, 2, 2],
            "B": [3, 4, 3, 4],
            "C": [np.nan, 3, np.nan, np.nan],
            "D": [np.nan, 4, np.nan, 5],
            "E": [np.nan, 6, np.nan, 7],
        }
    )
    ddf = dd.from_pandas(df, npartitions=2)
    assert_eq(
        df.groupby("A").bfill(),
        ddf.groupby("A").bfill(),
    )
    assert_eq(
        df.groupby("A").B.bfill(),
        ddf.groupby("A").B.bfill(),
    )
    assert_eq(
        df.groupby(["A", "B"]).bfill(),
        ddf.groupby(["A", "B"]).bfill(),
    )


@pytest.mark.parametrize(
    "grouper",
    [
        lambda df: ["a"],
        lambda df: ["a", "b"],
        lambda df: df["a"],
        lambda df: [df["a"], df["b"]],
        lambda df: [df["a"] > 2, df["b"] > 1],
    ],
)
test_bfill
23
78
https://github.com/dask/dask.git
186
dask
Python
1
1
@pytest.mark.parametrize(
    "grouper",
    [
        lambda df: ["a"],
        lambda df: ["a", "b"],
        lambda df: df["a"],
        lambda df: [df["a"], df["b"]],
        lambda df: [df["a"] > 2, df["b"] > 1],
    ],
)
5fbda77cfc5bc1b8f1453a2dbb034b048fc10726
dask/dataframe/tests/test_groupby.py
156,539
test: split demo.py into seperate files and functions
24
14
def plot_line_stackedarea(viz, env):
    Y = np.linspace(0, 4, 200)
171
plot_line.py
209
41
def plot_line_stackedarea(viz, env):
    Y = np.linspace(0, 4, 200)

    return viz.line(
        Y=np.column_stack((np.sqrt(Y), np.sqrt(Y) + 2)),
        X=np.column_stack((Y, Y)),
        opts=dict(
            fillarea=True,
            showlegend=False,
            width=800,
            height=800,
            xlabel='Time',
            ylabel='Volume',
            ytype='log',
            title='Stacked area plot',
            marginleft=30,
            marginright=30,
            marginbottom=80,
            margintop=30,
        ),
    )


# Assure that the stacked area plot isn't giant
plot_line_stackedarea
20
42
https://github.com/fossasia/visdom.git
117
visdom
Python
0
1
b4115c0337b1bacc876bef1ece97e8fa8b3e2834
example/components/plot_line.py
106,625
upd; format
4
6
def identify(self, requirement_or_candidate):
    # type: (Union[Requirement, Candidate]) -> str
    return requirement_or_candidate.na
18
provider.py
24
11
def identify(self, requirement_or_candidate):
    # type: (Union[Requirement, Candidate]) -> str
    return requirement_or_candidate.name
identify
2
11
https://github.com/jindongwang/transferlearning.git
11
transferlearning
Python
0
1
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
.venv/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/provider.py
61,123
Balanced joint maximum mean discrepancy for deep transfer learning
8
12
def CleanseComments(line):
    comment
88
cpp_lint.py
33
23
def CleanseComments(line):
    commentpos = line.find('//')
    if commentpos != -1 and not IsCppString(line[:commentpos]):
        line = line[:commentpos].rstrip()
    # get rid of /* ... */
    return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
CleanseComments
5
25
https://github.com/jindongwang/transferlearning.git
50
transferlearning
Python
0
3
cc4d0564756ca067516f71718a3d135996525909
code/deep/BJMMD/caffe/scripts/cpp_lint.py
60,419
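A self-contained approximation of `CleanseComments` for experimentation; the real module defines `IsCppString` (which guards against `//` inside string literals) and `_RE_PATTERN_CLEANSE_LINE_C_COMMENTS` elsewhere, so this sketch simplifies both:

```python
import re

_C_COMMENTS = re.compile(r'/\*.*?\*/')  # non-greedy /* ... */ spans

def cleanse_comments(line):
    pos = line.find('//')
    if pos != -1:  # the real code also checks the // is not inside a string
        line = line[:pos].rstrip()
    return _C_COMMENTS.sub('', line)

print(cleanse_comments('int x = 1;  // trailing note'))  # 'int x = 1;'
print(cleanse_comments('a = b /* mid */ + c;'))          # 'a = b  + c;'
```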
upd; format
10
11
def from_wheel(cls, path, name):
    # type: (str, str) -> Distribution
    with zipfile.ZipFile(path, allowZip64=True) as zf:
        d
60
pkg_resources.py
53
22
def from_wheel(cls, path, name):
    # type: (str, str) -> Distribution
    with zipfile.ZipFile(path, allowZip64=True) as zf:
        dist = pkg_resources_distribution_for_wheel(zf, name, path)
    return cls(dist)
from_wheel
4
22
https://github.com/jindongwang/transferlearning.git
38
transferlearning
Python
0
1
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
.venv/lib/python3.8/site-packages/pip/_internal/metadata/pkg_resources.py
60,801
Have consistent types between the ORM and the migration files (#24044) We currently don't compare column types between ORM and the migration files. Some columns in the migration files have different types from the same columns in the ORM. Here, I made effort to match the types in migration files with the types in ORM, using the migration files as the source of truth in most cases. I couldn't convert the MySQL VARCHAR collation in db(utf8_bin) to use the one in ORM(utf8mb3_bin). It seems it's not possible to convert a collation of an already existing column in MySQL.
11
11
def load_dialect_impl(self, dialect):
    if dialect.name == 'mssq
80
sqlalchemy.py
59
13
def load_dialect_impl(self, dialect):
    if dialect.name == 'mssql':
        return mssql.DATETIME2(precision=6)
    elif dialect.name == 'mysql':
        return mysql.TIMESTAMP(fsp=6)
    return super().load_dialect_impl(dialect)
load_dialect_impl
6
17
https://github.com/apache/airflow.git
48
airflow
Python
0
3
25537acfa28eebc82a90274840e0e6fb5c91e271
airflow/utils/sqlalchemy.py
43,497
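For context, `load_dialect_impl` is the SQLAlchemy `TypeDecorator` hook that picks a concrete column type per dialect, which is how the method above dispatches to `DATETIME2`/`TIMESTAMP`. A minimal hedged sketch (illustrative class name, not Airflow's exact `UtcDateTime`):

```python
from sqlalchemy import DateTime
from sqlalchemy.dialects import mssql, mysql
from sqlalchemy.types import TypeDecorator

class MicrosecondDateTime(TypeDecorator):
    """Datetime column with microsecond precision on MSSQL and MySQL."""
    impl = DateTime
    cache_ok = True

    def load_dialect_impl(self, dialect):
        # Called once per dialect when the column type is compiled to DDL.
        if dialect.name == 'mssql':
            return mssql.DATETIME2(precision=6)
        if dialect.name == 'mysql':
            return mysql.TIMESTAMP(fsp=6)
        return super().load_dialect_impl(dialect)
```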
ref(models): `ActivityType` (#34978) ## Objective: We want to separate enum logic from Model logic. This breaks a lot of circular dependencies.
50
16
def test_activity_generation_long_release(self):
    user = self.create_user(is_staff=False, is_superuser=False)
    org = self.organization
    org.flags.allow_joinleave = False
    org.save()

    team = self.create_team(organization=org)

    project = self.create_project(teams=[team], organization=org)

    release = Release.objects.create(organization_id=org.id, version="x" * 65)

    release.add_project(project)

    self.create_member(teams=[team], user=user, organization=org)

    self.login_as(user=user)

    url = reverse(
        "sentry-api-0-organization-release-details",
        kwargs={"organization_slug": org.slug, "version": release.version},
    )
    response = self.client.put(url, data={"dateRel
370
test_organization_release_details.py
232
53
def test_activity_generation_long_release(self):
    user = self.create_user(is_staff=False, is_superuser=False)
    org = self.organization
    org.flags.allow_joinleave = False
    org.save()

    team = self.create_team(organization=org)

    project = self.create_project(teams=[team], organization=org)

    release = Release.objects.create(organization_id=org.id, version="x" * 65)

    release.add_project(project)

    self.create_member(teams=[team], user=user, organization=org)

    self.login_as(user=user)

    url = reverse(
        "sentry-api-0-organization-release-details",
        kwargs={"organization_slug": org.slug, "version": release.version},
    )
    response = self.client.put(url, data={"dateReleased": datetime.utcnow().isoformat() + "Z"})

    assert response.status_code == 200, (response.status_code, response.content)

    release = Release.objects.get(id=release.id)
    assert release.date_released

    activity = Activity.objects.filter(
        type=ActivityType.RELEASE.value, project=project, ident=release.version[:64]
    )
    assert activity.exists()
test_activity_generation_long_release
23
67
https://github.com/getsentry/sentry.git
235
sentry
Python
0
1
b9f5a910dc841b85f58d46266ec049ae5a7fd305
tests/sentry/api/endpoints/test_organization_release_details.py
90,873
Upgrading to support latest Pytorch version
6
8
def split_by_list(self, train, valid):
    "Split the data between `train` and `val
35
data_block.py
28
14
def split_by_list(self, train, valid):
    "Split the data between `train` and `valid`."
    return self._split(self.path, train, valid)
split_by_list
3
15
https://github.com/jantic/DeOldify.git
23
DeOldify
Python
0
1
4fc3616712edb19179b17dd270ad6cf63abf99c2
fastai/data_block.py
190,250
fix: fix endpoint discovery tries (#5014)
15
8
def test_grpc_gateway_runtime_lazy_request_access(linear_graph_dict, monkeypatch):
    call_counts = multiprocessing.Queue()
    monkeypatch.setattr(
        networking.GrpcConnectionPool,
        'send_requests_once',
        DummyNoDocAccessMockConnectionPool.send_requests_once,
    )
    monkeypatch.setattr(
        networking.GrpcConnectionPool,
        'send
78
test_clients_gateways.py
78
15
def test_grpc_gateway_runtime_lazy_request_access(linear_graph_dict, monkeypatch):
    call_counts = multiprocessing.Queue()
    monkeypatch.setattr(
        networking.GrpcConnectionPool,
        'send_requests_once',
        DummyNoDocAccessMockConnectionPool.send_requests_once,
    )
    monkeypatch.setattr(
        networking.GrpcConnectionPool,
        'send_discover_endpoint',
        DummyMockConnectionPool.send_discover_endpoint,
    )
    port = random_port()
test_grpc_gateway_runtime_lazy_request_access
40
19
https://github.com/jina-ai/jina.git
183
jina
Python
0
4
6f5b3f2a9b13c2eae78b746531132cbfcdc8c2da
tests/integration/gateway_clients/test_clients_gateways.py
12,744
Comprehensive configs for trainer and combiner. (#2118)
8
11
def _perform_sanity_checks(config):
    assert "input_features" in config, "config does not define any input features"

    assert "output_features" in config, "config does not define any output features"

    assert isinstance(config["input_features"], list), (
        "Ludwig expects input features in a list. Check your model "
        "config format"
    )

    assert isinstance(config["output_features"], list), (
        "Ludwig expects output features in a list. Check your model "
        "config format"
    )

    assert len(config["input_fe
241
defaults.py
384
78
def _perform_sanity_checks(config):
    assert "input_features" in config, "config does not define any input features"

    assert "output_features" in config, "config does not define any output features"

    assert isinstance(config["input_features"], list), (
        "Ludwig expects input features in a list. Check your model "
        "config format"
    )

    assert isinstance(config["output_features"], list), (
        "Ludwig expects output features in a list. Check your model "
        "config format"
    )

    assert len(config["input_features"]) > 0, "config needs to have at least one input feature"

    assert len(config["output_features"]) > 0, "config needs to have at least one output feature"

    if TRAINER in config:
        assert isinstance(config[TRAINER], dict), (
            "There is an issue while reading the training section of the "
            "config. The parameters are expected to be "
            "read as a dictionary. Please check your config format."
        )

    if "preprocessing" in config:
        assert isinstance(config["preprocessing"], dict), (
            "There is an issue while reading the preprocessing section of the "
            "config. The parameters are expected to be read "
            "as a dictionary. Please check your config format."
        )

    if COMBINER in config:
        assert isinstance(config[COMBINER], dict), (
            "There is an issue while reading the combiner section of the "
            "config. The parameters are expected to be read "
            "as a dictionary. Please check your config format."
        )
_perform_sanity_checks
29
197
https://github.com/ludwig-ai/ludwig.git
134
ludwig
Python
0
4
ae25cc4c5a229bbc44339249e1f94bf256f18317
ludwig/utils/defaults.py
6,983
Avoid deprecated TestCase functions in unit tests. (#76678) * Avoid deprecated TestCase functions in unit tests. * Add assertRaisesRegex for Python 2.7. * Fix indentation.
12
12
def test_host_label(self):
63
test_callback.py
22
9
def test_host_label(self):
    result = TaskResult(host=Host('host1'), task=mock_task, return_data={})
    self.assertEqual(CallbackBase.host_label(result), 'host1')
test_host_label
3
9
https://github.com/ansible/ansible.git
38
ansible
Python
0
1
97104f1221b64ef36cf42cb90c5a0eff263a2adb
test/units/plugins/callback/test_callback.py
266,388
Remove pyarrow-legacy engine from parquet API (#8835) * remove pyarrow-legacy * Small fixup * Small fixup for pyarrow < 5 Co-authored-by: Jim Crist-Harif <jcristharif@gmail.com>
40
14
def test_writing_parquet_with_kwargs(tmpdir, engine):
    fn = str(tmpdir)
    path1 = os.path.join(fn, "normal")
    path2 = os.path.join(fn, "partitioned")

    df = pd.DataFrame(
        {
            "a": np.random.choice(["A", "B", "C"], size=100),
            "b": np.random.random(size=100),
            "c": np.random.randint(1, 5, size=100),
        }
    )
    df.index.name = "index"
    ddf = dd.from_pandas(df, npartitions=3)

    engine_kwargs = {
        "pyarrow": {
            "compression": "snappy",
            "coerce_timestamps": None,
            "use_dictionary": True,
        },
        "fastparquet": {"compression": "snappy", "times": "int64", "fixed_text": None},
    }

    ddf.to_parquet(path1, engine=engine, **engine_kwargs[engine
465
test_parquet.py
285
84
def test_writing_parquet_with_kwargs(tmpdir, engine):
    fn = str(tmpdir)
    path1 = os.path.join(fn, "normal")
    path2 = os.path.join(fn, "partitioned")

    df = pd.DataFrame(
        {
            "a": np.random.choice(["A", "B", "C"], size=100),
            "b": np.random.random(size=100),
            "c": np.random.randint(1, 5, size=100),
        }
    )
    df.index.name = "index"
    ddf = dd.from_pandas(df, npartitions=3)

    engine_kwargs = {
        "pyarrow": {
            "compression": "snappy",
            "coerce_timestamps": None,
            "use_dictionary": True,
        },
        "fastparquet": {"compression": "snappy", "times": "int64", "fixed_text": None},
    }

    ddf.to_parquet(path1, engine=engine, **engine_kwargs[engine])
    out = dd.read_parquet(path1, engine=engine)
    assert_eq(out, ddf, check_index=(engine != "fastparquet"))

    # Avoid race condition in pyarrow 0.8.0 on writing partitioned datasets
    with dask.config.set(scheduler="sync"):
        ddf.to_parquet(
            path2, engine=engine, partition_on=["a"], **engine_kwargs[engine]
        )
    out = dd.read_parquet(path2, engine=engine).compute()
    for val in df.a.unique():
        assert set(df.b[df.a == val]) == set(out.b[out.a == val])
test_writing_parquet_with_kwargs
31
105
https://github.com/dask/dask.git
284
dask
Python
0
2
0b36d7fcaf54ee9a78fff4b07f124cb0c8741cdf
dask/dataframe/io/tests/test_parquet.py
156,242
GraphQL subscription support for synchronous webhook events (#9763) * WIP add sync webhooks subscription payload handling * add tests, fix minor things * update schema * remove unneeded code * add fix for circular field resolve * fix-filter-shipping-methods-payload * added_in added to desription * add missing types * revert refactor, precommit issues * fixes after review * cosmetix fixes post-review * subscription types description fixes * remove unneeded description from PaymentBase * add validation for creating webhook with two top level fields, add tests for shippingListMethodsForCheckout * add docstring, refactor prevent_sync_event_circular_wuery wrapper * fix docstring of revent_sync_event_circular_query * fix linters
7
8
def subscription_order_fulfilled_webhook(subscription_webhook):
    return subscription_webhook(
        queries.ORDER_FULFILLED, WebhookEventAsyncType.ORDER_FULFILLED
    )


@pytest.fixture
32
fixtures.py
19
8
def subscription_order_fulfilled_webhook(subscription_webhook):
    return subscription_webhook(
        queries.ORDER_FULFILLED, WebhookEventAsyncType.ORDER_FULFILLED
    )


@pytest.fixture
subscription_order_fulfilled_webhook
4
8
https://github.com/saleor/saleor.git
16
saleor
Python
1
1
@pytest.fixture
8201efcde2d7aacccf3512c544cceea6780a0598
saleor/plugins/webhook/tests/subscription_webhooks/fixtures.py
28,271
add data subject and data subject shape serialization to GammaTensor
28
11
def _object2bytes(self) -> bytes:
    schema = get_capnp_schema(schema_file="phi_tensor.capnp")

    pt_struct: CapnpModule = schema.PT  # type: ignore
    pt_msg = pt_struct.new_message()
    # this is how we dispatch correct deserialization of bytes
    pt_msg.magicHeader = serde_magic_header(type(self))

    # We always have FPT as the child of a PT in the tensor chain.
    chunk_bytes(serialize(self.child, to_bytes=True), "child", pt_msg)  # type: ignore

    pt_msg.minVals = serialize(self.min_vals, to_bytes=True)
    pt_msg.maxVals = serialize(self.max_vals, to_bytes=True)
    pt_msg.dataSubjects = serialize(
        dslarraytonumpyutf8(self.da
202
phi_tensor.py
200
63
def _object2bytes(self) -> bytes:
    schema = get_capnp_schema(schema_file="phi_tensor.capnp")

    pt_struct: CapnpModule = schema.PT  # type: ignore
    pt_msg = pt_struct.new_message()
    # this is how we dispatch correct deserialization of bytes
    pt_msg.magicHeader = serde_magic_header(type(self))

    # We always have FPT as the child of a PT in the tensor chain.
    chunk_bytes(serialize(self.child, to_bytes=True), "child", pt_msg)  # type: ignore

    pt_msg.minVals = serialize(self.min_vals, to_bytes=True)
    pt_msg.maxVals = serialize(self.max_vals, to_bytes=True)
    pt_msg.dataSubjects = serialize(
        dslarraytonumpyutf8(self.data_subjects), to_bytes=True
    )
    pt_msg.dataSubjectsShape = serialize(self.data_subjects.shape, to_bytes=True)

    # to pack or not to pack?
    # to_bytes = pt_msg.to_bytes()
    return pt_msg.to_bytes_packed()
_object2bytes
13
83
https://github.com/OpenMined/PySyft.git
124
PySyft
Python
0
1
a81b66ea18721dc36c77aefac733dd224f48cc87
packages/syft/src/syft/core/tensor/autodp/phi_tensor.py
1,804
Use ColorMode enum in tplink (#70542)
15
10
def supported_color_modes(self) -> set[ColorMode | str] | None:
    modes: set[ColorMode | str] = set()
    if self.device.is_variable_color_temp:
        modes.add(ColorMode.COLOR_TEMP)
    if self.device.is_color:
        m
140
light.py
122
21
def supported_color_modes(self) -> set[ColorMode | str] | None:
    modes: set[ColorMode | str] = set()
    if self.device.is_variable_color_temp:
        modes.add(ColorMode.COLOR_TEMP)
    if self.device.is_color:
        modes.add(ColorMode.HS)
    if self.device.is_dimmable:
        modes.add(ColorMode.BRIGHTNESS)
    if not modes:
        modes.add(ColorMode.ONOFF)
    return modes
supported_color_modes
12
29
https://github.com/home-assistant/core.git
86
core
Python
0
5
121d2008c2e98c94775f0379ccd4eedc15476d7d
homeassistant/components/tplink/light.py
298,501
Support DataLoader with num_workers > 0 in streaming mode (#4375) * make TorchIterableDataset work in parallel - make it picklable - paralellize over the shards when num_workers is passed * start writing some tests * fix streaming extension and fsspec issues in subprocesses * fix some tests * fix more tests * fix import * fix and add tests * fix patch (handle successive patches and builtins) * revert unnecessary change to enriched_web_blg * style * use open locally to fix win permission errors * keep file opened in read_csv * fix compression for read_csv * consistency of read_csv: don't infer compression for file-like objects * stringify Path objects * comments + raise error if sharding is ambiguous * minor * Update src/datasets/iterable_dataset.py Co-authored-by: Mario Šaško <mariosasko777@gmail.com> Co-authored-by: Mario Šaško <mariosasko777@gmail.com>
6
10
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test
71
test_patching.py
79
39
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
test_patch_submodule_missing_builtin
6
52
https://github.com/huggingface/datasets.git
40
datasets
Python
0
1
ab7d3045ac9154e9c1c2602d0869130defdc6dc7
tests/test_patching.py
105,154
commit_message: Updating some names (#1575) * quick econ fix * black * keys and feature flags * terminal name :eyes: * some more replacements * some more replacements * edit pyproject * gst -> openbb * add example portfolios back to git * Update api from gst * sorry. skipping some tests * another round of names * another round of test edits * Missed some .gst refs and update timezone * water mark stuff * Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS * fix more GST to OpenBB Terminal * Logging : merge conflicts with main * Revert wrong files Co-authored-by: Andrew <andrew.kenreich@gmail.com> Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt> Co-authored-by: Chavithra PARANA <chavithra@gmail.com>
n_identifiers: 17
ast_levels: 11
random_cut: def test_call_func_no_parser(func, mocker): mocker.patch( "openbb_terminal.stocks.fundamental_analysis.market_watch_view.parse_known_args_and_warn", return_value=None, ) func_result = getattr(market_watch_view, func)(other_args=list(), ticker="TSLA") assert func_result is None getattr(market_watch_view, "parse_known_args_and_warn").assert_called_once() @pytest.mark.vcr @pytest.mark.record_stdout @pytest.mark.parametrize(
n_ast_nodes: 160
file_name: test_market_watch_view.py
n_whitespaces: 91
vocab_size: 28
code:
def test_call_func_no_parser(func, mocker):
    mocker.patch(
        "openbb_terminal.stocks.fundamental_analysis.market_watch_view.parse_known_args_and_warn",
        return_value=None,
    )
    func_result = getattr(market_watch_view, func)(other_args=list(), ticker="TSLA")
    assert func_result is None
    getattr(market_watch_view, "parse_known_args_and_warn").assert_called_once()

@pytest.mark.vcr
@pytest.mark.record_stdout
@pytest.mark.parametrize(
    "func",
    [
        "income",
        "balance",
        "cash",
    ],
)
@pytest.mark.parametrize(
    "use_color",
    [True, False],
)
fun_name: test_call_func_no_parser
nloc: 8
n_words: 33
url: https://github.com/OpenBB-finance/OpenBBTerminal.git
token_counts: 51
repo: OpenBBTerminal
language: Python
n_ast_errors: 1
complexity: 1
ast_errors: @pytest.mark.vcr @pytest.mark.record_stdout @pytest.mark.parametrize( "func", [ "income", "balance", "cash", ], ) @pytest.mark.parametrize( "use_color", [True, False], )
commit_id: b71abcfbf4d7e8ac1855522aff0378e13c8b5362
path: tests/openbb_terminal/stocks/fundamental_analysis/test_market_watch_view.py
id: 283,567

commit_message: Reformat with black
n_identifiers: 14
ast_levels: 15
random_cut: def test_all_nested_fields(self): response = self.get_response( type="demosite.BlogEntryPage", fields="feed_image(*)" ) content = json.loads(response.content.decode("UTF-8")) for page in content["items"]: self.assertEqual( set(page["feed_image"].keys()),
n_ast_nodes: 129
file_name: test_pages.py
n_whitespaces: 114
vocab_size: 22
code:
def test_all_nested_fields(self):
    response = self.get_response(
        type="demosite.BlogEntryPage", fields="feed_image(*)"
    )
    content = json.loads(response.content.decode("UTF-8"))
    for page in content["items"]:
        self.assertEqual(
            set(page["feed_image"].keys()),
            {"id", "meta", "title", "width", "height", "thumbnail"},
        )
fun_name: test_all_nested_fields
nloc: 10
n_words: 24
url: https://github.com/wagtail/wagtail.git
token_counts: 73
repo: wagtail
language: Python
n_ast_errors: 0
complexity: 2
commit_id: d10f15e55806c6944827d801cd9c2d53f5da4186
path: wagtail/admin/tests/api/test_pages.py
id: 71,347

commit_message: fix(sessions): Order results by timestamp and log error if snuba limit exceeded (#31214) As described in https://getsentry.atlassian.net/browse/ISSUE-1372, gaps occur in sessions_v2 time series when the number of releases is large. This seems to be caused by the fact that snuba applies a default limit of 1000. The sessions API queries these series without an orderBy constraint, so a random subset of entries default to zero. This PR logs an error if this limit is actually reached. Furthermore, we add an order by clause to the snuba query, such that at least the most recent part of the time series is complete.
n_identifiers: 10
ast_levels: 14
random_cut: def test_massage_simple_timeseries(): query = _make_query("statsPeriod=1d&interval=6h&field=sum(session)") result_totals = [{"sessions": 4}] # snuba returns the datetimes as strings for now result_timeseries = [ {"sessions": 2, "bucketed_started": "2020-12-18T06:00:00+00:00"}, {"sessions": 2, "bucketed_started": "2020-12-17T12:00:00+00:00"}, ] expected_result = { "start": "2020-12-17T12:00:00Z", "end": "2020-12-18T11:15:00Z", "query": "", "intervals": [ "2020-12-17T12:00:00Z", "2020-12-1
n_ast_nodes: 240
file_name: test_sessions_v2.py
n_whitespaces: 218
vocab_size: 56
code:
def test_massage_simple_timeseries():
    query = _make_query("statsPeriod=1d&interval=6h&field=sum(session)")
    result_totals = [{"sessions": 4}]
    # snuba returns the datetimes as strings for now
    result_timeseries = [
        {"sessions": 2, "bucketed_started": "2020-12-18T06:00:00+00:00"},
        {"sessions": 2, "bucketed_started": "2020-12-17T12:00:00+00:00"},
    ]
    expected_result = {
        "start": "2020-12-17T12:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": [
            "2020-12-17T12:00:00Z",
            "2020-12-17T18:00:00Z",
            "2020-12-18T00:00:00Z",
            "2020-12-18T06:00:00Z",
        ],
        "groups": [
            {"by": {}, "series": {"sum(session)": [2, 0, 0, 2]}, "totals": {"sum(session)": 4}}
        ],
    }
    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))
    assert actual_result == expected_result

@freeze_time("2020-12-18T11:14:17.105Z")
fun_name: test_massage_simple_timeseries
nloc: 23
n_words: 71
url: https://github.com/getsentry/sentry.git
token_counts: 125
repo: sentry
language: Python
n_ast_errors: 1
complexity: 1
ast_errors: @freeze_time("2020-12-18T11:14:17.105Z")
commit_id: 7fbf708470ba13992a5d53b088be2388a8ed93df
path: tests/snuba/sessions/test_sessions_v2.py
id: 95,790

commit_message: FEA Fused sparse-dense support for `PairwiseDistancesReduction` (#23585) Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> Co-authored-by: Christian Lorentzen <lorentzen.ch@gmail.com> Co-authored-by: Jérémie du Boisberranger <jeremiedbb@users.noreply.github.com> Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> Co-authored-by: Meekail Zain <Micky774@users.noreply.github.com>
n_identifiers: 23
ast_levels: 11
random_cut: def test_pairwise_distances_reduction_is_usable_for(): rng = np.random.RandomState(0) X = rng.rand(100, 10) Y = rng.rand(100, 10) X_csr = csr_matrix(X) Y_csr = csr_matrix(Y) metric = "manhattan" # Must be usable for all possible pair of {dense, sparse} datasets assert BaseDistanceReductionDispatcher.is_usable_for(X, Y, metric) assert BaseDistanceReductionDispatcher.is_usable_for(X_csr, Y_csr, metric) assert BaseDistanceReductionDispatcher.is_usable_for(X_csr, Y, metric) assert BaseDistanceReductionDispatcher.is_usable_for(X, Y_csr, metric) assert BaseDistanceReductionDispatcher.is_usable_for( X.astype(np.float64), Y.astype(np.float64), metric ) assert BaseDistanceReductionDispatcher.is_usable_for( X.astype(np.float32), Y.astype(np.float32), metric ) assert not BaseDistanceReductionDispatcher.is_usable_for( X.astype(np.int64), Y.astype(np.int64), metric ) assert not BaseDistanceReductionDispatcher.is_usable_for(X, Y, metric="pyfunc") assert not BaseDistanceReductionDispatcher.is_usable_for( X.astype(np.float32), Y, metric ) assert not BaseDistanceReductionDispatcher.is_usable_for( X, Y.astype(np.int32), metric ) # F-ordered arrays are not supported assert not BaseDistanceReductionDispatcher.is_usable_for( np.asfortranarray(X), Y, metric ) # We prefer not to use those implementations for fused sparse-dense when # metric="(sq)euclidean" because it's not yet the most efficient o
n_ast_nodes: 494
file_name: test_pairwise_distances_reduction.py
n_whitespaces: 429
vocab_size: 114
code:
def test_pairwise_distances_reduction_is_usable_for():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 10)
    Y = rng.rand(100, 10)
    X_csr = csr_matrix(X)
    Y_csr = csr_matrix(Y)
    metric = "manhattan"
    # Must be usable for all possible pair of {dense, sparse} datasets
    assert BaseDistanceReductionDispatcher.is_usable_for(X, Y, metric)
    assert BaseDistanceReductionDispatcher.is_usable_for(X_csr, Y_csr, metric)
    assert BaseDistanceReductionDispatcher.is_usable_for(X_csr, Y, metric)
    assert BaseDistanceReductionDispatcher.is_usable_for(X, Y_csr, metric)
    assert BaseDistanceReductionDispatcher.is_usable_for(
        X.astype(np.float64), Y.astype(np.float64), metric
    )
    assert BaseDistanceReductionDispatcher.is_usable_for(
        X.astype(np.float32), Y.astype(np.float32), metric
    )
    assert not BaseDistanceReductionDispatcher.is_usable_for(
        X.astype(np.int64), Y.astype(np.int64), metric
    )
    assert not BaseDistanceReductionDispatcher.is_usable_for(X, Y, metric="pyfunc")
    assert not BaseDistanceReductionDispatcher.is_usable_for(
        X.astype(np.float32), Y, metric
    )
    assert not BaseDistanceReductionDispatcher.is_usable_for(
        X, Y.astype(np.int32), metric
    )
    # F-ordered arrays are not supported
    assert not BaseDistanceReductionDispatcher.is_usable_for(
        np.asfortranarray(X), Y, metric
    )
    # We prefer not to use those implementations for fused sparse-dense when
    # metric="(sq)euclidean" because it's not yet the most efficient one on
    # all configurations of datasets.
    # See: https://github.com/scikit-learn/scikit-learn/pull/23585#issuecomment-1247996669  # noqa
    # TODO: implement specialisation for (sq)euclidean on fused sparse-dense
    # using sparse-dense routines for matrix-vector multiplications.
    assert not BaseDistanceReductionDispatcher.is_usable_for(
        X_csr, Y, metric="euclidean"
    )
    assert not BaseDistanceReductionDispatcher.is_usable_for(
        X_csr, Y_csr, metric="sqeuclidean"
    )
    # CSR matrices without non-zeros elements aren't currently supported
    # TODO: support CSR matrices without non-zeros elements
    X_csr_0_nnz = csr_matrix(X * 0)
    assert not BaseDistanceReductionDispatcher.is_usable_for(X_csr_0_nnz, Y, metric)
    # CSR matrices with int64 indices and indptr (e.g. large nnz, or large n_features)
    # aren't supported as of now.
    # See: https://github.com/scikit-learn/scikit-learn/issues/23653
    # TODO: support CSR matrices with int64 indices and indptr
    X_csr_int64 = csr_matrix(X)
    X_csr_int64.indices = X_csr_int64.indices.astype(np.int64)
    assert not BaseDistanceReductionDispatcher.is_usable_for(X_csr_int64, Y, metric)
fun_name: test_pairwise_distances_reduction_is_usable_for
nloc: 41
n_words: 235
url: https://github.com/scikit-learn/scikit-learn.git
token_counts: 318
repo: scikit-learn
language: Python
n_ast_errors: 0
complexity: 1
commit_id: 60cc5b596f38d0d236dab34e02c05d98b5a72bad
path: sklearn/metrics/tests/test_pairwise_distances_reduction.py
id: 260,984

commit_message: Adding ArangoDB Provider (#22548) * Adding ArangoDB Provider
n_identifiers: 7
ast_levels: 11
random_cut: def create_database(self, name): if not self.db_conn.has_database(name): self.db_conn.create_database(name)
n_ast_nodes: 69
file_name: arangodb.py
n_whitespaces: 74
vocab_size: 16
code:
def create_database(self, name):
    if not self.db_conn.has_database(name):
        self.db_conn.create_database(name)
        return True
    else:
        self.log.info('Database already exists: %s', name)
        return False
fun_name: create_database
nloc: 7
n_words: 17
url: https://github.com/apache/airflow.git
token_counts: 42
repo: airflow
language: Python
n_ast_errors: 0
complexity: 2
commit_id: c758c76ac336c054fd17d4b878378aa893b7a979
path: airflow/providers/arangodb/hooks/arangodb.py
id: 46,738

commit_message: refactor: Add exception handling in background job within BOM Update Tool
n_identifiers: 16
ast_levels: 14
random_cut: def replace_bom(args): try: frappe.db.auto_commit_on_many_writes = 1 args = frappe._dict(args) doc = frappe.get_doc("BOM Update Tool") doc.current_bom
n_ast_nodes: 135
file_name: bom_update_tool.py
n_whitespaces: 19
vocab_size: 27
code:
def replace_bom(args):
    try:
        frappe.db.auto_commit_on_many_writes = 1
        args = frappe._dict(args)
        doc = frappe.get_doc("BOM Update Tool")
        doc.current_bom = args.current_bom
        doc.new_bom = args.new_bom
        doc.replace_bom()
    except Exception:
        frappe.log_error(
            msg=frappe.get_traceback(), title=_("BOM Update Tool Error")
        )
    finally:
        frappe.db.auto_commit_on_many_writes = 0
fun_name: replace_bom
nloc: 15
n_words: 34
url: https://github.com/frappe/erpnext.git
token_counts: 80
repo: erpnext
language: Python
n_ast_errors: 0
complexity: 3
commit_id: f57725f8fa016b9826e8fdf2f14dbf1a3d9991f7
path: erpnext/manufacturing/doctype/bom_update_tool/bom_update_tool.py
id: 64,563

commit_message: [Feature] Support DDOD: Disentangle Your Dense Object Detector(ACM MM2021 oral) (#7279) * add ddod feature * add ddod feature * modify new * [Feature] modify ddod code0225 * [Feature] modify ddod code0226 * [Feature] modify ddod code0228 * [Feature] modify ddod code0228#7279 * [Feature] modify ddod code0301 * [Feature] modify ddod code0301 test draft * [Feature] modify ddod code0301 test * [Feature] modify ddod code0301 extra * [Feature] modify ddod code0301 delete src/mmtrack * [Feature] modify ddod code0302 * [Feature] modify ddod code0302(2) * [Feature] modify ddod code0303 * [Feature] modify ddod code0303(2) * [Feature] modify ddod code0303(3) * [Feature] modify ddod code0305 * [Feature] modify ddod code0305(2) delete diou * [Feature] modify ddod code0305(3) * modify ddod code0306 * [Feature] modify ddod code0307 * [Feature] modify ddod code0311 * [Feature] modify ddod code0311(2) * [Feature] modify ddod code0313 * update * [Feature] modify ddod code0319 * fix * fix lint * [Feature] modify ddod code0321 * update readme * [0502] compute common vars at once for get_target * [0504] update ddod conflicts * [0518] seperate reg and cls loss and get_target compute * [0518] merge ATSSCostAssigner to ATSSAssigner * [0518] refine ATSSAssigner * [0518] refine ATSSAssigner 2 * [0518] refine ATSSAssigner 2 * [0518] refine ATSSAssigner 3 * [0519] fix bugs * update * fix lr * update weight Co-authored-by: hha <1286304229@qq.com>
n_identifiers: 17
ast_levels: 12
random_cut: def forward_single(self, x, scale): cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.atss_cls(cls_feat) # we just follow atss, not apply exp in bbox_pred bbox_pred = scale(self.atss_reg(reg_feat)).float() iou_pred = self.atss_iou(reg_f
n_ast_nodes: 130
file_name: ddod_head.py
n_whitespaces: 139
vocab_size: 33
code:
def forward_single(self, x, scale):
    cls_feat = x
    reg_feat = x
    for cls_conv in self.cls_convs:
        cls_feat = cls_conv(cls_feat)
    for reg_conv in self.reg_convs:
        reg_feat = reg_conv(reg_feat)
    cls_score = self.atss_cls(cls_feat)
    # we just follow atss, not apply exp in bbox_pred
    bbox_pred = scale(self.atss_reg(reg_feat)).float()
    iou_pred = self.atss_iou(reg_feat)
    return cls_score, bbox_pred, iou_pred
fun_name: forward_single
nloc: 11
n_words: 47
url: https://github.com/open-mmlab/mmdetection.git
token_counts: 79
repo: mmdetection
language: Python
n_ast_errors: 0
complexity: 3
commit_id: 151a803ed0119560f59dbe7b73824dbdcae08fc6
path: mmdet/models/dense_heads/ddod_head.py
id: 244,274

commit_message: refactor: use qb for patching flag
n_identifiers: 9
ast_levels: 12
random_cut: def execute(): batch = frappe.qb.D
n_ast_nodes: 63
file_name: update_batch_valuation_flag.py
n_whitespaces: 4
vocab_size: 7
code:
def execute():
    batch = frappe.qb.DocType("Batch")
    frappe.qb.update(batch).set(batch.use_batchwise_valuation, 0).run()
fun_name: execute
nloc: 3
n_words: 7
url: https://github.com/frappe/erpnext.git
token_counts: 36
repo: erpnext
language: Python
n_ast_errors: 0
complexity: 1
commit_id: 312db429e4605d6d0ce47d1034662fdf0ec053b7
path: erpnext/patches/v14_0/update_batch_valuation_flag.py
id: 64,440

commit_message: hookutils: tcl/tk: port to PyInstaller.isolated framework
n_identifiers: 15
ast_levels: 10
random_cut: def _get_tcl_tk_info(): try: import tkinter from _tkinter import TCL_VERSION, TK_VERSION except ImportError: # tkinter unavailable return None, None, None, False tcl = tkinter.Tcl() # Query the location of Tcl library/data directory. t
n_ast_nodes: 141
file_name: tcl_tk.py
n_whitespaces: 196
vocab_size: 76
code:
def _get_tcl_tk_info():
    try:
        import tkinter
        from _tkinter import TCL_VERSION, TK_VERSION
    except ImportError:
        # tkinter unavailable
        return None, None, None, False

    tcl = tkinter.Tcl()

    # Query the location of Tcl library/data directory.
    tcl_dir = tcl.eval("info library")

    # Check if Tcl/Tk is built with multi-threaded support (built with --enable-threads), as indicated by the presence
    # of optional `threaded` member in `tcl_platform` array.
    try:
        tcl.getvar("tcl_platform(threaded)")  # Ignore the actual value.
        tcl_threaded = True
    except tkinter.TclError:
        tcl_threaded = False

    return tcl_dir, TCL_VERSION, TK_VERSION, tcl_threaded

# Populate the variables. If `tkinter` is unavailable, the values are set to `None` or `False`.
(
    tcl_dir,
    tcl_version,
    tk_version,
    tcl_threaded,
) = _get_tcl_tk_info()
fun_name: _get_tcl_tk_info
nloc: 14
n_words: 104
url: https://github.com/pyinstaller/pyinstaller.git
token_counts: 68
repo: pyinstaller
language: Python
n_ast_errors: 0
complexity: 3
commit_id: 2b2559af1c7790596e7b2040f48e56baef608f9d
path: PyInstaller/utils/hooks/tcl_tk.py
id: 264,062

commit_message: ref(locks): Make the post_process locks backend configurable (#36328)
n_identifiers: 4
ast_levels: 11
random_cut: def test_cluster_as_str(self): assert RedisLockBackend(cluster="defau
n_ast_nodes: 31
file_name: test_redis.py
n_whitespaces: 12
vocab_size: 6
code:
def test_cluster_as_str(self):
    assert RedisLockBackend(cluster="default").cluster == self.cluster
fun_name: test_cluster_as_str
nloc: 2
n_words: 6
url: https://github.com/getsentry/sentry.git
token_counts: 18
repo: sentry
language: Python
n_ast_errors: 0
complexity: 1
commit_id: 5cf12753665512f60b32a99dd8fd9aa27d0a4a3a
path: tests/sentry/utils/locking/backends/test_redis.py
id: 92,287

commit_message: Fix yield for crd3 (#4240) * yielding example per chunk id * changing data type for turns * removing unused variable * Update crd3.py Co-authored-by: Shanya Sharma - s0s0cr3 <Shanya.Sharma@walmartlabs.com>
n_identifiers: 13
ast_levels: 20
random_cut: def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "chunk": datasets.Value("string"), "chunk_id": datasets.Value("int32"), "turn_start": datasets.Value("int32"), "turn_end": datasets.Value("int32"), "alignment_score": datasets.Value("float32"), "turns": [ { "names": datasets.features.Sequence(datas
n_ast_nodes: 221
file_name: crd3.py
n_whitespaces: 391
vocab_size: 27
code:
def _info(self):
    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=datasets.Features(
            {
                "chunk": datasets.Value("string"),
                "chunk_id": datasets.Value("int32"),
                "turn_start": datasets.Value("int32"),
                "turn_end": datasets.Value("int32"),
                "alignment_score": datasets.Value("float32"),
                "turns": [
                    {
                        "names": datasets.features.Sequence(datasets.Value("string")),
                        "utterances": datasets.features.Sequence(datasets.Value("string")),
                        "number": datasets.Value("int32"),
                    }
                ],
            }
        ),
        homepage="https://github.com/RevanthRameshkumar/CRD3",
        citation=_CITATION,
    )
fun_name: _info
nloc: 22
n_words: 33
url: https://github.com/huggingface/datasets.git
token_counts: 126
repo: datasets
language: Python
n_ast_errors: 0
complexity: 1
commit_id: 23efe55f5547c640f9efdcb2bc678fb7b76e663e
path: datasets/crd3/crd3.py
id: 104,781
