Dataset columns (dtype and min/max per column; for string columns the range is the string length):

column           dtype            min      max
id               int64            20       338k
vocab_size       int64            2        671
ast_levels       int64            4        32
nloc             int64            1        451
n_ast_nodes      int64            12       5.6k
n_identifiers    int64            1        186
n_ast_errors     int64            0        10
n_words          int64            2        2.17k
n_whitespaces    int64            2        13.8k
fun_name         string (length)  2        73
commit_message   string (length)  51       15.3k
url              string (length)  31       59
code             string (length)  51       31k
ast_errors       string (length)  0        1.46k
token_counts     int64            6        3.32k
file_name        string (length)  5        56
language         string           1 distinct value (Python)
path             string (length)  7        134
commit_id        string (length)  40       40
repo             string (length)  3        28
complexity       int64            1        153
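Each row is one function-level record: the integer columns give size and AST statistics for the function (lines of code, AST node count, identifier count, a complexity score, and so on), and the string columns carry the function source, the commit message that introduced it, and repository metadata. As a minimal sketch of how a dataset with this schema could be loaded and filtered (the Hub path below is a placeholder, not the dataset's real identifier):

```python
# Minimal sketch, assuming the data is published as a Hugging Face dataset.
# "some-org/code-complexity" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("some-org/code-complexity", split="train")

print(ds.features)  # column names and dtypes: id, vocab_size, ..., complexity
row = ds[0]
print(row["fun_name"], row["repo"], row["complexity"])

# Example filter: keep short, low-complexity functions that parsed cleanly.
subset = ds.filter(
    lambda r: r["nloc"] <= 20 and r["complexity"] <= 5 and r["n_ast_errors"] == 0
)
print(len(subset), "rows after filtering")
```

The sample rows below follow this schema: the numeric features come first, then the function name, commit message, repository URL, source code, and file-level metadata.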
id: 160,530 | vocab_size: 42 | ast_levels: 14 | nloc: 16 | n_ast_nodes: 215 | n_identifiers: 22 | n_ast_errors: 0 | n_words: 58 | n_whitespaces: 157
openhook
ENH: Support character string arrays TST: added test for issue #18684 ENH: f2py opens files with correct encoding, fixes #635 TST: added test for issue #6308 TST: added test for issue #4519 TST: added test for issue #3425 ENH: Implement user-defined hooks support for post-processing f2py data structure. Implement character BC hook. ENH: Add support for detecting utf-16 and utf-32 encodings.
https://github.com/numpy/numpy.git
def openhook(filename, mode): bytes = min(32, os.path.getsize(filename)) with open(filename, 'rb') as f: raw = f.read(bytes) if raw.startswith(codecs.BOM_UTF8): encoding = 'UTF-8-SIG' elif raw.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)): encoding = 'UTF-32' elif raw.startswith((codecs.BOM_LE, codecs.BOM_BE)): encoding = 'UTF-16' else: if chardet is not None: encoding = chardet.detect(raw)['encoding'] else: # hint: install chardet to ensure correct encoding handling encoding = 'ascii' return open(filename, mode, encoding=encoding)
token_counts: 127 | file_name: crackfortran.py | language: Python | path: numpy/f2py/crackfortran.py | commit_id: d4e11c7a2eb64861275facb076d47ccd135fa28c | repo: numpy | complexity: 5

id: 303,750 | vocab_size: 9 | ast_levels: 9 | nloc: 5 | n_ast_nodes: 47 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 9 | n_whitespaces: 41
async_state_changed
Improve type hints in yeelight lights (#76018) Co-authored-by: Franck Nijhof <frenck@frenck.nl>
https://github.com/home-assistant/core.git
def async_state_changed(self) -> None: if not self._device.available: self._async_cancel_pending_state_check() self.async_write_ha_state()
token_counts: 26 | file_name: light.py | language: Python | path: homeassistant/components/yeelight/light.py | commit_id: 66b742f110025013e60ca8cac7aeb3247bac8f47 | repo: core | complexity: 2

id: 243,137 | vocab_size: 39 | ast_levels: 12 | nloc: 7 | n_ast_nodes: 158 | n_identifiers: 18 | n_ast_errors: 1 | n_words: 48 | n_whitespaces: 138
test_sanity_ati2
Add support for ATI1/2(BC4/BC5) DDS files This commit adds support for loading DDS with ATI1 and ATI2 fourcc pixel format
https://github.com/python-pillow/Pillow.git
def test_sanity_ati2(): with Image.open(TEST_FILE_ATI2) as im: im.load() assert im.format == "DDS" assert im.mode == "RGB" assert im.size == (128, 128) assert_image_equal_tofile(im, TEST_FILE_ATI2.replace(".dds", ".png")) @pytest.mark.parametrize( ("image_path", "expected_path"), ( # hexeditted to be typeless (TEST_FILE_DX10_BC5_TYPELESS, TEST_FILE_DX10_BC5_UNORM), (TEST_FILE_DX10_BC5_UNORM, TEST_FILE_DX10_BC5_UNORM), # hexeditted to use DX10 FourCC (TEST_FILE_DX10_BC5_SNORM, TEST_FILE_BC5S), (TEST_FILE_BC5S, TEST_FILE_BC5S), ), )
@pytest.mark.parametrize( ("image_path", "expected_path"), ( # hexeditted to be typeless (TEST_FILE_DX10_BC5_TYPELESS, TEST_FILE_DX10_BC5_UNORM), (TEST_FILE_DX10_BC5_UNORM, TEST_FILE_DX10_BC5_UNORM), # hexeditted to use DX10 FourCC (TEST_FILE_DX10_BC5_SNORM, TEST_FILE_BC5S), (TEST_FILE_BC5S, TEST_FILE_BC5S), ), )
token_counts: 55 | file_name: test_file_dds.py | language: Python | path: Tests/test_file_dds.py | commit_id: ad2c6a20fe874958d8d9adecbbfeb81856155f05 | repo: Pillow | complexity: 1

id: 60,238 | vocab_size: 26 | ast_levels: 11 | nloc: 5 | n_ast_nodes: 92 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 28 | n_whitespaces: 44
crop_params
Balanced joint maximum mean discrepancy for deep transfer learning
https://github.com/jindongwang/transferlearning.git
def crop_params(fn): params = fn.params.get('crop_param', fn.params) axis = params.get('axis', 2) # default to spatial crop for N, C, H, W offset = np.array(params.get('offset', 0), ndmin=1) return (axis, offset)
token_counts: 55 | file_name: coord_map.py | language: Python | path: code/deep/BJMMD/caffe/python/caffe/coord_map.py | commit_id: cc4d0564756ca067516f71718a3d135996525909 | repo: transferlearning | complexity: 1

id: 290,734 | vocab_size: 30 | ast_levels: 13 | nloc: 17 | n_ast_nodes: 161 | n_identifiers: 20 | n_ast_errors: 0 | n_words: 45 | n_whitespaces: 189
_async_create_radio_entry
Minor refactor of zha config flow (#82200) * Minor refactor of zha config flow * Move ZhaRadioManager to a separate module
https://github.com/home-assistant/core.git
async def _async_create_radio_entry(self) -> FlowResult: assert self._title is not None assert self._radio_mgr.radio_type is not None assert self._radio_mgr.device_path is not None assert self._radio_mgr.device_settings is not None device_settings = self._radio_mgr.device_settings.copy() device_settings[CONF_DEVICE_PATH] = await self.hass.async_add_executor_job( usb.get_serial_by_id, self._radio_mgr.device_path ) return self.async_create_entry( title=self._title, data={ CONF_DEVICE: device_settings, CONF_RADIO_TYPE: self._radio_mgr.radio_type.name, }, )
token_counts: 106 | file_name: config_flow.py | language: Python | path: homeassistant/components/zha/config_flow.py | commit_id: bb64b39d0e6d41f531af9c63b69d1ce243a2751b | repo: core | complexity: 1

id: 135,991 | vocab_size: 41 | ast_levels: 13 | nloc: 20 | n_ast_nodes: 200 | n_identifiers: 26 | n_ast_errors: 0 | n_words: 57 | n_whitespaces: 247
test_foreach_worker
[RLlib] Refactor `WorkerSet` on top of `FaultTolerantActorManager`. (#29938) Signed-off-by: Jun Gong <jungong@anyscale.com>
https://github.com/ray-project/ray.git
def test_foreach_worker(self): ws = WorkerSet( env_creator=lambda _: gym.make("CartPole-v1"), default_policy_class=RandomPolicy, config=AlgorithmConfig().rollouts(num_rollout_workers=2), num_workers=2, ) policies = ws.foreach_worker( lambda w: w.get_policy(DEFAULT_POLICY_ID), local_worker=True, ) # 3 policies including the one from the local worker. self.assertEqual(len(policies), 3) for p in policies: self.assertIsInstance(p, RandomPolicy) policies = ws.foreach_worker( lambda w: w.get_policy(DEFAULT_POLICY_ID), local_worker=False, ) # 2 policies from only the remote workers. self.assertEqual(len(policies), 2) ws.stop()
token_counts: 126 | file_name: test_worker_set.py | language: Python | path: rllib/evaluation/tests/test_worker_set.py | commit_id: e707ce4fb3717e3c05118c57f503dfbd03552ca9 | repo: ray | complexity: 2

id: 280,144 | vocab_size: 46 | ast_levels: 11 | nloc: 39 | n_ast_nodes: 219 | n_identifiers: 40 | n_ast_errors: 4 | n_words: 61 | n_whitespaces: 253
get_config
Make default `Layer.get_config()` automatically work for a wide range of layers that do not override it. PiperOrigin-RevId: 480781082
https://github.com/keras-team/keras.git
def get_config(self): config = { "name": self.name, "trainable": self.trainable, } config["dtype"] = policy.serialize(self._dtype_policy) if hasattr(self, "_batch_input_shape"): config["batch_input_shape"] = self._batch_input_shape if not generic_utils.is_default(self.get_config): # In this case the subclass implements get_config() return config # In this case the subclass doesn't implement get_config(): # Let's see if we can autogenerate it. if getattr(self, "_auto_get_config", False): config.update(self._auto_config.config) return config else: raise NotImplementedError( textwrap.dedent( f
raise NotImplementedError( textwrap.dedent( f""" Layer {self.__class__.__name__} was created by passingargument valuesand therefore the layer must override `get_config()` in order to be serializable. Please implement `get_config()`. Example:order to be serializable. Please implement
token_counts: 99 | file_name: base_layer.py | language: Python | path: keras/engine/base_layer.py | commit_id: af1408d3255e3db9067522762e22a6c454c56654 | repo: keras | complexity: 4

id: 127,668 | vocab_size: 37 | ast_levels: 12 | nloc: 16 | n_ast_nodes: 77 | n_identifiers: 10 | n_ast_errors: 0 | n_words: 42 | n_whitespaces: 143
to_object_ref
[AIR] Deprecate `Checkpoint.to_object_ref` and `Checkpoint.from_object_ref` (#28318) Before object_ref = checkpoint.to_object_ref() checkpoint.from_object_ref(object_ref) After (this is already possible) object_ref = ray.put(checkpoint) ray.get(checkpoint) Why are these changes needed? We need to efficiently recover checkpoint type. ray.get already does this; from_object_ref can't. See [AIR] Maintain checkpoint subclass information during serialization #28134. There are two ways to put checkpoints in the object store. You can either call to_object_ref or ray.put. We should standardize on the conventional way. There should be one-- and preferably only one --obvious way to do it.
https://github.com/ray-project/ray.git
def to_object_ref(self) -> ray.ObjectRef: warnings.warn( "`to_object_ref` is deprecated and will be removed in a future Ray " "version. To store the checkpoint in the Ray object store, call " "`ray.put(ckpt)` instead of `ckpt.to_object_ref()`.", DeprecationWarning, ) if self._obj_ref: return self._obj_ref else: return ray.put(self.to_dict())
token_counts: 43 | file_name: checkpoint.py | language: Python | path: python/ray/air/checkpoint.py | commit_id: c2bdee9fea6f354330545009d5e6caec3dd7eb26 | repo: ray | complexity: 2

id: 260,762 | vocab_size: 18 | ast_levels: 13 | nloc: 15 | n_ast_nodes: 99 | n_identifiers: 14 | n_ast_errors: 0 | n_words: 22 | n_whitespaces: 40
test_toy_example_collapse_points
MAINT Parameters validation for NeighborhoodComponentsAnalysis (#24195) Co-authored-by: jeremie du boisberranger <jeremiedbb@yahoo.fr>
https://github.com/scikit-learn/scikit-learn.git
def test_toy_example_collapse_points(): rng = np.random.RandomState(42) input_dim = 5 two_points = rng.randn(2, input_dim) X = np.vstack([two_points, two_points.mean(axis=0)[np.newaxis, :]]) y = [0, 0, 1]
token_counts: 132 | file_name: test_nca.py | language: Python | path: sklearn/neighbors/tests/test_nca.py | commit_id: d7c978b764c6aafb65cc28757baf3f64da2cae34 | repo: scikit-learn | complexity: 1

id: 154,497 | vocab_size: 22 | ast_levels: 10 | nloc: 6 | n_ast_nodes: 90 | n_identifiers: 11 | n_ast_errors: 0 | n_words: 30 | n_whitespaces: 76
deploy
FIX-#4597: Refactor Partition handling of func, args, kwargs (#4715) Co-authored-by: Iaroslav Igoshev <Poolliver868@mail.ru> Signed-off-by: Jonathan Shi <jhshi@ponder.io>
https://github.com/modin-project/modin.git
def deploy(cls, func, f_args=None, f_kwargs=None, num_returns=1): args = [] if f_args is None else f_args kwargs = {} if f_kwargs is None else f_kwargs return _deploy_ray_func.options(num_returns=num_returns).remote( func, *args, **kwargs )
token_counts: 60 | file_name: engine_wrapper.py | language: Python | path: modin/core/execution/ray/common/engine_wrapper.py | commit_id: d6d503ac7c3028d871c34d9e99e925ddb0746df6 | repo: modin | complexity: 3

id: 320,112 | vocab_size: 2 | ast_levels: 6 | nloc: 10 | n_ast_nodes: 13 | n_identifiers: 2 | n_ast_errors: 0 | n_words: 2 | n_whitespaces: 9
test_scan_file_for_separating_barcodes_pillow_transcode_error
In case pikepdf fails to convert an image to a PIL image, fall back to converting pages to PIL images
https://github.com/paperless-ngx/paperless-ngx.git
def test_scan_file_for_separating_barcodes_pillow_transcode_error(self):
token_counts: 69 | file_name: test_barcodes.py | language: Python | path: src/documents/tests/test_barcodes.py | commit_id: caf4b54bc7bf828ba170fcc329aa82a0c45da382 | repo: paperless-ngx | complexity: 1

id: 40,304 | vocab_size: 120 | ast_levels: 17 | nloc: 44 | n_ast_nodes: 472 | n_identifiers: 29 | n_ast_errors: 0 | n_words: 195 | n_whitespaces: 586
plotting_context
Use f-strings for string formatting (#2800) Reformats all the text from the old "%-formatted" and .format(...) format to the newer f-string format, as defined in PEP 498. This requires Python 3.6+. Flynt 0.76 was used to reformat the strings. 45 f-strings were created in 13 files. F-strings are in general more readable, concise and performant. See also: https://www.python.org/dev/peps/pep-0498/#rationale
https://github.com/mwaskom/seaborn.git
def plotting_context(context=None, font_scale=1, rc=None): if context is None: context_dict = {k: mpl.rcParams[k] for k in _context_keys} elif isinstance(context, dict): context_dict = context else: contexts = ["paper", "notebook", "talk", "poster"] if context not in contexts: raise ValueError(f"context must be in {', '.join(contexts)}") # Set up dictionary of default parameters texts_base_context = { "font.size": 12, "axes.labelsize": 12, "axes.titlesize": 12, "xtick.labelsize": 11, "ytick.labelsize": 11, "legend.fontsize": 11, "legend.title_fontsize": 12, } base_context = { "axes.linewidth": 1.25, "grid.linewidth": 1, "lines.linewidth": 1.5, "lines.markersize": 6, "patch.linewidth": 1, "xtick.major.width": 1.25, "ytick.major.width": 1.25, "xtick.minor.width": 1, "ytick.minor.width": 1, "xtick.major.size": 6, "ytick.major.size": 6, "xtick.minor.size": 4, "ytick.minor.size": 4, } base_context.update(texts_base_context) # Scale all the parameters by the same factor depending on the context scaling = dict(paper=.8, notebook=1, talk=1.5, poster=2)[context] context_dict = {k: v * scaling for k, v in base_context.items()} # Now independently scale the fonts font_keys = texts_base_context.keys() font_dict = {k: context_dict[k] * font_scale for k in font_keys} context_dict.update(font_dict) # Override these settings with the provided rc dictionary if rc is not None: rc = {k: v for k, v in rc.items() if k in _context_keys} context_dict.update(rc) # Wrap in a _PlottingContext object so this can be used in a with statement context_object = _PlottingContext(context_dict) return context_object
token_counts: 290 | file_name: rcmod.py | language: Python | path: seaborn/rcmod.py | commit_id: f7e25e18983f2f36a1529cd9e4bda6fa008cbd6d | repo: seaborn | complexity: 10

id: 47,420 | vocab_size: 29 | ast_levels: 13 | nloc: 8 | n_ast_nodes: 194 | n_identifiers: 22 | n_ast_errors: 0 | n_words: 41 | n_whitespaces: 81
create_test_pipeline
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
https://github.com/apache/airflow.git
def create_test_pipeline(suffix, trigger_rule): skip_operator = EmptySkipOperator(task_id=f'skip_operator_{suffix}') always_true = EmptyOperator(task_id=f'always_true_{suffix}') join = EmptyOperator(task_id=trigger_rule, trigger_rule=trigger_rule) final = EmptyOperator(task_id=f'final_{suffix}') skip_operator >> join always_true >> join join >> final with DAG( dag_id='example_skip_dag', start_date=pendulum.datetime(2021, 1, 1, tz="UTC"), catchup=False, tags=['example'], ) as dag: create_test_pipeline('1', TriggerRule.ALL_SUCCESS) create_test_pipeline('2', TriggerRule.ONE_SUCCESS)
token_counts: 59 | file_name: example_skip_dag.py | language: Python | path: airflow/example_dags/example_skip_dag.py | commit_id: 49e336ae0302b386a2f47269a6d13988382d975f | repo: airflow | complexity: 1

id: 287,839 | vocab_size: 15 | ast_levels: 11 | nloc: 7 | n_ast_nodes: 66 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 16 | n_whitespaces: 74
async_will_remove_from_hass
Netatmo refactor to use pyatmo 7.0.1 (#73482) (#78523) Co-authored-by: Robert Svensson <Kane610@users.noreply.github.com>
https://github.com/home-assistant/core.git
async def async_will_remove_from_hass(self) -> None: await super().async_will_remove_from_hass() for publisher in self._publishers: await self.data_handler.unsubscribe( publisher[SIGNAL_NAME], self.async_update_callback )
token_counts: 39 | file_name: netatmo_entity_base.py | language: Python | path: homeassistant/components/netatmo/netatmo_entity_base.py | commit_id: 81abeac83ed85c5753cb8f2ac317caf079cf1868 | repo: core | complexity: 2

id: 53,074 | vocab_size: 27 | ast_levels: 11 | nloc: 14 | n_ast_nodes: 49 | n_identifiers: 5 | n_ast_errors: 0 | n_words: 29 | n_whitespaces: 105
import_distributed
Delay import of `distributed` This improves `prefect` module import times which can be really slow with all of the distributed extras and allows configuration of a `DaskTaskRunner` on a machine without `distributed` installed
https://github.com/PrefectHQ/prefect.git
def import_distributed() -> "distributed": try: import distributed except ImportError as exc: raise RuntimeError( "Using the Dask task runner requires Dask `distributed` to be installed." ) from exc return distributed
token_counts: 25 | file_name: task_runners.py | language: Python | path: src/prefect/task_runners.py | commit_id: 9de7f04816f1ef884d98ed817e869e73a9523ca1 | repo: prefect | complexity: 2

id: 115,994 | vocab_size: 72 | ast_levels: 14 | nloc: 20 | n_ast_nodes: 295 | n_identifiers: 26 | n_ast_errors: 0 | n_words: 126 | n_whitespaces: 403
get_columns
implemented the connection_args and connection_args_example dicts
https://github.com/mindsdb/mindsdb.git
def get_columns(self) -> StatusResponse: query = "SELECT * FROM S3Object LIMIT 5" df = self.native_query(query) response = Response( RESPONSE_TYPE.TABLE, data_frame=pd.DataFrame( { 'column_name': df.columns, 'data_type': df.dtypes } ) ) return response connection_args = OrderedDict( aws_access_key_id={ 'type': ARG_TYPE.STR, 'description': 'The access key for the AWS account.' }, aws_secret_access_key={ 'type': ARG_TYPE.STR, 'description': 'The secret key for the AWS account.' }, region_name={ 'type': ARG_TYPE.STR, 'description': 'The AWS region where the S3 bucket is located.' }, bucket={ 'type': ARG_TYPE.STR, 'description': 'The name of the S3 bucket.' }, key={ 'type': ARG_TYPE.STR, 'description': 'The key of the object to be queried.' }, input_serialization={ 'type': ARG_TYPE.STR, 'description': 'The format of the data in the object that is to be queried.' } ) connection_args_example = OrderedDict( aws_access_key_id='PCAQ2LJDOSWLNSQKOCPW', aws_secret_access_key='U/VjewPlNopsDmmwItl34r2neyC6WhZpUiip57i', region_name='us-east-1', bucket='mindsdb-bucket', key='iris.csv', input_serialization="{'CSV': {'FileHeaderInfo': 'NONE'}}", )
token_counts: 50 | file_name: s3_handler.py | language: Python | path: mindsdb/integrations/handlers/s3_handler/s3_handler.py | commit_id: 4c20820d35782ed27f41e964ad2a429420b0eb67 | repo: mindsdb | complexity: 1

id: 216,271 | vocab_size: 30 | ast_levels: 15 | nloc: 11 | n_ast_nodes: 124 | n_identifiers: 10 | n_ast_errors: 0 | n_words: 36 | n_whitespaces: 164
_get_job_completion_ipc_path
Enable minion's IPC channel to aggregate results from spawned jobber processes. Use a long-running request channel in the minion parent process to communicate job results back to the master via broker-based or broker-less transport. This is a necessary optimization for transports that prefer a sustained long-running connection because connection create/dispose operations are expensive. The working assumption is that this change benefits all supported transports. Testing Done: * this tests provide coverage for this use case: .../salt/tests/pytests/integration/minion.*
https://github.com/saltstack/salt.git
def _get_job_completion_ipc_path(self): if self.opts["ipc_mode"] == "tcp": # try to find the port and fallback to something if not configured uxd_path_or_tcp_port = int( self.opts.get("tcp_job_completion_port", self.opts["tcp_pub_port"] + 1) ) else: uxd_path_or_tcp_port = os.path.join( self.opts["sock_dir"], "job_completion_minion-{}.ipc".format(self.opts["id"]), ) return uxd_path_or_tcp_port
token_counts: 70 | file_name: minion.py | language: Python | path: salt/minion.py | commit_id: 171926cc57618b51bf3fdc042b62212e681180fc | repo: salt | complexity: 2

id: 22,117 | vocab_size: 10 | ast_levels: 8 | nloc: 10 | n_ast_nodes: 48 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 11 | n_whitespaces: 24
patch
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
https://github.com/pypa/pipenv.git
def patch(self, url, data=None, **kwargs): r return self.request("PATCH", url, data=data, **kwargs)
token_counts: 32 | file_name: sessions.py | language: Python | path: pipenv/patched/pip/_vendor/requests/sessions.py | commit_id: cd5a9683be69c86c8f3adcd13385a9bc5db198ec | repo: pipenv | complexity: 1

id: 248,583 | vocab_size: 36 | ast_levels: 10 | nloc: 12 | n_ast_nodes: 177 | n_identifiers: 15 | n_ast_errors: 0 | n_words: 48 | n_whitespaces: 154
test_guest_access_token
Move the "email unsubscribe" resource, refactor the macaroon generator & simplify the access token verification logic. (#12986) This simplifies the access token verification logic by removing the `rights` parameter which was only ever used for the unsubscribe link in email notifications. The latter has been moved under the `/_synapse` namespace, since it is not a standard API. This also makes the email verification link more secure, by embedding the app_id and pushkey in the macaroon and verifying it. This prevents the user from tampering the query parameters of that unsubscribe link. Macaroon generation is refactored: - Centralised all macaroon generation and verification logic to the `MacaroonGenerator` - Moved to `synapse.utils` - Changed the constructor to require only a `Clock`, hostname, and a secret key (instead of a full `Homeserver`). - Added tests for all methods.
https://github.com/matrix-org/synapse.git
def test_guest_access_token(self): token = self.macaroon_generator.generate_guest_access_token("@user:tesths") user_id = self.macaroon_generator.verify_guest_token(token) self.assertEqual(user_id, "@user:tesths") # Raises with another secret key with self.assertRaises(MacaroonVerificationFailedException): self.other_macaroon_generator.verify_guest_token(token) # Check that an old access token without the guest caveat does not work macaroon = self.macaroon_generator._generate_base_macaroon("access") macaroon.add_first_party_caveat(f"user_id = {user_id}") macaroon.add_first_party_caveat("nonce = 0123456789abcdef") token = macaroon.serialize() with self.assertRaises(MacaroonVerificationFailedException): self.macaroon_generator.verify_guest_token(token)
token_counts: 96 | file_name: test_macaroons.py | language: Python | path: tests/util/test_macaroons.py | commit_id: fe1daad67237c2154a3d8d8cdf6c603f0d33682e | repo: synapse | complexity: 1

id: 160,733 | vocab_size: 31 | ast_levels: 10 | nloc: 6 | n_ast_nodes: 48 | n_identifiers: 5 | n_ast_errors: 0 | n_words: 34 | n_whitespaces: 73
no_nep50_warning
WIP: Add warning context manager and fix min_scalar for new promotion Even the new promotion has to use the min-scalar logic to avoid picking up a float16 loop for `np.int8(3) * 3.`.
https://github.com/numpy/numpy.git
def no_nep50_warning(): # TODO: We could skip the manager entirely if NumPy as a whole is not # in the warning mode. (Which is NOT thread/context safe.) token = NO_NEP50_WARNING.set(True) try: yield finally: NO_NEP50_WARNING.reset(token)
token_counts: 24 | file_name: _ufunc_config.py | language: Python | path: numpy/core/_ufunc_config.py | commit_id: baaeb9a16c9c28683db97c4fc3d047e86d32a0c5 | repo: numpy | complexity: 2

id: 224,136 | vocab_size: 19 | ast_levels: 15 | nloc: 17 | n_ast_nodes: 130 | n_identifiers: 19 | n_ast_errors: 0 | n_words: 20 | n_whitespaces: 136
test_invalid_config
Some manual changes ahead of formatting code with Black
https://github.com/mkdocs/mkdocs.git
def test_invalid_config(self): file_contents = dedent( ) config_file = tempfile.NamedTemporaryFile('w', delete=False) try: config_file.write(file_contents) config_file.flush() config_file.close() with self.assertRaises(ConfigurationError): config.load_config(config_file=open(config_file.name, 'rb')) finally: os.remove(config_file.name)
token_counts: 74 | file_name: config_tests.py | language: Python | path: mkdocs/tests/config/config_tests.py | commit_id: 372384d8102ddb4be6360f44d1bfddb8b45435a4 | repo: mkdocs | complexity: 2

id: 203,227 | vocab_size: 13 | ast_levels: 12 | nloc: 9 | n_ast_nodes: 72 | n_identifiers: 8 | n_ast_errors: 0 | n_words: 13 | n_whitespaces: 82
_get_default_collation
Refs #33476 -- Refactored problematic code before reformatting by Black. In these cases Black produces unexpected results, e.g. def make_random_password( self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789', ): or cursor.execute(""" SELECT ... """, [table name], )
https://github.com/django/django.git
def _get_default_collation(self, table_name): with self.connection.cursor() as cursor: cursor.execute( , [self.normalize_name(table_name)], ) return cursor.fetchone()[0]
token_counts: 43 | file_name: schema.py | language: Python | path: django/db/backends/oracle/schema.py | commit_id: c5cd8783825b5f6384417dac5f3889b4210b7d08 | repo: django | complexity: 1

id: 272,375 | vocab_size: 44 | ast_levels: 16 | nloc: 15 | n_ast_nodes: 153 | n_identifiers: 17 | n_ast_errors: 0 | n_words: 60 | n_whitespaces: 184
_apply_scores
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _apply_scores(self, scores, value, scores_mask=None, training=None): if scores_mask is not None: padding_mask = tf.logical_not(scores_mask) # Bias so padding positions do not contribute to attention distribution. # Note 65504. is the max float16 value. if scores.dtype is tf.float16: scores -= 65504.0 * tf.cast(padding_mask, dtype=scores.dtype) else: scores -= 1.0e9 * tf.cast(padding_mask, dtype=scores.dtype) if training is None: training = backend.learning_phase() weights = tf.nn.softmax(scores)
token_counts: 133 | file_name: base_dense_attention.py | language: Python | path: keras/layers/attention/base_dense_attention.py | commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | repo: keras | complexity: 4

id: 133,351 | vocab_size: 12 | ast_levels: 11 | nloc: 4 | n_ast_nodes: 52 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 12 | n_whitespaces: 44
update_scheduler
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def update_scheduler(self, metric): self.worker_group.apply_all_operators( lambda op: [sched.step(metric) for sched in op._schedulers] )
token_counts: 32 | file_name: torch_trainer.py | language: Python | path: python/ray/util/sgd/torch/torch_trainer.py | commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | repo: ray | complexity: 2

id: 288,502 | vocab_size: 6 | ast_levels: 9 | nloc: 3 | n_ast_nodes: 33 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 6 | n_whitespaces: 20
auto_update
Supervisor update entity auto update from api (#79611) * Supervisor update entity auto update from api * Update api mocks in tests
https://github.com/home-assistant/core.git
def auto_update(self) -> bool: return self.coordinator.data[DATA_KEY_SUPERVISOR][ATTR_AUTO_UPDATE]
token_counts: 20 | file_name: update.py | language: Python | path: homeassistant/components/hassio/update.py | commit_id: 416c10a793a982fb8c17259d36b99be458131cd0 | repo: core | complexity: 1

id: 272,119 | vocab_size: 64 | ast_levels: 15 | nloc: 38 | n_ast_nodes: 329 | n_identifiers: 29 | n_ast_errors: 0 | n_words: 114 | n_whitespaces: 680
test_shared_embedding_column_with_non_sequence_categorical
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def test_shared_embedding_column_with_non_sequence_categorical(self): with tf.Graph().as_default(): vocabulary_size = 3 sparse_input_a = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) sparse_input_b = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) categorical_column_a = ( tf.feature_column.categorical_column_with_identity( key="aaa", num_buckets=vocabulary_size ) ) categorical_column_b = ( tf.feature_column.categorical_column_with_identity( key="bbb", num_buckets=vocabulary_size ) ) shared_embedding_columns = tf.feature_column.shared_embeddings( [categorical_column_a, categorical_column_b], dimension=2 ) sequence_input_layer = ksfc.SequenceFeatures( shared_embedding_columns ) with self.assertRaisesRegex( ValueError, r"In embedding_column: aaa_shared_embedding\. " r"categorical_column must " r"be of type SequenceCategoricalColumn to use SequenceFeatures\.", ): _, _ = sequence_input_layer( {"aaa": sparse_input_a, "bbb": sparse_input_b} )
token_counts: 216 | file_name: sequence_feature_column_test.py | language: Python | path: keras/feature_column/sequence_feature_column_test.py | commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | repo: keras | complexity: 1

id: 268,177 | vocab_size: 164 | ast_levels: 21 | nloc: 57 | n_ast_nodes: 556 | n_identifiers: 39 | n_ast_errors: 0 | n_words: 348 | n_whitespaces: 1,100
package_status
apt: include apt preferences (e.g. pinning) when selecting packages (#78327) Fixes #77969
https://github.com/ansible/ansible.git
def package_status(m, pkgname, version_cmp, version, default_release, cache, state): try: # get the package from the cache, as well as the # low-level apt_pkg.Package object which contains # state fields not directly accessible from the # higher-level apt.package.Package object. pkg = cache[pkgname] ll_pkg = cache._cache[pkgname] # the low-level package object except KeyError: if state == 'install': try: provided_packages = cache.get_providing_packages(pkgname) if provided_packages: # When this is a virtual package satisfied by only # one installed package, return the status of the target # package to avoid requesting re-install if cache.is_virtual_package(pkgname) and len(provided_packages) == 1: package = provided_packages[0] installed, installed_version, version_installable, has_files = \ package_status(m, package.name, version_cmp, version, default_release, cache, state='install') if installed: return installed, installed_version, version_installable, has_files # Otherwise return nothing so apt will sort out # what package to satisfy this with return False, False, True, False m.fail_json(msg="No package matching '%s' is available" % pkgname) except AttributeError: # python-apt version too old to detect virtual packages # mark as not installed and let apt-get install deal with it return False, False, True, False else: return False, False, None, False try: has_files = len(pkg.installed_files) > 0 except UnicodeDecodeError: has_files = True except AttributeError: has_files = False # older python-apt cannot be used to determine non-purged try: package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED except AttributeError: # python-apt 0.7.X has very weak low-level object try: # might not be necessary as python-apt post-0.7.X should have current_state property package_is_installed = pkg.is_installed except AttributeError: # assume older version of python-apt is installed package_is_installed = pkg.isInstalled version_best = package_best_match(pkgname, version_cmp, version, default_release, cache._cache) version_is_installed = False version_installable = None if package_is_installed: try: installed_version = pkg.installed.version except AttributeError: installed_version = pkg.installedVersion if version_cmp == "=": # check if the version is matched as well version_is_installed = fnmatch.fnmatch(installed_version, version) if version_best and installed_version != version_best and fnmatch.fnmatch(version_best, version): version_installable = version_best elif version_cmp == ">=": version_is_installed = apt_pkg.version_compare(installed_version, version) >= 0 if version_best and installed_version != version_best and apt_pkg.version_compare(version_best, version) >= 0: version_installable = version_best else: version_is_installed = True if version_best and installed_version != version_best: version_installable = version_best else: version_installable = version_best return package_is_installed, version_is_installed, version_installable, has_files
token_counts: 350 | file_name: apt.py | language: Python | path: lib/ansible/modules/apt.py | commit_id: 04e892757941bf77198692bbe37041d7a8cbf999 | repo: ansible | complexity: 24

id: 265,648 | vocab_size: 17 | ast_levels: 10 | nloc: 9 | n_ast_nodes: 88 | n_identifiers: 11 | n_ast_errors: 0 | n_words: 18 | n_whitespaces: 97
test_interface_label_count_valid
Fixes #10247: Allow changing selected device/VM when creating a new component (#10312) * Initial work on #10247 * Continued work on #10247 * Clean up component creation tests * Move valdiation of replicated field to form * Clean up ordering of fields in component creation forms * Omit fieldset header if none * Clean up ordering of fields in component template creation forms * View tests should not move component templates to new device type * Define replication_fields on VMInterfaceCreateForm * Clean up expandable field help texts * Update comments * Update component bulk update forms & views to support new replication fields * Fix ModularDeviceComponentForm parent class * Fix bulk creation of VM interfaces (thanks @kkthxbye-code!)
https://github.com/netbox-community/netbox.git
def test_interface_label_count_valid(self): interface_data = { 'device': self.device.pk, 'name': 'eth[0-9]', 'label': 'Interface[0-9]', 'type': InterfaceTypeChoices.TYPE_1GE_GBIC, } form = InterfaceCreateForm(interface_data) self.assertTrue(form.is_valid())
token_counts: 48 | file_name: test_forms.py | language: Python | path: netbox/dcim/tests/test_forms.py | commit_id: c4b7ab067a914349abd88398dd9bfef9f6c2f806 | repo: netbox | complexity: 1

id: 154,556 | vocab_size: 113 | ast_levels: 16 | nloc: 57 | n_ast_nodes: 498 | n_identifiers: 44 | n_ast_errors: 0 | n_words: 171 | n_whitespaces: 912
_join_by_index
FEAT-#4946: Replace OmniSci with HDK (#4947) Co-authored-by: Iaroslav Igoshev <Poolliver868@mail.ru> Signed-off-by: Andrey Pavlenko <andrey.a.pavlenko@gmail.com>
https://github.com/modin-project/modin.git
def _join_by_index(self, other_modin_frames, how, sort, ignore_index): if how == "outer": raise NotImplementedError("outer join is not supported in HDK engine") lhs = self._maybe_materialize_rowid() reset_index_names = False for rhs in other_modin_frames: rhs = rhs._maybe_materialize_rowid() if len(lhs._index_cols) != len(rhs._index_cols): raise NotImplementedError( "join by indexes with different sizes is not supported" ) reset_index_names = reset_index_names or lhs._index_cols != rhs._index_cols condition = lhs._build_equi_join_condition( rhs, lhs._index_cols, rhs._index_cols ) exprs = lhs._index_exprs() new_columns = lhs.columns.to_list() for col in lhs.columns: exprs[col] = lhs.ref(col) for col in rhs.columns: # Handle duplicating column names here. When user specifies # suffixes to make a join, actual renaming is done in front-end. new_col_name = col rename_idx = 0 while new_col_name in exprs: new_col_name = f"{col}{rename_idx}" rename_idx += 1 exprs[new_col_name] = rhs.ref(col) new_columns.append(new_col_name) op = JoinNode( lhs, rhs, how=how, exprs=exprs, condition=condition, ) new_columns = Index.__new__( Index, data=new_columns, dtype=self.columns.dtype ) lhs = lhs.__constructor__( dtypes=lhs._dtypes_for_exprs(exprs), columns=new_columns, index_cols=lhs._index_cols, op=op, force_execution_mode=self._force_execution_mode, ) if sort: lhs = lhs.sort_rows( lhs._index_cols, ascending=True, ignore_index=False, na_position="last", ) if reset_index_names: lhs = lhs._reset_index_names() if ignore_index: new_columns = Index.__new__(RangeIndex, data=range(len(lhs.columns))) lhs = lhs._set_columns(new_columns) return lhs
token_counts: 315 | file_name: dataframe.py | language: Python | path: modin/experimental/core/execution/native/implementations/hdk_on_native/dataframe/dataframe.py | commit_id: e5b1888cd932909e49194d58035da34b210b91c4 | repo: modin | complexity: 11

id: 282,373 | vocab_size: 35 | ast_levels: 15 | nloc: 17 | n_ast_nodes: 232 | n_identifiers: 18 | n_ast_errors: 0 | n_words: 48 | n_whitespaces: 243
get_crypto_yfinance
Portfolio class (#1280) * remerge into main ? * tests again * change the example csv * squash all the bugs * Improve `add` interface * Add warning on loading portfolio with no cash * left a rogue print * oopsie. hugo * oopsie. hugo * Add back a new `al` function + port name * test Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>
https://github.com/OpenBB-finance/OpenBBTerminal.git
def get_crypto_yfinance(self): if self._crypto_tickers: list_of_coins = [f"{coin}-USD" for coin in self._crypto_tickers] self._historical_crypto = yf.download( list_of_coins, start=self._start_date, progress=False )["Close"] if len(list_of_coins) == 1: self._historical_crypto = pd.DataFrame(self._historical_crypto) self._historical_crypto.columns = list_of_coins self._historical_crypto.columns = pd.MultiIndex.from_product( [["Close"], [col[:-4] for col in self._historical_crypto.columns]] ) else: self._historical_crypto = pd.DataFrame() self._historical_crypto[ pd.MultiIndex.from_product([["Close"], ["crypto"]]) ] = 0
token_counts: 142 | file_name: portfolio_model.py | language: Python | path: gamestonk_terminal/portfolio/portfolio_model.py | commit_id: 2a998a5a417ba81b6ee3c4de90d2ffaca52b46fa | repo: OpenBBTerminal | complexity: 5

id: 102,005 | vocab_size: 6 | ast_levels: 9 | nloc: 2 | n_ast_nodes: 35 | n_identifiers: 5 | n_ast_errors: 0 | n_words: 6 | n_whitespaces: 20
active
Update Face Filter - Remove old face filter - plugins.extract.pipeline: Expose plugins directly - Change `is_aligned` from plugin level to ExtractMedia level - Allow extract pipeline to take faceswap aligned images - Add ability for recognition plugins to accept aligned faces as input - Add face filter to recognition plugin - Move extractor pipeline IO ops to own class
https://github.com/deepfakes/faceswap.git
def active(self): return bool(self._filter_files) or bool(self._nfilter_files)
token_counts: 20 | file_name: extract.py | language: Python | path: scripts/extract.py | commit_id: 1d1face00d9476896e7857d3976afce383585d1b | repo: faceswap | complexity: 2

id: 276,023 | vocab_size: 18 | ast_levels: 13 | nloc: 6 | n_ast_nodes: 93 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 20 | n_whitespaces: 72
_set_network_attributes_from_metadata
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _set_network_attributes_from_metadata(revived_obj): with utils.no_automatic_dependency_tracking_scope(revived_obj): # pylint:disable=protected-access metadata = revived_obj._serialized_attributes["metadata"] if metadata.get("dtype") is not None: revived_obj._set_dtype_policy(metadata["dtype"]) revived_obj._trainable = metadata["trainable"] # pylint:enable=protected-access
token_counts: 50 | file_name: load.py | language: Python | path: keras/saving/saved_model/load.py | commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | repo: keras | complexity: 2

id: 283,220 | vocab_size: 9 | ast_levels: 11 | nloc: 4 | n_ast_nodes: 51 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 11 | n_whitespaces: 39
_words_and_emoticons
Create a packaged app bundle with Pyinstaller (#1525) * Add dashboard widget assets * Add ipywidgets and ipyflex to project * Add currencies dashboard notebook * Update docs and docstrings * Add pyinstaller to project deps * Add pyinstaller artifacts to gitignore * Fix linter errors in terminal.py * Update cspell hook and action with a pyinstaller specific word * Add pyinstaller specfile and artifacts * Add splashscreen image * Add app icon * adding splash screen support to terminal.spec and terminal.py * Restore the conda env build files * Sync deps * Add border to the splashscreen image * Clean up terminal launcher * Add support for default feature flags in packages apps * Fix types and linting * Add splashscreen management to app bootup * Check prediction feature flag when entering crypto/pred * Update pyinstaller spec file * fix .spec file to work for splash and icon - removed the ".." * Allows to export when using installer (#1568) * fix export for packaged apps * fix filename * Git : replace commit_hash when it is set in config_terminal * Add update of the git commit hash in gtff default during build * Add packaged app name and feature flag to logs * Add platform specific icon assignment * Add macOS build assets * Add tensorflow to hidden imports * Move LOGGING_COMMIT_HASH to gtff * Adding files/folders needed to .spec and pyinstaller folder. This will make certain commands work again. * Linting * Workflow : ignore ./build/pyinstaller from codespell * Workflow : exclude ./build/pyinstaller from flake8 * Poetry + Workflow : add types-six * Pyinstaller : remove property_cached, user_agent and vaderSentiment * Revert "Pyinstaller : remove property_cached, user_agent and vaderSentiment" This reverts commit dbb3e2b81086f97819ebd21457148c7160a4d703. * Clean up local paths in specfile * Validate deps have correct Jinja version (they do) * Fix logging commit hash to be set correctly for the logger to see it Co-authored-by: Andrew <andrew.kenreich@gmail.com> Co-authored-by: didierlopes.eth <dro.lopes@campus.fct.unl.pt> Co-authored-by: Chavithra PARANA <chavithra@gmail.com>
https://github.com/OpenBB-finance/OpenBBTerminal.git
def _words_and_emoticons(self): wes = self.text.split() stripped = list(map(self._strip_punc_if_word, wes)) return stripped
token_counts: 30 | file_name: vaderSentiment.py | language: Python | path: build/pyinstaller/vaderSentiment/vaderSentiment.py | commit_id: ab4de1dd70fba866930150e440a03e461a6ca6a8 | repo: OpenBBTerminal | complexity: 1

id: 126,370 | vocab_size: 8 | ast_levels: 11 | nloc: 4 | n_ast_nodes: 59 | n_identifiers: 11 | n_ast_errors: 0 | n_words: 9 | n_whitespaces: 37
_reset_replica_iterator
[Serve] ServeHandle detects ActorError and drop replicas from target group (#26685)
https://github.com/ray-project/ray.git
def _reset_replica_iterator(self): replicas = list(self.in_flight_queries.keys()) random.shuffle(replicas) self.replica_iterator = itertools.cycle(replicas)
token_counts: 34 | file_name: router.py | language: Python | path: python/ray/serve/_private/router.py | commit_id: 545c51609f0f55b41cf99cec95a9c21bee6846de | repo: ray | complexity: 1

id: 176,368 | vocab_size: 47 | ast_levels: 13 | nloc: 18 | n_ast_nodes: 219 | n_identifiers: 17 | n_ast_errors: 1 | n_words: 79 | n_whitespaces: 203
is_perfect_matching
Update matching functions for error validation and speed (#4897) * First steps to update matching functions for #4644 Expand tests Change API to raise NetworkXError when matching involves nodes not in G Update is_*_matching to 100+ times faster. * improve matching_dict_to_set and docs for min_weight_matching * fix sphinx error
https://github.com/networkx/networkx.git
def is_perfect_matching(G, matching): if isinstance(matching, dict): matching = matching_dict_to_set(matching) nodes = set() for edge in matching: if len(edge) != 2: raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}") u, v = edge if u not in G or v not in G: raise nx.NetworkXError(f"matching contains edge {edge} with node not in G") if u == v: return False if not G.has_edge(u, v): return False if u in nodes or v in nodes: return False nodes.update(edge) return len(nodes) == len(G) @not_implemented_for("multigraph") @not_implemented_for("directed")
@not_implemented_for("multigraph") @not_implemented_for("directed")
token_counts: 119 | file_name: matching.py | language: Python | path: networkx/algorithms/matching.py | commit_id: 28b3014d68d2b4e40d3e02219770296a827bd55c | repo: networkx | complexity: 10

id: 314,030 | vocab_size: 7 | ast_levels: 7 | nloc: 8 | n_ast_nodes: 24 | n_identifiers: 4 | n_ast_errors: 0 | n_words: 7 | n_whitespaces: 21
assumed_state
Enable polling for hardwired powerview devices (#73659) * Enable polling for hardwired powerview devices * Update homeassistant/components/hunterdouglas_powerview/cover.py * Update homeassistant/components/hunterdouglas_powerview/cover.py * docs were wrong * Update homeassistant/components/hunterdouglas_powerview/cover.py * Update homeassistant/components/hunterdouglas_powerview/sensor.py
https://github.com/home-assistant/core.git
def assumed_state(self) -> bool: return not self._is_hard_wired
token_counts: 13 | file_name: cover.py | language: Python | path: homeassistant/components/hunterdouglas_powerview/cover.py | commit_id: 120479acef9a8e9e52fa356f036e55465e441d31 | repo: core | complexity: 1

id: 293,838 | vocab_size: 60 | ast_levels: 11 | nloc: 20 | n_ast_nodes: 281 | n_identifiers: 23 | n_ast_errors: 1 | n_words: 76 | n_whitespaces: 170
test_matching_filter
Simplify time zone setting in tests (#68330) * Simplify timezone setting in tests * Fix typo * Adjust caldav tests * Adjust input_datetime tests * Adjust time_date tests * Adjust tod tests * Adjust helper tests * Adjust recorder tests * Adjust risco tests * Adjust aemet tests * Adjust flux tests * Adjust forecast_solar tests * Revert unnecessary change in forecast_solar test * Adjust climacell tests * Adjust google tests * Adjust sensor tests * Adjust sonarr tests * Adjust template tests * Adjust zodiac tests Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
https://github.com/home-assistant/core.git
async def test_matching_filter(mock_now, hass, calendar, set_tz): config = dict(CALDAV_CONFIG) config["custom_calendars"] = [ {"name": "Private", "calendar": "Private", "search": "This is a normal event"} ] assert await async_setup_component(hass, "calendar", {"calendar": config}) await hass.async_block_till_done() state = hass.states.get("calendar.private_private") assert state.name == calendar.name assert state.state == STATE_OFF assert dict(state.attributes) == { "friendly_name": "Private", "message": "This is a normal event", "all_day": False, "offset_reached": False, "start_time": "2017-11-27 17:00:00", "end_time": "2017-11-27 18:00:00", "location": "Hamburg", "description": "Surprisingly rainy", } @pytest.mark.parametrize("set_tz", ["utc"], indirect=True) @patch("homeassistant.util.dt.now", return_value=_local_datetime(12, 00))
@pytest.mark.parametrize("set_tz", ["utc"], indirect=True) @patch("homeassistant.util.dt.now", return_value=_local_datetime(12, 00))
token_counts: 124 | file_name: test_calendar.py | language: Python | path: tests/components/caldav/test_calendar.py | commit_id: cf4033b1bc853fc70828c6128ac91cdfb1d5bdaf | repo: core | complexity: 1

id: 43,466 | vocab_size: 11 | ast_levels: 11 | nloc: 4 | n_ast_nodes: 71 | n_identifiers: 14 | n_ast_errors: 0 | n_words: 11 | n_whitespaces: 43
test_send_message_exception
Implement Azure Service Bus Queue Operators (#24038) Implemented Azure Service Bus Queue based Operator's to create queue, send message to the queue and receive message(list of message or batch message) and delete queue in azure service - Added `AzureServiceBusCreateQueueOperator` - Added `AzureServiceBusSendMessageOperator` - Added `AzureServiceBusReceiveMessageOperator` - Added `AzureServiceBusDeleteQueueOperator` - Added Example DAG - Added Documentation - Added hooks and connection type in - provider yaml file - Added unit Test case, doc strings
https://github.com/apache/airflow.git
def test_send_message_exception(self, mock_sb_client): hook = MessageHook(azure_service_bus_conn_id=self.conn_id) with pytest.raises(TypeError): hook.send_message(queue_name=None, messages="", batch_message_flag=False)
token_counts: 42 | file_name: test_asb.py | language: Python | path: tests/providers/microsoft/azure/hooks/test_asb.py | commit_id: 09f38ad3f6872bae5059a1de226362eb358c4a7a | repo: airflow | complexity: 1

id: 175,300 | vocab_size: 22 | ast_levels: 11 | nloc: 5 | n_ast_nodes: 80 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 22 | n_whitespaces: 61
__setattr__
bpo-40066: [Enum] update str() and format() output (GH-30582) Undo rejected PEP-663 changes: - restore `repr()` to its 3.10 status - restore `str()` to its 3.10 status New changes: - `IntEnum` and `IntFlag` now leave `__str__` as the original `int.__str__` so that str() and format() return the same result - zero-valued flags without a name have a slightly changed repr(), e.g. `repr(Color(0)) == '<Color: 0>'` - update `dir()` for mixed-in types to return all the methods and attributes of the mixed-in type - added `_numeric_repr_` to `Flag` to control display of unnamed values - enums without doc strings have a more comprehensive doc string added - `ReprEnum` added -- inheriting from this makes it so only `__repr__` is replaced, not `__str__` nor `__format__`; `IntEnum`, `IntFlag`, and `StrEnum` all inherit from `ReprEnum`
https://github.com/python/cpython.git
def __setattr__(cls, name, value): member_map = cls.__dict__.get('_member_map_', {}) if name in member_map: raise AttributeError('cannot reassign member %r' % (name, )) super().__setattr__(name, value)
token_counts: 48 | file_name: enum.py | language: Python | path: Lib/enum.py | commit_id: acf7403f9baea3ae1119fc6b4a3298522188bf96 | repo: cpython | complexity: 2

id: 22,039 | vocab_size: 12 | ast_levels: 10 | nloc: 7 | n_ast_nodes: 50 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 13 | n_whitespaces: 46
unicode_is_ascii
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
https://github.com/pypa/pipenv.git
def unicode_is_ascii(u_string): assert isinstance(u_string, str) try: u_string.encode("ascii") return True except UnicodeEncodeError: return False
token_counts: 28 | file_name: _internal_utils.py | language: Python | path: pipenv/patched/pip/_vendor/requests/_internal_utils.py | commit_id: cd5a9683be69c86c8f3adcd13385a9bc5db198ec | repo: pipenv | complexity: 2

id: 303,773 | vocab_size: 11 | ast_levels: 9 | nloc: 5 | n_ast_nodes: 45 | n_identifiers: 3 | n_ast_errors: 0 | n_words: 13 | n_whitespaces: 49
_clean_up_listener
Add schedule helper (#76566) Co-authored-by: Paulus Schoutsen <balloob@gmail.com>
https://github.com/home-assistant/core.git
def _clean_up_listener(self) -> None: if self._unsub_update is not None: self._unsub_update() self._unsub_update = None
token_counts: 26 | file_name: __init__.py | language: Python | path: homeassistant/components/schedule/__init__.py | commit_id: f0827a20c3c0014de7e28dbeba76fc3f2e74fc70 | repo: core | complexity: 2

id: 245,544 | vocab_size: 35 | ast_levels: 12 | nloc: 14 | n_ast_nodes: 118 | n_identifiers: 10 | n_ast_errors: 0 | n_words: 46 | n_whitespaces: 113
update_data_root
[Fix] replace mmcv's function and modules imported with mmengine's (#8594) * use mmengine's load_state_dict and load_checkpoint * from mmengine import dump * from mmengine import FileClient dump list_from_file * remove redundant registry * update * update * update * replace _load_checkpoint with CheckpointLoad.load_checkpoint * changes according to mmcv #2216 * changes due to mmengine #447 * changes due mmengine #447 and mmcv #2217 * changes due mmengine #447 and mmcv #2217 * update * update * update
https://github.com/open-mmlab/mmdetection.git
def update_data_root(cfg, logger=None): assert isinstance(cfg, Config), \ f'cfg got wrong type: {type(cfg)}, expected mmengine.Config' if 'MMDET_DATASETS' in os.environ: dst_root = os.environ['MMDET_DATASETS'] print_log(f'MMDET_DATASETS has been set to be {dst_root}.' f'Using {dst_root} as data root.') else: return assert isinstance(cfg, Config), \ f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
token_counts: 76 | file_name: misc.py | language: Python | path: mmdet/utils/misc.py | commit_id: d0695e68654ca242be54e655491aef8c959ac345 | repo: mmdetection | complexity: 2

id: 287,795 | vocab_size: 76 | ast_levels: 22 | nloc: 131 | n_ast_nodes: 795 | n_identifiers: 31 | n_ast_errors: 0 | n_words: 184 | n_whitespaces: 2,873
test_ryse_smart_bridge_four_shades_setup
Handle battery services that only report low battery in HomeKit Controller (#79072)
https://github.com/home-assistant/core.git
async def test_ryse_smart_bridge_four_shades_setup(hass): accessories = await setup_accessories_from_file( hass, "ryse_smart_bridge_four_shades.json" ) await setup_test_accessories(hass, accessories) await assert_devices_and_entities_created( hass, DeviceTestInfo( unique_id=HUB_TEST_ACCESSORY_ID, name="RYSE SmartBridge", model="RYSE SmartBridge", manufacturer="RYSE Inc.", sw_version="1.3.0", hw_version="0401.3521.0679", devices=[ DeviceTestInfo( unique_id="00:00:00:00:00:00:aid:2", name="LR Left", model="RYSE Shade", manufacturer="RYSE Inc.", sw_version="3.0.8", hw_version="1.0.0", serial_number="", devices=[], entities=[ EntityTestInfo( entity_id="cover.lr_left_ryse_shade", friendly_name="LR Left RYSE Shade", unique_id="homekit-00:00:00:00:00:00-2-48", supported_features=RYSE_SUPPORTED_FEATURES, state="closed", ), EntityTestInfo( entity_id="sensor.lr_left_ryse_shade_battery", friendly_name="LR Left RYSE Shade Battery", entity_category=EntityCategory.DIAGNOSTIC, capabilities={"state_class": SensorStateClass.MEASUREMENT}, unique_id="homekit-00:00:00:00:00:00-2-64", unit_of_measurement=PERCENTAGE, state="89", ), ], ), DeviceTestInfo( unique_id="00:00:00:00:00:00:aid:3", name="LR Right", model="RYSE Shade", manufacturer="RYSE Inc.", sw_version="3.0.8", hw_version="1.0.0", serial_number="", devices=[], entities=[ EntityTestInfo( entity_id="cover.lr_right_ryse_shade", friendly_name="LR Right RYSE Shade", unique_id="homekit-00:00:00:00:00:00-3-48", supported_features=RYSE_SUPPORTED_FEATURES, state="closed", ), EntityTestInfo( entity_id="sensor.lr_right_ryse_shade_battery", friendly_name="LR Right RYSE Shade Battery", entity_category=EntityCategory.DIAGNOSTIC, capabilities={"state_class": SensorStateClass.MEASUREMENT}, unique_id="homekit-00:00:00:00:00:00-3-64", unit_of_measurement=PERCENTAGE, state="100", ), ], ), DeviceTestInfo( unique_id="00:00:00:00:00:00:aid:4", name="BR Left", model="RYSE Shade", manufacturer="RYSE Inc.", sw_version="3.0.8", hw_version="1.0.0", serial_number="", devices=[], entities=[ EntityTestInfo( entity_id="cover.br_left_ryse_shade", friendly_name="BR Left RYSE Shade", unique_id="homekit-00:00:00:00:00:00-4-48", supported_features=RYSE_SUPPORTED_FEATURES, state="open", ), EntityTestInfo( entity_id="sensor.br_left_ryse_shade_battery", friendly_name="BR Left RYSE Shade Battery", entity_category=EntityCategory.DIAGNOSTIC, capabilities={"state_class": SensorStateClass.MEASUREMENT}, unique_id="homekit-00:00:00:00:00:00-4-64", unit_of_measurement=PERCENTAGE, state="100", ), ], ), DeviceTestInfo( unique_id="00:00:00:00:00:00:aid:5", name="RZSS", model="RYSE Shade", manufacturer="RYSE Inc.", sw_version="3.0.8", hw_version="1.0.0", serial_number="", devices=[], entities=[ EntityTestInfo( entity_id="cover.rzss_ryse_shade", friendly_name="RZSS RYSE Shade", unique_id="homekit-00:00:00:00:00:00-5-48", supported_features=RYSE_SUPPORTED_FEATURES, state="open", ), EntityTestInfo( entity_id="sensor.rzss_ryse_shade_battery", entity_category=EntityCategory.DIAGNOSTIC, capabilities={"state_class": SensorStateClass.MEASUREMENT}, friendly_name="RZSS RYSE Shade Battery", unique_id="homekit-00:00:00:00:00:00-5-64", unit_of_measurement=PERCENTAGE, state="0", ), ], ), ], entities=[], ), )
token_counts: 490 | file_name: test_ryse_smart_bridge.py | language: Python | path: tests/components/homekit_controller/specific_devices/test_ryse_smart_bridge.py | commit_id: 917cf674de2db2216681dfec3ef9d63df573ace8 | repo: core | complexity: 1

id: 293,754 | vocab_size: 54 | ast_levels: 13 | nloc: 14 | n_ast_nodes: 127 | n_identifiers: 18 | n_ast_errors: 0 | n_words: 70 | n_whitespaces: 311
to_native
Separate attrs into another table (reduces database size) (#68224)
https://github.com/home-assistant/core.git
def to_native(self, validate_entity_id=True): try: return State( self.entity_id, self.state, # Join the state_attributes table on attributes_id to get the attributes # for newer states json.loads(self.attributes) if self.attributes else {}, process_timestamp(self.last_changed), process_timestamp(self.last_updated), # Join the events table on event_id to get the context instead # as it will always be there for state_changed events context=Context(id=None), validate_entity_id=validate_entity_id, ) except ValueError: # When json.loads fails _LOGGER.exception("Error converting row to state: %s", self) return None
token_counts: 80 | file_name: models.py | language: Python | path: homeassistant/components/recorder/models.py | commit_id: 9215702388eef03c7c3ed9f756ea0db533d5beec | repo: core | complexity: 3

id: 224,316 | vocab_size: 17 | ast_levels: 13 | nloc: 10 | n_ast_nodes: 120 | n_identifiers: 17 | n_ast_errors: 0 | n_words: 17 | n_whitespaces: 115
test_load_missing_required
Format code with `black -l100 --skip-string-normalization`
https://github.com/mkdocs/mkdocs.git
def test_load_missing_required(self): config_file = tempfile.NamedTemporaryFile('w', delete=False) try: config_file.write("site_dir: output\nsite_uri: https://www.mkdocs.org\n") config_file.flush() config_file.close() with self.assertRaises(exceptions.Abort): base.load_config(config_file=config_file.name) finally: os.remove(config_file.name)
token_counts: 66 | file_name: base_tests.py | language: Python | path: mkdocs/tests/config/base_tests.py | commit_id: dca7cbb43fcd6ea7c677c98ba585395b070d387b | repo: mkdocs | complexity: 2

id: 127,791 | vocab_size: 21 | ast_levels: 10 | nloc: 8 | n_ast_nodes: 105 | n_identifiers: 15 | n_ast_errors: 0 | n_words: 21 | n_whitespaces: 92
_create_default_prometheus_configs
Export default configurations for grafana and prometheus (#28286)
https://github.com/ray-project/ray.git
def _create_default_prometheus_configs(self): prometheus_config_output_path = os.path.join( self.metrics_root, "prometheus", "prometheus.yml" ) # Copy default prometheus configurations if os.path.exists(prometheus_config_output_path): os.remove(prometheus_config_output_path) os.makedirs(os.path.dirname(prometheus_config_output_path), exist_ok=True) shutil.copy(PROMETHEUS_CONFIG_INPUT_PATH, prometheus_config_output_path)
token_counts: 63 | file_name: metrics_head.py | language: Python | path: dashboard/modules/metrics/metrics_head.py | commit_id: 42da4445e7a3cb358a1a02ae433a004e9fa836b5 | repo: ray | complexity: 2

id: 259,014 | vocab_size: 58 | ast_levels: 14 | nloc: 19 | n_ast_nodes: 246 | n_identifiers: 24 | n_ast_errors: 0 | n_words: 81 | n_whitespaces: 166
calinski_harabasz_score
FIX Calinski and Harabasz score description (#22605)
https://github.com/scikit-learn/scikit-learn.git
def calinski_harabasz_score(X, labels): X, labels = check_X_y(X, labels) le = LabelEncoder() labels = le.fit_transform(labels) n_samples, _ = X.shape n_labels = len(le.classes_) check_number_of_labels(n_labels, n_samples) extra_disp, intra_disp = 0.0, 0.0 mean = np.mean(X, axis=0) for k in range(n_labels): cluster_k = X[labels == k] mean_k = np.mean(cluster_k, axis=0) extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2) intra_disp += np.sum((cluster_k - mean_k) ** 2) return ( 1.0 if intra_disp == 0.0 else extra_disp * (n_samples - n_labels) / (intra_disp * (n_labels - 1.0)) )
token_counts: 168 | file_name: _unsupervised.py | language: Python | path: sklearn/metrics/cluster/_unsupervised.py | commit_id: d548c77980c4a633780cee3671e54ecd2f8cecb4 | repo: scikit-learn | complexity: 3

id: 139,696 | vocab_size: 36 | ast_levels: 12 | nloc: 30 | n_ast_nodes: 154 | n_identifiers: 3 | n_ast_errors: 0 | n_words: 56 | n_whitespaces: 408
get_valid_runtime_envs
[Serve] Add deployment graph `import_path` and `runtime_env` to `ServeApplicationSchema` (#24814) A newly planned version of the Serve schema (used in the REST API and CLI) requires the user to pass in their deployment graph's`import_path` and optionally a runtime_env containing that graph. This new schema can then pick up any `init_args` and `init_kwargs` values directly from the graph, instead of requiring them to be serialized and passed explicitly into the REST request. This change: * Adds the `import_path` and `runtime_env` fields to the `ServeApplicationSchema`. * Updates or disables outdated unit tests. Follow-up changes should: * Update the status schemas (i.e. `DeploymentStatusSchema` and `ServeApplicationStatusSchema`). * Remove deployment-level `import_path`s. * Process the new `import_path` and `runtime_env` fields instead of silently ignoring them. * Remove `init_args` and `init_kwargs` from `DeploymentSchema` afterwards. Co-authored-by: Edward Oakes <ed.nmi.oakes@gmail.com>
https://github.com/ray-project/ray.git
def get_valid_runtime_envs() -> List[Dict]: return [ # Empty runtime_env {}, # Runtime_env with remote_URIs { "working_dir": ( "https://github.com/shrekris-anyscale/test_module/archive/HEAD.zip" ), "py_modules": [ ( "https://github.com/shrekris-anyscale/" "test_deploy_group/archive/HEAD.zip" ), ], }, # Runtime_env with extra options { "working_dir": ( "https://github.com/shrekris-anyscale/test_module/archive/HEAD.zip" ), "py_modules": [ ( "https://github.com/shrekris-anyscale/" "test_deploy_group/archive/HEAD.zip" ), ], "pip": ["pandas", "numpy"], "env_vars": {"OMP_NUM_THREADS": "32", "EXAMPLE_VAR": "hello"}, "excludes": "imaginary_file.txt", }, ]
token_counts: 78 | file_name: test_schema.py | language: Python | path: python/ray/serve/tests/test_schema.py | commit_id: 3a2bd16ecae15d6e26585c32c113dcfe7469ccd7 | repo: ray | complexity: 1

id: 42,732 | vocab_size: 17 | ast_levels: 10 | nloc: 15 | n_ast_nodes: 192 | n_identifiers: 13 | n_ast_errors: 0 | n_words: 27 | n_whitespaces: 60
create_directories_and_files
Replace generation of docker volumes to be done from python (#23985) The pre-commit to generate docker volumes in docker compose file is now written in Python and it also uses the newer "volume:" syntax to define the volumes mounted in the docker-compose.
https://github.com/apache/airflow.git
def create_directories_and_files() -> None: BUILD_CACHE_DIR.mkdir(parents=True, exist_ok=True) FILES_DIR.mkdir(parents=True, exist_ok=True) MSSQL_DATA_VOLUME.mkdir(parents=True, exist_ok=True) KUBE_DIR.mkdir(parents=True, exist_ok=True) LOGS_DIR.mkdir(parents=True, exist_ok=True) DIST_DIR.mkdir(parents=True, exist_ok=True) OUTPUT_LOG.mkdir(parents=True, exist_ok=True) (AIRFLOW_SOURCES_ROOT / ".bash_aliases").touch() (AIRFLOW_SOURCES_ROOT / ".bash_history").touch() (AIRFLOW_SOURCES_ROOT / ".inputrc").touch()
token_counts: 118 | file_name: path_utils.py | language: Python | path: dev/breeze/src/airflow_breeze/utils/path_utils.py | commit_id: 882535a8a2699af7d1d079ecebd8c31aa7fbaba9 | repo: airflow | complexity: 1

id: 109,545 | vocab_size: 22 | ast_levels: 10 | nloc: 11 | n_ast_nodes: 108 | n_identifiers: 4 | n_ast_errors: 0 | n_words: 45 | n_whitespaces: 139
_equal_aspect_axis_indices
Provide `adjustable='box'` to 3D axes aspect ratio setting (#23552) * Provided `adjustable='box'` option to set 3D aspect ratio. * "What's New": `adjustable` argument of 3D plots aspect ratio.
https://github.com/matplotlib/matplotlib.git
def _equal_aspect_axis_indices(self, aspect): ax_indices = [] # aspect == 'auto' if aspect == 'equal': ax_indices = [0, 1, 2] elif aspect == 'equalxy': ax_indices = [0, 1] elif aspect == 'equalxz': ax_indices = [0, 2] elif aspect == 'equalyz': ax_indices = [1, 2] return ax_indices
token_counts: 64 | file_name: axes3d.py | language: Python | path: lib/mpl_toolkits/mplot3d/axes3d.py | commit_id: 7c6a74c47accdfb8d66e526cbd0b63c29ffede12 | repo: matplotlib | complexity: 5

id: 45,599 | vocab_size: 133 | ast_levels: 17 | nloc: 54 | n_ast_nodes: 595 | n_identifiers: 49 | n_ast_errors: 0 | n_words: 185 | n_whitespaces: 744
clear_not_launched_queued_tasks
Add map_index to pods launched by KubernetesExecutor (#21871) I also did a slight drive-by-refactor (sorry!) to rename `queued_tasks and `task` inside `clear_not_launched_queued_tasks` to `queued_tis` and `ti` to reflect what they are.
https://github.com/apache/airflow.git
def clear_not_launched_queued_tasks(self, session=None) -> None: self.log.debug("Clearing tasks that have not been launched") if not self.kube_client: raise AirflowException(NOT_STARTED_MESSAGE) queued_tis: List[TaskInstance] = ( session.query(TaskInstance).filter(TaskInstance.state == State.QUEUED).all() ) self.log.info('Found %s queued task instances', len(queued_tis)) # Go through the "last seen" dictionary and clean out old entries allowed_age = self.kube_config.worker_pods_queued_check_interval * 3 for key, timestamp in list(self.last_handled.items()): if time.time() - timestamp > allowed_age: del self.last_handled[key] for ti in queued_tis: self.log.debug("Checking task instance %s", ti) # Check to see if we've handled it ourselves recently if ti.key in self.last_handled: continue # Build the pod selector base_label_selector = ( f"dag_id={pod_generator.make_safe_label_value(ti.dag_id)}," f"task_id={pod_generator.make_safe_label_value(ti.task_id)}," f"airflow-worker={pod_generator.make_safe_label_value(str(ti.queued_by_job_id))}" ) if ti.map_index >= 0: # Old tasks _couldn't_ be mapped, so we don't have to worry about compat base_label_selector += f',map_index={ti.map_index}' kwargs = dict(label_selector=base_label_selector) if self.kube_config.kube_client_request_args: kwargs.update(**self.kube_config.kube_client_request_args) # Try run_id first kwargs['label_selector'] += ',run_id=' + pod_generator.make_safe_label_value(ti.run_id) pod_list = self.kube_client.list_namespaced_pod(self.kube_config.kube_namespace, **kwargs) if pod_list.items: continue # Fallback to old style of using execution_date kwargs['label_selector'] = ( f'{base_label_selector},' f'execution_date={pod_generator.datetime_to_label_safe_datestring(ti.execution_date)}' ) pod_list = self.kube_client.list_namespaced_pod(self.kube_config.kube_namespace, **kwargs) if pod_list.items: continue self.log.info('TaskInstance: %s found in queued state but was not launched, rescheduling', ti) session.query(TaskInstance).filter( TaskInstance.dag_id == ti.dag_id, TaskInstance.task_id == ti.task_id, TaskInstance.run_id == ti.run_id, ).update({TaskInstance.state: State.SCHEDULED})
token_counts: 318 | file_name: kubernetes_executor.py | language: Python | path: airflow/executors/kubernetes_executor.py | commit_id: ac77c89018604a96ea4f5fba938f2fbd7c582793 | repo: airflow | complexity: 10

id: 133,790 | vocab_size: 168 | ast_levels: 12 | nloc: 34 | n_ast_nodes: 306 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 264 | n_whitespaces: 743
validate_config
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def validate_config(self, config): # Call (base) PPO's config validation function first. # Note that this will not touch or check on the train_batch_size=-1 # setting. super().validate_config(config) # Error if run on Win. if sys.platform in ["win32", "cygwin"]: raise ValueError( "DD-PPO not supported on Win yet! " "Due to usage of torch.distributed." ) # Auto-train_batch_size: Calculate from rollout len and # envs-per-worker. if config["train_batch_size"] == -1: config["train_batch_size"] = ( config["rollout_fragment_length"] * config["num_envs_per_worker"] ) # Users should not define `train_batch_size` directly (always -1). else: raise ValueError( "Set rollout_fragment_length instead of train_batch_size " "for DDPPO." ) # Only supported for PyTorch so far. if config["framework"] != "torch": raise ValueError("Distributed data parallel is only supported for PyTorch") if config["torch_distributed_backend"] not in ("gloo", "mpi", "nccl"): raise ValueError( "Only gloo, mpi, or nccl is supported for " "the backend of PyTorch distributed." ) # `num_gpus` must be 0/None, since all optimization happens on Workers. if config["num_gpus"]: raise ValueError( "When using distributed data parallel, you should set " "num_gpus=0 since all optimization " "is happening on workers. Enable GPUs for workers by setting " "num_gpus_per_worker=1." ) # `batch_mode` must be "truncate_episodes". if config["batch_mode"] != "truncate_episodes": raise ValueError( "Distributed data parallel requires truncate_episodes " "batch mode." ) # DDPPO doesn't support KL penalties like PPO-1. # In order to support KL penalties, DDPPO would need to become # undecentralized, which defeats the purpose of the algorithm. # Users can still tune the entropy coefficient to control the # policy entropy (similar to controlling the KL penalty). if config["kl_coeff"] != 0.0 or config["kl_target"] != 0.0: raise ValueError("DDPPO doesn't support KL penalties like PPO-1")
152
ddppo.py
Python
rllib/agents/ppo/ddppo.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
9
331,644
11
10
4
43
5
0
12
28
get_pretrained_cfg_value
Transitioning default_cfg -> pretrained_cfg. Improving handling of pretrained_cfg source (HF-Hub, files, timm config, etc). Checkpoint handling tweaks.
https://github.com/huggingface/pytorch-image-models.git
def get_pretrained_cfg_value(model_name, cfg_key):
    if model_name in _model_pretrained_cfgs:
        return _model_pretrained_cfgs[model_name].get(cfg_key, None)
    return None
27
registry.py
Python
timm/models/registry.py
abc9ba254430ef971ea3dbd12f2b4f1969da55be
pytorch-image-models
2
158,113
18
15
4
86
11
0
19
35
split_batch_multi_inputs
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title โ€™15.2. ๆƒ…ๆ„Ÿๅˆ†ๆž๏ผšไฝฟ็”จ้€’ๅฝ’็ฅž็ป็ฝ‘็ปœโ€˜ to โ€™15.2. ๆƒ…ๆ„Ÿๅˆ†ๆž๏ผšไฝฟ็”จๅพช็Žฏ็ฅž็ป็ฝ‘็ปœโ€˜ * ไฟฎๆ”น้ƒจๅˆ†่ฏญไน‰่กจ่ฟฐ (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94่กŒtypo: ๏ผˆโ€œbert.mallโ€๏ผ‰->๏ผˆโ€œbert.smallโ€๏ผ‰ (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) ไฟฎๆ”น้ƒจๅˆ†่ฏญไน‰่กจ่ฟฐ * Update self-attention-and-positional-encoding.md (#1133) ไพ็…งๆœฌไนฆ็š„็ฟป่ฏ‘ไน ๆƒฏ๏ผŒๅฐ†pooling็ฟป่ฏ‘ๆˆๆฑ‡่š * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) ไธ€ไธช็ฌ”่ฏฏ # ๅ‡่ฎพbatch_size=2๏ผŒnum_pred_positions=3 # ้‚ฃไนˆbatch_idxๅบ”่ฏฅๆ˜ฏnp.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <haizhou.sun@smartmore.com> * Update weight-decay.md (#1150) * Update weight-decay.md ๅ…ณไบŽโ€œkๅคš้€‰dโ€่ฟ™ไธ€้ƒจๅˆ†๏ผŒไธญๆ–‡่ฏป่€…ไฝฟ็”จๆŽ’ๅˆ—็ป„ๅˆ็š„ๆ–นๅผๅฏ่ƒฝๆ›ดๅฎนๆ˜“็†่งฃ ๅ…ณไบŽโ€œ็ป™ๅฎškไธชๅ˜้‡๏ผŒ้˜ถๆ•ฐ็š„ไธชๆ•ฐไธบ...โ€่ฟ™ๅฅ่ฏๆ˜ฏๆœ‰ๆญงไน‰็š„๏ผŒไธๆ˜ฏๅพˆๅƒไธญๅ›ฝ่ฏ๏ผŒๅบ”่ฏฅๆ˜ฏ่ฏดโ€œ้˜ถๆ•ฐไธบd็š„้กน็š„ไธชๆ•ฐไธบ...โ€ใ€‚ ๅนถๅขžๅŠ ไบ†ไธ€ๅฅๅฏนโ€œๅ› ๆญคๅณไฝฟๆ˜ฏ้˜ถๆ•ฐไธŠ็š„ๅพฎๅฐๅ˜ๅŒ–๏ผŒๆฏ”ๅฆ‚ไปŽ$2$ๅˆฐ$3$๏ผŒไนŸไผšๆ˜พ่‘—ๅขžๅŠ ๆˆ‘ไปฌๆจกๅž‹็š„ๅคๆ‚ๆ€งใ€‚โ€็š„่งฃ้‡Š ่งฃ้‡Šไธบไฝ•ไผšๅขžๅŠ ๅคๆ‚ๆ€งไปฅๅŠไธบไฝ•้œ€่ฆ็ป†็ฒ’ๅบฆๅทฅๅ…ทใ€‚ * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. ็ฟป่ฏ‘้”™่ฏฏ * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md ่ฏญๅฅไธ้€š้กบ * Update environment.md ่ฏญๅบๅผ‚ๅธธ * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that ๅŽŸ่ฏ‘ๆ–‡ๆŠŠwhoไนŸ็›ดๆŽฅ็ฟป่ฏ‘ๅ‡บๆฅไบ†ใ€‚ * Update mlp.md (#1117) * Update mlp.md ไฟฎๆ”น้ƒจๅˆ†่ฏญไน‰่กจ่ฟฐ * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <ubuntu@ip-172-31-12-66.us-west-2.compute.internal> Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> Co-authored-by: Aston Zhang <asv325@gmail.com> * ้‡ๅค่ฏญๅฅ (#1188) Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <haizhou.sun@smartmore.com> Co-authored-by: 
zhou201505013 <39976863+zhou201505013@users.noreply.github.com> Co-authored-by: Xinwei Liu <xinzone@outlook.com> Co-authored-by: Anirudh Dagar <anirudhdagar6@gmail.com> Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> Co-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com> Co-authored-by: gyroๆฐธไธๆŠฝ้ฃŽ <1247006353@qq.com> Co-authored-by: CanChengZheng <zcc550169544@163.com> Co-authored-by: linlin <jajupmochi@gmail.com> Co-authored-by: iuk <liukun0104@gmail.com> Co-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com> Co-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com> Co-authored-by: Chiyuan Fu <fuchiyuan2019@outlook.com> Co-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com> Co-authored-by: Haiker Sun <haizhou.uestc2011@gmail.com> Co-authored-by: Ming Liu <akira.liu@njnu.edu.cn> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> Co-authored-by: silenceZheng66 <13754430639@163.com> Co-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com> Co-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com> Co-authored-by: Krahets <krahets@163.com> Co-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com> Co-authored-by: Jameson <miraclecome@gmail.com> Co-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com> Co-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com> Co-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com> Co-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com> Co-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com> Co-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com> Co-authored-by: VigourJiang <jiangfuqiang154@163.com> Co-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com> Co-authored-by: LYF <27893441+liyufan@users.noreply.github.com> Co-authored-by: Aston Zhang <asv325@gmail.com> Co-authored-by: xiaotinghe <xiaotih@amazon.com> Co-authored-by: Ubuntu <ubuntu@ip-172-31-12-66.us-west-2.compute.internal> Co-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com> Co-authored-by: HinGwenWoong <peterhuang0323@qq.com> Co-authored-by: Shuai Zhang <cheungdaven@gmail.com>
https://github.com/d2l-ai/d2l-zh.git
def split_batch_multi_inputs(X, y, devices):
    X = list(zip(*[gluon.utils.split_and_load(
        feature, devices, even_split=False) for feature in X]))
    return (X, gluon.utils.split_and_load(y, devices, even_split=False))
58
mxnet.py
Python
d2l/mxnet.py
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
d2l-zh
2
84,208
45
13
30
178
23
0
52
114
clean_archived_data
retention: Add docstring info on how archive cleaning works. In particular, it's important to record the special treatment around ArchivedAttachment rows not being deleted in this step.
https://github.com/zulip/zulip.git
def clean_archived_data() -> None:
    logger.info("Cleaning old archive data.")
    check_date = timezone_now() - timedelta(days=settings.ARCHIVED_DATA_VACUUMING_DELAY_DAYS)
    # Associated archived objects will get deleted through the on_delete=CASCADE property:
    count = 0
    transaction_ids = list(
        ArchiveTransaction.objects.filter(timestamp__lt=check_date).values_list("id", flat=True)
    )
    while len(transaction_ids) > 0:
        transaction_block = transaction_ids[0:TRANSACTION_DELETION_BATCH_SIZE]
        transaction_ids = transaction_ids[TRANSACTION_DELETION_BATCH_SIZE:]
        ArchiveTransaction.objects.filter(id__in=transaction_block).delete()
        count += len(transaction_block)

    logger.info("Deleted %s old ArchiveTransactions.", count)
105
retention.py
Python
zerver/lib/retention.py
acfa55138ee2e5f43a0a96614aa0581b115fc714
zulip
2
250,533
33
9
44
162
20
0
47
211
test_get_multiple_keys_from_perspectives
Add missing type hints to tests. (#14687) Adds type hints to tests.metrics and tests.crypto.
https://github.com/matrix-org/synapse.git
def test_get_multiple_keys_from_perspectives(self) -> None:
    fetcher = PerspectivesKeyFetcher(self.hs)

    SERVER_NAME = "server2"

    testkey1 = signedjson.key.generate_signing_key("ver1")
    testverifykey1 = signedjson.key.get_verify_key(testkey1)
    testverifykey1_id = "ed25519:ver1"

    testkey2 = signedjson.key.generate_signing_key("ver2")
    testverifykey2 = signedjson.key.get_verify_key(testkey2)
    testverifykey2_id = "ed25519:ver2"

    VALID_UNTIL_TS = 200 * 1000

    response1 = self.build_perspectives_response(
        SERVER_NAME,
        testkey1,
        VALID_UNTIL_TS,
    )
    response2 = self.build_perspectives_response(
        SERVER_NAME,
        testkey2,
        VALID_UNTIL_TS,
    )
292
test_keyring.py
Python
tests/crypto/test_keyring.py
a4ca770655a6b067468de3d507292ec133fdc5ca
synapse
1
156,160
6
8
2
31
4
0
6
12
sample
Bag: add implementation for reservoir sampling (#7068) (#7636) - Implement the [L algorithm](https://en.wikipedia.org/wiki/Reservoir_sampling#An_optimal_algorithm) for reservoir sampling without replacement. - Use the **k** reservoir of size 1 strategy for sampling with replacement (see [reference](http://utopia.duth.gr/~pefraimi/research/data/2007EncOfAlg.pdf)) of **k** items
https://github.com/dask/dask.git
def sample(population, k):
    return _sample(population=population, k=k)
19
random.py
Python
dask/bag/random.py
4e5dfe7463028a39a90e026c7fb9220969093ab3
dask
1
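Note: the commit message for the `sample` record above describes reservoir sampling without replacement via the optimal "Algorithm L". The sketch below is a minimal, self-contained illustration of that algorithm only; the function name is hypothetical and this is not dask's `_sample` implementation.
```python
import math
import random


def reservoir_sample_algorithm_l(population, k):
    """Sample k items from a sequence in one pass, without replacement (Algorithm L)."""
    if k <= 0:
        return []
    reservoir = list(population[:k])
    w = math.exp(math.log(random.random()) / k)
    i = k - 1
    n = len(population)
    while i < n - 1:
        # Geometric skip: how many items to pass over before the next replacement.
        i += int(math.log(random.random()) / math.log(1.0 - w)) + 1
        if i < n:
            reservoir[random.randrange(k)] = population[i]
            w *= math.exp(math.log(random.random()) / k)
    return reservoir


# Example: five items drawn uniformly without replacement from 0..999.
print(reservoir_sample_algorithm_l(range(1000), 5))
```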
292,851
10
9
8
35
5
0
10
24
available
Fix powerwall data incompatibility with energy integration (#67245)
https://github.com/home-assistant/core.git
def available(self) -> bool:
    return super().available and self.native_value != 0
20
sensor.py
Python
homeassistant/components/powerwall/sensor.py
3f16c6d6efad20b60a4a8d2114a0905ecd252820
core
2
298,591
24
9
14
170
18
0
43
85
test_restore_state_uncoherence_case
Use climate enums in generic_thermostat (#70656) * Use climate enums in generic_thermostat * Adjust tests
https://github.com/home-assistant/core.git
async def test_restore_state_uncoherence_case(hass):
    _mock_restore_cache(hass, temperature=20)

    calls = _setup_switch(hass, False)
    _setup_sensor(hass, 15)
    await _setup_climate(hass)
    await hass.async_block_till_done()

    state = hass.states.get(ENTITY)
    assert state.attributes[ATTR_TEMPERATURE] == 20
    assert state.state == HVACMode.OFF
    assert len(calls) == 0

    calls = _setup_switch(hass, False)
    await hass.async_block_till_done()
    state = hass.states.get(ENTITY)
    assert state.state == HVACMode.OFF
105
test_climate.py
Python
tests/components/generic_thermostat/test_climate.py
b81f8e75eea3d1aaa8111f542519de1d58093200
core
1
269,571
17
14
8
84
10
1
21
67
int_shape
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def int_shape(x):
    try:
        shape = x.shape
        if not isinstance(shape, tuple):
            shape = tuple(shape.as_list())
        return shape
    except ValueError:
        return None


@keras_export("keras.backend.ndim")
@doc_controls.do_not_generate_docs
@keras_export("keras.backend.ndim") @doc_controls.do_not_generate_docs
39
backend.py
Python
keras/backend.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
3
247,935
31
8
11
126
15
0
39
116
test_linearizer
Convert `Linearizer` tests from `inlineCallbacks` to async (#12353) Signed-off-by: Sean Quah <seanq@element.io>
https://github.com/matrix-org/synapse.git
def test_linearizer(self) -> None:
    linearizer = Linearizer()
    key = object()

    _, acquired_d1, unblock1 = self._start_task(linearizer, key)
    self.assertTrue(acquired_d1.called)

    _, acquired_d2, unblock2 = self._start_task(linearizer, key)
    self.assertFalse(acquired_d2.called)

    # Once the first task is done, the second task can continue.
    unblock1()
    self.assertTrue(acquired_d2.called)

    unblock2()
76
test_linearizer.py
Python
tests/util/test_linearizer.py
41b5f72677ea9763f3cf920d4f6df507653222f2
synapse
1
8,639
17
12
5
89
15
0
19
54
test_window_autosizing_disabled
Enable dataset window autosizing (#2721) * set windowed shuffle for large datasets * documentation * update to automatic windowing flag * address reviews * address reviews * update logging info and add auto_window flag passthrough * update tests to use flag passthrough * more descriptive test class name * todo to add link to windowing docs * local test handling for dask import * handle RayDataset import in local tests * bad type annotation * bad type annotation
https://github.com/ludwig-ai/ludwig.git
def test_window_autosizing_disabled(self, ray_cluster_small_object_store):
    ds = self.create_dataset(self.object_store_size * 8, auto_window=False)
    pipe = ds.pipeline()
    rep = next(iter(pipe._base_iterable))()
    assert rep.num_blocks() == self.num_partitions
54
test_ray.py
Python
tests/integration_tests/test_ray.py
0d19a48cff0958ed77926a0712cbdb6485d4034a
ludwig
1
186,362
47
10
7
111
10
0
54
125
pick_apache_config
Various clean-ups in certbot-apache. Use f-strings. (#9132) * Various clean-ups in certbot-apache. Use f-strings. * Smaller tweaks
https://github.com/certbot/certbot.git
def pick_apache_config(self, warn_on_no_mod_ssl=True):
    # Disabling TLS session tickets is supported by Apache 2.4.11+ and OpenSSL 1.0.2l+.
    # So for old versions of Apache we pick a configuration without this option.
    min_openssl_version = util.parse_loose_version('1.0.2l')
    openssl_version = self.openssl_version(warn_on_no_mod_ssl)
    if self.version < (2, 4, 11) or not openssl_version or \
            util.parse_loose_version(openssl_version) < min_openssl_version:
        return apache_util.find_ssl_apache_conf("old")
    return apache_util.find_ssl_apache_conf("current")
66
configurator.py
Python
certbot-apache/certbot_apache/_internal/configurator.py
eeca208c8f57304590ac1af80b496e61021aaa45
certbot
4
9,880
5
6
8
20
2
0
5
19
start
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests with jinad 
* ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <bo.wang@jina.ai> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" This 
reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request with 
jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <tobias.jacobowitz@posteo.de> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <joan.martinez@jina.ai> Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> Co-authored-by: Deepankar Mahapatro <deepankar.mahapatro@jina.ai> Co-authored-by: bwanglzu <bo.wang@jina.ai> Co-authored-by: AlaeddineAbdessalem <alaeddine-13@live.fr> Co-authored-by: Zhaofeng Miao <522856232@qq.com>
https://github.com/jina-ai/jina.git
def start(self) -> 'BasePod':
    ...
9
__init__.py
Python
jina/peapods/pods/__init__.py
933415bfa1f9eb89f935037014dfed816eb9815d
jina
1
104,813
12
13
6
89
12
0
13
39
xbasename
Add support for metadata files to `imagefolder` (#4069) * Add support for metadata files to `imagefolder` * Fix imagefolder drop_labels test * Replace csv with jsonl * Add test * Correct resolution for nested metadata files * Allow None as JSON Lines value * Add comments * Count path segments * Address comments * Improve test * Update src/datasets/packaged_modules/imagefolder/imagefolder.py Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> * test e2e imagefolder with metadata * add test for zip archives * fix test * add some debug logging to know which files are ignored * add test for bad/malformed metadata file * revert use of posix path to fix windows tests * style * Refactor tests for packaged modules Text and Csv Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> Co-authored-by: Quentin Lhoest <lhoest.q@gmail.com>
https://github.com/huggingface/datasets.git
def xbasename(a):
    a, *b = str(a).split("::")
    if is_local_path(a):
        return os.path.basename(Path(a).as_posix())
    else:
        return posixpath.basename(a)
51
streaming_download_manager.py
Python
src/datasets/utils/streaming_download_manager.py
7017b0965f0a0cae603e7143de242c3425ecef91
datasets
2
142,253
86
10
16
182
20
0
120
305
__call__
[air] Consolidate Tune and Train report (#25558)

Consolidate tune/train report/checkpoint functionality by working with a unified Session interface. The goal of this PR is to establish a solid Session and Session.report path. In favor of having less merging conflict (as other folks are doing the whole package renaming) and control the scope of this PR, I have intentionally left out some migration. More PRs to follow. Feel free to comment on the ideal final state. To give an idea of the final directory structure. This is a for 2-worker DP training.
```
├── TensorflowTrainer_ce44d_00000_0_2022-06-15_14-40-42
│   ├── checkpoint_000000
│   │   ├── _current_checkpoint_id.meta.pkl
│   │   ├── _preprocessor.meta.pkl
│   │   ├── _timestamp.meta.pkl
│   │   ├── assets
│   │   ├── keras_metadata.pb
│   │   ├── saved_model.pb
│   │   └── variables
│   │       ├── variables.data-00000-of-00001
│   │       └── variables.index
│   ├── events.out.tfevents.1655329242.xw
│   ├── params.json
│   ├── params.pkl
│   ├── progress.csv
│   ├── rank_0
│   │   └── my_model
│   │       ├── assets
│   │       ├── keras_metadata.pb
│   │       ├── saved_model.pb
│   │       └── variables
│   │           ├── variables.data-00000-of-00001
│   │           └── variables.index
│   ├── rank_1
│   │   └── my_model
│   │       ├── assets
│   │       ├── keras_metadata.pb
│   │       ├── saved_model.pb
│   │       └── variables
│   │           ├── variables.data-00000-of-00001
│   │           └── variables.index
│   └── result.json
├── basic-variant-state-2022-06-15_14-40-42.json
├── experiment_state-2022-06-15_14-40-42.json
├── trainable.pkl
└── tuner.pkl
```
Update: 1. Updated a few classes to be backward compatible - while legacy ray train deprecation is ongoing. 2. Marked all places in 1 using "# TODO(xwjiang): Legacy Ray Train trainer clean up!". So we can easily clean those up once Antoni's work is landed. 3. All CI and release tests are passing. Co-authored-by: Eric Liang <ekhliang@gmail.com>
https://github.com/ray-project/ray.git
def __call__(self, _metric=None, **kwargs):
    assert self._last_report_time is not None, (
        "_StatusReporter._start() must be called before the first "
        "report __call__ is made to ensure correct runtime metrics."
    )

    if _metric:
        kwargs[DEFAULT_METRIC] = _metric

    # time per iteration is recorded directly in the reporter to ensure
    # any delays in logging results aren't counted
    report_time = time.time()
    if TIME_THIS_ITER_S not in kwargs:
        kwargs[TIME_THIS_ITER_S] = report_time - self._last_report_time
    self._last_report_time = report_time

    # add results to a thread-safe queue
    self._queue.put(kwargs.copy(), block=True)

    # This blocks until notification from the FunctionRunner that the last
    # result has been returned to Tune and that the function is safe to
    # resume training.
    self._continue_semaphore.acquire()

    # If the trial should be terminated, exit gracefully.
    if self._end_event.is_set():
        self._end_event.clear()
        sys.exit(0)
107
function_runner.py
Python
python/ray/tune/function_runner.py
97f42425dacc914fc90059a010f5a02a5ab3b8c7
ray
4
100,318
52
16
15
217
23
0
76
302
get_loss_keys
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
https://github.com/deepfakes/faceswap.git
def get_loss_keys(self, session_id):
    if get_backend() == "amd":
        # We can't log the graph in Tensorboard logs for AMD so need to obtain from state file
        loss_keys = {int(sess_id): [name for name in session["loss_names"] if name != "total"]
                     for sess_id, session in self._state["sessions"].items()}
    else:
        loss_keys = {sess_id: list(logs.keys())
                     for sess_id, logs in self._tb_logs.get_loss(session_id=session_id).items()}

    if session_id is None:
        retval = list(set(loss_key
                          for session in loss_keys.values()
                          for loss_key in session))
    else:
        retval = loss_keys.get(session_id)
    return retval


_SESSION = GlobalSession()
126
stats.py
Python
lib/gui/analysis/stats.py
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
faceswap
9
249,541
21
11
10
95
12
0
25
103
test_get_insertion_event_backward_extremities_in_room
Only try to backfill event if we haven't tried before recently (#13635) Only try to backfill event if we haven't tried before recently (exponential backoff). No need to keep trying the same backfill point that fails over and over. Fix https://github.com/matrix-org/synapse/issues/13622 Fix https://github.com/matrix-org/synapse/issues/8451 Follow-up to https://github.com/matrix-org/synapse/pull/13589 Part of https://github.com/matrix-org/synapse/issues/13356
https://github.com/matrix-org/synapse.git
def test_get_insertion_event_backward_extremities_in_room(self):
    setup_info = self._setup_room_for_insertion_backfill_tests()
    room_id = setup_info.room_id

    backfill_points = self.get_success(
        self.store.get_insertion_event_backward_extremities_in_room(room_id)
    )
    backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
    self.assertListEqual(
        backfill_event_ids, ["insertion_eventB", "insertion_eventA"]
    )
57
test_event_federation.py
Python
tests/storage/test_event_federation.py
ac1a31740b6d0dfda4d57a25762aaddfde981caf
synapse
2
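Note: the commit message for the record above refers to retrying failed backfill points with exponential backoff. The snippet below only illustrates that general idea; the names and intervals are assumptions for the example and it is not Synapse's implementation.
```python
import time


def allowed_to_retry(last_attempt_ts, num_failed_attempts,
                     base_interval_s=60, max_interval_s=86400):
    """Return True once enough time has passed since the last failed attempt."""
    # Double the wait after every failed attempt, capped at max_interval_s.
    backoff_s = min(base_interval_s * (2 ** num_failed_attempts), max_interval_s)
    return time.time() - last_attempt_ts >= backoff_s
```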
274,643
30
13
10
115
13
0
33
147
merge_state
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def merge_state(self, metrics):
    assign_add_ops = []
    for metric in metrics:
        if len(self.weights) != len(metric.weights):
            raise ValueError(
                f"Metric {metric} is not compatible with {self}"
            )
        for weight, weight_to_add in zip(self.weights, metric.weights):
            assign_add_ops.append(weight.assign_add(weight_to_add))
    return assign_add_ops
67
base_metric.py
Python
keras/metrics/base_metric.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
4
258,495
61
13
20
298
23
0
79
216
manhattan_distances
DOC Ensures that manhattan_distances passes numpydoc validation (#22139) Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com>
https://github.com/scikit-learn/scikit-learn.git
def manhattan_distances(X, Y=None, *, sum_over_features=True):
    X, Y = check_pairwise_arrays(X, Y)

    if issparse(X) or issparse(Y):
        if not sum_over_features:
            raise TypeError(
                "sum_over_features=%r not supported for sparse matrices"
                % sum_over_features
            )

        X = csr_matrix(X, copy=False)
        Y = csr_matrix(Y, copy=False)
        X.sum_duplicates()  # this also sorts indices in-place
        Y.sum_duplicates()
        D = np.zeros((X.shape[0], Y.shape[0]))
        _sparse_manhattan(X.data, X.indices, X.indptr, Y.data, Y.indices, Y.indptr, D)
        return D

    if sum_over_features:
        return distance.cdist(X, Y, "cityblock")

    D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    D = np.abs(D, D)
    return D.reshape((-1, X.shape[1]))
194
pairwise.py
Python
sklearn/metrics/pairwise.py
ff09c8a579b116500deade618f93c4dc0d5750bd
scikit-learn
5
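A brief usage sketch for the `manhattan_distances` signature shown in the record above (values in the comments follow from the city-block definition; newer scikit-learn releases may change the `sum_over_features` keyword):
```python
import numpy as np
from sklearn.metrics.pairwise import manhattan_distances

X = np.array([[1.0, 2.0], [3.0, 4.0]])
Y = np.array([[0.0, 0.0]])

manhattan_distances(X, Y)
# -> [[3.], [7.]]            summed city-block distance per (x, y) pair, shape (2, 1)

manhattan_distances(X, Y, sum_over_features=False)
# -> [[1., 2.], [3., 4.]]    per-feature absolute differences, shape (2, 2)
```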
155,898
142
20
55
576
40
0
240
846
get_scheduler
Raise warning when using multiple types of schedulers where one is `distributed` (#8700) Raise a warning when `compute` or `persist` are called with a scheduler different from "dask.distributed" or "distributed" in Dask.distributed mode
https://github.com/dask/dask.git
def get_scheduler(get=None, scheduler=None, collections=None, cls=None):
    if get:
        raise TypeError(get_err_msg)

    if scheduler is not None:
        if callable(scheduler):
            return scheduler
        elif "Client" in type(scheduler).__name__ and hasattr(scheduler, "get"):
            return scheduler.get
        elif isinstance(scheduler, str):
            scheduler = scheduler.lower()

            if scheduler in named_schedulers:
                if config.get("scheduler", None) in ("dask.distributed", "distributed"):
                    warnings.warn(
                        "Running on a single-machine scheduler when a distributed client "
                        "is active might lead to unexpected results."
                    )
                return named_schedulers[scheduler]
            elif scheduler in ("dask.distributed", "distributed"):
                from distributed.worker import get_client

                return get_client().get
            else:
                raise ValueError(
                    "Expected one of [distributed, %s]"
                    % ", ".join(sorted(named_schedulers))
                )
        elif isinstance(scheduler, Executor):
            # Get `num_workers` from `Executor`'s `_max_workers` attribute.
            # If undefined, fallback to `config` or worst case CPU_COUNT.
            num_workers = getattr(scheduler, "_max_workers", None)
            if num_workers is None:
                num_workers = config.get("num_workers", CPU_COUNT)
            assert isinstance(num_workers, Integral) and num_workers > 0
            return partial(local.get_async, scheduler.submit, num_workers)
        else:
            raise ValueError("Unexpected scheduler: %s" % repr(scheduler))
        # else:  # try to connect to remote scheduler with this name
        #     return get_client(scheduler).get

    if config.get("scheduler", None):
        return get_scheduler(scheduler=config.get("scheduler", None))

    if config.get("get", None):
        raise ValueError(get_err_msg)

    if getattr(thread_state, "key", False):
        from distributed.worker import get_worker

        return get_worker().client.get

    if cls is not None:
        return cls.__dask_scheduler__

    if collections:
        collections = [c for c in collections if c is not None]
    if collections:
        get = collections[0].__dask_scheduler__
        if not all(c.__dask_scheduler__ == get for c in collections):
            raise ValueError(
                "Compute called on multiple collections with "
                "differing default schedulers. Please specify a "
                "scheduler=` parameter explicitly in compute or "
                "globally with `dask.config.set`."
            )
        return get

    return None
346
base.py
Python
dask/base.py
277859ddfcc30a9070ca560c9e3e2720e5eed616
dask
23
101,370
79
14
37
262
25
0
112
531
_load_extractor
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
https://github.com/deepfakes/faceswap.git
def _load_extractor(self) -> Optional[Extractor]:
    if not self._alignments.have_alignments_file and not self._args.on_the_fly:
        logger.error("No alignments file found. Please provide an alignments file for your "
                     "destination video (recommended) or enable on-the-fly conversion (not "
                     "recommended).")
        sys.exit(1)
    if self._alignments.have_alignments_file:
        if self._args.on_the_fly:
            logger.info("On-The-Fly conversion selected, but an alignments file was found. "
                        "Using pre-existing alignments file: '%s'", self._alignments.file)
        else:
            logger.debug("Alignments file found: '%s'", self._alignments.file)
        return None

    logger.debug("Loading extractor")
    logger.warning("On-The-Fly conversion selected. This will use the inferior cv2-dnn for "
                   "extraction and will produce poor results.")
    logger.warning("It is recommended to generate an alignments file for your destination "
                   "video with Extract first for superior results.")
    extractor = Extractor(detector="cv2-dnn",
                          aligner="cv2-dnn",
                          masker=self._args.mask_type,
                          multiprocess=True,
                          rotate_images=None,
                          min_size=20)
    extractor.launch()
    logger.debug("Loaded extractor")
    return extractor
148
convert.py
Python
scripts/convert.py
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
faceswap
5
107,814
7
8
2
29
5
0
7
13
test_strip_comment
Support quoted strings in matplotlibrc This enables using the comment character # within strings. Closes #19288. Superseeds #22565.
https://github.com/matplotlib/matplotlib.git
def test_strip_comment(line, result):
    assert cbook._strip_comment(line) == result
17
test_cbook.py
Python
lib/matplotlib/tests/test_cbook.py
7c378a8f3f30ce57c874a851f3af8af58f1ffdf6
matplotlib
1
153,666
50
15
19
112
9
0
71
245
_is_zero_copy_possible
FEAT-#4244: Implement dataframe exchange protocol for OmniSci (#4269) Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru> Co-authored-by: Vasily Litvinov <vasilij.n.litvinov@intel.com> Signed-off-by: Dmitry Chigarev <dmitry.chigarev@intel.com>
https://github.com/modin-project/modin.git
def _is_zero_copy_possible(self) -> bool:
    if self.__is_zero_copy_possible is None:
        if self._df._has_arrow_table():
            # If PyArrow table is already materialized then we can
            # retrieve data zero-copy
            self.__is_zero_copy_possible = True
        elif not self._df._can_execute_arrow():
            # When not able to execute the plan via PyArrow means
            # that we have to involve OmniSci, so no zero-copy.
            self.__is_zero_copy_possible = False
        else:
            # Check whether the plan for PyArrow can be executed zero-copy
            self.__is_zero_copy_possible = self._is_zero_copy_arrow_op(self._df._op)
    return self.__is_zero_copy_possible
64
dataframe.py
Python
modin/experimental/core/execution/native/implementations/omnisci_on_native/exchange/dataframe_protocol/dataframe.py
0c1a2129df64cf45bf1ff49c8ed92c510fdb1c82
modin
4
126,252
46
12
19
166
26
0
59
162
test_syncer_callback_noop_on_trial_cloud_checkpointing
[air] Add annotation for Tune module. (#27060) Co-authored-by: Kai Fricke <kai@anyscale.com>
https://github.com/ray-project/ray.git
def test_syncer_callback_noop_on_trial_cloud_checkpointing():
    callbacks = _create_default_callbacks(callbacks=[], sync_config=SyncConfig())
    syncer_callback = None
    for cb in callbacks:
        if isinstance(cb, SyncerCallback):
            syncer_callback = cb

    trial1 = MockTrial(trial_id="a", logdir=None)
    trial1.uses_cloud_checkpointing = True

    assert syncer_callback
    assert syncer_callback._enabled

    # Cloud checkpointing set, so no-op
    assert not syncer_callback._sync_trial_dir(trial1)

    # This should not raise any error for not existing directory
    syncer_callback.on_checkpoint(
        iteration=1,
        trials=[],
        trial=trial1,
        checkpoint=_TrackedCheckpoint(
            dir_or_data="/does/not/exist", storage_mode=CheckpointStorage.PERSISTENT
        ),
    )
103
test_syncer_callback.py
Python
python/ray/tune/tests/test_syncer_callback.py
eb69c1ca286a2eec594f02ddaf546657a8127afd
ray
3
241,536
23
9
6
96
7
0
30
48
test_prefix_metric_keys
Group metrics generated by `DeviceStatsMonitor` for better visualization (#11254) Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com> Co-authored-by: Jirka Borovec <Borda@users.noreply.github.com>
https://github.com/Lightning-AI/lightning.git
def test_prefix_metric_keys(tmpdir):
    metrics = {"1": 1.0, "2": 2.0, "3": 3.0}
    prefix = "foo"
    separator = "."
    converted_metrics = _prefix_metric_keys(metrics, prefix, separator)
    assert converted_metrics == {"foo.1": 1.0, "foo.2": 2.0, "foo.3": 3.0}
65
test_device_stats_monitor.py
Python
tests/callbacks/test_device_stats_monitor.py
05ed9a201c24e08c2b4d3df4735296758ddcd6a5
lightning
1
273,619
4
6
2
16
3
0
4
18
output_size
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def output_size(self):
    raise NotImplementedError
8
abstract_rnn_cell.py
Python
keras/layers/rnn/abstract_rnn_cell.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
269,586
136
14
31
405
40
1
189
444
categorical_crossentropy
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
    target = tf.convert_to_tensor(target)
    output = tf.convert_to_tensor(output)
    target.shape.assert_is_compatible_with(output.shape)

    # Use logits whenever they are available. `softmax` and `sigmoid`
    # activations cache logits on the `output` Tensor.
    if hasattr(output, "_keras_logits"):
        output = output._keras_logits  # pylint: disable=protected-access
        if from_logits:
            warnings.warn(
                '"`categorical_crossentropy` received `from_logits=True`, but '
                "the `output` argument was produced by a sigmoid or softmax "
                'activation and thus does not represent logits. Was this intended?"',
                stacklevel=2,
            )
        from_logits = True

    if from_logits:
        return tf.nn.softmax_cross_entropy_with_logits(
            labels=target, logits=output, axis=axis
        )

    if (
        not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable))
        and output.op.type == "Softmax"
    ) and not hasattr(output, "_keras_history"):
        # When softmax activation function is used for output operation, we
        # use logits from the softmax function directly to compute loss in order
        # to prevent collapsing zero when training.
        # See b/117284466
        assert len(output.op.inputs) == 1
        output = output.op.inputs[0]
        return tf.nn.softmax_cross_entropy_with_logits(
            labels=target, logits=output, axis=axis
        )

    # scale preds so that the class probas of each sample sum to 1
    output = output / tf.reduce_sum(output, axis, True)
    # Compute cross entropy from probabilities.
    epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
    output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_)
    return -tf.reduce_sum(target * tf.math.log(output), axis)


@keras_export("keras.backend.sparse_categorical_crossentropy")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
@keras_export("keras.backend.sparse_categorical_crossentropy") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
237
backend.py
Python
keras/backend.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
7
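A short usage sketch for the `categorical_crossentropy` backend function in the record above, assuming eager TensorFlow execution and the public `tf.keras.backend` alias:
```python
import tensorflow as tf

y_true = tf.constant([[0.0, 1.0, 0.0]])
logits = tf.constant([[1.0, 2.0, 0.5]])

# Probability inputs: the function rescales and clips them before taking the log.
loss_from_probs = tf.keras.backend.categorical_crossentropy(y_true, tf.nn.softmax(logits))

# Logit inputs: routed straight to tf.nn.softmax_cross_entropy_with_logits.
loss_from_logits = tf.keras.backend.categorical_crossentropy(y_true, logits, from_logits=True)
```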
156,322
6
6
12
19
3
0
6
20
produces_keys
Use DataFrameIOLayer in DataFrame.from_delayed (#8852)
https://github.com/dask/dask.git
def produces_keys(self) -> bool:
    return False
10
blockwise.py
Python
dask/blockwise.py
1ccd1a4f96afa1fe89ea93dcfe66517319b0664d
dask
1
22,678
16
11
5
50
6
0
16
59
set
refactor: clean code Signed-off-by: slowy07 <slowy.arfy@gmail.com>
https://github.com/geekcomputers/Python.git
def set(self, components):
    if len(components) > 0:
        self.__components = components
    else:
        raise Exception("please give any vector")
28
lib.py
Python
linear-algebra-python/src/lib.py
f0af0c43340763724f139fa68aa1e5a9ffe458b4
Python
2
176,703
23
9
74
83
9
0
25
43
average_clustering
Remove redundant py2 numeric conversions (#5661) * Remove redundant float conversion * Remove redundant int conversion * Use integer division Co-authored-by: Miroslav Šedivý <6774676+eumiro@users.noreply.github.com>
https://github.com/networkx/networkx.git
def average_clustering(G, nodes=None, mode="dot"):
    r
    if nodes is None:
        nodes = G
    ccs = latapy_clustering(G, nodes=nodes, mode=mode)
    return sum(ccs[v] for v in nodes) / len(nodes)
54
cluster.py
Python
networkx/algorithms/bipartite/cluster.py
2a05ccdb07cff88e56661dee8a9271859354027f
networkx
3
89,862
70
18
45
339
23
0
88
887
test_first_event_with_minified_stack_trace_received
ref(onboarding): Add function to record first event per project with min stack trace -(#42208)
https://github.com/getsentry/sentry.git
def test_first_event_with_minified_stack_trace_received(self, record_analytics):
    now = timezone.now()
    project = self.create_project(first_event=now)
    project_created.send(project=project, user=self.user, sender=type(project))

    url = "http://localhost:3000"
    data = load_data("javascript")
    data["tags"] = [("url", url)]
    data["exception"] = {
        "values": [
            {
                **data["exception"]["values"][0],
                "raw_stacktrace": {
                    "frames": [
                        {
                            "function": "o",
                            "filename": "/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js",
                            "abs_path": "https://s1.sentry-cdn.com/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js",
                            "lineno": 2,
                            "colno": 37098,
                            "pre_context": [
                                "/*! For license information please see vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd. {snip}"
                            ],
                            "context_line": "{snip} .apply(this,arguments);const i=o.map((e=>c(e,t)));return e.apply(this,i)}catch(e){throw l(),(0,i.$e)((n=>{n.addEventProcessor((e=>(t.mechani {snip}",
                            "post_context": [
                                "//# sourceMappingURL=../sourcemaps/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.fe32 {snip}"
                            ],
                            "in_app": False,
                        },
                    ],
                },
            }
        ]
    }

    self.store_event(
        project_id=project.id,
        data=data,
    )

    record_analytics.assert_called_with(
        "first_event_with_minified_stack_trace_for_project.sent",
        user_id=self.user.id,
        organization_id=project.organization_id,
        project_id=project.id,
        platform=data["platform"],
        url=url,
    )
198
test_onboarding.py
Python
tests/sentry/receivers/test_onboarding.py
ce841204ef3b20d0f6ac812ebb06aebbc63547ac
sentry
1
260,279
9
9
4
49
7
0
9
37
fit
MAINT parameter validation for Normalizer (#23543) Co-authored-by: jeremie du boisberranger <jeremiedbb@yahoo.fr>
https://github.com/scikit-learn/scikit-learn.git
def fit(self, X, y=None):
    self._validate_params()
    self._validate_data(X, accept_sparse="csr")
    return self
29
_data.py
Python
sklearn/preprocessing/_data.py
40e055b362a337cef15645d4b1be046aa782c415
scikit-learn
1
86,878
15
14
14
69
10
0
18
59
get_region_to_control_producer
chore(hybrid-cloud): AuditLogEntry is a control silo model now (#39890) In the control silo, creating an audit log entry writes to the db directly, whilst in region silo mode creating an audit log entry will instead push to a new kafka producer that consumes into the control silo asynchronously.
https://github.com/getsentry/sentry.git
def get_region_to_control_producer() -> KafkaProducer:
    global _publisher
    if _publisher is None:
        config = settings.KAFKA_TOPICS.get(settings.KAFKA_REGION_TO_CONTROL)
        _publisher = KafkaProducer(
            kafka_config.get_kafka_producer_cluster_options(config["cluster"])
        )
48
producer.py
Python
src/sentry/region_to_control/producer.py
941184cd24186324fd9f7f304b7f713041834726
sentry
2
181,702
19
9
6
60
8
0
23
41
test_source_decode_2
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
https://github.com/EpistasisLab/tpot.git
def test_source_decode_2():
    import_str, op_str, op_obj = source_decode("sklearn.linear_model.LogisticReg")
    from sklearn.linear_model import LogisticRegression
    assert import_str == "sklearn.linear_model"
    assert op_str == "LogisticReg"
    assert op_obj is None
33
tpot_tests.py
Python
tests/tpot_tests.py
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
1
20,964
47
14
16
171
18
0
58
157
get_allowed_args
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def get_allowed_args(fn_or_class):
    # type: (Union[Callable, Type]) -> Tuple[List[str], Dict[str, Any]]
    try:
        signature = inspect.signature(fn_or_class)
    except AttributeError:
        import funcsigs

        signature = funcsigs.signature(fn_or_class)
    args = []
    kwargs = {}
    for arg, param in signature.parameters.items():
        if (
            param.kind in (param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY)
        ) and param.default is param.empty:
            args.append(arg)
        else:
            kwargs[arg] = param.default if param.default is not param.empty else None
    return args, kwargs
106
utils.py
Python
pipenv/vendor/pip_shims/utils.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
6
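A usage sketch for `get_allowed_args` from the record above; the `open_file` callable is hypothetical and only serves to show how required parameters and defaulted parameters are split:
```python
# A hypothetical callable to introspect; any function or class works.
def open_file(path, mode="r", *, encoding=None):
    ...

args, kwargs = get_allowed_args(open_file)
# args   -> ["path"]                          required positional parameters
# kwargs -> {"mode": "r", "encoding": None}   parameters with defaults or keyword-only
```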
211,029
31
15
12
233
9
0
69
146
convert_x_to_bbox
[MOT] Add OC_SORT tracker (#6272) * add ocsort tracker * add ocsort deploy * merge develop * fix ocsort tracker codes * fix doc, test=document_fix * fix doc, test=document_fix
https://github.com/PaddlePaddle/PaddleDetection.git
def convert_x_to_bbox(x, score=None):
    w = np.sqrt(x[2] * x[3])
    h = x[2] / w
    if (score == None):
        return np.array(
            [x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2.]).reshape((1, 4))
    else:
        score = np.array([score])
        return np.array([
            x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2., score
        ]).reshape((1, 5))
167
ocsort_tracker.py
Python
deploy/pptracking/python/mot/tracker/ocsort_tracker.py
c84153a355d9855fe55cf51d203b8b24e7d884e5
PaddleDetection
2
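A worked example for convert_x_to_bbox above. It assumes the usual SORT-style state layout — x = [cx, cy, s, r, ...] as a column vector, with s the box area and r the aspect ratio — which is an interpretation, not something stated in the record.

import numpy as np

def convert_x_to_bbox(x, score=None):  # standalone copy of the helper above
    w = np.sqrt(x[2] * x[3])
    h = x[2] / w
    if (score == None):
        return np.array(
            [x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2.]).reshape((1, 4))
    else:
        score = np.array([score])
        return np.array([
            x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2., score
        ]).reshape((1, 5))

# Center (50, 40), area 800, aspect ratio 2  ->  w = sqrt(1600) = 40, h = 800 / 40 = 20.
x = np.array([[50.], [40.], [800.], [2.]])   # column vector, as a Kalman state slice would be
print(convert_x_to_bbox(x))                  # [[30. 30. 70. 50.]]
print(convert_x_to_bbox(x, 0.9))             # [[30. 30. 70. 50. 0.9]]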
107,206
7
8
3
40
7
0
7
44
get_current_underline_thickness
Factor out underline-thickness lookups in mathtext. Just adding a small helper function.
https://github.com/matplotlib/matplotlib.git
def get_current_underline_thickness(self):
    return self.font_output.get_underline_thickness(
        self.font, self.fontsize, self.dpi)
25
_mathtext.py
Python
lib/matplotlib/_mathtext.py
b71421e685733b1cade94113b588ca1a773ae558
matplotlib
1
86,136
23
14
8
112
13
0
23
95
get_form
fix(admin): Fix typo in admin user creation form (#38418) This fixes the error reported in https://github.com/getsentry/sentry/issues/38303, which appears to be due to a typo in the django module name.
https://github.com/getsentry/sentry.git
def get_form(self, request, obj=None, **kwargs):
    defaults = {}
    if obj is None:
        defaults.update(
            {"form": self.add_form, "fields": admin.utils.flatten_fieldsets(self.add_fieldsets)}
        )
    defaults.update(kwargs)
    return super().get_form(request, obj, **defaults)
69
admin.py
Python
src/sentry/admin.py
d522d620e5e6799000b918278c86cbaa0b1592a1
sentry
2
9,760
104
15
6
374
37
0
161
496
different
Check gallery up to date as part of CI (#3329) * Check gallery up to date as part of CI Fix #2916 * tweak check_gallery.py * update CI workflow * update stale doc cache * update stale docs
https://github.com/RaRe-Technologies/gensim.git
def different(path1, path2):
    with open(path1) as fin:
        f1 = fin.read()
    with open(path2) as fin:
        f2 = fin.read()
    return f1 != f2


curr_dir = os.path.dirname(__file__)
stale = []
for root, dirs, files in os.walk(os.path.join(curr_dir, 'gallery')):
    for f in files:
        if f.endswith('.py'):
            source_path = os.path.join(root, f)
            cache_path = source_path.replace('docs/src/gallery/', 'docs/src/auto_examples/')

            #
            # We check two things:
            #
            # 1) Actual file content
            # 2) MD5 checksums
            #
            # We check 1) because that's the part that matters to the user -
            # it's what will appear in the documentation. We check 2) because
            # that's what Sphinx Gallery relies on to decide what it needs to
            # rebuild. In practice, only one of these checks is necessary,
            # but we run them both because it's trivial.
            #
            if different(source_path, cache_path):
                stale.append(cache_path)
                continue

            actual_md5 = hashlib.md5()
            with open(source_path, 'rb') as fin:
                actual_md5.update(fin.read())

            md5_path = cache_path + '.md5'
            with open(md5_path) as fin:
                expected_md5 = fin.read()

            if actual_md5.hexdigest() != expected_md5:
                stale.append(cache_path)

if stale:
    print(f, file=sys.stderr)
    sys.exit(1)
41
check_gallery.py
Python
docs/src/check_gallery.py
9bbf12c330275351e777b553c145066b7c397f95
gensim
1
106,118
52
14
22
326
17
0
85
222
fast_slice
Clean up Table class docstrings (#5355) * clean table docstrings * apply review Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
https://github.com/huggingface/datasets.git
def fast_slice(self, offset=0, length=None) -> pa.Table:
    if offset < 0:
        raise IndexError("Offset must be non-negative")
    elif offset >= self._offsets[-1] or (length is not None and length <= 0):
        return pa.Table.from_batches([], schema=self._schema)
    i = _interpolation_search(self._offsets, offset)
    if length is None or length + offset >= self._offsets[-1]:
        batches = self._batches[i:]
        batches[0] = batches[0].slice(offset - self._offsets[i])
    else:
        j = _interpolation_search(self._offsets, offset + length - 1)
        batches = self._batches[i : j + 1]
        batches[-1] = batches[-1].slice(0, offset + length - self._offsets[j])
        batches[0] = batches[0].slice(offset - self._offsets[i])
    return pa.Table.from_batches(batches, schema=self._schema)
214
table.py
Python
src/datasets/table.py
c902456677116a081f762fa2b4aad13a0aa04d6e
datasets
7
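A self-contained sketch of the batch-selection idea in fast_slice above, using a plain linear scan in place of _interpolation_search; the offsets list and the two small record batches are made up for illustration.

import pyarrow as pa

def slice_batches(batches, offsets, offset, length):
    # `offsets` holds cumulative row counts [0, len(b0), len(b0)+len(b1), ...];
    # pick the batches covering rows [offset, offset + length) and trim the ends.
    i = next(k for k in range(len(batches)) if offsets[k + 1] > offset)
    j = next(k for k in range(len(batches)) if offsets[k + 1] >= offset + length)
    picked = list(batches[i:j + 1])
    picked[-1] = picked[-1].slice(0, offset + length - offsets[j])
    picked[0] = picked[0].slice(offset - offsets[i])
    return pa.Table.from_batches(picked)

b0 = pa.RecordBatch.from_pydict({"x": [0, 1, 2]})
b1 = pa.RecordBatch.from_pydict({"x": [3, 4, 5]})
print(slice_batches([b0, b1], [0, 3, 6], offset=2, length=3)["x"].to_pylist())
# [2, 3, 4]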
133,814
41
11
10
198
18
0
55
93
_mac
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def _mac(model, obs, h):
    B, n_agents = obs.size(0), obs.size(1)
    if not isinstance(obs, dict):
        obs = {"obs": obs}
    obs_agents_as_batches = {k: _drop_agent_dim(v) for k, v in obs.items()}
    h_flat = [s.reshape([B * n_agents, -1]) for s in h]
    q_flat, h_flat = model(obs_agents_as_batches, h_flat, None)
    return q_flat.reshape([B, n_agents, -1]), [
        s.reshape([B, n_agents, -1]) for s in h_flat
    ]
130
qmix_policy.py
Python
rllib/agents/qmix/qmix_policy.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
5
136,639
10
10
5
45
5
0
10
42
post_process
KubeRay node provider refactor (#30281) Implements KubeRay node provider as a "BatchingNodeProvider". Builds on #29933. Summary of design An autoscaler update now works like this: list pod data from k8s check if it's safe to proceed with update. Abort the update if not. do some internal calculation to determine desired scale submit a single patch to the RayCluster CR if a scale change is required Everything is single-threaded and there are O(1) K8s API calls per autoscaler update. Signed-off-by: Dmitri Gekhtman <dmitri.m.gekhtman@gmail.com>
https://github.com/ray-project/ray.git
def post_process(self) -> None:
    if self.scale_change_needed:
        self.submit_scale_request(self.scale_request)
    self.scale_change_needed = False
26
batching_node_provider.py
Python
python/ray/autoscaler/batching_node_provider.py
c976799dfd96806ec9972a287835f7a034ec3d2c
ray
2
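A minimal, hypothetical sketch of the single-patch flow described in the commit message above; the class and the stubbed methods are invented for illustration and are not Ray's actual API, though the post_process/submit_scale_request/scale_request/scale_change_needed names mirror the record.

class TinyBatchingProvider:
    def __init__(self):
        self.scale_request = {"workers": 0}
        self.scale_change_needed = False

    def non_terminated_nodes(self):
        # 1) one "list" call per autoscaler update (stubbed here)
        return ["head", "worker-a"]

    def create_node(self, count):
        # 2) scale decisions only mutate the in-memory request...
        self.scale_request["workers"] += count
        self.scale_change_needed = True

    def submit_scale_request(self, request):
        # ...and a real provider would submit a single patch to the cluster here.
        print("patching cluster to", request)

    def post_process(self):
        # 3) at most one API call per update, as in the method above
        if self.scale_change_needed:
            self.submit_scale_request(self.scale_request)
        self.scale_change_needed = False

provider = TinyBatchingProvider()
provider.create_node(2)
provider.post_process()   # -> patching cluster to {'workers': 2}
provider.post_process()   # no further call; nothing changed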
309,333
6
7
22
27
4
0
8
29
test_circular_import
Use Platform enum in load_platform [tests] (#63904) * Use Platform enum in numato tests * Use Platform enum in discovery tests * Adjust load_platform argument Co-authored-by: epenet <epenet@users.noreply.github.com>
https://github.com/home-assistant/core.git
def test_circular_import(self):
    component_calls = []
    platform_calls = []
131
test_discovery.py
Python
tests/helpers/test_discovery.py
b71a22557dc6ca87b6c1871a0e4d3c3a949759fc
core
1
126,889
24
8
3
46
7
0
27
62
__reduce__
Fix out-of-band deserialization of actor handle (#27700) When we deserialize actor handle via pickle, we will register it with an outer object ref equaling to itself which is wrong. For out-of-band deserialization, there should be no outer object ref. Signed-off-by: Jiajun Yao <jeromeyjj@gmail.com>
https://github.com/ray-project/ray.git
def __reduce__(self):
    (serialized, _) = self._serialization_helper()
    # There is no outer object ref when the actor handle is
    # deserialized out-of-band using pickle.
    return ActorHandle._deserialization_helper, (serialized, None)
27
actor.py
Python
python/ray/actor.py
f084546d41f0533c1e9e96a7249532d0eb4ff47d
ray
1
3,763
29
14
12
176
19
0
39
114
get_json_schema
🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805) * Facebook Marketing performance improvement * add comments and little refactoring * fix integration tests with the new config * improve job status handling, limit concurrency to 10 * fix campaign jobs, refactor manager * big refactoring of async jobs, support random order of slices * update source _read_incremental to hook new state logic * fix issues with timeout * remove debugging and clean up, improve retry logic * merge changes from #8234 * fix call super _read_increment * generalize batch execution, add use_batch flag * improve coverage, do some refactoring of spec * update test, remove overrides of source * add split by AdSet * add smaller insights * fix end_date < start_date case * add account_id to PK * add notes * fix new streams * fix reversed incremental stream * update spec.json for SAT * upgrade CDK and bump version Co-authored-by: Dmytro Rezchykov <dmitry.rezchykov@zazmic.com> Co-authored-by: Eugene Kulak <kulak.eugene@gmail.com>
https://github.com/airbytehq/airbyte.git
def get_json_schema(self) -> Mapping[str, Any]:
    loader = ResourceSchemaLoader(package_name_from_class(self.__class__))
    schema = loader.get_schema("ads_insights")
    if self._fields:
        schema["properties"] = {k: v for k, v in schema["properties"].items() if k in self._fields}
    if self.breakdowns:
        breakdowns_properties = loader.get_schema("ads_insights_breakdowns")["properties"]
        schema["properties"].update({prop: breakdowns_properties[prop] for prop in self.breakdowns})
    return schema
106
base_insight_streams.py
Python
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/base_insight_streams.py
a3aae8017a0a40ff2006e2567f71dccb04c997a5
airbyte
6
247,670
14
9
6
71
12
0
15
50
test_exception_callback
Add tests for database transaction callbacks (#12198) Signed-off-by: Sean Quah <seanq@element.io>
https://github.com/matrix-org/synapse.git
def test_exception_callback(self) -> None:
    _test_txn = Mock(side_effect=ZeroDivisionError)
    after_callback, exception_callback = self._run_interaction(_test_txn)

    after_callback.assert_not_called()
    exception_callback.assert_called_once_with(987, 654, extra=321)
43
test_database.py
Python
tests/storage/test_database.py
dea577998f221297d3ff30bdf904f7147f3c3d8a
synapse
1
268,826
25
13
9
95
11
0
28
55
print_msg
Put absl logging control flag in a separate file. Open the APIs for control the logging in Keras. PiperOrigin-RevId: 419972643
https://github.com/keras-team/keras.git
def print_msg(message, line_break=True):
    # Use `getattr` in case `INTERACTIVE_LOGGING`
    # does not have the `enable` attribute.
    if INTERACTIVE_LOGGING.enable:
        if line_break:
            sys.stdout.write(message + '\n')
        else:
            sys.stdout.write(message)
        sys.stdout.flush()
    else:
        logging.info(message)
53
io_utils.py
Python
keras/utils/io_utils.py
f427e16d9e4a440b5e7e839001255f7cd87127f5
keras
3
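A standalone sketch of the routing logic in print_msg above; the module-level flag object here is an assumption standing in for Keras's real INTERACTIVE_LOGGING singleton and the public enable/disable helpers the commit message refers to.

import sys
import threading
import logging

INTERACTIVE_LOGGING = threading.local()
INTERACTIVE_LOGGING.enable = True  # assumed default: interactive (stdout) output

def print_msg(message, line_break=True):
    # Route to stdout when interactive, otherwise to the logging module.
    if INTERACTIVE_LOGGING.enable:
        if line_break:
            sys.stdout.write(message + '\n')
        else:
            sys.stdout.write(message)
        sys.stdout.flush()
    else:
        logging.info(message)

print_msg("epoch 1/5")              # written to stdout
INTERACTIVE_LOGGING.enable = False  # what a disable-interactive-logging helper would flip
print_msg("epoch 2/5")              # routed to logging.info instead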
288,323
63
12
29
253
17
0
74
289
test_webhook_person_event
Fix Netatmo scope issue with HA cloud (#79437) Co-authored-by: Paulus Schoutsen <balloob@gmail.com>
https://github.com/home-assistant/core.git
async def test_webhook_person_event(hass, config_entry, netatmo_auth):
    with selected_platforms(["camera"]):
        assert await hass.config_entries.async_setup(config_entry.entry_id)

        await hass.async_block_till_done()

    test_netatmo_event = async_capture_events(hass, NETATMO_EVENT)
    assert not test_netatmo_event

    fake_webhook_event = {
        "persons": [
            {
                "id": "91827374-7e04-5298-83ad-a0cb8372dff1",
                "face_id": "a1b2c3d4e5",
                "face_key": "9876543",
                "is_known": True,
                "face_url": "https://netatmocameraimage.blob.core.windows.net/production/12345",
            }
        ],
        "snapshot_id": "123456789abc",
        "snapshot_key": "foobar123",
        "snapshot_url": "https://netatmocameraimage.blob.core.windows.net/production/12346",
        "event_type": "person",
        "camera_id": "12:34:56:00:f1:62",
        "device_id": "12:34:56:00:f1:62",
        "event_id": "1234567890",
        "message": "MYHOME: John Doe has been seen by Indoor Camera ",
        "push_type": "NACamera-person",
    }

    webhook_id = config_entry.data[CONF_WEBHOOK_ID]
    await simulate_webhook(hass, webhook_id, fake_webhook_event)

    assert test_netatmo_event
133
test_camera.py
Python
tests/components/netatmo/test_camera.py
3e411935bbe07ebe0e7a9f5323734448486d75d7
core
1
291,525
26
13
10
137
13
0
40
119
_is_today
Fix Sonos alarm 'scheduled_today' attribute logic (#82816) fixes undefined
https://github.com/home-assistant/core.git
def _is_today(self) -> bool:
    recurrence = self.alarm.recurrence
    daynum = int(datetime.datetime.today().strftime("%w"))
    return (
        recurrence in ("DAILY", "ONCE")
        or (recurrence == "WEEKENDS" and daynum in WEEKEND_DAYS)
        or (recurrence == "WEEKDAYS" and daynum not in WEEKEND_DAYS)
        or (recurrence.startswith("ON_") and str(daynum) in recurrence)
    )
79
switch.py
Python
homeassistant/components/sonos/switch.py
f887aeedfe057682f8d5a3abd44082d02fe42758
core
7
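A standalone sketch of the recurrence check in _is_today above; WEEKEND_DAYS = (0, 6) is an assumption matching strftime("%w") numbering (0 = Sunday, 6 = Saturday), and the sample recurrence strings are illustrative.

import datetime

WEEKEND_DAYS = (0, 6)  # assumption: Sunday/Saturday in %w numbering

def is_today(recurrence, daynum=None):
    # Same boolean chain as the method above, with the day number injectable for testing.
    if daynum is None:
        daynum = int(datetime.datetime.today().strftime("%w"))
    return (
        recurrence in ("DAILY", "ONCE")
        or (recurrence == "WEEKENDS" and daynum in WEEKEND_DAYS)
        or (recurrence == "WEEKDAYS" and daynum not in WEEKEND_DAYS)
        or (recurrence.startswith("ON_") and str(daynum) in recurrence)
    )

# On a Wednesday (daynum == 3):
print(is_today("WEEKDAYS", 3), is_today("WEEKENDS", 3), is_today("ON_135", 3))
# True False True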