Dataset features (21 columns; ranges are min → max over the split):

    id              int64          20 → 338k
    vocab_size      int64          2 → 671
    ast_levels      int64          4 → 32
    nloc            int64          1 → 451
    n_ast_nodes     int64          12 → 5.6k
    n_identifiers   int64          1 → 186
    n_ast_errors    int64          0 → 10
    n_words         int64          2 → 2.17k
    n_whitespaces   int64          2 → 13.8k
    fun_name        stringlengths  2 → 73
    commit_message  stringlengths  51 → 15.3k
    url             stringlengths  31 → 59
    code            stringlengths  51 → 31k
    ast_errors      stringlengths  0 → 1.46k
    token_counts    int64          6 → 3.32k
    file_name       stringlengths  5 → 56
    language        stringclasses  1 value (Python)
    path            stringlengths  7 → 134
    commit_id       stringlengths  40 → 40
    repo            stringlengths  3 → 28
    complexity      int64          1 → 153
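The sample records follow below. As a usage note, a table with this schema loads directly with the Hugging Face `datasets` library; the following is a minimal sketch, and the dataset name it references is a hypothetical placeholder, not the actual source of this dump:

from datasets import load_dataset

# NOTE: hypothetical dataset name, used for illustration only.
ds = load_dataset("example-org/python-functions-with-metrics", split="train")

# Each record pairs a function's source ("code") with static metrics such as
# cyclomatic complexity ("complexity"), AST depth ("ast_levels"), and token
# counts ("token_counts").
simple = ds.filter(lambda r: r["complexity"] == 1 and r["n_ast_errors"] == 0)
print(simple[0]["repo"], simple[0]["fun_name"])
print(simple[0]["code"])
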
id: 125,184 | vocab_size: 47 | ast_levels: 15 | nloc: 22 | n_ast_nodes: 251 | n_identifiers: 18 | n_ast_errors: 0 | n_words: 65 | n_whitespaces: 224
fun_name: _format
commit_message:
[State Observability] Use a table format by default (#26159) NOTE: tabulate is copied/pasted to the codebase for table formatting. This PR changes the default layout to be the table format for both summary and list APIs.
url: https://github.com/ray-project/ray.git
code:
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):  # noqa
    if val is None:
        return missingval

    if valtype in [int, _text_type]:
        return "{0}".format(val)
    elif valtype is _binary_type:
        try:
            return _text_type(val, "ascii")
        except TypeError:
            return _text_type(val)
    elif valtype is float:
        is_a_colored_number = has_invisible and isinstance(
            val, (_text_type, _binary_type)
        )
        if is_a_colored_number:
            raw_val = _strip_invisible(val)
            formatted_val = format(float(raw_val), floatfmt)
            return val.replace(raw_val, formatted_val)
        else:
            return format(float(val), floatfmt)
    else:
        return "{0}".format(val)
token_counts: 132 | file_name: tabulate.py | language: Python
path: python/ray/_private/thirdparty/tabulate/tabulate.py
commit_id: adf24bfa9723b0621183bb27f0c889b813c06e8a
repo: ray | complexity: 8

id: 42,070 | vocab_size: 10 | ast_levels: 8 | nloc: 3 | n_ast_nodes: 53 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 10 | n_whitespaces: 19
fun_name: set_context
commit_message:
Convert docs to pydata-sphinx-theme and add new material (#2842) * Do basic conversion of site to pydata_sphinx_theme * Remove some pae structure customizations we no longer need * Add some custom CSS * Tweak a few more colors * Remove vestigial div closing tag * Reorganize release notes into hierarchical pages * Rebuild full docs and fix some resulting issues * Make release note doc refs absolute * Convert homepage to use sphinx-design instead of hand-crafted html * Remove original custom css * Simplify header and put archive switcher in footer * Streamline API docs for objects * Play around with templates to fix shrinking content (not perfect yet) * Improve use of horizontal space without sidebars * Various tweaks * Convert tutorial homepage source to native sphinx-design directives * Move intro page into tutorial * More tweaks * Tweak theme colors and footer * Remove reference to navbar version * Note that error bar tutorial demonstrates new features as of v0.12 * Update layout customization for new theme features * Various layout and CSS tweaks * Narrow support guidance to StackOverflow * Run all notebooks * Adapt to new dropdown navbar in pydata theme * Separate tutorial source and outputs * Separate dostring source and outputs * Add scale API template * Update API docs * Fix requirements * Add new objects * Point doc requirements at v0.10 RC for theme
url: https://github.com/mwaskom/seaborn.git
code:
def set_context(context=None, font_scale=1, rc=None):
    context_object = plotting_context(context, font_scale, rc)
    mpl.rcParams.update(context_object)
token_counts: 34 | file_name: rcmod.py | language: Python
path: seaborn/rcmod.py
commit_id: 34662f4be5c364e7518f9c1118c9b362038ee5dd
repo: seaborn | complexity: 1

id: 311,030 | vocab_size: 7 | ast_levels: 10 | nloc: 3 | n_ast_nodes: 35 | n_identifiers: 5 | n_ast_errors: 0 | n_words: 7 | n_whitespaces: 21
fun_name: async_unload
commit_message:
Replace Synology DSM services with buttons (#57352)
url: https://github.com/home-assistant/core.git
code:
async def async_unload(self) -> None:
    await self._syno_api_executer(self.dsm.logout)
token_counts: 19 | file_name: common.py | language: Python
path: homeassistant/components/synology_dsm/common.py
commit_id: 5d7d652237b2368320a68c772ce3d837e4c1d04b
repo: core | complexity: 1

id: 278,720 | vocab_size: 35 | ast_levels: 16 | nloc: 14 | n_ast_nodes: 160 | n_identifiers: 16 | n_ast_errors: 0 | n_words: 46 | n_whitespaces: 191
fun_name: clone_keras_tensors
commit_message:
Remove pylint comments. PiperOrigin-RevId: 452353044
url: https://github.com/keras-team/keras.git
code:
def clone_keras_tensors(args, keras_tensor_mapping):
    result = []
    for obj in tf.nest.flatten(args):
        if node_module.is_keras_tensor(obj):
            if id(obj) in keras_tensor_mapping:
                cpy = keras_tensor_mapping[id(obj)]
            else:
                # Create copy of keras_tensor if we haven't done it before
                cpy = _clone_keras_tensor(obj)
                cpy._keras_history = obj._keras_history
                keras_tensor_mapping[id(obj)] = cpy
            result.append(cpy)
        else:
            result.append(obj)
    return tf.nest.pack_sequence_as(args, result)
token_counts: 98 | file_name: functional_utils.py | language: Python
path: keras/engine/functional_utils.py
commit_id: 3613c3defc39c236fb1592c4f7ba1a9cc887343a
repo: keras | complexity: 4

id: 22,168 | vocab_size: 7 | ast_levels: 7 | nloc: 9 | n_ast_nodes: 31 | n_identifiers: 4 | n_ast_errors: 0 | n_words: 7 | n_whitespaces: 21
fun_name: get_plain_headed_box
commit_message:
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
url: https://github.com/pypa/pipenv.git
code:
def get_plain_headed_box(self) -> "Box":
    return PLAIN_HEADED_SUBSTITUTIONS.get(self, self)
token_counts: 17 | file_name: box.py | language: Python
path: pipenv/patched/pip/_vendor/rich/box.py
commit_id: cd5a9683be69c86c8f3adcd13385a9bc5db198ec
repo: pipenv | complexity: 1

id: 156,567 | vocab_size: 36 | ast_levels: 12 | nloc: 11 | n_ast_nodes: 129 | n_identifiers: 10 | n_ast_errors: 0 | n_words: 44 | n_whitespaces: 106
fun_name: apply_and_enforce
commit_message:
Add kwarg ``enforce_ndim`` to ``dask.array.map_blocks()`` (#8865)
url: https://github.com/dask/dask.git
code:
def apply_and_enforce(*args, **kwargs):
    func = kwargs.pop("_func")
    expected_ndim = kwargs.pop("expected_ndim")
    out = func(*args, **kwargs)
    if getattr(out, "ndim", 0) != expected_ndim:
        out_ndim = getattr(out, "ndim", 0)
        raise ValueError(
            f"Dimension mismatch: expected output of {func} "
            f"to have dims = {expected_ndim}. Got {out_ndim} instead."
        )
    return out
token_counts: 68 | file_name: core.py | language: Python
path: dask/array/core.py
commit_id: 2b90415b02d3ad1b08362889e0818590ca3133f4
repo: dask | complexity: 2

id: 178,165 | vocab_size: 33 | ast_levels: 11 | nloc: 6 | n_ast_nodes: 83 | n_identifiers: 10 | n_ast_errors: 0 | n_words: 42 | n_whitespaces: 90
fun_name: get_jobs_by_meta
commit_message:
feat: DEV-2075: Add mixin to Project to support mechanism to cancel old jobs (#2547) * feat: DEV-2075: Add mixin to Project to support mechanism to cancel old jobs
url: https://github.com/heartexlabs/label-studio.git
code:
def get_jobs_by_meta(queue, func_name, meta):
    # get all jobs from Queue
    jobs = (
        job for job in queue.get_jobs()
        if job.func.__name__ == func_name
    )
    # return only with same meta data
    return [job for job in jobs if hasattr(job, 'meta') and job.meta == meta]
token_counts: 52 | file_name: redis.py | language: Python
path: label_studio/core/redis.py
commit_id: 283628097a10e8abafc94c683bc8be2d79a5998f
repo: label-studio | complexity: 6

id: 269,601 | vocab_size: 8 | ast_levels: 8 | nloc: 3 | n_ast_nodes: 38 | n_identifiers: 4 | n_ast_errors: 1 | n_words: 9 | n_whitespaces: 17
fun_name: enable_tf_random_generator
commit_message:
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
url: https://github.com/keras-team/keras.git
code:
def enable_tf_random_generator():
    global _USE_GENERATOR_FOR_RNG

    _USE_GENERATOR_FOR_RNG = True


@keras_export("keras.backend.experimental.disable_tf_random_generator", v1=[])
ast_errors: @keras_export("keras.backend.experimental.disable_tf_random_generator", v1=[])
token_counts: 10 | file_name: backend.py | language: Python
path: keras/backend.py
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf
repo: keras | complexity: 1

id: 42,548 | vocab_size: 48 | ast_levels: 14 | nloc: 18 | n_ast_nodes: 205 | n_identifiers: 27 | n_ast_errors: 0 | n_words: 61 | n_whitespaces: 258
fun_name: collocation_list
commit_message:
Docstring tests (#3050) * fixed pytests * fixed more pytests * fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py * fixed pytests (mainly multiline or rounding issues) * fixed treebank pytests, removed test for return_string=True (deprecated) * fixed destructive.py pytests, removed test for return_string=True (deprecated) * fixed pytest (rounding issues) * fixed pytest (initialised missing object) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * added pytest +SKIP for deprecated module stanford * updated AUTHORS.md * changed docstring corrections by usage of ELLIPSIS and different roundings * fixed AUTHORS.md to be consistent * Fix framenet doctest formatting with pprint * Change docstring on MultiListBox.__init__ I believe the original typo was misinterpreted and changed to something that was not originally intended. Co-authored-by: Jan Lennartz <jan.lennartz@ing.com> Co-authored-by: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com> Co-authored-by: Tom Aarsen <Cubiegamedev@gmail.com>
url: https://github.com/nltk/nltk.git
code:
def collocation_list(self, num=20, window_size=2):
    if not (
        "_collocations" in self.__dict__
        and self._num == num
        and self._window_size == window_size
    ):
        self._num = num
        self._window_size = window_size

        # print("Building collocations list")
        from nltk.corpus import stopwords

        ignored_words = stopwords.words("english")
        finder = BigramCollocationFinder.from_words(self.tokens, window_size)
        finder.apply_freq_filter(2)
        finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
        bigram_measures = BigramAssocMeasures()
        self._collocations = list(
            finder.nbest(bigram_measures.likelihood_ratio, num)
        )
    return self._collocations
token_counts: 126 | file_name: text.py | language: Python
path: nltk/text.py
commit_id: 8a4cf5d94eb94b6427c5d1d7907ba07b119932c5
repo: nltk | complexity: 5

id: 294,024 | vocab_size: 33 | ast_levels: 13 | nloc: 15 | n_ast_nodes: 126 | n_identifiers: 21 | n_ast_errors: 0 | n_words: 34 | n_whitespaces: 116
fun_name: library_section_payload
commit_message:
Support multiple Plex servers in media browser (#68321)
url: https://github.com/home-assistant/core.git
code:
def library_section_payload(section):
    try:
        children_media_class = ITEM_TYPE_MEDIA_CLASS[section.TYPE]
    except KeyError as err:
        raise UnknownMediaType(f"Unknown type received: {section.TYPE}") from err
    server_id = section._server.machineIdentifier  # pylint: disable=protected-access
    return BrowseMedia(
        title=section.title,
        media_class=MEDIA_CLASS_DIRECTORY,
        media_content_id=generate_plex_uri(server_id, section.key),
        media_content_type="library",
        can_play=False,
        can_expand=True,
        children_media_class=children_media_class,
    )
token_counts: 77 | file_name: media_browser.py | language: Python
path: homeassistant/components/plex/media_browser.py
commit_id: 653305b998dd033365576db303b32dd5df3a6c54
repo: core | complexity: 2

id: 213,030 | vocab_size: 27 | ast_levels: 9 | nloc: 8 | n_ast_nodes: 111 | n_identifiers: 4 | n_ast_errors: 0 | n_words: 36 | n_whitespaces: 99
fun_name: gen_skeleton
commit_message:
fix: Py27hash fix (#2182) * Add third party py27hash code * Add Py27UniStr and unit tests * Add py27hash_fix utils and tests * Add to_py27_compatible_template and tests * Apply py27hash fix to wherever it is needed * Apply py27hash fix, all tests pass except api_with_any_method_in_swagger * apply py27hash fix in openapi + run black * remove py27 testing * remove other py27 references * black fixes * fixes/typos * remove py27 from tox.ini * refactoring * third party notice * black * Fix py27hash fix to deal with null events * Fix Py27UniStr repr for unicode literals * black reformat * Update _template_has_api_resource to check data type more defensively * Apply py27Dict in _get_authorizers * Apply Py27Dict to authorizers and gateway responses which will go into swagger * Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class * Rename _convert_to_py27_dict to _convert_to_py27_type * Apply Py27UniStr to path param name * Handle HttpApi resource under to_py27_compatible_template * Fix InvalidDocumentException to not sort different exceptions * black reformat * Remove unnecessary test files Co-authored-by: Wing Fung Lau <4760060+hawflau@users.noreply.github.com>
url: https://github.com/aws/serverless-application-model.git
code:
def gen_skeleton():
    # create as Py27Dict and insert key one by one to preserve input order
    skeleton = Py27Dict()
    skeleton["openapi"] = "3.0.1"
    skeleton["info"] = Py27Dict()
    skeleton["info"]["version"] = "1.0"
    skeleton["info"]["title"] = ref("AWS::StackName")
    skeleton["paths"] = Py27Dict()
    return skeleton
token_counts: 55 | file_name: open_api.py | language: Python
path: samtranslator/open_api/open_api.py
commit_id: a5db070f446b7cfebdaa6ad2e3dcf78f6105a272
repo: serverless-application-model | complexity: 1

id: 260,625 | vocab_size: 48 | ast_levels: 12 | nloc: 13 | n_ast_nodes: 175 | n_identifiers: 24 | n_ast_errors: 0 | n_words: 57 | n_whitespaces: 171
fun_name: fit
commit_message:
MAINT validate parameter in KernelPCA (#24020) Co-authored-by: Julien Jerphanion <git@jjerphan.xyz> Co-authored-by: jeremiedbb <jeremiedbb@yahoo.fr>
url: https://github.com/scikit-learn/scikit-learn.git
code:
def fit(self, X, y=None):
    self._validate_params()

    if self.fit_inverse_transform and self.kernel == "precomputed":
        raise ValueError("Cannot fit_inverse_transform with a precomputed kernel.")
    X = self._validate_data(X, accept_sparse="csr", copy=self.copy_X)
    self._centerer = KernelCenterer()
    K = self._get_kernel(X)
    self._fit_transform(K)

    if self.fit_inverse_transform:
        # no need to use the kernel to transform X, use shortcut expression
        X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)
        self._fit_inverse_transform(X_transformed, X)

    self.X_fit_ = X
    return self
token_counts: 106 | file_name: _kernel_pca.py | language: Python
path: sklearn/decomposition/_kernel_pca.py
commit_id: 3312bc2ea6aad559643a1d920e3380fa123f627c
repo: scikit-learn | complexity: 4

id: 42,787 | vocab_size: 146 | ast_levels: 13 | nloc: 71 | n_ast_nodes: 759 | n_identifiers: 49 | n_ast_errors: 0 | n_words: 267 | n_whitespaces: 1,032
fun_name: get_conn
commit_message:
Use KubernetesHook to create api client in KubernetesPodOperator (#20578) Add support for k8s hook in KPO; use it always (even when no conn id); continue to consider the core k8s settings that KPO already takes into account but emit deprecation warning about them. KPO historically takes into account a few settings from core airflow cfg (e.g. verify ssl, tcp keepalive, context, config file, and in_cluster). So to use the hook to generate the client, somehow the hook has to take these settings into account. But we don't want the hook to consider these settings in general. So we read them in KPO and if necessary patch the hook and warn.
url: https://github.com/apache/airflow.git
code:
def get_conn(self) -> Any:
    in_cluster = self._coalesce_param(
        self.in_cluster, self.conn_extras.get("extra__kubernetes__in_cluster") or None
    )
    cluster_context = self._coalesce_param(
        self.cluster_context, self.conn_extras.get("extra__kubernetes__cluster_context") or None
    )
    kubeconfig_path = self._coalesce_param(
        self.config_file, self.conn_extras.get("extra__kubernetes__kube_config_path") or None
    )
    kubeconfig = self.conn_extras.get("extra__kubernetes__kube_config") or None
    num_selected_configuration = len([o for o in [in_cluster, kubeconfig, kubeconfig_path] if o])
    if num_selected_configuration > 1:
        raise AirflowException(
            "Invalid connection configuration. Options kube_config_path, "
            "kube_config, in_cluster are mutually exclusive. "
            "You can only use one option at a time."
        )
    disable_verify_ssl = self._coalesce_param(
        self.disable_verify_ssl, _get_bool(self._get_field("disable_verify_ssl"))
    )
    disable_tcp_keepalive = self._coalesce_param(
        self.disable_tcp_keepalive, _get_bool(self._get_field("disable_tcp_keepalive"))
    )

    # BEGIN apply settings from core kubernetes configuration
    # this section should be removed in next major release
    deprecation_warnings: List[Tuple[str, Any]] = []
    if disable_verify_ssl is None and self._deprecated_core_disable_verify_ssl is True:
        deprecation_warnings.append(('verify_ssl', False))
        disable_verify_ssl = self._deprecated_core_disable_verify_ssl
    # by default, hook will try in_cluster first. so we only need to
    # apply core airflow config and alert when False and in_cluster not otherwise set.
    if in_cluster is None and self._deprecated_core_in_cluster is False:
        deprecation_warnings.append(('in_cluster', self._deprecated_core_in_cluster))
        in_cluster = self._deprecated_core_in_cluster
    if not cluster_context and self._deprecated_core_cluster_context:
        deprecation_warnings.append(('cluster_context', self._deprecated_core_cluster_context))
        cluster_context = self._deprecated_core_cluster_context
    if not kubeconfig_path and self._deprecated_core_config_file:
        deprecation_warnings.append(('config_file', self._deprecated_core_config_file))
        kubeconfig_path = self._deprecated_core_config_file
    if disable_tcp_keepalive is None and self._deprecated_core_disable_tcp_keepalive is True:
        deprecation_warnings.append(('enable_tcp_keepalive', False))
        disable_tcp_keepalive = True
    if deprecation_warnings:
        self._deprecation_warning_core_param(deprecation_warnings)
    # END apply settings from core kubernetes configuration

    if disable_verify_ssl is True:
        _disable_verify_ssl()
    if disable_tcp_keepalive is not True:
        _enable_tcp_keepalive()

    if in_cluster:
        self.log.debug("loading kube_config from: in_cluster configuration")
        config.load_incluster_config()
        return client.ApiClient()

    if kubeconfig_path is not None:
        self.log.debug("loading kube_config from: %s", kubeconfig_path)
        config.load_kube_config(
            config_file=kubeconfig_path,
            client_configuration=self.client_configuration,
            context=cluster_context,
        )
        return client.ApiClient()

    if kubeconfig is not None:
        with tempfile.NamedTemporaryFile() as temp_config:
            self.log.debug("loading kube_config from: connection kube_config")
            temp_config.write(kubeconfig.encode())
            temp_config.flush()
            config.load_kube_config(
                config_file=temp_config.name,
                client_configuration=self.client_configuration,
                context=cluster_context,
            )
        return client.ApiClient()

    return self._get_default_client(cluster_context=cluster_context)
token_counts: 460 | file_name: kubernetes.py | language: Python
path: airflow/providers/cncf/kubernetes/hooks/kubernetes.py
commit_id: 60eb9e106f5915398eafd6aa339ec710c102dc09
repo: airflow | complexity: 24

id: 149,758 | vocab_size: 29 | ast_levels: 15 | nloc: 18 | n_ast_nodes: 272 | n_identifiers: 19 | n_ast_errors: 0 | n_words: 37 | n_whitespaces: 180
fun_name: load_data
commit_message:
add freqao backend machinery, user interface, documentation
url: https://github.com/freqtrade/freqtrade.git
code:
def load_data(self) -> Any:
    model = load(self.model_path+self.model_filename+"_model.joblib")
    with open(self.model_path+self.model_filename+"_metadata.json", 'r') as fp:
        self.data = json.load(fp)
    if self.data.get('training_features_list'):
        self.training_features_list = [*self.data.get('training_features_list')]
    self.data_dictionary['train_features'] = pd.read_pickle(
        self.model_path + self.model_filename + "_trained_df.pkl"
    )
    self.model_path = self.data['model_path']
    self.model_filename = self.data['model_filename']
    if self.config['freqai']['feature_parameters']['principal_component_analysis']:
        self.pca = pk.load(
            open(self.model_path+self.model_filename+"_pca_object.pkl", "rb")
        )
    return model
token_counts: 155 | file_name: data_handler.py | language: Python
path: freqtrade/freqai/data_handler.py
commit_id: fc837c4daa27a18ff0e86128f4d52089b88fa5fb
repo: freqtrade | complexity: 3

id: 144,300 | vocab_size: 13 | ast_levels: 9 | nloc: 3 | n_ast_nodes: 56 | n_identifiers: 11 | n_ast_errors: 0 | n_words: 13 | n_whitespaces: 34
fun_name: _bind
commit_message:
[Ray DAG] Implement experimental Ray DAG API for task/class (#22058)
url: https://github.com/ray-project/ray.git
code:
def _bind(self, *args, **kwargs):
    from ray.experimental.dag.class_node import ClassNode

    return ClassNode(self.__ray_metadata__.modified_class, args, kwargs, {})
token_counts: 38 | file_name: actor.py | language: Python
path: python/ray/actor.py
commit_id: c065e3f69ec248383d98b45a8d1c00832ccfdd57
repo: ray | complexity: 1

id: 337,524 | vocab_size: 22 | ast_levels: 13 | nloc: 13 | n_ast_nodes: 128 | n_identifiers: 11 | n_ast_errors: 0 | n_words: 42 | n_whitespaces: 149
fun_name: find_device
commit_message:
Big model inference (#345) * Big model inference * Reorganize port cleanup * Last cleanup * Test fix * Quality * Update src/accelerate/big_modeling.py Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * Fix bug in default mem * Check device map is complete * More tests * Make load function more general * Apply suggestions from code review Co-authored-by: Zachary Mueller <muellerzr@gmail.com> * Quality * Address more review comments * Check generation results for gpt2 * Add main wrapper around everything * Tests for final API * Clean infer_auto_device * Type annotations * Apply suggestions from code review Co-authored-by: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com> Co-authored-by: Lysandre Debut <lysandre.debut@reseau.eseo.fr> * Address review comments * Last review comment for now * Fix bug in clean_device_map * Add doc * Style * Fixes + dtype support * Fix test * Add option to offload CPU state_dict * Indent typo * Final tweaks Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> Co-authored-by: Zachary Mueller <muellerzr@gmail.com> Co-authored-by: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com> Co-authored-by: Lysandre Debut <lysandre.debut@reseau.eseo.fr>
url: https://github.com/huggingface/accelerate.git
code:
def find_device(data):
    if isinstance(data, Mapping):
        for obj in data.values():
            device = find_device(obj)
            if device is not None:
                return device
    elif isinstance(data, (tuple, list)):
        for obj in data:
            device = find_device(obj)
            if device is not None:
                return device
    elif isinstance(data, torch.Tensor):
        return data.device
token_counts: 82 | file_name: operations.py | language: Python
path: src/accelerate/utils/operations.py
commit_id: f56f4441b3d448f4a81d5131c03e7dd73eac3ba0
repo: accelerate | complexity: 8

id: 60,126 | vocab_size: 17 | ast_levels: 10 | nloc: 12 | n_ast_nodes: 78 | n_identifiers: 8 | n_ast_errors: 0 | n_words: 19 | n_whitespaces: 80
fun_name: wait
commit_message:
Add thread-safe async primitives `Event` and `Future` (#7865) Co-authored-by: Serina Grill <42048900+serinamarie@users.noreply.github.com>
url: https://github.com/PrefectHQ/prefect.git
code:
async def wait(self) -> None:
    if self._is_set:
        return

    if not self._loop:
        self._loop = get_running_loop()
        self._event = asyncio.Event()

    await self._event.wait()
token_counts: 44 | file_name: primitives.py | language: Python
path: src/prefect/_internal/concurrency/primitives.py
commit_id: a368874d1b145c1ec5201e5efd3c26ce7c1e8611
repo: prefect | complexity: 3

id: 78,295 | vocab_size: 18 | ast_levels: 10 | nloc: 8 | n_ast_nodes: 67 | n_identifiers: 10 | n_ast_errors: 0 | n_words: 21 | n_whitespaces: 89
fun_name: test_get_settings_no_request
commit_message:
Add generic settings to compliment site-specific settings (#8327)
url: https://github.com/wagtail/wagtail.git
code:
def test_get_settings_no_request(self):
    context = Context()
    template = Template(
        "{% load wagtailsettings_tags %}"
        "{% get_settings %}"
        "{{ settings.tests.testgenericsetting.title }}"
    )
    self.assertEqual(template.render(context), self.default_settings.title)
token_counts: 36 | file_name: test_templates.py | language: Python
path: wagtail/contrib/settings/tests/generic/test_templates.py
commit_id: d967eccef28ce47f60d26be1c28f2d83a25f40b0
repo: wagtail | complexity: 1

id: 208,719 | vocab_size: 44 | ast_levels: 12 | nloc: 25 | n_ast_nodes: 198 | n_identifiers: 13 | n_ast_errors: 0 | n_words: 73 | n_whitespaces: 344
fun_name: get_tail
commit_message:
This fixed the mixing of multiple history seen in #13631 It forces get_tail to put the current session last in the returned results.
url: https://github.com/ipython/ipython.git
code:
def get_tail(self, n=10, raw=True, output=False, include_latest=False):
    self.writeout_cache()
    if not include_latest:
        n += 1
    # cursor/line/entry
    this_cur = list(
        self._run_sql(
            "WHERE session == ? ORDER BY line DESC LIMIT ? ",
            (self.session_number, n),
            raw=raw,
            output=output,
        )
    )
    other_cur = list(
        self._run_sql(
            "WHERE session != ? ORDER BY session DESC, line DESC LIMIT ?",
            (self.session_number, n),
            raw=raw,
            output=output,
        )
    )

    everything = this_cur + other_cur
    everything = everything[:n]

    if not include_latest:
        return list(everything)[:0:-1]
    return list(everything)[::-1]
token_counts: 128 | file_name: history.py | language: Python
path: IPython/core/history.py
commit_id: dc5bcc1c50892a5128fcf128af28887226144927
repo: ipython | complexity: 3

id: 268,911 | vocab_size: 12 | ast_levels: 10 | nloc: 6 | n_ast_nodes: 70 | n_identifiers: 13 | n_ast_errors: 1 | n_words: 16 | n_whitespaces: 29
fun_name: opt_combinations_only
commit_message:
- Consolidate disparate test-related files into a single testing_infra folder. - Cleanup TODO related to removing testing infra as a dependency of the Keras target. - Standardize import naming: there is now only "test_combinations" for test combinations, and "test_utils" for utilities. The TF utilities module "test_util" is now always imported as "tf_test_utils" to avoid confusion. PiperOrigin-RevId: 426773173
url: https://github.com/keras-team/keras.git
code:
def opt_combinations_only():
    experimental_opt_combinations = test_combinations.combine(
        mode='eager', opt_cls=optimizer_experimental.Optimizer)
    orig_opt_combination = test_combinations.combine(
        opt_cls=optimizer_v2.OptimizerV2)
    return experimental_opt_combinations + orig_opt_combination


@tf_test_utils.with_control_flow_v2
ast_errors: @tf_test_utils.with_control_flow_v2
token_counts: 37 | file_name: loss_scale_optimizer_test.py | language: Python
path: keras/mixed_precision/loss_scale_optimizer_test.py
commit_id: b96518a22bfd92a29811e507dec0b34248a8a3f5
repo: keras | complexity: 1

id: 148,285 | vocab_size: 13 | ast_levels: 12 | nloc: 5 | n_ast_nodes: 57 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 16 | n_whitespaces: 11
fun_name: _normalize_entries
commit_message:
[Bugfix] fix invalid excluding of Black (#24042) - We should use `--force-exclude` when we pass code path explicitly https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html?highlight=--force-exclude#command-line-options - Recover the files in `python/ray/_private/thirdparty` which has been formatted in the PR https://github.com/ray-project/ray/pull/21975 by mistake.
url: https://github.com/ray-project/ray.git
code:
def _normalize_entries(entries, separators=None):
    norm_files = {}
    for entry in entries:
        norm_files[normalize_file(entry.path, separators=separators)] = entry
    return norm_files
token_counts: 36 | file_name: util.py | language: Python
path: python/ray/_private/thirdparty/pathspec/util.py
commit_id: 0e6c042e29cbbe429d81c9c1af3c75c261f00980
repo: ray | complexity: 2

id: 209,543 | vocab_size: 26 | ast_levels: 10 | nloc: 9 | n_ast_nodes: 117 | n_identifiers: 13 | n_ast_errors: 0 | n_words: 30 | n_whitespaces: 61
fun_name: overlap_frag
commit_message:
E275 - Missing whitespace after keyword (#3711) Co-authored-by: Alexander Aring <alex.aring@gmail.com> Co-authored-by: Anmol Sarma <me@anmolsarma.in> Co-authored-by: antoine.torre <torreantoine1@gmail.com> Co-authored-by: Antoine Vacher <devel@tigre-bleu.net> Co-authored-by: Arnaud Ebalard <arno@natisbad.org> Co-authored-by: atlowl <86038305+atlowl@users.noreply.github.com> Co-authored-by: Brian Bienvenu <brian@bienvenu.id.au> Co-authored-by: Chris Packham <chris.packham@alliedtelesis.co.nz> Co-authored-by: CQ <cq674350529@163.com> Co-authored-by: Daniel Collins <kinap@users.noreply.github.com> Co-authored-by: Federico Maggi <federico.maggi@gmail.com> Co-authored-by: Florian Maury <florian.maury@ssi.gouv.fr> Co-authored-by: _Frky <3105926+Frky@users.noreply.github.com> Co-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com> Co-authored-by: gpotter2 <gabriel@potter.fr> Co-authored-by: Guillaume Valadon <guillaume@valadon.net> Co-authored-by: Hao Zheng <haozheng10@gmail.com> Co-authored-by: Haresh Khandelwal <hareshkhandelwal@gmail.com> Co-authored-by: Harri Hämäläinen <hhamalai@iki.fi> Co-authored-by: hecke <hecke@naberius.de> Co-authored-by: Jan Romann <jan.romann@gmail.com> Co-authored-by: Jan Sebechlebsky <sebechlebskyjan@gmail.com> Co-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com> Co-authored-by: jockque <38525640+jockque@users.noreply.github.com> Co-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com> Co-authored-by: Keith Scott <kscott@mitre.org> Co-authored-by: Kfir Gollan <kfir@drivenets.com> Co-authored-by: Lars Munch <lars@segv.dk> Co-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com> Co-authored-by: Leonard Crestez <cdleonard@gmail.com> Co-authored-by: Marcel Patzlaff <mpatzlaff@benocs.com> Co-authored-by: Martijn Thé <martijnthe@users.noreply.github.com> Co-authored-by: Martine Lenders <authmillenon@gmail.com> Co-authored-by: Michael Farrell <micolous+git@gmail.com> Co-authored-by: Michał Mirosław <mirq-linux@rere.qmqm.pl> Co-authored-by: mkaliszan <mkaliszan@benocs.com> Co-authored-by: mtury <maxence.tury@ssi.gouv.fr> Co-authored-by: Neale Ranns <nranns@cisco.com> Co-authored-by: Octavian Toader <Octavian.Toader@belden.com> Co-authored-by: Peter Eisenlohr <peter@eisenlohr.org> Co-authored-by: Phil <phil@secdev.org> Co-authored-by: Pierre Lalet <pierre@droids-corp.org> Co-authored-by: Pierre Lorinquer <pierre.lorinquer@ssi.gouv.fr> Co-authored-by: piersoh <42040737+piersoh@users.noreply.github.com> Co-authored-by: plorinquer <pierre.lorinquer@ssi.gouv.fr> Co-authored-by: pvinci <pvinci@users.noreply.github.com> Co-authored-by: Rahul Jadhav <nyrahul@gmail.com> Co-authored-by: Robin Jarry <robin.jarry@6wind.com> Co-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com> Co-authored-by: rperez <rperez@debian> Co-authored-by: Sabrina Dubroca <sd@queasysnail.net> Co-authored-by: Sebastian Baar <sebastian.baar@gmx.de> Co-authored-by: sebastien mainand <sebastien.mainand@ssi.gouv.fr> Co-authored-by: smehner1 <smehner1@gmail.com> Co-authored-by: speakinghedge <hecke@naberius.de> Co-authored-by: Steven Van Acker <steven@singularity.be> Co-authored-by: Thomas Faivre <thomas.faivre@6wind.com> Co-authored-by: Tran Tien Dat <peter.trantiendat@gmail.com> Co-authored-by: Wael Mahlous <wael.mahlous@gmail.com> Co-authored-by: waeva <74464394+waeva@users.noreply.github.com> Co-authored-by: Alexander Aring <alex.aring@gmail.com> Co-authored-by: Anmol Sarma <me@anmolsarma.in> Co-authored-by: antoine.torre 
<torreantoine1@gmail.com> Co-authored-by: Antoine Vacher <devel@tigre-bleu.net> Co-authored-by: Arnaud Ebalard <arno@natisbad.org> Co-authored-by: atlowl <86038305+atlowl@users.noreply.github.com> Co-authored-by: Brian Bienvenu <brian@bienvenu.id.au> Co-authored-by: Chris Packham <chris.packham@alliedtelesis.co.nz> Co-authored-by: CQ <cq674350529@163.com> Co-authored-by: Daniel Collins <kinap@users.noreply.github.com> Co-authored-by: Federico Maggi <federico.maggi@gmail.com> Co-authored-by: Florian Maury <florian.maury@ssi.gouv.fr> Co-authored-by: _Frky <3105926+Frky@users.noreply.github.com> Co-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com> Co-authored-by: gpotter2 <gabriel@potter.fr> Co-authored-by: Guillaume Valadon <guillaume@valadon.net> Co-authored-by: Hao Zheng <haozheng10@gmail.com> Co-authored-by: Haresh Khandelwal <hareshkhandelwal@gmail.com> Co-authored-by: Harri Hämäläinen <hhamalai@iki.fi> Co-authored-by: hecke <hecke@naberius.de> Co-authored-by: Jan Romann <jan.romann@gmail.com> Co-authored-by: Jan Sebechlebsky <sebechlebskyjan@gmail.com> Co-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com> Co-authored-by: jockque <38525640+jockque@users.noreply.github.com> Co-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com> Co-authored-by: Keith Scott <kscott@mitre.org> Co-authored-by: Kfir Gollan <kfir@drivenets.com> Co-authored-by: Lars Munch <lars@segv.dk> Co-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com> Co-authored-by: Leonard Crestez <cdleonard@gmail.com> Co-authored-by: Marcel Patzlaff <mpatzlaff@benocs.com> Co-authored-by: Martijn Thé <martijnthe@users.noreply.github.com> Co-authored-by: Martine Lenders <authmillenon@gmail.com> Co-authored-by: Michael Farrell <micolous+git@gmail.com> Co-authored-by: Michał Mirosław <mirq-linux@rere.qmqm.pl> Co-authored-by: mkaliszan <mkaliszan@benocs.com> Co-authored-by: mtury <maxence.tury@ssi.gouv.fr> Co-authored-by: Neale Ranns <nranns@cisco.com> Co-authored-by: Octavian Toader <Octavian.Toader@belden.com> Co-authored-by: Peter Eisenlohr <peter@eisenlohr.org> Co-authored-by: Phil <phil@secdev.org> Co-authored-by: Pierre Lalet <pierre@droids-corp.org> Co-authored-by: Pierre Lorinquer <pierre.lorinquer@ssi.gouv.fr> Co-authored-by: piersoh <42040737+piersoh@users.noreply.github.com> Co-authored-by: pvinci <pvinci@users.noreply.github.com> Co-authored-by: Rahul Jadhav <nyrahul@gmail.com> Co-authored-by: Robin Jarry <robin.jarry@6wind.com> Co-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com> Co-authored-by: rperez <rperez@debian> Co-authored-by: Sabrina Dubroca <sd@queasysnail.net> Co-authored-by: Sebastian Baar <sebastian.baar@gmx.de> Co-authored-by: sebastien mainand <sebastien.mainand@ssi.gouv.fr> Co-authored-by: smehner1 <smehner1@gmail.com> Co-authored-by: Steven Van Acker <steven@singularity.be> Co-authored-by: Thomas Faivre <thomas.faivre@6wind.com> Co-authored-by: Tran Tien Dat <peter.trantiendat@gmail.com> Co-authored-by: Wael Mahlous <wael.mahlous@gmail.com> Co-authored-by: waeva <74464394+waeva@users.noreply.github.com>
url: https://github.com/secdev/scapy.git
code:
def overlap_frag(p, overlap, fragsize=8, overlap_fragsize=None):
    if overlap_fragsize is None:
        overlap_fragsize = fragsize
    q = p.copy()
    del q[IP].payload
    q[IP].add_payload(overlap)

    qfrag = fragment(q, overlap_fragsize)
    qfrag[-1][IP].flags |= 1

    return qfrag + fragment(p, fragsize)
token_counts: 76 | file_name: inet.py | language: Python
path: scapy/layers/inet.py
commit_id: 08b1f9d67c8e716fd44036a027bdc90dcb9fcfdf
repo: scapy | complexity: 2

id: 144,233 | vocab_size: 8 | ast_levels: 9 | nloc: 2 | n_ast_nodes: 34 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 8 | n_whitespaces: 22
fun_name: __len__
commit_message:
[RLlib] AlphaStar: Parallelized, multi-agent/multi-GPU learning via league-based self-play. (#21356)
url: https://github.com/ray-project/ray.git
code:
def __len__(self):
    return sum(len(s) for s in self.shards)
token_counts: 20 | file_name: distributed_learners.py | language: Python
path: rllib/agents/alpha_star/distributed_learners.py
commit_id: 3f03ef8ba8016b095c611c4d2e118771e4a750ca
repo: ray | complexity: 2

id: 263,805 | vocab_size: 68 | ast_levels: 11 | nloc: 11 | n_ast_nodes: 135 | n_identifiers: 17 | n_ast_errors: 0 | n_words: 88 | n_whitespaces: 163
fun_name: update_exe_pe_checksum
commit_message:
winutils: optimize PE headers fixup Attempt to optimize PE headers fix-up from both time- and memory- intensity perspective. First, avoid specifying `fast_load=False` in `pefile.PE` constructor, because that triggers the bytes statistics collection https://github.com/erocarrera/pefile/blob/v2022.5.30/pefile.py#L2862-L2876 which takes a long time for large files. Instead, we can obtain full headers (required for build timestamp modification) by calling `pe.full_load()` ourselves. Second, use (an equivalent of) `MapFileAndCheckSumW` to compute the PE checksum. For large files, it is orders of magnitude faster than its pure-python `pefile.PE.generate_checksum` counterpart. The downside is that `MapFileAndCheckSumW` requires an on-disk file as opposed to a memory buffer, so we need to split the PE headers fixup into two separate steps, with each modifying the corresponding PE headers and (re)writing the whole file. Even so, this brings the fix-up process for a 700MB executable down to seconds instead of minutes. In addition, as noted on MSDN, `MapFileAndCheckSumW` internally calls its ASCII variant (`MapFileAndCheckSumA`), so it cannot handle file paths that contain characters that are not representable in the current code page. Therefore, we implement our own equivalent using `ctypes` and pure widechar-based win32 API functions.
url: https://github.com/pyinstaller/pyinstaller.git
code:
def update_exe_pe_checksum(exe_path):
    import pefile

    # Compute checksum using our equivalent of the MapFileAndCheckSumW - for large files, it is significantly faster
    # than pure-pyton pefile.PE.generate_checksum(). However, it requires the file to be on disk (i.e., cannot operate
    # on a memory buffer).
    try:
        checksum = compute_exe_pe_checksum(exe_path)
    except Exception as e:
        raise RuntimeError("Failed to compute PE checksum!") from e

    # Update the checksum
    with pefile.PE(exe_path, fast_load=True) as pe:
        pe.OPTIONAL_HEADER.CheckSum = checksum

        # Generate updated EXE data
        data = pe.write()

    # Rewrite the exe
    with open(exe_path, 'wb') as fp:
        fp.write(data)
token_counts: 72 | file_name: winutils.py | language: Python
path: PyInstaller/utils/win32/winutils.py
commit_id: 41483cb9e6d5086416c8fea6ad6781782c091c60
repo: pyinstaller | complexity: 2

id: 78,323 | vocab_size: 74 | ast_levels: 16 | nloc: 20 | n_ast_nodes: 201 | n_identifiers: 17 | n_ast_errors: 0 | n_words: 102 | n_whitespaces: 506
fun_name: test_get_page_url_when_for_settings_fetched_via_for_site
commit_message:
Add generic settings to compliment site-specific settings (#8327)
url: https://github.com/wagtail/wagtail.git
code:
def test_get_page_url_when_for_settings_fetched_via_for_site(self):
    self._create_importantpagessitesetting_object()
    settings = ImportantPagesSiteSetting.for_site(self.default_site)

    # Force site root paths query beforehand
    self.default_site.root_page._get_site_root_paths()

    for page_fk_field, expected_result in (
        ("sign_up_page", "http://localhost/"),
        ("general_terms_page", "http://localhost/"),
        ("privacy_policy_page", "http://other/"),
    ):
        with self.subTest(page_fk_field=page_fk_field):
            # only the first request for each URL will trigger queries.
            # 2 are triggered instead of 1 here, because tests use the
            # database cache backed, and the cache is queried each time
            # to fetch site root paths (because there's no 'request' to
            # store them on)
            with self.assertNumQueries(2):
                self.assertEqual(
                    settings.get_page_url(page_fk_field), expected_result
                )

                # when called directly
                self.assertEqual(
                    settings.get_page_url(page_fk_field), expected_result
                )

                # when called indirectly via shortcut
                self.assertEqual(
                    getattr(settings.page_url, page_fk_field), expected_result
                )
token_counts: 115 | file_name: test_model.py | language: Python
path: wagtail/contrib/settings/tests/site_specific/test_model.py
commit_id: d967eccef28ce47f60d26be1c28f2d83a25f40b0
repo: wagtail | complexity: 2

id: 269,136 | vocab_size: 48 | ast_levels: 15 | nloc: 18 | n_ast_nodes: 142 | n_identifiers: 12 | n_ast_errors: 0 | n_words: 58 | n_whitespaces: 140
fun_name: recursively_deserialize_keras_object
commit_message:
Support Keras saving/loading for ShardedVariables with arbitrary partitions. PiperOrigin-RevId: 439837516
url: https://github.com/keras-team/keras.git
code:
def recursively_deserialize_keras_object(config, module_objects=None):
    if isinstance(config, dict):
        if 'class_name' in config:
            return generic_utils.deserialize_keras_object(
                config, module_objects=module_objects)
        else:
            return {
                key: recursively_deserialize_keras_object(config[key], module_objects)
                for key in config
            }
    elif isinstance(config, (tuple, list)):
        return [
            recursively_deserialize_keras_object(x, module_objects) for x in config
        ]
    else:
        raise ValueError(
            f'Unable to decode Keras layer config. Config should be a dictionary, '
            f'tuple or list. Received: config={config}')
token_counts: 89 | file_name: load.py | language: Python
path: keras/saving/saved_model/load.py
commit_id: e61cbc52fd3b0170769c120e9b8dabc8c4205322
repo: keras | complexity: 6

id: 309,801 | vocab_size: 28 | ast_levels: 14 | nloc: 15 | n_ast_nodes: 106 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 42 | n_whitespaces: 227
fun_name: get_latest_device_activity
commit_message:
spelling: components/august (#64232) Co-authored-by: Josh Soref <jsoref@users.noreply.github.com>
url: https://github.com/home-assistant/core.git
code:
def get_latest_device_activity(self, device_id, activity_types):
    if device_id not in self._latest_activities:
        return None

    latest_device_activities = self._latest_activities[device_id]
    latest_activity = None

    for activity_type in activity_types:
        if activity_type in latest_device_activities:
            if (
                latest_activity is not None
                and latest_device_activities[activity_type].activity_start_time
                <= latest_activity.activity_start_time
            ):
                continue
            latest_activity = latest_device_activities[activity_type]

    return latest_activity
token_counts: 69 | file_name: activity.py | language: Python
path: homeassistant/components/august/activity.py
commit_id: dadcc5ebcbcf951ff677568b281c5897d990c8ae
repo: core | complexity: 6

id: 159,623 | vocab_size: 6 | ast_levels: 12 | nloc: 3 | n_ast_nodes: 40 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 6 | n_whitespaces: 12
fun_name: project_root
commit_message:
[ATO-114]Add nightly workflows and creation scripts
url: https://github.com/RasaHQ/rasa.git
code:
def project_root() -> Path:
    return Path(os.path.dirname(__file__)).parent.parent
token_counts: 23 | file_name: prepare_nightly_release.py | language: Python
path: scripts/prepare_nightly_release.py
commit_id: 9f634d248769198881bbb78ccd8d333982462ef5
repo: rasa | complexity: 1

id: 248,026 | vocab_size: 24 | ast_levels: 14 | nloc: 17 | n_ast_nodes: 121 | n_identifiers: 14 | n_ast_errors: 0 | n_words: 28 | n_whitespaces: 279
fun_name: add_device_change
commit_message:
Process device list updates asynchronously (#12365)
url: https://github.com/matrix-org/synapse.git
code:
def add_device_change(self, user_id, device_ids, host):
    for device_id in device_ids:
        stream_id = self.get_success(
            self.store.add_device_change_to_streams(
                "user_id", [device_id], ["!some:room"]
            )
        )

        self.get_success(
            self.store.add_device_list_outbound_pokes(
                user_id=user_id,
                device_id=device_id,
                room_id="!some:room",
                stream_id=stream_id,
                hosts=[host],
                context={},
            )
        )
token_counts: 79 | file_name: test_devices.py | language: Python
path: tests/storage/test_devices.py
commit_id: aa2811026402394b4013033f075d8f509cdc1257
repo: synapse | complexity: 2

id: 267,050 | vocab_size: 35 | ast_levels: 13 | nloc: 7 | n_ast_nodes: 71 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 54 | n_whitespaces: 93
fun_name: self_check
commit_message:
ansible-test - Support multiple coverage versions. ci_complete ci_coverage
url: https://github.com/ansible/ansible.git
code:
def self_check() -> None:
    # Verify all supported Python versions have a coverage version.
    for version in SUPPORTED_PYTHON_VERSIONS:
        get_coverage_version(version)

    # Verify all controller Python versions are mapped to the latest coverage version.
    for version in CONTROLLER_PYTHON_VERSIONS:
        if get_coverage_version(version) != CONTROLLER_COVERAGE_VERSION:
            raise InternalError(f'Controller Python version {version} is not mapped to the latest coverage version.')


self_check()
token_counts: 35 | file_name: coverage_util.py | language: Python
path: test/lib/ansible_test/_internal/coverage_util.py
commit_id: b9606417598217106e394c12c776d8c5ede9cd98
repo: ansible | complexity: 4

id: 258,546 | vocab_size: 65 | ast_levels: 15 | nloc: 21 | n_ast_nodes: 310 | n_identifiers: 26 | n_ast_errors: 0 | n_words: 99 | n_whitespaces: 320
fun_name: predict
commit_message:
MAINT Do not compute distances for uniform weighting (#22280)
url: https://github.com/scikit-learn/scikit-learn.git
code:
def predict(self, X):
    if self.weights == "uniform":
        # In that case, we do not need the distances to perform
        # the weighting so we do not compute them.
        neigh_ind = self.kneighbors(X, return_distance=False)
        neigh_dist = None
    else:
        neigh_dist, neigh_ind = self.kneighbors(X)

    weights = _get_weights(neigh_dist, self.weights)

    _y = self._y
    if _y.ndim == 1:
        _y = _y.reshape((-1, 1))

    if weights is None:
        y_pred = np.mean(_y[neigh_ind], axis=1)
    else:
        y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
        denom = np.sum(weights, axis=1)

        for j in range(_y.shape[1]):
            num = np.sum(_y[neigh_ind, j] * weights, axis=1)
            y_pred[:, j] = num / denom

    if self._y.ndim == 1:
        y_pred = y_pred.ravel()

    return y_pred
token_counts: 199 | file_name: _regression.py | language: Python
path: sklearn/neighbors/_regression.py
commit_id: fb082b223dc9f1dd327f48dc9b830ee382d6f661
repo: scikit-learn | complexity: 6

id: 190,825 | vocab_size: 61 | ast_levels: 10 | nloc: 10 | n_ast_nodes: 130 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 90 | n_whitespaces: 217
fun_name: getImageDescriptor
commit_message:
Reformat of files using black These files were not properly formatted.
url: https://github.com/thumbor/thumbor.git
code:
def getImageDescriptor(self, im, xy=None):
    # Defaule use full image and place at upper left
    if xy is None:
        xy = (0, 0)

    # Image separator,
    bb = b"\x2C"

    # Image position and size
    bb += int2long(xy[0])  # Left position
    bb += int2long(xy[1])  # Top position
    bb += int2long(im.size[0])  # image width
    bb += int2long(im.size[1])  # image height

    # packed field: local color table flag1, interlace0, sorted table0,
    # reserved00, lct size111=7=2^(7+1)=256.
    bb += b"\x87"

    # LZW minimum size code now comes later,
    # begining of [image data] blocks
    return bb
token_counts: 74 | file_name: pil.py | language: Python
path: thumbor/engines/extensions/pil.py
commit_id: 3c745ef193e9af9244cc406734e67815377472ed
repo: thumbor | complexity: 2

id: 272,348 | vocab_size: 36 | ast_levels: 12 | nloc: 9 | n_ast_nodes: 203 | n_identifiers: 22 | n_ast_errors: 0 | n_words: 62 | n_whitespaces: 153
fun_name: test_calculate_scores_one_dim_with_scale
commit_message:
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
url: https://github.com/keras-team/keras.git
code:
def test_calculate_scores_one_dim_with_scale(self):
    # Query tensor of shape [1, 1, 1]
    q = np.array([[[1.1]]], dtype=np.float32)
    # Key tensor of shape [1, 1, 1]
    k = np.array([[[1.6]]], dtype=np.float32)
    attention_layer = keras.layers.Attention(use_scale=True)
    attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
    attention_layer.scale = -2.0
    actual = attention_layer._calculate_scores(query=q, key=k)

    # Expected tensor of shape [1, 1, 1].
    # expected000 = -2*1.1*1.6 = -3.52
    expected = np.array([[[-3.52]]], dtype=np.float32)
    self.assertAllClose(expected, actual)
token_counts: 139 | file_name: attention_test.py | language: Python
path: keras/layers/attention/attention_test.py
commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf
repo: keras | complexity: 1

id: 281,114 | vocab_size: 65 | ast_levels: 12 | nloc: 48 | n_ast_nodes: 339 | n_identifiers: 22 | n_ast_errors: 0 | n_words: 84 | n_whitespaces: 266
fun_name: prepare_all_coins_df
commit_message:
Crypto menu refactor (#1119) * enabled some crypto commands in dd to be called independent of source loaded * support for coin_map_df in all dd functions + load ta and plot chart refactor * updated tests and removed coingecko scrapping where possible * removed ref of command from hugo * updated pycoingecko version * refactoring load * refactored load to fetch prices; pred can run independent of source now * load by default usd on cp/cg and usdt on cb/bin * updated to rich for formatting and updated dependencies * fixed changes requested * update docs * revert discord requirements * removed absolute from calculate change for price * fixing pr issues * fix loading issue when similar coins exist, move coins to home, fill n/a * update docs for coins * adds load to ta and pred menu
url: https://github.com/OpenBB-finance/OpenBBTerminal.git
code:
def prepare_all_coins_df() -> pd.DataFrame:
    gecko_coins_df = load_coins_list("coingecko_coins.json")

    paprika_coins_df = load_coins_list("coinpaprika_coins.json")
    paprika_coins_df = paprika_coins_df[paprika_coins_df["is_active"]]
    paprika_coins_df = paprika_coins_df[["rank", "id", "name", "symbol", "type"]]

    # TODO: Think about scheduled job, that once a day will update data
    binance_coins_df = load_binance_map().rename(columns={"symbol": "Binance"})
    coinbase_coins_df = load_coinbase_map().rename(columns={"symbol": "Coinbase"})
    gecko_paprika_coins_df = pd.merge(
        gecko_coins_df, paprika_coins_df, on="name", how="left"
    )
    df_merged = pd.merge(
        left=gecko_paprika_coins_df,
        right=binance_coins_df,
        left_on="id_x",
        right_on="id",
        how="left",
    )
    df_merged.rename(
        columns={
            "id_x": "CoinGecko",
            "symbol_x": "Symbol",
            "id_y": "CoinPaprika",
        },
        inplace=True,
    )
    df_merged = pd.merge(
        left=df_merged,
        right=coinbase_coins_df,
        left_on="CoinGecko",
        right_on="id",
        how="left",
    )

    return df_merged[["CoinGecko", "CoinPaprika", "Binance", "Coinbase", "Symbol"]]
token_counts: 191 | file_name: cryptocurrency_helpers.py | language: Python
path: gamestonk_terminal/cryptocurrency/cryptocurrency_helpers.py
commit_id: ea964109d654394cc0a5237e6ec5510ba6404097
repo: OpenBBTerminal | complexity: 1

id: 34,009 | vocab_size: 30 | ast_levels: 9 | nloc: 3 | n_ast_nodes: 64 | n_identifiers: 10 | n_ast_errors: 1 | n_words: 33 | n_whitespaces: 52
fun_name: _set_gradient_checkpointing
commit_message:
Add Nystromformer (#14659) * Initial commit * Config and modelling changes Added Nystromformer-specific attributes to config and removed all decoder functionality from modelling. * Modelling and test changes Added Nystrom approximation and removed decoder tests. * Code quality fixes * Modeling changes and conversion script Initial commits to conversion script, modeling changes. * Minor modeling changes and conversion script * Modeling changes * Correct modeling, add tests and documentation * Code refactor * Remove tokenizers * Code refactor * Update __init__.py * Fix bugs * Update src/transformers/__init__.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/__init__.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/nystromformer/__init__.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update docs/source/model_doc/nystromformer.mdx Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/nystromformer/configuration_nystromformer.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/nystromformer/configuration_nystromformer.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/nystromformer/configuration_nystromformer.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/nystromformer/configuration_nystromformer.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/nystromformer/convert_nystromformer_original_pytorch_checkpoint_to_pytorch.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/nystromformer/configuration_nystromformer.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update modeling and test_modeling * Code refactor * .rst to .mdx * doc changes * Doc changes * Update modeling_nystromformer.py * Doc changes * Fix copies * Apply suggestions from code review Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update configuration_nystromformer.py * Fix copies * Update tests/test_modeling_nystromformer.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update test_modeling_nystromformer.py * Apply suggestions from code review Co-authored-by: Lysandre Debut <lysandre@huggingface.co> * Fix code style * Update modeling_nystromformer.py * Update modeling_nystromformer.py * Fix code style * Reformat modeling file * Update modeling_nystromformer.py * Modify NystromformerForMultipleChoice * Fix code quality * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Code style changes and torch.no_grad() * make style * Apply suggestions from code review Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: Lysandre Debut <lysandre@huggingface.co> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
url: https://github.com/huggingface/transformers.git
code:
def _set_gradient_checkpointing(self, module, value=False):
    if isinstance(module, NystromformerEncoder):
        module.gradient_checkpointing = value


NYSTROMFORMER_START_DOCSTRING = r

NYSTROMFORMER_INPUTS_DOCSTRING = r


@add_start_docstrings(
    "The bare Nyströmformer Model transformer outputting raw hidden-states without any specific head on top.",
    NYSTROMFORMER_START_DOCSTRING,
)
ast_errors: @add_start_docstrings( "The bare Nyströmformer Model transformer outputting raw hidden-states without any specific head on top.", NYSTROMFORMER_START_DOCSTRING, )
token_counts: 24 | file_name: modeling_nystromformer.py | language: Python
path: src/transformers/models/nystromformer/modeling_nystromformer.py
commit_id: 28e091430eea9e0d40839e56fd0d57aec262f5f9
repo: transformers | complexity: 2

id: 119,984 | vocab_size: 23 | ast_levels: 9 | nloc: 8 | n_ast_nodes: 124 | n_identifiers: 16 | n_ast_errors: 1 | n_words: 27 | n_whitespaces: 91
fun_name: bcoo_dot_general_sampled
commit_message:
[sparse] Update docstrings for bcoo primitives. PiperOrigin-RevId: 438685829
url: https://github.com/google/jax.git
code:
def bcoo_dot_general_sampled(A, B, indices, *, dimension_numbers):
    (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
    cdims = (api_util._ensure_index_tuple(lhs_contract),
             api_util._ensure_index_tuple(rhs_contract))
    bdims = (api_util._ensure_index_tuple(lhs_batch),
             api_util._ensure_index_tuple(rhs_batch))
    return bcoo_dot_general_sampled_p.bind(A, B, indices,
                                           dimension_numbers=(cdims, bdims))


@bcoo_dot_general_sampled_p.def_impl
ast_errors: @bcoo_dot_general_sampled_p.def_impl
token_counts: 80 | file_name: bcoo.py | language: Python
path: jax/experimental/sparse/bcoo.py
commit_id: 3184dd65a222354bffa2466d9a375162f5649132
repo: jax | complexity: 1

id: 80,736 | vocab_size: 21 | ast_levels: 11 | nloc: 9 | n_ast_nodes: 95 | n_identifiers: 10 | n_ast_errors: 0 | n_words: 28 | n_whitespaces: 83
fun_name: _get_instance_id
commit_message:
Fix up new Django 3.0 deprecations Mostly text based: force/smart_text, ugettext_*
url: https://github.com/ansible/awx.git
code:
def _get_instance_id(from_dict, new_id, default=''):
    instance_id = default
    for key in new_id.split('.'):
        if not hasattr(from_dict, 'get'):
            instance_id = default
            break
        instance_id = from_dict.get(key, default)
        from_dict = instance_id
    return smart_str(instance_id)
token_counts: 56 | file_name: _inventory_source.py | language: Python
path: awx/main/migrations/_inventory_source.py
commit_id: a3a216f91f1158fd54c001c34cbdf2f68ccbc272
repo: awx | complexity: 3

id: 100,396 | vocab_size: 48 | ast_levels: 11 | nloc: 13 | n_ast_nodes: 225 | n_identifiers: 20 | n_ast_errors: 0 | n_words: 74 | n_whitespaces: 225
fun_name: compile_sample
commit_message:
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
url: https://github.com/deepfakes/faceswap.git
code:
def compile_sample(self, batch_size, samples=None, images=None, masks=None):
    num_images = self._config.get("preview_images", 14)
    num_images = min(batch_size, num_images) if batch_size is not None else num_images
    retval = {}
    for side in ("a", "b"):
        logger.debug("Compiling samples: (side: '%s', samples: %s)", side, num_images)
        side_images = images[side] if images is not None else self._target[side]
        side_masks = masks[side] if masks is not None else self._masks[side]
        side_samples = samples[side] if samples is not None else self._samples[side]
        retval[side] = [side_samples[0:num_images],
                        side_images[0:num_images],
                        side_masks[0:num_images]]
    return retval
token_counts: 153 | file_name: _base.py | language: Python
path: plugins/train/trainer/_base.py
commit_id: c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
repo: faceswap | complexity: 6

id: 314,211 | vocab_size: 8 | ast_levels: 6 | nloc: 6 | n_ast_nodes: 25 | n_identifiers: 4 | n_ast_errors: 0 | n_words: 8 | n_whitespaces: 22
fun_name: temperature
commit_message:
Weather unit conversion (#73441) Co-authored-by: Erik <erik@montnemery.com>
url: https://github.com/home-assistant/core.git
code:
def temperature(self) -> float | None:
    return self._attr_temperature
token_counts: 14 | file_name: __init__.py | language: Python
path: homeassistant/components/weather/__init__.py
commit_id: 90e1fb6ce2faadb9a35fdbe1774fce7b4456364f
repo: core | complexity: 1

id: 20,801 | vocab_size: 6 | ast_levels: 7 | nloc: 3 | n_ast_nodes: 26 | n_identifiers: 4 | n_ast_errors: 0 | n_words: 6 | n_whitespaces: 20
fun_name: get_time
commit_message:
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
url: https://github.com/pypa/pipenv.git
code:
def get_time(self) -> float:
    return self._get_time()
token_counts: 14 | file_name: progress.py | language: Python
path: pipenv/patched/notpip/_vendor/rich/progress.py
commit_id: f3166e673fe8d40277b804d35d77dcdb760fc3b3
repo: pipenv | complexity: 1

id: 118,718 | vocab_size: 28 | ast_levels: 12 | nloc: 13 | n_ast_nodes: 173 | n_identifiers: 19 | n_ast_errors: 0 | n_words: 35 | n_whitespaces: 142
fun_name: test_add_unstyled_rows_to_styled_rows
commit_message:
Pandas 1.4 styler fix (#4316) Change the way we detect custom styling in a DataFrame, to account for changes in Pandas 1.4. Our DataFrame styling support is based on internal Pandas APIs, so they're always subject to change out from underneath us. In general, we'd prefer to only pass `display_value` data to the frontend when a DataFrame cell has been custom-formatted by the user, to save on bandwidth. However, Panda's Styler's internals are private, and it doesn't give us a consistent way of testing whether a cell has a custom `display_value` or not. Prior to Pandas 1.4, we could test whether a cell's `display_value` differed from its `value`, and only stick the `display_value` in the protobuf when that was the case. In 1.4, an unmodified Styler will contain `display_value` strings for all cells, regardless of whether any formatting has been applied to that cell, so we no longer have this ability (or at least I couldn't figure out a reasonable way to test for this). So instead, as of this PR, calling `st._legacy_dataframe(df.styler)` will *always* result in `display_value` strings being written to the dataframe protobuf (even though there isn't any custom formatting). This means that styled DataFrames may result in more data being sent to the frontend now than was the case before. In practice, I don't think this is a big deal - only the legacy DataFrame code has styling support; and often, if you're styling a DataFrame, you're customizing the formatting on most or all of its cells anyway. I also made a number of small type-safety changes as I was working with the dataframe code, and those are all in the PR as well. (I've left a PR comment under the actual logic changes.)
url: https://github.com/streamlit/streamlit.git
code:
def test_add_unstyled_rows_to_styled_rows(self, st_element, get_proto):
    df1 = pd.DataFrame([5, 6])
    df2 = pd.DataFrame([7, 8])

    css_values = [
        {css_s("color", "black")},
        {css_s("color", "black")},
        set(),
        set(),
    ]

    x = st_element(df1.style.applymap(lambda val: "color: black"))
    x._legacy_add_rows(df2)

    proto_df = get_proto(self._get_element())
    self._assert_column_css_styles(proto_df, 0, css_values)
token_counts: 106 | file_name: legacy_dataframe_styling_test.py | language: Python
path: lib/tests/streamlit/legacy_dataframe_styling_test.py
commit_id: 2c153aa179a27539f856e389870161d5a58da213
repo: streamlit | complexity: 1

id: 282,770 | vocab_size: 13 | ast_levels: 11 | nloc: 4 | n_ast_nodes: 53 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 13 | n_whitespaces: 37
fun_name: handle_error_code
commit_message:
Output Missing API Key Message to Console (#1357) * Decorator to output error msg to console of missing API Key * Refactor FMP & alpha advantage * Refactor FRED & QUANDL * Refactor Polygon * Refactor FRED * Refactor FRED * Refactor Finnhub & coinmarketcap & Newsapi * Allow disabling of check api * Updating tests : disable check api for tests * Refactor Finnhub & SI & Binance * Fix linting * Fix test & add black formatting * Fix test failing * Fix test failing * Refactor CryptoPanic & Whales alert & Glassnode & Coinglass * Refactor ETHexplorer & Smartstake & Alpha Advanage & Coinbase * Add decorators to controllers * Fix test & Refactor Coinbase, RH, Reddit * Add contributing guideline * Update CONTRIBUTING.md * Update CONTRIBUTING.md * fix tests * add decorator to snews cmd Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: didierlopes.eth <dro.lopes@campus.fct.unl.pt>
url: https://github.com/OpenBB-finance/OpenBBTerminal.git
code:
def handle_error_code(requests_obj, error_code_map):
    for error_code, error_msg in error_code_map.items():
        if requests_obj.status_code == error_code:
            console.print(error_msg)
token_counts: 32 | file_name: helper_funcs.py | language: Python
path: gamestonk_terminal/helper_funcs.py
commit_id: 401e4c739a6f9d18944e0ab49c782e97b56fda94
repo: OpenBBTerminal | complexity: 3

id: 21,882 | vocab_size: 27 | ast_levels: 15 | nloc: 10 | n_ast_nodes: 99 | n_identifiers: 11 | n_ast_errors: 0 | n_words: 31 | n_whitespaces: 97
fun_name: detect
commit_message:
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
url: https://github.com/pypa/pipenv.git
code:
def detect(byte_str):
    if not isinstance(byte_str, bytearray):
        if not isinstance(byte_str, bytes):
            raise TypeError(
                f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
            )
        byte_str = bytearray(byte_str)
    detector = UniversalDetector()
    detector.feed(byte_str)
    return detector.close()
token_counts: 53 | file_name: __init__.py | language: Python
path: pipenv/patched/pip/_vendor/chardet/__init__.py
commit_id: cd5a9683be69c86c8f3adcd13385a9bc5db198ec
repo: pipenv | complexity: 3

id: 212,923 | vocab_size: 37 | ast_levels: 13 | nloc: 9 | n_ast_nodes: 133 | n_identifiers: 13 | n_ast_errors: 0 | n_words: 46 | n_whitespaces: 129
fun_name: delete_file
commit_message:
Added report_error setting for user_settings_delete_file. Global Settings window complete rework to use Tabs. Hoping nothing broke, but just remember things are in flux for a little bit while the ttk scrollbars are finishing up
url: https://github.com/PySimpleGUI/PySimpleGUI.git
code:
def delete_file(self, filename=None, path=None, report_error=False):
    if filename is not None or path is not None or (filename is None and path is None):
        self.set_location(filename=filename, path=path)
    try:
        os.remove(self.full_filename)
    except Exception as e:
        if report_error:
            _error_popup_with_traceback('UserSettings delete_file warning ***',
                                        'Exception trying to perform os.remove', e)
    self.dict = {}
token_counts: 83 | file_name: PySimpleGUI.py | language: Python
path: PySimpleGUI.py
commit_id: f776589349476a41b98aa1f467aff2f30e2a8fc2
repo: PySimpleGUI | complexity: 7

id: 60,235 | vocab_size: 31 | ast_levels: 9 | nloc: 10 | n_ast_nodes: 91 | n_identifiers: 11 | n_ast_errors: 0 | n_words: 44 | n_whitespaces: 86
fun_name: compose
commit_message:
Balanced joint maximum mean discrepancy for deep transfer learning
url: https://github.com/jindongwang/transferlearning.git
code:
def compose(base_map, next_map):
    ax1, a1, b1 = base_map
    ax2, a2, b2 = next_map
    if ax1 is None:
        ax = ax2
    elif ax2 is None or ax1 == ax2:
        ax = ax1
    else:
        raise AxisMismatchException
    return ax, a1 * a2, a1 * b2 + b1
token_counts: 58 | file_name: coord_map.py | language: Python
path: code/deep/BJMMD/caffe/python/caffe/coord_map.py
commit_id: cc4d0564756ca067516f71718a3d135996525909
repo: transferlearning | complexity: 4

id: 225,809 | vocab_size: 9 | ast_levels: 7 | nloc: 3 | n_ast_nodes: 29 | n_identifiers: 5 | n_ast_errors: 1 | n_words: 9 | n_whitespaces: 22
fun_name: is_doc_id_none
commit_message:
Add index composability! (#86) Summary of changes - Bumped version to 0.1.0 - Abstracted out a BaseDocument class that both Document (from data loaders) and IndexStruct (our data struct classes) inherit from. - Add a DocumentStore that contains the id's of all BaseDocuments. Both Document objects and IndexStruct objects are registered in here, allowing us to recursively fetch and query sub-index structures within an index structure. - Add a reference document id to each Node class. This allows us to recursively query within another index struct after we traverse a node, if the reference document id of that node corresponds to another index struct in the DocumentStore. - Use Node as the central abstraction containing both "text" as well as a reference document_id: use for List, Tree, KeywordTable - Factored out a QueryRunner to recursively run queries. I grappled with some circular dependency issues but I believe the current approach works. - Add a bunch of unit tests Co-authored-by: Jerry Liu <jerry@robustintelligence.com>
https://github.com/jerryjliu/llama_index.git
def is_doc_id_none(self) -> bool:
    return self.doc_id is None


@dataclass
@dataclass
14
schema.py
Python
gpt_index/schema.py
c22d865acb3899a181921d94b6e94e665a12b432
llama_index
1
301,395
17
10
6
91
17
0
19
37
test_setup_not_ready
Create iAlarmXR integration (#67817)

* Creating iAlarmXR integration
* fixing after review code
* fixing remaining review hints
* fixing remaining review hints
* updating underlying pyialarm library
* Creating iAlarmXR integration
* fixing after review code
* fixing remaining review hints
* fixing remaining review hints
* updating underlying pyialarm library
* fixing after iMicknl review
* Improving exception handling
* Updating pyialarmxr library
* fixing after merge dev
* fixing after iMicknl review
* Update CODEOWNERS
  Co-authored-by: Ludovico de Nittis <git@denittis.one>
* fixing iot_class
* Update homeassistant/components/ialarmxr/config_flow.py
  Co-authored-by: J. Nick Koston <nick@koston.org>
* fixing after bdraco review
* Update homeassistant/components/ialarmxr/config_flow.py
  Co-authored-by: J. Nick Koston <nick@koston.org>
* reverting catching exception in setup step
* Update homeassistant/components/ialarmxr/__init__.py
  Co-authored-by: J. Nick Koston <nick@koston.org>
* Update homeassistant/components/ialarmxr/__init__.py
  Co-authored-by: J. Nick Koston <nick@koston.org>
* fixing after bdraco suggestions
* Update homeassistant/components/ialarmxr/alarm_control_panel.py
  Co-authored-by: J. Nick Koston <nick@koston.org>
* Update homeassistant/components/ialarmxr/alarm_control_panel.py
  Co-authored-by: Mick Vleeshouwer <mick@imick.nl>
* Update homeassistant/components/ialarmxr/config_flow.py
  Co-authored-by: J. Nick Koston <nick@koston.org>
* Update homeassistant/components/ialarmxr/config_flow.py
  Co-authored-by: J. Nick Koston <nick@koston.org>
* Update homeassistant/components/ialarmxr/__init__.py
  Co-authored-by: J. Nick Koston <nick@koston.org>
* Update homeassistant/components/ialarmxr/__init__.py
  Co-authored-by: J. Nick Koston <nick@koston.org>
* Update homeassistant/components/ialarmxr/utils.py
  Co-authored-by: J. Nick Koston <nick@koston.org>
* regenerate translation and rename function to async_get_ialarmxr_mac
* removing and collapsing unused error messages
* fixing tests
* improve code coverage in tests
* improve code coverage in tests
* improve code coverage in tests
* fixing retry policy with new pyalarmxr library
* snake case fix
* renaming integration in ialarm_xr
* renaming control panel name

Co-authored-by: Ludovico de Nittis <git@denittis.one>
Co-authored-by: J. Nick Koston <nick@koston.org>
Co-authored-by: Mick Vleeshouwer <mick@imick.nl>
https://github.com/home-assistant/core.git
async def test_setup_not_ready(hass, ialarmxr_api, mock_config_entry):
    ialarmxr_api.return_value.get_mac = Mock(side_effect=ConnectionError)
    mock_config_entry.add_to_hass(hass)
    assert not await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    assert mock_config_entry.state is ConfigEntryState.SETUP_RETRY
55
test_init.py
Python
tests/components/ialarm_xr/test_init.py
42c80dda85f567192c182da2b4c603408a890381
core
1
264,031
99
15
19
243
20
0
146
397
collect_qtqml_files
hookutils: reorganize the Qt hook utilities Reorganize the Qt module information to provide information necessary to deal with variations between different python Qt bindings (PySide2, PyQt5, PySide6, and PyQt6). Replace the existing table-like dictionary with list of entries, which is easier to format and document. From this list, we now generate two dictionaries; one that maps Qt module (shared library) names to the module info entries (the same role as the old dictionary), and one that maps python module names to the module info entries. The latter is necessary to accommodate python modules that do not have corresponding Qt shared libraries (header-only Qt modules, such as QtAxContainer; or statically-linked module, such as QSci), but we still need to provide information about plugins or translation files. The new information list is based on manual inspection of source code for Qt 5.15 and 6.3, and should provide comprehensive information about all plugin names and translation file basenames. In addition, most of the helper functions, which take a reference to the `QtLibraryInfo` class as their first argument, have been turned into methods of the `QtLibraryInfo` class. The corresponding hooks have also been adjusted.
https://github.com/pyinstaller/pyinstaller.git
def collect_qtqml_files(self):
    # No-op if requested Qt-based package is not available.
    if self.version is None:
        return [], []

    # Not all PyQt5/PySide2 installs have QML files. In this case, location['Qml2ImportsPath'] is empty.
    # Furthermore, even if location path is provided, the directory itself may not exist.
    #
    # https://github.com/pyinstaller/pyinstaller/pull/3229#issuecomment-359735031
    # https://github.com/pyinstaller/pyinstaller/issues/3864
    #
    # In Qt 6, Qml2ImportsPath was deprecated in favor of QmlImportsPath. The former is not available in PySide6
    # 6.4.0 anymore (but is in PyQt6 6.4.0). Use the new QmlImportsPath if available.
    if 'QmlImportsPath' in self.location:
        qml_src_dir = self.location['QmlImportsPath']
    else:
        qml_src_dir = self.location['Qml2ImportsPath']
    if not qml_src_dir or not os.path.isdir(qml_src_dir):
        logger.warning('%s: QML directory %r does not exist. QML files not packaged.', self, qml_src_dir)
        return [], []

    qml_dst_dir = os.path.join(self.qt_rel_dir, 'qml')
    datas = [(qml_src_dir, qml_dst_dir)]
    binaries = [
        # Produce ``/path/to/Qt/Qml/path_to_qml_binary/qml_binary, PyQt5/Qt/Qml/path_to_qml_binary``.
        (
            qml_plugin_file,
            os.path.join(qml_dst_dir, os.path.dirname(os.path.relpath(qml_plugin_file, qml_src_dir)))
        )
        for qml_plugin_file in misc.dlls_in_subdirs(qml_src_dir)
    ]

    return binaries, datas
144
__init__.py
Python
PyInstaller/utils/hooks/qt/__init__.py
d789a7daa7712716c89259b987349917a89aece7
pyinstaller
6
82,418
50
11
26
356
15
0
72
314
test_patricks_move
ci: Added codespell (#7355) Co-authored-by: Christian Clauss <cclauss@me.com> * ci: codespell config taken from #7292
https://github.com/django-cms/django-cms.git
def test_patricks_move(self):
    self.assertEqual(self.pg.node.parent, self.pe.node)
    # perform moves under slave...
    self.move_page(self.pg, self.pc)
    self.reload_pages()
    # page is now under PC
    self.assertEqual(self.pg.node.parent, self.pc.node)
    self.assertEqual(self.pg.get_absolute_url(), self.pg.publisher_public.get_absolute_url())
    self.move_page(self.pe, self.pg)
    self.reload_pages()
    self.assertEqual(self.pe.node.parent, self.pg.node)
    self.ph = self.ph.reload()
    # check urls - they should stay be the same now after the move
    self.assertEqual(
        self.pg.publisher_public.get_absolute_url(),
        self.pg.get_absolute_url()
    )
    self.assertEqual(
        self.ph.publisher_public.get_absolute_url(),
        self.ph.get_absolute_url()
    )

    # check if urls are correct after move
    self.assertEqual(
        self.pg.publisher_public.get_absolute_url(),
        '%smaster/slave-home/pc/pg/' % self.get_pages_root()
    )
    self.assertEqual(
        self.ph.publisher_public.get_absolute_url(),
        '%smaster/slave-home/pc/pg/pe/ph/' % self.get_pages_root()
    )
215
test_permmod.py
Python
cms/tests/test_permmod.py
c1290c9ff89cb00caa5469129fd527e9d82cd820
django-cms
1
291,315
23
10
9
85
15
0
24
67
test_text_new_min_max_pattern
Add `text` platform (#79454) Co-authored-by: Franck Nijhof <frenck@frenck.nl> Co-authored-by: Franck Nijhof <git@frenck.dev>
https://github.com/home-assistant/core.git
async def test_text_new_min_max_pattern(hass):
    text = MockTextEntity(native_min=-1, native_max=500, pattern=r"[a-z]")
    text.hass = hass
    assert text.capability_attributes == {
        ATTR_MIN: 0,
        ATTR_MAX: MAX_LENGTH_STATE_STATE,
        ATTR_MODE: TextMode.TEXT,
        ATTR_PATTERN: r"[a-z]",
    }
55
test_init.py
Python
tests/components/text/test_init.py
003e4224c89a6da381960dc5347750d1521d85c9
core
1
260,017
266
15
48
713
67
0
475
735
load_dataset
DOC rework plot_document_classification_20newsgroups.py example (#22928) Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com> Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> Co-authored-by: Julien Jerphanion <git@jjerphan.xyz>
https://github.com/scikit-learn/scikit-learn.git
def load_dataset(verbose=False, remove=()):
    data_train = fetch_20newsgroups(
        subset="train",
        categories=categories,
        shuffle=True,
        random_state=42,
        remove=remove,
    )

    data_test = fetch_20newsgroups(
        subset="test",
        categories=categories,
        shuffle=True,
        random_state=42,
        remove=remove,
    )

    # order of labels in `target_names` can be different from `categories`
    target_names = data_train.target_names

    # split target in a training set and a test set
    y_train, y_test = data_train.target, data_test.target

    # Extracting features from the training data using a sparse vectorizer
    t0 = time()
    vectorizer = TfidfVectorizer(
        sublinear_tf=True, max_df=0.5, min_df=5, stop_words="english"
    )
    X_train = vectorizer.fit_transform(data_train.data)
    duration_train = time() - t0

    # Extracting features from the test data using the same vectorizer
    t0 = time()
    X_test = vectorizer.transform(data_test.data)
    duration_test = time() - t0

    feature_names = vectorizer.get_feature_names_out()

    if verbose:
        # compute size of loaded data
        data_train_size_mb = size_mb(data_train.data)
        data_test_size_mb = size_mb(data_test.data)

        print(
            f"{len(data_train.data)} documents - "
            f"{data_train_size_mb:.2f}MB (training set)"
        )
        print(f"{len(data_test.data)} documents - {data_test_size_mb:.2f}MB (test set)")
        print(f"{len(target_names)} categories")
        print(
            f"vectorize training done in {duration_train:.3f}s "
            f"at {data_train_size_mb / duration_train:.3f}MB/s"
        )
        print(f"n_samples: {X_train.shape[0]}, n_features: {X_train.shape[1]}")
        print(
            f"vectorize testing done in {duration_test:.3f}s "
            f"at {data_test_size_mb / duration_test:.3f}MB/s"
        )
        print(f"n_samples: {X_test.shape[0]}, n_features: {X_test.shape[1]}")

    return X_train, X_test, y_train, y_test, feature_names, target_names


# %%
# Compare feature effects
# -----------------------
# We train a first classification model without attempting to strip the metadata
# of the dataset.

X_train, X_test, y_train, y_test, feature_names, target_names = load_dataset(
    verbose=True
)

# %%
# Our first model is an instance of the
# :class:`~sklearn.linear_model.RidgeClassifier` class. This is a linear
# classification model that uses the mean squared error on {-1, 1} encoded
# targets, one for each possible class. Contrary to
# :class:`~sklearn.linear_model.LogisticRegression`,
# :class:`~sklearn.linear_model.RidgeClassifier` does not
# provide probabilistic predictions (no `predict_proba` method),
# but it is often faster to train.

from sklearn.linear_model import RidgeClassifier

clf = RidgeClassifier(tol=1e-2, solver="sparse_cg")
clf.fit(X_train, y_train)
pred = clf.predict(X_test)

# %%
# We plot the confusion matrix of this classifier to find if there is a pattern
# in the classification errors.

import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay

fig, ax = plt.subplots(figsize=(10, 5))
ConfusionMatrixDisplay.from_predictions(y_test, pred, ax=ax)
ax.xaxis.set_ticklabels(target_names)
ax.yaxis.set_ticklabels(target_names)
_ = ax.set_title(
    f"Confusion Matrix for {clf.__class__.__name__}\non the original documents"
)

# %%
# The confusion matrix highlights that documents of the `alt.atheism` class are
# often confused with documents with the class `talk.religion.misc` class and
# vice-versa which is expected since the topics are semantically related.
#
# We also observe that some documents of the `sci.space` class can be misclassified as
# `comp.graphics` while the converse is much rarer. A manual inspection of those
# badly classified documents would be required to get some insights on this
# asymmetry. It could be the case that the vocabulary of the space topic could
# be more specific than the vocabulary for computer graphics.
#
# We can gain a deeper understanding of how this classifier makes its decisions
# by looking at the words with the highest average feature effects:

import pandas as pd
import numpy as np
224
plot_document_classification_20newsgroups.py
Python
examples/text/plot_document_classification_20newsgroups.py
71028322e8964cf1f341a7b293abaefeb5275e12
scikit-learn
2
155,176
51
13
13
222
21
0
64
193
apply
FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059) Signed-off-by: Igoshev, Iaroslav <iaroslav.igoshev@intel.com>
https://github.com/modin-project/modin.git
def apply(self, func, *args, **kwargs):
    logger = get_logger()
    logger.debug(f"ENTER::Partition.apply::{self._identity}")
    data = self._data
    call_queue = self.call_queue + [[func, args, kwargs]]
    if len(call_queue) > 1:
        logger.debug(f"SUBMIT::_apply_list_of_funcs::{self._identity}")
        result, length, width, ip = _apply_list_of_funcs.remote(call_queue, data)
    else:
        # We handle `len(call_queue) == 1` in a different way because
        # this dramatically improves performance.
        result, length, width, ip = _apply_func.remote(data, func, *args, **kwargs)
        logger.debug(f"SUBMIT::_apply_func::{self._identity}")
    logger.debug(f"EXIT::Partition.apply::{self._identity}")
    return PandasOnUnidistDataframePartition(result, length, width, ip)
126
partition.py
Python
modin/core/execution/unidist/implementations/pandas_on_unidist/partitioning/partition.py
193505fdf0c984743397ba3df56262f30aee13a8
modin
2
157,201
55
14
15
278
22
1
60
148
test_roundtrip_nullable_dtypes
Add support for `use_nullable_dtypes` to `dd.read_parquet` (#9617)
https://github.com/dask/dask.git
def test_roundtrip_nullable_dtypes(tmp_path, write_engine, read_engine):
    if read_engine == "fastparquet" or write_engine == "fastparquet":
        pytest.xfail("https://github.com/dask/fastparquet/issues/465")

    df = pd.DataFrame(
        {
            "a": pd.Series([1, 2, pd.NA, 3, 4], dtype="Int64"),
            "b": pd.Series([True, pd.NA, False, True, False], dtype="boolean"),
            "c": pd.Series([0.1, 0.2, 0.3, pd.NA, 0.4], dtype="Float64"),
            "d": pd.Series(["a", "b", "c", "d", pd.NA], dtype="string"),
        }
    )
    ddf = dd.from_pandas(df, npartitions=2)
    ddf.to_parquet(tmp_path, engine=write_engine)
    ddf2 = dd.read_parquet(tmp_path, engine=read_engine)
    assert_eq(df, ddf2)


@PYARROW_MARK
@PYARROW_MARK
182
test_parquet.py
Python
dask/dataframe/io/tests/test_parquet.py
b1e468e8645baee30992fbfa84250d816ac1098a
dask
3
290,831
90
16
47
410
41
0
167
606
async_update_group_state
Cleanup supported_features in group (#82242) * Cleanup supported_features in group * Remove defaults (already set to 0 in fan and media_player)
https://github.com/home-assistant/core.git
def async_update_group_state(self) -> None:
    self._attr_assumed_state = False
    states = [
        state
        for entity_id in self._entities
        if (state := self.hass.states.get(entity_id)) is not None
    ]
    self._attr_assumed_state |= not states_equal(states)

    # Set group as unavailable if all members are unavailable or missing
    self._attr_available = any(state.state != STATE_UNAVAILABLE for state in states)

    valid_state = any(
        state.state not in (STATE_UNKNOWN, STATE_UNAVAILABLE) for state in states
    )
    if not valid_state:
        # Set as unknown if all members are unknown or unavailable
        self._is_on = None
    else:
        # Set as ON if any member is ON
        self._is_on = any(state.state == STATE_ON for state in states)

    percentage_states = self._async_states_by_support_flag(
        FanEntityFeature.SET_SPEED
    )
    self._percentage = reduce_attribute(percentage_states, ATTR_PERCENTAGE)
    self._attr_assumed_state |= not attribute_equal(
        percentage_states, ATTR_PERCENTAGE
    )
    if (
        percentage_states
        and percentage_states[0].attributes.get(ATTR_PERCENTAGE_STEP)
        and attribute_equal(percentage_states, ATTR_PERCENTAGE_STEP)
    ):
        self._speed_count = (
            round(100 / percentage_states[0].attributes[ATTR_PERCENTAGE_STEP])
            or 100
        )
    else:
        self._speed_count = 100

    self._set_attr_most_frequent(
        "_oscillating", FanEntityFeature.OSCILLATE, ATTR_OSCILLATING
    )
    self._set_attr_most_frequent(
        "_direction", FanEntityFeature.DIRECTION, ATTR_DIRECTION
    )

    self._attr_supported_features = reduce(
        ior, [feature for feature in SUPPORTED_FLAGS if self._fans[feature]], 0
    )
    self._attr_assumed_state |= any(
        state.attributes.get(ATTR_ASSUMED_STATE) for state in states
    )
265
fan.py
Python
homeassistant/components/group/fan.py
38a8e86ddeb65ee8c731b90a7063a3b3702dc1ef
core
14
261,351
15
10
6
75
11
0
15
61
predict
OPTIM use pairwise_distances_argmin in NearestCentroid.predict (#24645) Co-authored-by: Julien Jerphanion <git@jjerphan.xyz>
https://github.com/scikit-learn/scikit-learn.git
def predict(self, X):
    check_is_fitted(self)

    X = self._validate_data(X, accept_sparse="csr", reset=False)
    return self.classes_[
        pairwise_distances_argmin(X, self.centroids_, metric=self.metric)
    ]
48
_nearest_centroid.py
Python
sklearn/neighbors/_nearest_centroid.py
e01035d3b2dc147cbbe9f6dbd7210a76119991e8
scikit-learn
1
109,149
69
13
35
487
22
0
146
463
_suplabels
Add rcparam for figure label size and weight (#22566) * Add rcparam for figure label size and weight
https://github.com/matplotlib/matplotlib.git
def _suplabels(self, t, info, **kwargs):
    suplab = getattr(self, info['name'])

    x = kwargs.pop('x', None)
    y = kwargs.pop('y', None)
    if info['name'] in ['_supxlabel', '_suptitle']:
        autopos = y is None
    elif info['name'] == '_supylabel':
        autopos = x is None
    if x is None:
        x = info['x0']
    if y is None:
        y = info['y0']

    if 'horizontalalignment' not in kwargs and 'ha' not in kwargs:
        kwargs['horizontalalignment'] = info['ha']
    if 'verticalalignment' not in kwargs and 'va' not in kwargs:
        kwargs['verticalalignment'] = info['va']
    if 'rotation' not in kwargs:
        kwargs['rotation'] = info['rotation']

    if 'fontproperties' not in kwargs:
        if 'fontsize' not in kwargs and 'size' not in kwargs:
            kwargs['size'] = mpl.rcParams[info['size']]
        if 'fontweight' not in kwargs and 'weight' not in kwargs:
            kwargs['weight'] = mpl.rcParams[info['weight']]

    sup = self.text(x, y, t, **kwargs)
    if suplab is not None:
        suplab.set_text(t)
        suplab.set_position((x, y))
        suplab.update_from(sup)
        sup.remove()
    else:
        suplab = sup
    suplab._autopos = autopos
    setattr(self, info['name'], suplab)
    self.stale = True
    return suplab
283
figure.py
Python
lib/matplotlib/figure.py
eeac402ec56d7e69234e0cd7b15f59d53852e457
matplotlib
16
47,465
65
15
34
372
49
0
84
398
test_backfill_execute_subdag_with_removed_task
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
https://github.com/apache/airflow.git
def test_backfill_execute_subdag_with_removed_task(self):
    dag = self.dagbag.get_dag('example_subdag_operator')
    subdag = dag.get_task('section-1').subdag

    session = settings.Session()
    executor = MockExecutor()
    job = BackfillJob(
        dag=subdag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor, donot_pickle=True
    )

    dr = DagRun(
        dag_id=subdag.dag_id, execution_date=DEFAULT_DATE, run_id="test", run_type=DagRunType.BACKFILL_JOB
    )
    session.add(dr)

    removed_task_ti = TI(
        task=EmptyOperator(task_id='removed_task'), run_id=dr.run_id, state=State.REMOVED
    )
    removed_task_ti.dag_id = subdag.dag_id
    dr.task_instances.append(removed_task_ti)

    session.commit()

    with timeout(seconds=30):
        job.run()

    for task in subdag.tasks:
        instance = (
            session.query(TI)
            .filter(
                TI.dag_id == subdag.dag_id, TI.task_id == task.task_id, TI.execution_date == DEFAULT_DATE
            )
            .first()
        )

        assert instance is not None
        assert instance.state == State.SUCCESS

    removed_task_ti.refresh_from_db()
    assert removed_task_ti.state == State.REMOVED

    subdag.clear()
    dag.clear()
232
test_backfill_job.py
Python
tests/jobs/test_backfill_job.py
49e336ae0302b386a2f47269a6d13988382d975f
airflow
2
210,267
46
11
11
197
19
1
59
174
__call__
Remove conditional block in RCNN export onnx (#5371)

* support rcnn onnx
* clean code
* update cascade rcnn
* add todo for rpn proposals
https://github.com/PaddlePaddle/PaddleDetection.git
def __call__(self, mask_out, bboxes, bbox_num, origin_shape):
    num_mask = mask_out.shape[0]
    origin_shape = paddle.cast(origin_shape, 'int32')
    # TODO: support bs > 1 and mask output dtype is bool
    pred_result = paddle.zeros(
        [num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32')
    im_h, im_w = origin_shape[0][0], origin_shape[0][1]
    pred_mask = self.paste_mask(mask_out[:, None, :, :], bboxes[:, 2:], im_h, im_w)
    pred_mask = pred_mask >= self.binary_thresh
    pred_result = paddle.cast(pred_mask, 'int32')
    return pred_result


@register
@register
129
post_process.py
Python
ppdet/modeling/post_process.py
afb3b7a1c7842921b8eacae9d2ac4f2e660ea7e1
PaddleDetection
1
81,344
146
19
96
894
7
0
244
1,599
context_stub
Adding fields to job_metadata for workflows and approval nodes (#12255)
https://github.com/ansible/awx.git
def context_stub(cls):
    context = {
        'job': {
            'allow_simultaneous': False,
            'artifacts': {},
            'controller_node': 'foo_controller',
            'created': datetime.datetime(2018, 11, 13, 6, 4, 0, 0, tzinfo=datetime.timezone.utc),
            'custom_virtualenv': 'my_venv',
            'description': 'Sample job description',
            'diff_mode': False,
            'elapsed': 0.403018,
            'execution_node': 'awx',
            'failed': False,
            'finished': False,
            'force_handlers': False,
            'forks': 0,
            'host_status_counts': {'skipped': 1, 'ok': 5, 'changed': 3, 'failures': 0, 'dark': 0, 'failed': False, 'processed': 0, 'rescued': 0},
            'id': 42,
            'job_explanation': 'Sample job explanation',
            'job_slice_count': 1,
            'job_slice_number': 0,
            'job_tags': '',
            'job_type': 'run',
            'launch_type': 'workflow',
            'limit': 'bar_limit',
            'modified': datetime.datetime(2018, 12, 13, 6, 4, 0, 0, tzinfo=datetime.timezone.utc),
            'name': 'Stub JobTemplate',
            'playbook': 'ping.yml',
            'scm_branch': '',
            'scm_revision': '',
            'skip_tags': '',
            'start_at_task': '',
            'started': '2019-07-29T17:38:14.137461Z',
            'status': 'running',
            'summary_fields': {
                'created_by': {'first_name': '', 'id': 1, 'last_name': '', 'username': 'admin'},
                'instance_group': {'id': 1, 'name': 'tower'},
                'inventory': {
                    'description': 'Sample inventory description',
                    'has_active_failures': False,
                    'has_inventory_sources': False,
                    'hosts_with_active_failures': 0,
                    'id': 17,
                    'inventory_sources_with_failures': 0,
                    'kind': '',
                    'name': 'Stub Inventory',
                    'organization_id': 121,
                    'total_groups': 0,
                    'total_hosts': 1,
                    'total_inventory_sources': 0,
                },
                'job_template': {'description': 'Sample job template description', 'id': 39, 'name': 'Stub JobTemplate'},
                'labels': {'count': 0, 'results': []},
                'project': {'description': 'Sample project description', 'id': 38, 'name': 'Stub project', 'scm_type': 'git', 'status': 'successful'},
                'schedule': {
                    'description': 'Sample schedule',
                    'id': 42,
                    'name': 'Stub schedule',
                    'next_run': datetime.datetime(2038, 1, 1, 0, 0, 0, 0, tzinfo=datetime.timezone.utc),
                },
                'unified_job_template': {
                    'description': 'Sample unified job template description',
                    'id': 39,
                    'name': 'Stub Job Template',
                    'unified_job_type': 'job',
                },
            },
            'timeout': 0,
            'type': 'job',
            'url': '/api/v2/jobs/13/',
            'use_fact_cache': False,
            'verbosity': 0,
        },
        'job_friendly_name': 'Job',
        'url': 'https://towerhost/#/jobs/playbook/1010',
        'approval_status': 'approved',
        'approval_node_name': 'Approve Me',
        'workflow_url': 'https://towerhost/#/jobs/workflow/1010',
        'job_metadata': ,
    }

    return context
480
notifications.py
Python
awx/main/models/notifications.py
389c4a318035cdb02a972ba8200391765f522169
awx
1
274,647
5
10
11
32
8
0
5
19
test_build_in_tf_function
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def test_build_in_tf_function(self):
    m = metrics.MeanTensor(dtype=tf.float64)
117
base_metric_test.py
Python
keras/metrics/base_metric_test.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
276,840
28
14
13
185
20
0
42
105
func_dump
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def func_dump(func):
    if os.name == "nt":
        raw_code = marshal.dumps(func.__code__).replace(b"\\", b"/")
        code = codecs.encode(raw_code, "base64").decode("ascii")
    else:
        raw_code = marshal.dumps(func.__code__)
        code = codecs.encode(raw_code, "base64").decode("ascii")
    defaults = func.__defaults__
    if func.__closure__:
        closure = tuple(c.cell_contents for c in func.__closure__)
    else:
        closure = None
    return code, defaults, closure
109
generic_utils.py
Python
keras/utils/generic_utils.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
4
257,048
6
8
8
33
5
0
6
20
get_evaluation_sets
EvaluationSetClient for deepset cloud to fetch evaluation sets and la… (#2345)

* EvaluationSetClient for deepset cloud to fetch evaluation sets and labels for one specific evaluation set
* make DeepsetCloudDocumentStore able to fetch uploaded evaluation set names
* fix missing renaming of get_evaluation_set_names in DeepsetCloudDocumentStore
* update documentation for evaluation set functionality in deepset cloud document store
* DeepsetCloudDocumentStore tests for evaluation set functionality
* rename index to evaluation_set_name for DeepsetCloudDocumentStore evaluation set functionality
* raise DeepsetCloudError when no labels were found for evaluation set
* make use of .get_with_auto_paging in EvaluationSetClient
* Return result of get_with_auto_paging() as it parses the response already
* Make schema import source more specific
* fetch all evaluation sets for a workspace in deepset Cloud
* Rename evaluation_set_name to label_index
* make use of generator functionality for fetching labels
* Update Documentation & Code Style
* Adjust function input for DeepsetCloudDocumentStore.get_all_labels, adjust tests for it, fix typos, make linter happy
* Match error message with pytest.raises
* Update Documentation & Code Style
* DeepsetCloudDocumentStore.get_labels_count raises DeepsetCloudError when no evaluation set was found to count labels on
* remove unneeded import in tests
* DeepsetCloudDocumentStore tests, make reponse bodies a string through json.dumps
* DeepsetcloudDocumentStore.get_label_count - move raise to return
* stringify uuid before json.dump as uuid is not serilizable
* DeepsetcloudDocumentStore - adjust response mocking in tests
* DeepsetcloudDocumentStore - json dump response body in test
* DeepsetCloudDocumentStore introduce label_index, EvaluationSetClient rename label_index to evaluation_set
* Update Documentation & Code Style
* DeepsetCloudDocumentStore rename evaluation_set to evaluation_set_response as there is a name clash with the input variable
* DeepsetCloudDocumentStore - rename missed variable in test
* DeepsetCloudDocumentStore - rename missed label_index to index in doc string, rename label_index to evaluation_set in EvaluationSetClient
* Update Documentation & Code Style
* DeepsetCloudDocumentStore - update docstrings for EvaluationSetClient
* DeepsetCloudDocumentStore - fix typo in doc string

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
https://github.com/deepset-ai/haystack.git
def get_evaluation_sets(self) -> List[dict]:
    return self.evaluation_set_client.get_evaluation_sets()
19
deepsetcloud.py
Python
haystack/document_stores/deepsetcloud.py
a273c3a51dd432bd125e5b35df4be94260a2cdb7
haystack
1
95,412
36
13
17
274
26
0
52
215
test_simple
feat(codeowners): Add endpoint to view code owner associations per organization (#31030) See API-2186 So the earlier version of this PR just had the endpoint return the entire serialized ProjectCodeOwners for an organization. While that works, the intention behind this feature is to read and use the associations, so sending the raw codeowners file, and timestamps are unnecessary and increase the latency with such large payloads, especially for larger orgs. @NisanthanNanthakumar suggested limiting what the endpoint returns to just what the feature will need on the frontend, and making the endpoint name a bit more specific. OrganizationCodeOwners -> OrganizationCodeOwnersAssocations. Along with this refactor, tests have been updated.
https://github.com/getsentry/sentry.git
def test_simple(self):
    code_owner_1 = self.create_codeowners(
        self.project_1, self.code_mapping_1, raw=self.data_1["raw"]
    )
    code_owner_2 = self.create_codeowners(
        self.project_2, self.code_mapping_2, raw=self.data_2["raw"]
    )
    response = self.get_success_response(self.organization.slug, status=status.HTTP_200_OK)
    for code_owner in [code_owner_1, code_owner_2]:
        assert code_owner.project.slug in response.data.keys()
        associations, errors = ProjectCodeOwners.validate_codeowners_associations(
            code_owner.raw, code_owner.project
        )
        assert "associations" in response.data[code_owner.project.slug].keys()
        assert response.data[code_owner.project.slug]["associations"] == associations
        assert "errors" in response.data[code_owner.project.slug].keys()
        assert response.data[code_owner.project.slug]["errors"] == errors
175
test_organization_codeowners_associations.py
Python
tests/sentry/api/endpoints/test_organization_codeowners_associations.py
5efa5eeb57ae6ddf740256e08ce3b9ff4ec98eaa
sentry
2
189,402
14
9
54
53
7
0
15
47
set
Clarify the docs for MObject.animate, MObject.set and Variable. (#2407)

* Clarify the docs for MObject.animate, MObject.set and Variable.
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci
* Slight reword
* Apply suggestions from code review

Co-authored-by: Benjamin Hackl <devel@benjamin-hackl.at>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Benjamin Hackl <devel@benjamin-hackl.at>
https://github.com/ManimCommunity/manim.git
def set(self, **kwargs) -> "Mobject":
    for attr, value in kwargs.items():
        setattr(self, attr, value)

    return self
32
mobject.py
Python
manim/mobject/mobject.py
6d15ca5e745ecdd5d0673adbd55fc7a589abdae3
manim
2
181,598
53
17
23
231
15
0
64
265
test_driver_3
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
https://github.com/EpistasisLab/tpot.git
def test_driver_3():
    args_list = [
        'tests/tests.csv',
        '-is', ',',
        '-target', 'class',
        '-g', '1',
        '-p', '2',
        '-cv', '3',
        '-s', ' 45',
        '-config', 'TPOT light',
        '-v', '2'
    ]
    args = _get_arg_parser().parse_args(args_list)
    with captured_output() as (out, err):
        tpot_driver(args)
    ret_stdout = out.getvalue()

    assert "TPOT settings" in ret_stdout
    assert "Final Pareto front testing scores" not in ret_stdout
    try:
        ret_val = float(ret_stdout.split('\n')[-2].split(': ')[-1])
    except Exception:
        ret_val = -float('inf')
    assert ret_val > 0.0
125
driver_tests.py
Python
tests/driver_tests.py
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
2
263,901
58
12
15
105
13
0
81
111
_find_all_or_none
hookutils: qt: ensure ANGLE DLLs are collected from Anaconda Qt5 Anaconda's Qt5 ships ANGLE DLLs (`libEGL.dll` and `libGLESv2.dll`) but does not seem to provide the `d3dcompiler_XY.dll`. Therefore, we need to adjust the extra Qt DLL collection to consider the latter an optional dependency whose absence does not preclude the collection of the ANGLE DLL group. Rework the `get_qt_binaries` hook utility function and its `_find_all_or_none` helper to peform collection based on a list of mandatory and a list of optional patterns, instead of a single list and number of expected matches (since up until now, all matches were always expected to be found).
https://github.com/pyinstaller/pyinstaller.git
def _find_all_or_none(qt_library_info, mandatory_dll_patterns, optional_dll_patterns=None):
    optional_dll_patterns = optional_dll_patterns or []

    # Resolve path to the the corresponding python package (actually, its parent directory). Used to preserve directory
    # structure when DLLs are collected from the python package (e.g., PyPI wheels).
    package_parent_path = pathlib.Path(qt_library_info.package_location).resolve().parent

    # In PyQt5/PyQt6, the DLLs we are looking for are located in location['BinariesPath'], whereas in PySide2/PySide6,
    # they are located in location['PrefixPath'].
    dll_path = qt_library_info.location['BinariesPath' if qt_library_info.is_pyqt else 'PrefixPath']
    dll_path = pathlib.Path(dll_path).resolve()

    # Helper for processing single DLL pattern
100
qt.py
Python
PyInstaller/utils/hooks/qt.py
49abfa5498b1db83b8f1b2e859e461b1e8540c6f
pyinstaller
6
261,040
20
11
9
104
13
0
25
60
test_asarray_with_order
ENH Adds Array API support to LinearDiscriminantAnalysis (#22554) Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> Co-authored-by: Julien Jerphanion <git@jjerphan.xyz>
https://github.com/scikit-learn/scikit-learn.git
def test_asarray_with_order(is_array_api):
    if is_array_api:
        xp = pytest.importorskip("numpy.array_api")
    else:
        xp = numpy

    X = xp.asarray([1.2, 3.4, 5.1])
    X_new = _asarray_with_order(X, order="F")

    X_new_np = numpy.asarray(X_new)
    assert X_new_np.flags["F_CONTIGUOUS"]
67
test_array_api.py
Python
sklearn/utils/tests/test_array_api.py
2710a9e7eefd2088ce35fd2fb6651d5f97e5ef8b
scikit-learn
2
277,191
7
8
4
43
6
0
7
35
set_params
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def set_params(self, **params):
    self.check_params(params)
    self.sk_params.update(params)
    return self
25
scikit_learn.py
Python
keras/wrappers/scikit_learn.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
44,722
33
14
20
143
21
0
36
152
_validate_argument_count
Straighten up MappedOperator hierarchy and typing (#21505)
https://github.com/apache/airflow.git
def _validate_argument_count(self) -> None:
    if isinstance(self.operator_class, str):
        return  # No need to validate deserialized operator.
    operator = self._create_unmapped_operator(
        mapped_kwargs={k: unittest.mock.MagicMock(name=k) for k in self.mapped_kwargs},
        partial_kwargs=self.partial_kwargs,
        real=False,
    )
    if operator.task_group:
        operator.task_group._remove(operator)
    dag = operator.get_dag()
    if dag:
        dag._remove_task(operator.task_id)
90
mappedoperator.py
Python
airflow/models/mappedoperator.py
0cd3b11f3a5c406fbbd4433d8e44d326086db634
airflow
5
299,547
88
13
41
477
42
0
158
656
turn_on
Use LightEntityFeature enum in limitlessled (#71061)
https://github.com/home-assistant/core.git
def turn_on(self, transition_time, pipeline, **kwargs):
    # The night effect does not need a turned on light
    if kwargs.get(ATTR_EFFECT) == EFFECT_NIGHT:
        if EFFECT_NIGHT in self._effect_list:
            pipeline.night_light()
            self._effect = EFFECT_NIGHT
        return

    pipeline.on()

    # Set up transition.
    args = {}
    if self.config[CONF_FADE] and not self.is_on and self._brightness:
        args["brightness"] = self.limitlessled_brightness()

    if ATTR_BRIGHTNESS in kwargs:
        self._brightness = kwargs[ATTR_BRIGHTNESS]
        args["brightness"] = self.limitlessled_brightness()

    if ATTR_HS_COLOR in kwargs and self._supported & SUPPORT_COLOR:
        self._color = kwargs[ATTR_HS_COLOR]
        # White is a special case.
        if self._color[1] < MIN_SATURATION:
            pipeline.white()
            self._color = WHITE
        else:
            args["color"] = self.limitlessled_color()

    if ATTR_COLOR_TEMP in kwargs:
        if self._supported & SUPPORT_COLOR:
            pipeline.white()
        self._color = WHITE
        if self._supported & SUPPORT_COLOR_TEMP:
            self._temperature = kwargs[ATTR_COLOR_TEMP]
            args["temperature"] = self.limitlessled_temperature()

    if args:
        pipeline.transition(transition_time, **args)

    # Flash.
    if ATTR_FLASH in kwargs and self._supported & LightEntityFeature.FLASH:
        duration = 0
        if kwargs[ATTR_FLASH] == FLASH_LONG:
            duration = 1
        pipeline.flash(duration=duration)

    # Add effects.
    if ATTR_EFFECT in kwargs and self._effect_list:
        if kwargs[ATTR_EFFECT] == EFFECT_COLORLOOP:
            self._effect = EFFECT_COLORLOOP
            pipeline.append(COLORLOOP)
        if kwargs[ATTR_EFFECT] == EFFECT_WHITE:
            pipeline.white()
            self._color = WHITE
291
light.py
Python
homeassistant/components/limitlessled/light.py
6635fc4e3111f72bfa6095c97b3f522429fa1a8b
core
21
302,108
32
8
15
137
8
0
51
108
test_duplicate_removal
Update MQTT tests to use the config entry setup (#72373)

* New testframework and tests for fan platform
* Merge test_common_new to test_common
* Add alarm_control_panel
* Add binary_sensor
* Add button
* Add camera
* Add climate
* Add config_flow
* Add cover
* Add device_tracker_disovery
* Add device_trigger
* Add diagnostics
* Add discovery
* Add humidifier
* Add init
* Add lecacy_vacuum
* Add light_json
* Add light_template
* Add light
* Add lock
* Add number
* Add scene
* Add select
* Add sensor
* Add siren
* Add state_vacuum
* Add subscription
* Add switch
* Add tag
* Add trigger
* Add missed tests
* Add another missed test
* Add device_tracker
* Remove commented out code
* Correct tests according comments
* Improve mqtt_mock_entry and recover tests
* Split fixtures with and without yaml setup
* Update fixtures manual_mqtt
* Update fixtures mqtt_json
* Fix test tasmota
* Update fixture mqtt_room
* Revert fixture changes, improve test
* re-add test
https://github.com/home-assistant/core.git
async def test_duplicate_removal(hass, mqtt_mock_entry_no_yaml_config, caplog):
    await mqtt_mock_entry_no_yaml_config()
    async_fire_mqtt_message(
        hass,
        "homeassistant/binary_sensor/bla/config",
        '{ "name": "Beer", "state_topic": "test-topic" }',
    )
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla/config", "")
    await hass.async_block_till_done()
    assert "Component has already been discovered: binary_sensor bla" in caplog.text
    caplog.clear()
    async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla/config", "")
    await hass.async_block_till_done()
    assert "Component has already been discovered: binary_sensor bla" not in caplog.text
75
test_discovery.py
Python
tests/components/mqtt/test_discovery.py
52561ce0769ddcf1e8688c8909692b66495e524b
core
1
276,769
53
21
29
297
29
1
79
397
_extract_archive
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _extract_archive(file_path, path=".", archive_format="auto"):
    if archive_format is None:
        return False
    if archive_format == "auto":
        archive_format = ["tar", "zip"]
    if isinstance(archive_format, str):
        archive_format = [archive_format]

    file_path = io_utils.path_to_string(file_path)
    path = io_utils.path_to_string(path)

    for archive_type in archive_format:
        if archive_type == "tar":
            open_fn = tarfile.open
            is_match_fn = tarfile.is_tarfile
        if archive_type == "zip":
            open_fn = zipfile.ZipFile
            is_match_fn = zipfile.is_zipfile

        if is_match_fn(file_path):
            with open_fn(file_path) as archive:
                try:
                    archive.extractall(path)
                except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
                    if os.path.exists(path):
                        if os.path.isfile(path):
                            os.remove(path)
                        else:
                            shutil.rmtree(path)
                    raise
            return True
    return False


@keras_export("keras.utils.get_file")
@keras_export("keras.utils.get_file")
169
data_utils.py
Python
keras/utils/data_utils.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
11
31,499
29
12
23
195
23
0
33
106
test_run_image_classification_no_trainer
Change no trainer image_classification test (#17635) * Adjust test arguments and use a new example test
https://github.com/huggingface/transformers.git
def test_run_image_classification_no_trainer(self):
    tmp_dir = self.get_auto_remove_tmp_dir()
    testargs = f.split()

    if is_cuda_and_apex_available():
        testargs.append("--fp16")

    _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE)
    result = get_results(tmp_dir)
    # The base model scores a 25%
    self.assertGreaterEqual(result["eval_accuracy"], 0.625)
    self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
    self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
112
test_accelerate_examples.py
Python
examples/pytorch/test_accelerate_examples.py
acb709d55150501698b5b500ca49683b913d4b3d
transformers
2
224,518
19
12
10
90
12
0
29
91
nest_paths
Refactor URI handling to not have to deal with backslashes
https://github.com/mkdocs/mkdocs.git
def nest_paths(paths):
    nested = []

    for path in paths:
        parts = PurePath(path).parent.parts

        branch = nested
        for part in parts:
            part = dirname_to_title(part)
            branch = find_or_create_node(branch, part)

        branch.append(path)

    return nested
55
__init__.py
Python
mkdocs/utils/__init__.py
1c50987f9c17b228fdf22456aa369b83bd6b11b9
mkdocs
3
77,226
4
6
2
16
2
0
4
18
run_before_hook
Extract mixins from Snippet views and use it in generic create/edit/delete views (#8361)
https://github.com/wagtail/wagtail.git
def run_before_hook(self):
    return None
8
mixins.py
Python
wagtail/admin/views/generic/mixins.py
bc1a2ab1148b0f27cfd1435f8cb0e44c2721102d
wagtail
1
181,586
39
19
8
123
12
0
44
76
test_driver
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
https://github.com/EpistasisLab/tpot.git
def test_driver():
    batcmd = "python -m tpot.driver tests/tests.csv -is , -target class -g 1 -p 2 -os 4 -cv 5 -s 45 -v 1"
    ret_stdout = subprocess.check_output(batcmd, shell=True)
    try:
        ret_val = float(ret_stdout.decode('UTF-8').split('\n')[-2].split(': ')[-1])
    except Exception as e:
        ret_val = -float('inf')
    assert ret_val > 0.0
69
driver_tests.py
Python
tests/driver_tests.py
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
2
164,682
28
10
4
147
13
1
34
66
close
DEP: Protect some ExcelWriter attributes (#45795)

* DEP: Deprecate ExcelWriter attributes
* DEP: Deprecate ExcelWriter attributes
* Fixup for test
* Move tests and restore check_extension y
* Deprecate xlwt fm_date and fm_datetime; doc improvements
https://github.com/pandas-dev/pandas.git
def close(self) -> None:
    self._save()
    self._handles.close()


XLS_SIGNATURES = (
    b"\x09\x00\x04\x00\x07\x00\x10\x00",  # BIFF2
    b"\x09\x02\x06\x00\x00\x00\x10\x00",  # BIFF3
    b"\x09\x04\x06\x00\x00\x00\x10\x00",  # BIFF4
    b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1",  # Compound File Binary
)
ZIP_SIGNATURE = b"PK\x03\x04"
PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,)))


@doc(storage_options=_shared_docs["storage_options"])
@doc(storage_options=_shared_docs["storage_options"])
20
_base.py
Python
pandas/io/excel/_base.py
047137ce2619cfe2027e3999dfb92eb614d9a485
pandas
1
124,708
78
15
15
270
21
1
120
225
test_max_concurrent_in_progress_functions
[Core | State Observability] Implement API Server (Dashboard) HTTP Requests Throttling (#26257) This is to limit the max number of HTTP requests the dashboard (API server) will accept before rejecting more requests. This will make sure the observability requests do not overload the downstream systems (raylet/gcs) when delegating too many concurrent state observability requests to the cluster.
https://github.com/ray-project/ray.git
async def test_max_concurrent_in_progress_functions(extra_req_num):
    max_req = 10
    a = A(max_num_call=max_req)

    # Run more than allowed concurrent async functions should trigger rate limiting
    res_arr = await asyncio.gather(
        *[a.fn1() if i % 2 == 0 else a.fn2() for i in range(max_req + extra_req_num)]
    )
    fail_cnt = 0
    for ok in res_arr:
        fail_cnt += 0 if ok else 1

    expected_fail_cnt = max(0, extra_req_num)
    assert fail_cnt == expected_fail_cnt, (
        f"{expected_fail_cnt} out of {max_req + extra_req_num} "
        f"concurrent runs should fail with max={max_req} but {fail_cnt}."
    )

    assert a.num_call_ == 0, "All requests should be done"


@pytest.mark.asyncio
@pytest.mark.parametrize(
    "failures",
    [
        [True, True, True, True, True],
        [False, False, False, False, False],
        [False, True, False, True, False],
        [False, False, False, True, True],
        [True, True, False, False, False],
    ],
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "failures",
    [
        [True, True, True, True, True],
        [False, False, False, False, False],
        [False, True, False, True, False],
        [False, False, False, True, True],
        [True, True, False, False, False],
    ],
)
96
test_state_head.py
Python
dashboard/tests/test_state_head.py
365ffe21e592589880e3116302705b5e08a5b81f
ray
5
176,904
34
10
41
80
9
0
45
75
astar_path
Updated astar docstring (#5797) The docstring now reflects on heuristic admissibility and heuristic value caching
https://github.com/networkx/networkx.git
def astar_path(G, source, target, heuristic=None, weight="weight"):
    if source not in G or target not in G:
        msg = f"Either source {source} or target {target} is not in G"
        raise nx.NodeNotFound(msg)

    if heuristic is None:
        # The default heuristic is h=0 - same as Dijkstra's algorithm
273
astar.py
Python
networkx/algorithms/shortest_paths/astar.py
b28d30bd552a784d60692fd2d2016f8bcd1cfa17
networkx
13
89,927
48
14
24
269
30
0
62
294
test_note_generic_issue
feat(integrations): Support generic issue type alerts (#42110) Add support for issue alerting integrations that use the message builder (Slack and MSTeams) for generic issue types. Preview text for Slack alert: <img width="350" alt="Screen Shot 2022-12-08 at 4 07 16 PM" src="https://user-images.githubusercontent.com/29959063/206593405-7a206d88-a31a-4e85-8c15-1f7534733ca7.png"> Slack generic issue alert shows the `occurrence.issue_title` and the "important" evidence value <img width="395" alt="Screen Shot 2022-12-08 at 4 11 20 PM" src="https://user-images.githubusercontent.com/29959063/206593408-6942d74d-4238-4df9-bfee-601ce2bc1098.png"> MSTeams generic issue alert shows the `occurrence.issue_title` and the "important" evidence value <img width="654" alt="Screen Shot 2022-12-08 at 4 13 45 PM" src="https://user-images.githubusercontent.com/29959063/206593410-2773746a-16b3-4652-ba2c-a7d5fdc76992.png"> Fixes #42047
https://github.com/getsentry/sentry.git
def test_note_generic_issue(self, mock_func, occurrence):
    event = self.store_event(
        data={"message": "Hellboy's world", "level": "error"}, project_id=self.project.id
    )
    event = event.for_group(event.groups[0])
    notification = NoteActivityNotification(
        Activity(
            project=self.project,
            group=event.group,
            user=self.user,
            type=ActivityType.NOTE,
            data={"text": "text", "mentions": []},
        )
    )
    with self.tasks():
        notification.send()

    attachment, text = get_attachment()
    assert text == f"New comment by {self.name}"
    assert attachment["title"] == TEST_ISSUE_OCCURRENCE.issue_title
    assert attachment["text"] == notification.activity.data["text"]
    assert (
        attachment["footer"]
        == f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=note_activity-slack-user|Notification Settings>"
    )
151
test_note.py
Python
tests/sentry/integrations/slack/notifications/test_note.py
3255fa4ebb9fbc1df6bb063c0eb77a0298ca8f72
sentry
1
299,399
4
6
3
16
3
0
4
7
async_add_devices
Insteon Device Control Panel (#70834) Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io>
https://github.com/home-assistant/core.git
async def async_add_devices(address, multiple):
26
device.py
Python
homeassistant/components/insteon/api/device.py
a9ca774e7ed1d8fe502a53d5b765c1d9b393a524
core
2
270,861
10
11
5
58
4
0
12
35
is_subclassed
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def is_subclassed(layer):
    return (
        layer.__module__.find("keras.engine") == -1
        and layer.__module__.find("keras.layers") == -1
    )
32
base_layer_utils.py
Python
keras/engine/base_layer_utils.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
2
297,866
37
13
13
90
10
0
39
194
_async_device_changed
String formatting and max line length - Part 2 (#84393)
https://github.com/home-assistant/core.git
def _async_device_changed(self, *args, **kwargs) -> None:
    # Don't update disabled entities
    if self.enabled:
        _LOGGER.debug("Event %s (%s)", self.name, CONST_ALARM_CONTROL_PANEL_NAME)
        self.async_write_ha_state()
    else:
        _LOGGER.debug(
            (
                "Device Changed Event for %s (Alarm Control Panel) not fired."
                " Entity is disabled"
            ),
            self.name,
        )
52
alarm_control_panel.py
Python
homeassistant/components/homematicip_cloud/alarm_control_panel.py
cb13418babd21a1e9584978b0c523f1b1e4e1cb0
core
2
183,841
24
10
6
82
12
0
27
45
test_stylesheet_many_classes_dont_overrule_id
Add various additional tests around CSS specificity
https://github.com/Textualize/textual.git
def test_stylesheet_many_classes_dont_overrule_id():
    css = "#id {color: red;} .a.b.c.d {color: blue;}"
    stylesheet = _make_stylesheet(css)
    node = DOMNode(classes="a b c d", id="id")
    stylesheet.apply(node)

    assert node.styles.color == Color(255, 0, 0)
47
test_stylesheet.py
Python
tests/css/test_stylesheet.py
4dd0d9fae43583638f34257f97d5749ca4f2c00c
textual
1
297,532
4
6
18
16
3
0
4
7
test_check_requesterror
Improve HomeWizard request issue reporting (#82366)

* Trigger reauth flow when HomeWizard API was disabled
* Add tests for reauth flow
* Fix typo in test
* Add parallel updates constant
* Improve error message when device in unreachable during config
* Set quality scale
* Remove quality scale
* Throw error instead of abort when setup fails
* Adjust test for new setup behaviour
* Trigger reauth flow when API is disabled and continue retrying
* Reload entry and raise AuthFailed during init
* Abort running config flow
* Listen for coordinator updates to trigger reload
* Use build-in backoff system
* Fix failing test
* Test reauth flow is active after disable-api init
* Test reauth flow removal
https://github.com/home-assistant/core.git
async def test_check_requesterror(hass, aioclient_mock):
112
test_config_flow.py
Python
tests/components/homewizard/test_config_flow.py
b41d0be9522fabda0ac8affd2add6876a66205ea
core
1
265,772
53
14
18
194
14
0
87
177
to_grams
9654 device weight (#10448)

* 9654 add weight fields to devices
* 9654 changes from code review
* 9654 change _abs_weight to grams
* Resolve migrations conflict
* 9654 code-review changes
* 9654 total weight on devices
* Misc cleanup

Co-authored-by: Jeremy Stretch <jstretch@ns1.com>
https://github.com/netbox-community/netbox.git
def to_grams(weight, unit):
    try:
        if weight < 0:
            raise ValueError("Weight must be a positive number")
    except TypeError:
        raise TypeError(f"Invalid value '{weight}' for weight (must be a number)")

    valid_units = WeightUnitChoices.values()
    if unit not in valid_units:
        raise ValueError(f"Unknown unit {unit}. Must be one of the following: {', '.join(valid_units)}")

    if unit == WeightUnitChoices.UNIT_KILOGRAM:
        return weight * 1000
    if unit == WeightUnitChoices.UNIT_GRAM:
        return weight
    if unit == WeightUnitChoices.UNIT_POUND:
        return weight * Decimal(453.592)
    if unit == WeightUnitChoices.UNIT_OUNCE:
        return weight * Decimal(28.3495)

    raise ValueError(f"Unknown unit {unit}. Must be 'kg', 'g', 'lb', 'oz'.")
106
utils.py
Python
netbox/utilities/utils.py
204c10c053fddc26ad23ec15a3c60eee38bfc081
netbox
8
153,944
6
6
27
26
6
0
6
13
_setitem
PERF-#4325: Improve perf of multi-column assignment in `__setitem__` when no new column names are assigning (#4455) Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru> Signed-off-by: Myachev <anatoly.myachev@intel.com>
https://github.com/modin-project/modin.git
def _setitem(self, axis, key, value, how="inner"):
168
query_compiler.py
Python
modin/core/storage_formats/pandas/query_compiler.py
eddfda4b521366c628596dcb5c21775c7f50eec1
modin
4
92,181
107
14
68
1,025
52
0
225
858
test_update_organization_config
ref(integrations): Update Vercel endpoints (#36150) This PR updates the endpoints we reach to in the Vercel integration. It seems to work just fine without changes as the payloads returned from vercel haven't updated, but we'll need to specify API Scopes so they don't receive 403s. This also refactored the pagination code to loop 100 at a time, indefinitely I had previously tried to consolidate the project webhooks in this PR, but I'll be doing that separately.
https://github.com/getsentry/sentry.git
def test_update_organization_config(self):
    with self.tasks():
        self.assert_setup_flow()

    org = self.organization
    project_id = self.project.id
    enabled_dsn = ProjectKey.get_default(project=Project.objects.get(id=project_id)).get_dsn(
        public=True
    )
    sentry_auth_token = SentryAppInstallationToken.objects.get_token(org.id, "vercel")

    env_var_map = {
        "SENTRY_ORG": {"type": "encrypted", "value": org.slug},
        "SENTRY_PROJECT": {"type": "encrypted", "value": self.project.slug},
        "SENTRY_DSN": {"type": "encrypted", "value": enabled_dsn},
        "SENTRY_AUTH_TOKEN": {"type": "encrypted", "value": sentry_auth_token},
        "VERCEL_GIT_COMMIT_SHA": {"type": "system", "value": "VERCEL_GIT_COMMIT_SHA"},
    }

    # mock get_project API call
    responses.add(
        responses.GET,
        f"{VercelClient.base_url}{VercelClient.GET_PROJECT_URL % self.project_id}",
        json={"link": {"type": "github"}, "framework": "nextjs"},
    )

    # mock create the env vars
    for env_var, details in env_var_map.items():
        responses.add(
            responses.POST,
            f"{VercelClient.base_url}{VercelClient.CREATE_ENV_VAR_URL % self.project_id}",
            json={
                "key": env_var,
                "value": details["value"],
                "target": ["production"],
                "type": details["type"],
            },
        )

    integration = Integration.objects.get(provider=self.provider.key)
    installation = integration.get_installation(org.id)
    org_integration = OrganizationIntegration.objects.get(
        organization_id=org.id, integration_id=integration.id
    )
    assert org_integration.config == {}

    data = {"project_mappings": [[project_id, self.project_id]]}

    installation.update_organization_config(data)
    org_integration = OrganizationIntegration.objects.get(
        organization_id=org.id, integration_id=integration.id
    )
    assert org_integration.config == {"project_mappings": [[project_id, self.project_id]]}

    # assert the env vars were created correctly
    req_params = json.loads(responses.calls[5].request.body)
    assert req_params["key"] == "SENTRY_ORG"
    assert req_params["value"] == org.slug
    assert req_params["target"] == ["production"]
    assert req_params["type"] == "encrypted"

    req_params = json.loads(responses.calls[6].request.body)
    assert req_params["key"] == "SENTRY_PROJECT"
    assert req_params["value"] == self.project.slug
    assert req_params["target"] == ["production"]
    assert req_params["type"] == "encrypted"

    req_params = json.loads(responses.calls[7].request.body)
    assert req_params["key"] == "NEXT_PUBLIC_SENTRY_DSN"
    assert req_params["value"] == enabled_dsn
    assert req_params["target"] == ["production"]
    assert req_params["type"] == "encrypted"

    req_params = json.loads(responses.calls[8].request.body)
    assert req_params["key"] == "SENTRY_AUTH_TOKEN"
    assert req_params["target"] == ["production"]
    assert req_params["type"] == "encrypted"

    req_params = json.loads(responses.calls[9].request.body)
    assert req_params["key"] == "VERCEL_GIT_COMMIT_SHA"
    assert req_params["value"] == "VERCEL_GIT_COMMIT_SHA"
    assert req_params["target"] == ["production"]
    assert req_params["type"] == "system"
566
test_integration.py
Python
tests/sentry/integrations/vercel/test_integration.py
8201e74ec3d81e89354905c946e62436f0247602
sentry
2
293,197
80
16
28
242
32
0
104
536
async_step_manual_connection
Ensure elkm1 can be manually configured when discovered instance is not used (#67712)
https://github.com/home-assistant/core.git
async def async_step_manual_connection(self, user_input=None):
    errors = {}
    if user_input is not None:
        # We might be able to discover the device via directed UDP
        # in case its on another subnet
        if device := await async_discover_device(
            self.hass, user_input[CONF_ADDRESS]
        ):
            await self.async_set_unique_id(
                dr.format_mac(device.mac_address), raise_on_progress=False
            )
            self._abort_if_unique_id_configured()
            # Ignore the port from discovery since its always going to be
            # 2601 if secure is turned on even though they may want insecure
            user_input[CONF_ADDRESS] = device.ip_address
        errors, result = await self._async_create_or_error(user_input, False)
        if not errors:
            return result

    return self.async_show_form(
        step_id="manual_connection",
        data_schema=vol.Schema(
            {
                **BASE_SCHEMA,
                vol.Required(CONF_ADDRESS): str,
                vol.Optional(CONF_PREFIX, default=""): str,
                vol.Required(
                    CONF_PROTOCOL, default=DEFAULT_SECURE_PROTOCOL
                ): vol.In(ALL_PROTOCOLS),
            }
        ),
        errors=errors,
    )
153
config_flow.py
Python
homeassistant/components/elkm1/config_flow.py
26c5dca45d9b3dee002dfe1549780747e5007e06
core
4
224,049
6
9
2
35
4
0
6
20
on_page_read_source
Remove spaces at the ends of docstrings, normalize quotes
https://github.com/mkdocs/mkdocs.git
def on_page_read_source(self, **kwargs):
    return f'{self.config["foo"]} source'
12
plugin_tests.py
Python
mkdocs/tests/plugin_tests.py
e7f07cc82ab2be920ab426ba07456d8b2592714d
mkdocs
1
259,640
115
16
84
352
32
0
173
322
trustworthiness
FIX Raise error when n_neighbors >= n_samples / 2 in manifold.trustworthiness (#23033) Co-authored-by: Shao Yang Hong <hongsy2006@gmail.com> Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>
https://github.com/scikit-learn/scikit-learn.git
def trustworthiness(X, X_embedded, *, n_neighbors=5, metric="euclidean"):
    r
    n_samples = X.shape[0]
    if n_neighbors >= n_samples / 2:
        raise ValueError(
            f"n_neighbors ({n_neighbors}) should be less than n_samples / 2"
            f" ({n_samples / 2})"
        )
    dist_X = pairwise_distances(X, metric=metric)
    if metric == "precomputed":
        dist_X = dist_X.copy()
    # we set the diagonal to np.inf to exclude the points themselves from
    # their own neighborhood
    np.fill_diagonal(dist_X, np.inf)
    ind_X = np.argsort(dist_X, axis=1)
    # `ind_X[i]` is the index of sorted distances between i and other samples
    ind_X_embedded = (
        NearestNeighbors(n_neighbors=n_neighbors)
        .fit(X_embedded)
        .kneighbors(return_distance=False)
    )

    # We build an inverted index of neighbors in the input space: For sample i,
    # we define `inverted_index[i]` as the inverted index of sorted distances:
    # inverted_index[i][ind_X[i]] = np.arange(1, n_sample + 1)
    inverted_index = np.zeros((n_samples, n_samples), dtype=int)
    ordered_indices = np.arange(n_samples + 1)
    inverted_index[ordered_indices[:-1, np.newaxis], ind_X] = ordered_indices[1:]
    ranks = (
        inverted_index[ordered_indices[:-1, np.newaxis], ind_X_embedded] - n_neighbors
    )
    t = np.sum(ranks[ranks > 0])
    t = 1.0 - t * (
        2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))
    )
    return t
228
_t_sne.py
Python
sklearn/manifold/_t_sne.py
ade90145c9c660a1a7baf2315185995899b0f356
scikit-learn
3
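As a hedged illustration of the guard introduced by this commit, the sketch below calls `sklearn.manifold.trustworthiness` on a small synthetic dataset; the arrays and parameter values are invented, and the error applies only to scikit-learn versions that include this check.

import numpy as np
from sklearn.manifold import trustworthiness

rng = np.random.RandomState(0)
X = rng.rand(10, 5)           # 10 samples in the input space
X_embedded = rng.rand(10, 2)  # a stand-in 2-D embedding of the same samples

# Valid: n_neighbors=4 is below n_samples / 2 == 5
print(trustworthiness(X, X_embedded, n_neighbors=4))  # a score in [0, 1]

# Invalid: n_neighbors=5 >= n_samples / 2, so the new guard raises
try:
    trustworthiness(X, X_embedded, n_neighbors=5)
except ValueError as exc:
    print(exc)  # n_neighbors (5) should be less than n_samples / 2 (5.0)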
132,895
34
12
10
108
9
0
38
140
start
[CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def start(self):
    if self.actors and len(self.actors) > 0:
        raise RuntimeError(
            "The actors have already been started. "
            "Please call `shutdown` first if you want to "
            "restart them."
        )

    logger.debug(f"Starting {self.num_actors} actors.")
    self.add_actors(self.num_actors)
    logger.debug(f"{len(self.actors)} actors have successfully started.")
49
actor_group.py
Python
python/ray/util/actor_group.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
3
126,264
59
14
20
179
21
0
72
215
_detect_checkpoint_function
[air] Add annotation for Tune module. (#27060)
Co-authored-by: Kai Fricke <kai@anyscale.com>
https://github.com/ray-project/ray.git
def _detect_checkpoint_function(train_func, abort=False, partial=False):
    func_sig = inspect.signature(train_func)
    validated = True
    try:
        # check if signature is func(config, checkpoint_dir=None)
        if partial:
            func_sig.bind_partial({}, checkpoint_dir="tmp/path")
        else:
            func_sig.bind({}, checkpoint_dir="tmp/path")
    except Exception as e:
        logger.debug(str(e))
        validated = False
    if abort and not validated:
        func_args = inspect.getfullargspec(train_func).args
        raise ValueError(
            "Provided training function must have 2 args "
            "in the signature, and the latter arg must "
            "contain `checkpoint_dir`. For example: "
            "`func(config, checkpoint_dir=None)`. Got {}".format(func_args)
        )
    return validated
102
util.py
Python
python/ray/tune/utils/util.py
eb69c1ca286a2eec594f02ddaf546657a8127afd
ray
5
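The signature probe in the record above can be reproduced with nothing but `inspect`; this self-contained sketch (both sample training functions are hypothetical) shows which shapes pass the `bind` check.

import inspect

def train_ok(config, checkpoint_dir=None):  # the accepted shape
    pass

def train_bad(config):  # missing the checkpoint_dir argument
    pass

for fn in (train_ok, train_bad):
    try:
        inspect.signature(fn).bind({}, checkpoint_dir="tmp/path")
        print(fn.__name__, "-> validated")
    except TypeError as exc:
        print(fn.__name__, "-> rejected:", exc)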
323,123
10
10
5
43
5
0
13
56
should_log
[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761)
* add some datasets for finetune.
* support fine-tuning for all tasks.
* add trainer prototype.
* init version for paddlenlp trainer.
* refine trainer.
* update for some details.
* support multi-card training evaluation.
* support load from ckpt.
* support for export inference model.
* first version of trainer.
* seq cls support clue.
* trainer support for token classification and question answering tasks.
* fix as per reviews.
Co-authored-by: Zeyu Chen <chenzeyu01@baidu.com>
https://github.com/PaddlePaddle/PaddleNLP.git
def should_log(self):
    if self.log_on_each_node:
        return self.local_process_index == 0
    else:
        return self.process_index == 0
25
trainer_args.py
Python
paddlenlp/trainer/trainer_args.py
44a290e94d1becd1f09fddc3d873f9e19c9d6919
PaddleNLP
2
300,405
37
19
21
169
8
0
50
309
test_all_optional_config
Remove unused calls fixture from template tests (#71735)
https://github.com/home-assistant/core.git
async def test_all_optional_config(hass):
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "number": {
                        "state": "{{ 4 }}",
                        "set_value": {"service": "script.set_value"},
                        "min": "{{ 3 }}",
                        "max": "{{ 5 }}",
                        "step": "{{ 1 }}",
                    }
                }
            },
        )

    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()

    _verify(hass, 4, 1, 3, 5)
90
test_number.py
Python
tests/components/template/test_number.py
b70e97e949ca73fe57849625c0b0c51f0b8796f7
core
1
167,393
21
9
79
88
10
0
21
75
radviz
TYP: Missing return annotations in util/tseries/plotting (#47510)
* TYP: Missing return annotations in util/tseries/plotting
* the more tricky parts
https://github.com/pandas-dev/pandas.git
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds) -> Axes:
    plot_backend = _get_plot_backend("matplotlib")
    return plot_backend.radviz(
        frame=frame,
        class_column=class_column,
        ax=ax,
        color=color,
        colormap=colormap,
        **kwds,
    )
60
_misc.py
Python
pandas/plotting/_misc.py
4bb1fd50a63badd38b5d96d9c4323dae7bc36d8d
pandas
1
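A hedged usage sketch for the dispatching wrapper above; the DataFrame, column names, and values are invented, and it assumes a working matplotlib backend.

import pandas as pd

df = pd.DataFrame(
    {
        "sepal_length": [5.1, 4.9, 6.3, 5.8],
        "sepal_width": [3.5, 3.0, 3.3, 2.7],
        "species": ["setosa", "setosa", "virginica", "virginica"],
    }
)
ax = pd.plotting.radviz(df, class_column="species")  # returns a matplotlib Axes
ax.figure.savefig("radviz.png")  # placeholder output path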
266,663
19
12
4
63
10
0
19
40
download_file
ansible-test - Fix consistency of managed venvs. (#77028)
https://github.com/ansible/ansible.git
def download_file(url, path):  # type: (str, str) -> None
    with open(to_bytes(path), 'wb') as saved_file:
        download = urlopen(url)
        shutil.copyfileobj(download, saved_file)
35
requirements.py
Python
test/lib/ansible_test/_util/target/setup/requirements.py
68fb3bf90efa3a722ba5ab7d66b1b22adc73198c
ansible
1
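Since `to_bytes` is an ansible-test internal, here is a standard-library-only approximation of the helper above that skips the bytes-path conversion; the URL and destination are placeholders.

import shutil
from urllib.request import urlopen

def download_file(url, path):  # type: (str, str) -> None
    with open(path, 'wb') as saved_file:
        with urlopen(url) as download:
            shutil.copyfileobj(download, saved_file)

download_file("https://example.com/requirements.txt", "/tmp/requirements.txt")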
46,474
4
11
2
44
3
0
4
18
extract_bucket_name
Create Endpoint and Model Service, Batch Prediction and Hyperparameter Tuning Jobs operators for Vertex AI service (#22088)
https://github.com/apache/airflow.git
def extract_bucket_name(config):
    return config["artifact_destination"]["output_uri_prefix"].rpartition("gs://")[-1]
23
vertex_ai.py
Python
airflow/providers/google/cloud/links/vertex_ai.py
ca4b8d1744cd1de9b6af97dacb0e03de0f014006
airflow
1
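A worked example for the one-liner above, with an invented config payload; note that `rpartition("gs://")` keeps everything after the scheme, so any path suffix stays attached to the returned name.

def extract_bucket_name(config):
    return config["artifact_destination"]["output_uri_prefix"].rpartition("gs://")[-1]

config = {"artifact_destination": {"output_uri_prefix": "gs://my-bucket/output"}}
assert extract_bucket_name(config) == "my-bucket/output"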
12,472
6
10
4
48
9
0
6
22
export_kubernetes
refactor: rename cli to jina_cli (#4890)
* chore: fix readme
* chore: fix readme
* chore: fix dockerignore
* fix: #4845
* style: fix overload and cli autocomplete
* fix: cicd export cli
Co-authored-by: Jina Dev Bot <dev-bot@jina.ai>
https://github.com/jina-ai/jina.git
def export_kubernetes(args):
    Flow.load_config(args.flowpath).to_kubernetes_yaml(
        output_base_path=args.outpath, k8s_namespace=args.k8s_namespace
    )
29
exporter.py
Python
jina/exporter.py
16b16b07a66cd5a8fc7cca1d3f1c378a9c63d38c
jina
1
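A hypothetical invocation of the exporter above; the import path is assumed from the record's file path, the `Namespace` simply mirrors the three attributes the function reads, and the file paths are placeholders.

from argparse import Namespace
from jina.exporter import export_kubernetes  # assumed import path (jina/exporter.py)

args = Namespace(flowpath="flow.yml", outpath="./k8s", k8s_namespace="default")
export_kubernetes(args)  # writes Kubernetes YAML configs under ./k8s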
86,267
50
14
18
272
17
0
87
257
expand_frame
ref(processor): Use symbolic-sourcemapcache for JavaScript Sourcemap processing (#38551)

This PR attempts to replace the currently used `rust-sourcemap` crate and its Symbolic Python bindings with the `symbolic-sourcemapcache` crate. It makes the whole processing pipeline easier to maintain, as it pushes some work directly to Symbolic, and we get better function names due to better scope resolution and in some cases better file URLs.

Other than that, we don't use `SourceView` anymore, as it seemed like an unnecessary layer of abstraction for something that is used only for `context_lines` extraction. We cache `utf-8` decoded sources directly now, as this way we can encode them only once for `SmCache` instance initialization, and use the source directly otherwise for context lines extraction.

Some tests had to be updated to express current behavior. The notable thing is `useless_fn_names = ["<anonymous>", "__webpack_require__", "__webpack_modules__"]`, which is mostly for webpack's `production` mode, which by default trims all the function names; we decided to fall back to the minified names in those cases instead (this was already the old behavior). It should be possible to extract something better, but we'd need to parse all `sourceContents` from the sourcemap to do that, as the only way to get a better function name for the case mentioned above is to look at the right-hand side of a default node export, in the form `module.exports = function foo () {}`. This should give us `foo`, yet the only thing we can extract is `module.exports`, as the minified form of this expression in webpack production mode is `module.exports = function () {}`.
https://github.com/getsentry/sentry.git
def expand_frame(self, frame, source_context=None, source=None):
    if frame.get("lineno") is None:
        return False

    if source_context is None:
        source = source or self.get_sourceview(frame["abs_path"])
        if source is None:
            logger.debug("No source found for %s", frame["abs_path"])
            return False

    (pre_context, context_line, post_context) = source_context or get_raw_source_context(
        source=source, lineno=frame["lineno"]
    )

    if pre_context is not None and len(pre_context) > 0:
        frame["pre_context"] = [trim_line(x) for x in pre_context]
    if context_line is not None:
        frame["context_line"] = trim_line(context_line, frame.get("colno") or 0)
    if post_context is not None and len(post_context) > 0:
        frame["post_context"] = [trim_line(x) for x in post_context]

    return True
169
processor.py
Python
src/sentry/lang/javascript/processor.py
ae9c0d8a33d509d9719a5a03e06c9797741877e9
sentry
14
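To make the pre/context/post split consumed by `expand_frame` concrete, here is a generic, self-contained sketch of the idea; this is not Sentry's `get_raw_source_context`, just an invented stand-in with the same output shape.

def raw_source_context(source_lines, lineno, context=5):
    # lineno is 1-based, as in stack trace frames
    idx = lineno - 1
    pre = source_lines[max(0, idx - context):idx]
    line = source_lines[idx] if 0 <= idx < len(source_lines) else None
    post = source_lines[idx + 1:idx + 1 + context]
    return pre, line, post

pre, line, post = raw_source_context(["a = 1", "b = 2", "raise"], lineno=3)
assert line == "raise" and pre == ["a = 1", "b = 2"] and post == []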