Column            Type      Min     Max
id                int64     20      338k
vocab_size        int64     2       671
ast_levels        int64     4       32
nloc              int64     1       451
n_ast_nodes       int64     12      5.6k
n_identifiers     int64     1       186
n_ast_errors      int64     0       10
n_words           int64     2       2.17k
n_whitespaces     int64     2       13.8k
fun_name          string    2       73
commit_message    string    51      15.3k
url               string    31      59
code              string    51      31k
ast_errors        string    0       1.46k
token_counts      int64     6       3.32k
file_name         string    5       56
language          string    1 distinct value
path              string    7       134
commit_id         string    40      40
repo              string    3       28
complexity        int64     1       153

(For string columns, Min and Max are string lengths; the single language value is Python.)
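Each row below is one function-level record with the columns listed above. As a minimal sketch of how such a split could be inspected (assuming it is published through the Hugging Face datasets library; the dataset path used here is a hypothetical placeholder, not the real identifier):

from datasets import load_dataset

# "user/code-functions" is a placeholder path, not the actual dataset name.
ds = load_dataset("user/code-functions", split="train")

row = ds[0]
print(ds.column_names)                 # id, vocab_size, ast_levels, nloc, ...
print(row["fun_name"], row["repo"])    # function name and source repository
print(row["nloc"], row["complexity"])  # size and complexity metrics
print(row["code"])                     # the Python source of the function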
260,872
24
10
13
174
16
0
36
83
test_skewed_chi2_sampler_dtype_equivalence
ENH Add dtype preservation to SkewedChi2Sampler (#24350) Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com> Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
https://github.com/scikit-learn/scikit-learn.git
def test_skewed_chi2_sampler_dtype_equivalence():
    skewed_chi2_sampler_32 = SkewedChi2Sampler(random_state=42)
    X_32 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)
    skewed_chi2_sampler_32.fit(X_32)

    skewed_chi2_sampler_64 = SkewedChi2Sampler(random_state=42)
    X_64 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float64)
    skewed_chi2_sampler_64.fit(X_64)

    assert_allclose(
        skewed_chi2_sampler_32.random_offset_, skewed_chi2_sampler_64.random_offset_
    )
    assert_allclose(
        skewed_chi2_sampler_32.random_weights_, skewed_chi2_sampler_64.random_weights_
    )
117
test_kernel_approximation.py
Python
sklearn/tests/test_kernel_approximation.py
1eea19d2dca78acebf8b82d3a6c608ab6edd8b34
scikit-learn
1
276,120
27
13
8
107
14
0
31
78
_wrap_unconditional_loss
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _wrap_unconditional_loss(loss_fn, index):
    # Extract original loss function from partial function
    fn = loss_fn.args[0] if isinstance(loss_fn, functools.partial) else loss_fn
    if isinstance(fn, tf.__internal__.function.Function):
        return fn
    else:
        return tf.__internal__.function.Function(
            fn, "loss_fn_{}".format(index), input_signature=[]
        )
68
save_impl.py
Python
keras/saving/saved_model/save_impl.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
3
168,027
11
9
11
57
7
0
12
26
barh
TYP: pandas/plotting annotations from pandas-stubs (#47827) * TYP: pandas/plotting annotations from pandas-stubs * xticks + pyright
https://github.com/pandas-dev/pandas.git
def barh(self, x=None, y=None, **kwargs) -> PlotAccessor:
    return self(kind="barh", x=x, y=y, **kwargs)
37
_core.py
Python
pandas/plotting/_core.py
4d7cfc436f8a7bc65c11770aa16f05e875b74077
pandas
1
104,508
134
19
41
541
29
0
270
824
encode_nested_example
Module namespace cleanup for v2.0 (#3875) * Imports cleaning * Small change * Remove unused methods * Small fix * Additional fix * Final fix * Fix benchmark test * Fix benchmark test #2
https://github.com/huggingface/datasets.git
def encode_nested_example(schema, obj):
    # Nested structures: we allow dict, list/tuples, sequences
    if isinstance(schema, dict):
        return {
            k: encode_nested_example(sub_schema, sub_obj)
            for k, (sub_schema, sub_obj) in zip_dict(schema, obj)
        }
    elif isinstance(schema, (list, tuple)):
        sub_schema = schema[0]
        if obj is None:
            return None
        else:
            if len(obj) > 0:
                for first_elmt in obj:
                    if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
                        break
                if encode_nested_example(sub_schema, first_elmt) != first_elmt:
                    return [encode_nested_example(sub_schema, o) for o in obj]
            return list(obj)
    elif isinstance(schema, Sequence):
        # We allow to reverse list of dict => dict of list for compatiblity with tfds
        if isinstance(schema.feature, dict):
            # dict of list to fill
            list_dict = {}
            if isinstance(obj, (list, tuple)):
                # obj is a list of dict
                for k, dict_tuples in zip_dict(schema.feature, *obj):
                    list_dict[k] = [encode_nested_example(dict_tuples[0], o) for o in dict_tuples[1:]]
                return list_dict
            else:
                # obj is a single dict
                for k, (sub_schema, sub_objs) in zip_dict(schema.feature, obj):
                    list_dict[k] = [encode_nested_example(sub_schema, o) for o in sub_objs]
                return list_dict
        # schema.feature is not a dict
        if isinstance(obj, str):  # don't interpret a string as a list
            raise ValueError(f"Got a string but expected a list instead: '{obj}'")
        if obj is None:
            return None
        else:
            if len(obj) > 0:
                for first_elmt in obj:
                    if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
                        break
                # be careful when comparing tensors here
                if not isinstance(first_elmt, list) or encode_nested_example(schema.feature, first_elmt) != first_elmt:
                    return [encode_nested_example(schema.feature, o) for o in obj]
            return list(obj)
    # Object with special encoding:
    # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks
    elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)):
        return schema.encode_example(obj) if obj is not None else None
    # Other object should be directly convertible to a native Arrow type (like Translation and Translation)
    return obj
356
features.py
Python
src/datasets/features/features.py
ba4d30c42e0702bd894c36777d7d2c0adf74516c
datasets
27
314,423
8
8
3
33
5
0
8
22
current_cover_position
Adjust CoverEntity property type hints in components (#73943) * Adjust CoverEntity property type hints in components * Revert changes to rflink * Revert changes to wilight
https://github.com/home-assistant/core.git
def current_cover_position(self) -> int | None:
    return self.bound(self.level)
19
cover.py
Python
homeassistant/components/fibaro/cover.py
10dc38e0ec27f7bef990ee431459342f9c3c52b4
core
1
297,103
11
7
5
36
8
0
12
44
max_temp
Use UnitOfTemperature in climate entities [g-l] (#83127) * Use UnitOfTemperature in climate entities [g-l] * Adjust gree * Adjust honeywell
https://github.com/home-assistant/core.git
def max_temp(self) -> float:
    if self.temperature_unit == UnitOfTemperature.CELSIUS:
        return TEMP_MAX
    return TEMP_MAX_F
21
climate.py
Python
homeassistant/components/gree/climate.py
68e454712dae5b65599ef12a025bc4446f7e3e6e
core
2
294,545
18
10
10
86
12
0
19
94
_get_entry_from_bridge
Add support for setting up encrypted samsung tvs from config flow (#68717) Co-authored-by: epenet <epenet@users.noreply.github.com>
https://github.com/home-assistant/core.git
def _get_entry_from_bridge(self) -> data_entry_flow.FlowResult:
    assert self._bridge
    data = self._base_config_entry()
    if self._bridge.token:
        data[CONF_TOKEN] = self._bridge.token
    return self.async_create_entry(
        title=self._title,
        data=data,
    )
54
config_flow.py
Python
homeassistant/components/samsungtv/config_flow.py
cc75cebfc5b3e9316cdbaf82c5c72437521f819b
core
2
285,033
127
20
44
632
34
0
193
917
preprocess_orderbook
Overhaul Portfolio class (#2021) * adds pythonic portfolio class * start calculate trades refactoring * adds comments to portfolio model - delete afterwards * finish calculate trades refactoring * restore original portfolio_model.py * implement calculate_allocations * adapt and test controller load, show, bench, alloc and perf * add old code that was ok * adapt controller * adapt portfolio_view * run black on pythonic_portfolio.py * fix crypto bug * change column name in example datasets * substitute portfolio_model.py * update show command * push cumulative returns calculation to model * fix last change in cumulative returns * add comments on possibly unused code * run black on changes * bring metrics from helper to model * push rolling metrics from view to model * Details and linting * Fix tests * remove empty attribute and rename class * fix view and controller rf * change returns calculation method * remove CASH from code * remove cash from tickers_list * run black on changes * change function name * adapt to PortfolioModel * fix tests * fix tests on help * fix linting * call metrics from PortfolioModel * call drawdown from model * fix some mypy issues * fix remaining mypy issues * fix test * Fix linting * Remove unused function * Small fixes * Remove old code and adjust summary to simply work * Update the Excel since CASH is no longer a thing * Fix tests * Update the csvs * Updates to usage of full_shares and more details * Fix -t flag for perf Co-authored-by: Jeroen Bouma <jer.bouma@gmail.com>
https://github.com/OpenBB-finance/OpenBBTerminal.git
def preprocess_orderbook(self):
    # descrbibe outputs
    try:
        # Convert Date to datetime
        self.__orderbook["Date"] = pd.to_datetime(self.__orderbook["Date"])

        # Sort orderbook by date
        self.__orderbook = self.__orderbook.sort_values(by="Date")

        # Capitalize Ticker and Type [of instrument...]
        self.__orderbook["Ticker"] = self.__orderbook["Ticker"].map(
            lambda x: x.upper()
        )
        self.__orderbook["Type"] = self.__orderbook["Type"].map(lambda x: x.upper())

        # Translate side: ["deposit", "buy"] -> 1 and ["withdrawal", "sell"] -> -1
        self.__orderbook["Side"] = self.__orderbook["Side"].map(
            lambda x: 1
            if x.lower() in ["deposit", "buy"]
            else (-1 if x.lower() in ["withdrawal", "sell"] else 0)
        )

        # Convert quantity to signed integer
        self.__orderbook["Quantity"] = (
            self.__orderbook["Quantity"] * self.__orderbook["Side"]
        )

        # Determining the investment/divestment value
        self.__orderbook["Investment"] = (
            self.__orderbook["Quantity"] * self.__orderbook["Price"]
            - self.__orderbook["Fees"]
        )

        # Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD)
        crypto_trades = self.__orderbook[self.__orderbook.Type == "CRYPTO"]
        self.__orderbook.loc[(self.__orderbook.Type == "CRYPTO"), "Ticker"] = [
            f"{crypto}-{currency}"
            for crypto, currency in zip(
                crypto_trades.Ticker, crypto_trades.Currency
            )
        ]

        # Create tickers dictionary with structure {'Type': [Ticker]}
        for ticker_type in set(self.__orderbook["Type"]):
            self.tickers[ticker_type] = list(
                set(
                    self.__orderbook[self.__orderbook["Type"].isin([ticker_type])][
                        "Ticker"
                    ]
                )
            )

        # Create list with tickers except cash
        self.tickers_list = list(set(self.__orderbook["Ticker"]))

        # Save orderbook inception date
        self.inception_date = self.__orderbook["Date"][0]

        # Save trades static data
        self.static_data = self.__orderbook.pivot(
            index="Ticker",
            columns=[],
            values=["Type", "Sector", "Industry", "Country"],
        )

    except Exception:
        console.print("Could not preprocess orderbook.")
362
portfolio_model.py
Python
openbb_terminal/portfolio/portfolio_model.py
2c3e10a128fa0ce4e937d8d50dc0cd6d7cd11485
OpenBBTerminal
6
152,970
62
14
23
267
43
0
83
291
parse
REFACTOR-#3768: change 'compute_chunksize' signature (#3769) Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru> Signed-off-by: Anatoly Myachev <anatoly.myachev@intel.com>
https://github.com/modin-project/modin.git
def parse(self, fname, num_splits, start, end, header, **kwargs):
    import pyarrow as pa
    import pyarrow.csv as csv

    bio = open(fname, "rb")
    # The header line for the CSV file
    first_line = bio.readline()
    bio.seek(start)
    to_read = header + first_line + bio.read(end - start)
    bio.close()
    table = csv.read_csv(
        BytesIO(to_read), parse_options=csv.ParseOptions(header_rows=1)
    )
    chunksize = compute_chunksize(table.num_columns, num_splits)
    chunks = [
        pa.Table.from_arrays(table.columns[chunksize * i : chunksize * (i + 1)])
        for i in range(num_splits)
    ]
    return chunks + [
        table.num_rows,
        pandas.Series(
            [t.to_pandas_dtype() for t in table.schema.types],
            index=table.schema.names,
        ),
    ]
174
parsers.py
Python
modin/core/storage_formats/pyarrow/parsers.py
0bdc482d6f1682e103b4c4d7ee7c4d505d2d3b1c
modin
3
212,498
45
14
20
308
29
0
77
228
value_as_date
Normalize built-in types and remove `Unknown` (#12252) * Use lower case names for built-in types Also incidentally apply TypeAlias marker. * Drop `Unknown` in favour of consistent usage of `Any` * Enable lazy annotations in conftest.py
https://github.com/bokeh/bokeh.git
def value_as_date(self) -> tuple[date, date] | None:
    if self.value is None:
        return None
    v1, v2 = self.value
    if isinstance(v1, numbers.Number):
        dt = datetime.utcfromtimestamp(v1 / 1000)
        d1 = date(*dt.timetuple()[:3])
    else:
        d1 = v1
    if isinstance(v2, numbers.Number):
        dt = datetime.utcfromtimestamp(v2 / 1000)
        d2 = date(*dt.timetuple()[:3])
    else:
        d2 = v2
    return d1, d2

value = Required(Tuple(Datetime, Datetime), help=)

value_throttled = Readonly(Required(Tuple(Datetime, Datetime)), help=)

start = Required(Datetime, help=)

end = Required(Datetime, help=)

step = Int(default=1, help=)

format = Override(default="%d %b %Y")
115
sliders.py
Python
bokeh/models/widgets/sliders.py
528d85e642340ef30ec91f30b65c7c43370f648d
bokeh
4
60,205
25
10
6
82
6
0
32
54
check_params
Balanced joint maximum mean discrepancy for deep transfer learning
https://github.com/jindongwang/transferlearning.git
def check_params(params):
    assert 'split' in params.keys(
    ), 'Params must include split (train, val, or test).'

    required = ['batch_size', 'pascal_root', 'im_shape']
    for r in required:
        assert r in params.keys(), 'Params must include {}'.format(r)
45
pascal_multilabel_datalayers.py
Python
code/deep/BJMMD/caffe/examples/pycaffe/layers/pascal_multilabel_datalayers.py
cc4d0564756ca067516f71718a3d135996525909
transferlearning
2
177,117
50
15
17
156
15
0
82
223
bfs_layers
Adds ```nx.bfs_layers``` method (#5879) * reformatted the files * reformatted the files * added final changes * changed descendants_at_distance * fixed comment in bfs_layers * fixed comment in bfs_layers
https://github.com/networkx/networkx.git
def bfs_layers(G, sources):
    if sources in G:
        sources = [sources]

    current_layer = list(sources)
    visited = set(sources)

    for source in current_layer:
        if source not in G:
            raise nx.NetworkXError(f"The node {source} is not in the graph.")

    # this is basically BFS, except that the current layer only stores the nodes at
    # same distance from sources at each iteration
    while current_layer:
        yield current_layer
        next_layer = list()
        for node in current_layer:
            for child in G[node]:
                if child not in visited:
                    visited.add(child)
                    next_layer.append(child)
        current_layer = next_layer
93
breadth_first_search.py
Python
networkx/algorithms/traversal/breadth_first_search.py
4a019f04d0e304ecd2f28b15d854e1282e03461d
networkx
8
300,610
15
13
7
65
9
0
17
58
forgiving_as_timestamp
Fail template functions when no default specified (#71687)
https://github.com/home-assistant/core.git
def forgiving_as_timestamp(value, default=_SENTINEL):
    try:
        return dt_util.as_timestamp(value)
    except (ValueError, TypeError):
        if default is _SENTINEL:
            raise_no_default("as_timestamp", value)
        return default
39
template.py
Python
homeassistant/helpers/template.py
4885331509eeffe50f42d76b234996467b06170f
core
3
216,308
62
13
36
363
16
0
110
318
event_fire
fix(consul): serialize to JSON only non string objects. Fixes 35215
https://github.com/saltstack/salt.git
def event_fire(consul_url=None, token=None, name=None, **kwargs):
    ret = {}
    query_params = {}

    if not consul_url:
        consul_url = _get_config()
        if not consul_url:
            log.error("No Consul URL found.")
            ret["message"] = "No Consul URL found."
            ret["res"] = False
            return ret

    if not name:
        raise SaltInvocationError('Required argument "name" is missing.')

    if "dc" in kwargs:
        query_params = kwargs["dc"]

    if "node" in kwargs:
        query_params = kwargs["node"]

    if "service" in kwargs:
        query_params = kwargs["service"]

    if "tag" in kwargs:
        query_params = kwargs["tag"]

    function = "event/fire/{}".format(name)
    res = _query(
        consul_url=consul_url,
        token=token,
        query_params=query_params,
        method="PUT",
        function=function,
    )

    if res["res"]:
        ret["res"] = True
        ret["message"] = "Event {} fired.".format(name)
        ret["data"] = res["data"]
    else:
        ret["res"] = False
        ret["message"] = "Cloning ACL item {} failed.".format(kwargs["name"])

    return ret
202
consul.py
Python
salt/modules/consul.py
50a17432015fb712ec4dc7d3ead79e8939e2bf96
salt
9
280,501
82
14
28
220
23
1
98
266
convert_to_legacy_optimizer
Move new optimizer out of optimizer_experimental/ directory. PiperOrigin-RevId: 488998585
https://github.com/keras-team/keras.git
def convert_to_legacy_optimizer(optimizer):
    if not isinstance(optimizer, base_optimizer.Optimizer):
        raise ValueError(
            "`convert_to_legacy_optimizer` should only be called "
            "on instances of `tf.keras.optimizers.Optimizer`, but "
            f"received {optimizer} of type {type(optimizer)}."
        )
    optimizer_name = optimizer.__class__.__name__.lower()
    config = optimizer.get_config()
    # Remove fields that only exist in experimental optimizer.
    keys_to_remove = [
        "weight_decay",
        "use_ema",
        "ema_momentum",
        "ema_overwrite_frequency",
        "jit_compile",
        "is_legacy_optimizer",
    ]
    for key in keys_to_remove:
        config.pop(key, None)
    # Learning rate can be a custom LearningRateSchedule, which is stored as
    # a dict in config, and cannot be deserialized.
    if isinstance(
        optimizer._learning_rate, learning_rate_schedule.LearningRateSchedule
    ):
        config["learning_rate"] = optimizer._learning_rate
    legacy_optimizer_config = {
        "class_name": optimizer_name,
        "config": config,
    }
    return deserialize(legacy_optimizer_config, use_legacy_optimizer=True)


@keras_export("keras.optimizers.get")
@keras_export("keras.optimizers.get")
113
__init__.py
Python
keras/optimizers/__init__.py
5a105aadbdc6fde2c2529280c4789864adbb81c7
keras
4
22,110
10
8
9
52
6
0
11
31
head
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
https://github.com/pypa/pipenv.git
def head(self, url, **kwargs):
    r

    kwargs.setdefault("allow_redirects", False)
    return self.request("HEAD", url, **kwargs)
32
sessions.py
Python
pipenv/patched/pip/_vendor/requests/sessions.py
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
pipenv
1
319,613
17
12
15
215
19
0
23
144
test_unset_document_storage_path
Feature: Dynamic document storage pathes (#916) * Added devcontainer * Add feature storage pathes * Exclude tests and add versioning * Check escaping * Check escaping * Check quoting * Echo * Escape * Escape : * Double escape \ * Escaping * Remove if * Escape colon * Missing \ * Esacpe : * Escape all * test * Remove sed * Fix exclude * Remove SED command * Add LD_LIBRARY_PATH * Adjusted to v1.7 * Updated test-cases * Remove devcontainer * Removed internal build-file * Run pre-commit * Corrected flak8 error * Adjusted to v1.7 * Updated test-cases * Corrected flak8 error * Adjusted to new plural translations * Small adjustments due to code-review backend * Adjusted line-break * Removed PAPERLESS prefix from settings variables * Corrected style change due to search+replace * First documentation draft * Revert changes to Pipfile * Add sphinx-autobuild with keep-outdated * Revert merge error that results in wrong storage path is evaluated * Adjust styles of generated files ... * Adds additional testing to cover dynamic storage path functionality * Remove unnecessary condition * Add hint to edit storage path dialog * Correct spelling of pathes to paths * Minor documentation tweaks * Minor typo * improving wrapping of filter editor buttons with new storage path button * Update .gitignore * Fix select border radius in non input-groups * Better storage path edit hint * Add note to edit storage path dialog re document_renamer * Add note to bulk edit storage path re document_renamer * Rename FILTER_STORAGE_DIRECTORY to PATH * Fix broken filter rule parsing * Show default storage if unspecified * Remove note re storage path on bulk edit * Add basic validation of filename variables Co-authored-by: Markus Kling <markus@markus-kling.net> Co-authored-by: Trenton Holmes <holmes.trenton@gmail.com> Co-authored-by: Michael Shamoon <4887959+shamoon@users.noreply.github.com> Co-authored-by: Quinn Casey <quinn@quinncasey.com>
https://github.com/paperless-ngx/paperless-ngx.git
def test_unset_document_storage_path(self):
    self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)

    bulk_edit.set_storage_path(
        [self.doc1.id],
        self.sp1.id,
    )

    self.assertEqual(Document.objects.filter(storage_path=None).count(), 4)

    bulk_edit.set_storage_path(
        [self.doc1.id],
        None,
    )

    self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)

    self.async_task.assert_called()
    args, kwargs = self.async_task.call_args
    self.assertCountEqual(kwargs["document_ids"], [self.doc1.id])
136
test_api.py
Python
src/documents/tests/test_api.py
69ef26dab04d51e7e102dcb33cd98ddc6ad975fd
paperless-ngx
1
129,598
12
12
4
53
6
0
12
52
_configure_experiment_defaults
Comet Integration (#20766) This PR adds a `CometLoggerCallback` to the Tune Integrations, allowing users to log runs from Ray to [Comet](https://www.comet.ml/site/). Co-authored-by: Michael Cullan <mjcullan@gmail.com> Co-authored-by: Antoni Baum <antoni.baum@protonmail.com>
https://github.com/ray-project/ray.git
def _configure_experiment_defaults(self):
    for option in self._exclude_autolog:
        if not self.experiment_kwargs.get(option):
            self.experiment_kwargs[option] = False
32
comet.py
Python
python/ray/tune/integration/comet.py
3d79815cd08c1be8e56c245e662f34366523847e
ray
3
322,116
61
17
44
349
19
0
114
705
build_vocab
Update neural search readme and Add Paddle Serving Support (#1558) * add recall inference similarity * update examples * updatea readme * update dir name * update neural search readme * update milvus readme * update domain adaptive pretraining readme * fix the mistakes * update readme * add recall Paddle Serving Support * update readme * update readme and format the code * reformat the files * move the files * reformat the code * remove redundant code Co-authored-by: Zeyu Chen <chenzeyu01@baidu.com> Co-authored-by: tianxin <tianxin04@baidu.com>
https://github.com/PaddlePaddle/PaddleNLP.git
def build_vocab(corpus, tokenizer, encoding_model, feat):
    word_examples, feat_examples, rel_examples = corpus

    # Build word vocab and feature vocab
    if encoding_model == "lstm":
        # Using token_to_idx to specifies the mapping
        # relationship between tokens and indices
        word_vocab = Vocab.build_vocab(
            word_examples,
            min_freq=2,
            token_to_idx={"[PAD]": 0, "[UNK]": 1, "[BOS]": 2, "[EOS]": 3},
            unk_token="[UNK]",
            pad_token="[PAD]",
            bos_token="[BOS]",
            eos_token="[EOS]",
        )
        if feat == "pos":
            feat_vocab = Vocab.build_vocab(
                feat_examples,
                token_to_idx={"[BOS]": 0, "[EOS]": 1},
                bos_token="[BOS]",
                eos_token="[EOS]",
            )
        else:
            feat_vocab = Vocab.build_vocab(
                feat_examples,
                token_to_idx={"[PAD]": 0, "[UNK]": 1, "[BOS]": 2, "[EOS]": 3},
                unk_token="[UNK]",
                pad_token="[PAD]",
                bos_token="[BOS]",
                eos_token="[EOS]",
            )
    else:
        word_vocab = tokenizer.vocab
        feat_vocab = None

    # Build relation vocab
    rel_vocab = Vocab.build_vocab(
        rel_examples,
        token_to_idx={"[BOS]": 0, "[EOS]": 1, "[UNK]": 2},
        bos_token="[BOS]",
        eos_token="[EOS]",
        unk_token="[UNK]",
    )
    return word_vocab, feat_vocab, rel_vocab
207
data.py
Python
examples/dependency_parsing/ddparser/data.py
621357338437ee420eabbbf5ab19065bc85e73a5
PaddleNLP
3
266,782
14
13
2
41
7
0
14
29
create_single_host
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
https://github.com/ansible/ansible.git
def create_single_host(name, variables):  # type: (str, t.Dict[str, t.Union[str, int]]) -> Inventory
    return Inventory(host_groups=dict(all={name: variables}))
24
host_profiles.py
Python
test/lib/ansible_test/_internal/host_profiles.py
a06fa496d3f837cca3c437ab6e9858525633d147
ansible
1
159,537
6
7
3
27
5
0
6
20
keys
Enable `mypy` `override` check and fix errors (#10965) * enable override check, fix errors in events.py and utils package * fix errors in shared package * fix errors in nlu package * fix errors in policies * fix last errors in core package * docstring fixes * use generics to fix override errors in brokers and events * address review comments * make attribute private * fix error in tracker featurizer * modify tracker with cached states * refactor tracker stores with mixin serialisation class * undo serialise_tracker method as staticmethod, change type in tracker from_dict method * revert to staticmethod for tracker stores, fix type in __contains__ * address final review comments * fix failed tests
https://github.com/RasaHQ/rasa.git
def keys(self) -> Iterable[Text]:
    raise NotImplementedError()
15
tracker_store.py
Python
rasa/core/tracker_store.py
291f6c6b9e85a9bce49289d15a72027b00b95f69
rasa
1
37,487
7
10
2
37
5
0
7
13
require_pyctcdecode
Update all require decorators to use skipUnless when possible (#16999)
https://github.com/huggingface/transformers.git
def require_pyctcdecode(test_case):
    return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
20
testing_utils.py
Python
src/transformers/testing_utils.py
57e6464ac9a31156f1c93e59107323e6ec01309e
transformers
1
285,202
8
7
9
42
7
1
9
14
get_macro_parameters
Here we merge all API Refactor related branches (#2236) * Update api.py * Updated forex menu * refactor ycrv command * refactor ycrv command black * refactor ecocal command * Minh changes * Adding space to test pushing * title fix ecocal df * get economic calendar annotation * fix investingcom tests * refactor index command * refactor overview command * give defaults to wsj view function args * rename date args investincom * refacto bigmac command * fix ecocal typo * refactor rtps command * alphavantage gdp * alphavantage gdp per capita * alphavantage cpi * alphavantage tyld * alphavantage inf * refactor macro command * refactor macro command w helpers * refactor treasury command * fix macro on terminal * treasury labels * refactor maturities * update treasury maturities doc strings * refactor get economic calendar finhub * refactor map command api * display map filter choices * route economy api to performance map * route economy api to performance map * display group choices on valuation command * refactor performance and valuation commands * refactor spectrum model and view * add choices to spectrum controller * delete image after view * fix model tests finviz * fix finciz view tests * refactor futures * fix some tests * fix more tests * fix controller test * refactor fred series notes * update fred notes docstring * refacto fred series ids * fix pred and qa when empty datasets * refactor fred * uncomment stuff * refacto get series data * fix some tests * set defaults on args * refactor fred yield curve * black * fix spell and remove ecocal names * fix linting * linting * pylint fix * change dangerous defaults * Working through crypto fixes (#2256) * Working through crypto fixes * Continued adding crypto stuff * Added crypto overview * Added test fixes * Added fixtures * Fixed tests * Fixed charting issue * Removed broken APIs * Final adjustments * Added test fixes * map get groups and get ycrv countries into old api * exposed econdb helper funcs * remove helpers * refactor search indices * linting * refactor arg currency * pylint from currency * Started switching crpyto ascending to ascend * Merging * Portfolio model arguements, params, and docstring * Refactored for etf commands (#2292) * Refactored for etf commands * Fixed tests * Added load command * Fixed menu * Portfolio logic fixes * Added econometrics (#2260) * Added econometrics * Fixed tests * Simplified API * Added test fixes * Added test csv * Allowed examples to be loaded * Fund refactor (#2291) * Fund refactor * Changed fund_name and fund to name * Changed ascending to ascend * Stock menu refactoring for easier API usage (#2194) * Stocks refactoring for easier API usage * Linting * Refactor newly added features * Linting * Fixing tests * Refactor common files used by stocks menu * Fixing flake8 * Fix linting and tests * Linting * Fix flake8 * refactor insider_data * refactor mentions * refactor watchlist * refactor sentiment * refactor sentiment * fix yahoofinance tests * refactor load and candle * refactor get_news and display_news * refactor stocks.ins.act * candle default matplotlib * fix yahoofinance_view tests * fix ark model tests * fix ark view tests * fix business insider model * fix business insider view * refactor csimarket model * fix tests csi market model * update dd controller * fix get suppliers tests * fix dd controller tests * fix finhub tests * fix finviz tests * fix fmp tests * fix marketwatch tests * corrected argument keywords in test_bt_model * corrected argument keywords in test_bt_view * refactor fa 
controller * refactor marketwatch view * refactor gov controller * fix tests fa av * fix tests elect * fix dcf tests * fix polygon tests * fix fmp tests * fix quiverquant tests * fix yahoofinance fa tests * fix more fa tests * fix insider tests * fix more tests * fix more tests * fix options tests * fix stock gov tests * fix tests test_ba_controller * fix tests for test_finviz_compare_model.py * fixed 2 tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fix final tests * fixed tests * fixed tests * Fix tests * black * forgot to black tests * fixed tests * fixed tests * fixed tests * fixed tests * flakefix * Tests + code : Stocks / Discovery * fix tests * added recorder * fixed tests * fixed tests * black * black * remove unused imports * refactor display raw * sia dicts fix * pylint * linting * remove dangerous default * fix tests * fix beta model test * black * skip screener qa test * change sector path to sectors * update tests readme * fix metric defaults * black * substitute lost ticker * defaults cpic * another round on sia * refactor cramer * reduce default tweets on sentiment * refactor yf hist, corr, volume * arkorders default * refactor income, balance, cashflow * refacto scorr, screener, getfinnhub * refactor stockgrid * ibkr refactor * another round on stockgrid * add dividens end point * refactor discovery endpoints * update docstrings with similar input * refactor messages * refactor ba * refactor regioons * refactor twitter sentiment * refactor hist * refactor regions * give default to timeframe * refactor bunch of defaults and arg names * remove leftover imports * refactor vwap * let tests run * fix tests * fix stock tests * fix stockanalysis tests * flake * MYPY * Made important changes * added fixes * Fixed big issue * Added fixes to tests * fix qa tests * fix tests * fix 1 more test * last stocks failing * fix crypto test Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: montezdesousa <montezdesousa@gmail.com> Co-authored-by: hjoaquim <h.joaquim@campus.fct.unl.pt> Co-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com> Co-authored-by: colin99d <colin99delahunty@gmail.com> * fix portfolio tests * change period to window * update ca docstrings * refactor get_similar_companies func * Fixed * Update CI * Update CI 2 * Update CI 3 * Update dependencies Co-authored-by: colin99d <colin99delahunty@gmail.com> Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com> Co-authored-by: montezdesousa <montezdesousa@gmail.com> Co-authored-by: James Simmons <simmonsj330@gmail.com> Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com> Co-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com> Co-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com> Co-authored-by: northern-64bit <75195383+northern-64bit@users.noreply.github.com> Co-authored-by: hjoaquim <h.joaquim@campus.fct.unl.pt>
https://github.com/OpenBB-finance/OpenBBTerminal.git
def get_macro_parameters() -> Dict[str, Dict[str, str]]:
    return PARAMETERS


@log_start_end(log=logger)
@log_start_end(log=logger)
19
econdb_model.py
Python
openbb_terminal/economy/econdb_model.py
9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b
OpenBBTerminal
1
109,225
5
8
2
26
4
0
5
11
safe_first_element
BUG: modified safe_first_element and added tests Co-authored-by: Thomas A Caswell <tcaswell@gmail.com>
https://github.com/matplotlib/matplotlib.git
def safe_first_element(obj):
    return _safe_first_non_none(obj, skip_none=False)
15
__init__.py
Python
lib/matplotlib/cbook/__init__.py
a8c01a42c5bbe96fa6c536c72e6c26954c798908
matplotlib
1
186,627
11
11
8
56
6
0
13
56
_autohsts_fetch_state
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <ferrand.ad@gmail.com>
https://github.com/certbot/certbot.git
def _autohsts_fetch_state(self) -> None:
    try:
        self._autohsts = self.storage.fetch("autohsts")
    except KeyError:
        self._autohsts = {}
31
configurator.py
Python
certbot-apache/certbot_apache/_internal/configurator.py
7d9e9a49005de7961e84d2a7c608db57dbab3046
certbot
2
60,390
66
13
16
177
12
0
90
176
CheckComment
Balanced joint maximum mean discrepancy for deep transfer learning
https://github.com/jindongwang/transferlearning.git
def CheckComment(comment, filename, linenum, error):
    match = _RE_PATTERN_TODO.match(comment)
    if match:
        # One whitespace is correct; zero whitespace is handled elsewhere.
        leading_whitespace = match.group(1)
        if len(leading_whitespace) > 1:
            error(filename, linenum, 'whitespace/todo', 2,
                  'Too many spaces before TODO')

        username = match.group(2)
        if not username:
            error(filename, linenum, 'readability/todo', 2,
                  'Missing username in TODO; it should look like '
                  '"// TODO(my_username): Stuff."')

        middle_whitespace = match.group(3)
        # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
        if middle_whitespace != ' ' and middle_whitespace != '':
            error(filename, linenum, 'whitespace/todo', 2,
                  'TODO(my_username) should be followed by a space')
105
cpp_lint.py
Python
code/deep/BJMMD/caffe/scripts/cpp_lint.py
cc4d0564756ca067516f71718a3d135996525909
transferlearning
6
270,990
8
8
12
31
3
0
9
34
make_adapt_function
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def make_adapt_function(self):
    if self._adapt_function is not None:
        return self._adapt_function
65
base_preprocessing_layer.py
Python
keras/engine/base_preprocessing_layer.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
4
20,600
11
7
38
39
7
0
11
28
explain
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def explain(self, depth=16) -> str:
    return self.explain_exception(self, depth)

markInputline = mark_input_line
21
exceptions.py
Python
pipenv/patched/notpip/_vendor/pyparsing/exceptions.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
1
118,626
26
10
17
148
20
0
33
176
test_set_page_config_first
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
https://github.com/streamlit/streamlit.git
def test_set_page_config_first(self):
    fake_enqueue = lambda msg: None
    ctx = ScriptRunContext(
        "TestSessionID",
        fake_enqueue,
        "",
        SessionState(),
        UploadedFileManager(),
    )

    ctx.on_script_start()

    markdown_msg = ForwardMsg()
    markdown_msg.delta.new_element.markdown.body = "foo"

    msg = ForwardMsg()
    msg.page_config_changed.title = "foo"

    ctx.enqueue(markdown_msg)
    with self.assertRaises(StreamlitAPIException):
        ctx.enqueue(msg)
84
report_context_test.py
Python
lib/tests/streamlit/report_context_test.py
704eab3478cf69847825b23dabf15813a8ac9fa2
streamlit
1
130,160
30
15
10
96
9
0
32
158
update_if_absent
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def update_if_absent(self, **kwargs):
    for arg in kwargs:
        if hasattr(self, arg):
            if getattr(self, arg) is None:
                setattr(self, arg, kwargs[arg])
        else:
            raise ValueError(
                "Invalid RayParams parameter in"
                " update_if_absent: %s" % arg
            )

    self._check_usage()
58
parameter.py
Python
python/ray/_private/parameter.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
4
248,339
8
9
3
42
7
0
8
33
remove_subscriber
Lay some foundation work to allow workers to only subscribe to some kinds of messages, reducing replication traffic. (#12672)
https://github.com/matrix-org/synapse.git
def remove_subscriber(self, conn):
    for subscribers in self._subscribers_by_channel.values():
        subscribers.discard(conn)
25
_base.py
Python
tests/replication/_base.py
177b884ad7cc1ecdd92ff74188732734df203150
synapse
2
153,549
106
16
48
641
58
0
157
655
_read
REFACTOR-#3853: interacting with Dask interface through 'DaskWrapper' class (#3854) Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com> Co-authored-by: Dmitry Chigarev <dchigarev@users.noreply.github.com> Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru> Signed-off-by: Anatoly Myachev <anatoly.myachev@intel.com>
https://github.com/modin-project/modin.git
def _read(cls, path_or_buf, **kwargs):
    path_or_buf = cls.get_path_or_buffer(path_or_buf)
    if isinstance(path_or_buf, str):
        if not cls.file_exists(path_or_buf):
            return cls.single_worker_read(path_or_buf, **kwargs)
        path_or_buf = cls.get_path(path_or_buf)
    elif not cls.pathlib_or_pypath(path_or_buf):
        return cls.single_worker_read(path_or_buf, **kwargs)
    if not kwargs.get("lines", False):
        return cls.single_worker_read(path_or_buf, **kwargs)
    with OpenFile(path_or_buf, "rb") as f:
        columns = pandas.read_json(BytesIO(b"" + f.readline()), lines=True).columns
    kwargs["columns"] = columns
    empty_pd_df = pandas.DataFrame(columns=columns)

    with OpenFile(path_or_buf, "rb", kwargs.get("compression", "infer")) as f:
        partition_ids = []
        index_ids = []
        dtypes_ids = []

        column_widths, num_splits = cls._define_metadata(empty_pd_df, columns)

        args = {"fname": path_or_buf, "num_splits": num_splits, **kwargs}

        splits = cls.partitioned_file(
            f,
            num_partitions=NPartitions.get(),
        )
        for start, end in splits:
            args.update({"start": start, "end": end})
            partition_id = cls.deploy(cls.parse, num_returns=num_splits + 3, **args)
            partition_ids.append(partition_id[:-3])
            index_ids.append(partition_id[-3])
            dtypes_ids.append(partition_id[-2])

    # partition_id[-1] contains the columns for each partition, which will be useful
    # for implementing when `lines=False`.
    row_lengths = cls.materialize(index_ids)
    new_index = pandas.RangeIndex(sum(row_lengths))

    dtypes = cls.get_dtypes(dtypes_ids)
    partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)

    if isinstance(dtypes, pandas.Series):
        dtypes.index = columns
    else:
        dtypes = pandas.Series(dtypes, index=columns)

    new_frame = cls.frame_cls(
        np.array(partition_ids),
        new_index,
        columns,
        row_lengths,
        column_widths,
        dtypes=dtypes,
    )
    new_frame.synchronize_labels(axis=0)
    return cls.query_compiler_cls(new_frame)
398
json_dispatcher.py
Python
modin/core/io/text/json_dispatcher.py
97769988a6f19e4b76f34238c97bf159ee7626a5
modin
7
270,991
4
6
2
19
3
0
4
18
is_adapted
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def is_adapted(self):
    return self._is_adapted
10
base_preprocessing_layer.py
Python
keras/engine/base_preprocessing_layer.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
275,870
132
16
67
619
36
0
251
781
load_weights_from_hdf5_group
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def load_weights_from_hdf5_group(f, model):
    if "keras_version" in f.attrs:
        original_keras_version = f.attrs["keras_version"]
        if hasattr(original_keras_version, "decode"):
            original_keras_version = original_keras_version.decode("utf8")
    else:
        original_keras_version = "1"
    if "backend" in f.attrs:
        original_backend = f.attrs["backend"]
        if hasattr(original_backend, "decode"):
            original_backend = original_backend.decode("utf8")
    else:
        original_backend = None

    filtered_layers = []
    for layer in model.layers:
        weights = _legacy_weights(layer)
        if weights:
            filtered_layers.append(layer)

    layer_names = load_attributes_from_hdf5_group(f, "layer_names")
    filtered_layer_names = []
    for name in layer_names:
        g = f[name]
        weight_names = load_attributes_from_hdf5_group(g, "weight_names")
        if weight_names:
            filtered_layer_names.append(name)
    layer_names = filtered_layer_names
    if len(layer_names) != len(filtered_layers):
        raise ValueError(
            f"Layer count mismatch when loading weights from file. "
            f"Model expected {len(filtered_layers)} layers, found "
            f"{len(layer_names)} saved layers."
        )

    # We batch weight value assignments in a single backend call
    # which provides a speedup in TensorFlow.
    weight_value_tuples = []
    for k, name in enumerate(layer_names):
        g = f[name]
        layer = filtered_layers[k]
        symbolic_weights = _legacy_weights(layer)
        weight_values = load_subset_weights_from_hdf5_group(g)
        weight_values = preprocess_weights_for_loading(
            layer, weight_values, original_keras_version, original_backend
        )
        if len(weight_values) != len(symbolic_weights):
            raise ValueError(
                f"Weight count mismatch for layer #{k} (named {layer.name} in the "
                f"current model, {name} in the save file). "
                f"Layer expects {len(symbolic_weights)} weight(s). Received "
                f"{len(weight_values)} saved weight(s)"
            )
        weight_value_tuples += zip(symbolic_weights, weight_values)

    if "top_level_model_weights" in f:
        symbolic_weights = (
            model._trainable_weights + model._non_trainable_weights
        )
        weight_values = load_subset_weights_from_hdf5_group(
            f["top_level_model_weights"]
        )
        if len(weight_values) != len(symbolic_weights):
            raise ValueError(
                f"Weight count mismatch for top-level weights when loading weights "
                f"from file. "
                f"Model expects {len(symbolic_weights)} top-level weight(s). "
                f"Received {len(weight_values)} saved top-level weight(s)"
            )
        weight_value_tuples += zip(symbolic_weights, weight_values)
    backend.batch_set_value(weight_value_tuples)

    # Perform any layer defined finalization of the layer state.
    for layer in model._flatten_layers():
        layer.finalize_state()
327
hdf5_format.py
Python
keras/saving/hdf5_format.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
15
288,146
6
6
3
22
4
0
6
20
characteristic_handle
Add ESPHome BleakClient (#78911) Co-authored-by: Paulus Schoutsen <balloob@gmail.com>
https://github.com/home-assistant/core.git
def characteristic_handle(self) -> int:
    return self.__characteristic_handle
12
descriptor.py
Python
homeassistant/components/esphome/bluetooth/descriptor.py
7042d6d35be54865b1252c0b28a50cce1a92eabc
core
1
309,596
26
13
10
129
15
0
33
86
_async_migrate_options_from_data
Add LG webOS Smart TV config flow support (#64117) * Add webOS Smart TV config flow support (#53256) * Add Webostv config flow * Fix tests mocks and apply review comments * Apply review comments * Change config flow to use ssdp UDN as unique_id * Fix device info * More review comments * Fix _async_check_configured_entry * Remove turn on script * Add webOS Smart TV device triggers (#53752) * Add webOS Smart TV config flow support (#53256) * Add Webostv config flow * Fix tests mocks and apply review comments * Apply review comments * Change config flow to use ssdp UDN as unique_id * Fix device info * More review comments * Fix _async_check_configured_entry * Remove turn on script * Add webOS Smart TV device triggers (#53752) * Fix webOS Smart TV mypy and pylint errors (#62620) * Change webOS Smart TV PyPi aiopylgtv package to bscpylgtv (#62633) * Change webOS Smart TV PyPi aiopylgtv package to bscpylgtv * Update bscpylgtv to 0.2.8 (revised websockets requirment) * Change webOS Smart TV PyPi package to aiowebostv (#63759) * Change webOS Smart TV PyPi package to aiowebostv * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * webOS TV check UUID for user added device (#63817) * webOS TV check uuid when for user added device * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Add test for form abort and host update Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Rework webOS Smart TV device trigger to custom trigger platform (#63950) * Rework webOS Smart TV device trigger to custom trigger platform * Review comments and add tests * Fix webOS TV import from YAML (#63996) * Fix webOS TV import from YAML * Fix requirements * Migrate YAML entities unique id to UUID * Add backoff to migration task delay * Assert result data and unique_id * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Add codeowner Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
https://github.com/home-assistant/core.git
def _async_migrate_options_from_data(hass, config_entry):
    if config_entry.options:
        return

    config = config_entry.data
    options = {}

    # Get Preferred Sources
    if sources := config.get(CONF_CUSTOMIZE, {}).get(CONF_SOURCES):
        options[CONF_SOURCES] = sources
        if not isinstance(sources, list):
            options[CONF_SOURCES] = sources.split(",")

    hass.config_entries.async_update_entry(config_entry, options=options)
79
__init__.py
Python
homeassistant/components/webostv/__init__.py
dee843bf6e5ca84a94f336a239f6a6138c4c28e6
core
4
131,545
6
8
21
36
6
2
7
17
test_list_named_actors_detached
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def test_list_named_actors_detached(ray_start_regular):
    address = ray_start_regular["address"]
    driver_script =
driver_script = """ import ray ray.init(address="{}", namespace="default_test_namespace")@ray.remote
36
test_list_actors.py
Python
python/ray/tests/test_list_actors.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
1
95,916
8
6
5
22
3
0
8
22
analytics_instance
ref(notifications): adds analytics instance (#31441) This PR adds an optional instance for notification analytics corresponding to the signature here: https://github.com/getsentry/sentry/blob/master/src/sentry/analytics/base.py#L20 The idea is instead of the caller function decomposing an object (say a subscription) into all the fields we want, we pass in the instance to analytics and let it pull the fields from that instance.
https://github.com/getsentry/sentry.git
def analytics_instance(self) -> Any | None:
    return None
12
base.py
Python
src/sentry/notifications/notifications/base.py
00652b036135d57ffb4b312ba4bf6f74f25e094b
sentry
1
313,444
19
13
9
141
15
0
23
70
test_hidden_by_str_not_allowed
Enforce RegistryEntryHider in entity registry (#73219)
https://github.com/home-assistant/core.git
async def test_hidden_by_str_not_allowed(hass):
    reg = er.async_get(hass)

    with pytest.raises(ValueError):
        reg.async_get_or_create(
            "light", "hue", "1234", hidden_by=er.RegistryEntryHider.USER.value
        )

    entity_id = reg.async_get_or_create("light", "hue", "1234").entity_id
    with pytest.raises(ValueError):
        reg.async_update_entity(entity_id, hidden_by=er.RegistryEntryHider.USER.value)
80
test_entity_registry.py
Python
tests/helpers/test_entity_registry.py
4435c641decd0269e03ba752c35e0aca468c1ab3
core
1
300,598
9
12
3
49
7
0
9
34
async_stop_cover
Support this variable in template cover actions (#71793)
https://github.com/home-assistant/core.git
async def async_stop_cover(self, **kwargs):
    if self._stop_script:
        await self.async_run_script(self._stop_script, context=self._context)
29
cover.py
Python
homeassistant/components/template/cover.py
83080dbba8c87f43871d1c8f77166588c56f0663
core
2
10,924
44
10
33
141
16
0
53
182
mixin_distributed_feature_parser
refactor: rename pod to deployment (#4230) * refactor: rename pod to deployment * style: fix overload and cli autocomplete * fix: undo daemon mistake * refactor: leftover cleanup * fix: more test fixes * fix: more fixes * fix: more fixes * fix: more fixes * fix: more tests * fix: fix more tests * refactor: fix more tests * refactor: more tests fixes * refactor: rename pea to pod * refactor: adjust docs * refactor: complete pea renaming * refactor: more fixes * fix: pea_type in k8s yamls * fix: adjust pod args name * refactor: rename peapods parser folder * fix: da init Co-authored-by: Jina Dev Bot <dev-bot@jina.ai>
https://github.com/jina-ai/jina.git
def mixin_distributed_feature_parser(parser):
    gp = add_arg_group(parser, title='Distributed')

    gp.add_argument(
        '--quiet-remote-logs',
        action='store_true',
        default=False,
        help='Do not display the streaming of remote logs on local console',
    )

    gp.add_argument(
        '--upload-files',
        type=str,
        nargs='*',
        metavar='FILE',
        help=,
    )

    gp.add_argument(
        '--disable-remote',
        action='store_true',
        default=False,
        help='If set, remote pod invocation is avoided. This is used by pods created by JinaD'
        if _SHOW_ALL_ARGS
        else argparse.SUPPRESS,
    )
83
distributed.py
Python
jina/parsers/orchestrate/runtimes/distributed.py
13edc16d806fb5d77a6849551178ccc75937f25f
jina
2
132,217
17
11
6
83
8
0
18
44
setup_process_group
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def setup_process_group(worker_addresses, index):
    tf_config = {
        "cluster": {"worker": worker_addresses},
        "task": {"type": "worker", "index": index},
    }
    os.environ["TF_CONFIG"] = json.dumps(tf_config)
45
tensorflow.py
Python
python/ray/tune/integration/tensorflow.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
1
64,405
33
14
16
164
14
0
40
24
update_bin_on_delete
fix: avoid creating bins without item-wh Co-Authored-By: Shadrak Gurupnor <30501401+shadrak98@users.noreply.github.com> Co-Authored-By: Saurabh <saurabh6790@gmail.com>
https://github.com/frappe/erpnext.git
def update_bin_on_delete(row, doctype):
    from erpnext.stock.stock_balance import (
        get_indented_qty,
        get_ordered_qty,
        get_reserved_qty,
        update_bin_qty,
    )

    qty_dict = {}

    if doctype == "Sales Order":
        qty_dict["reserved_qty"] = get_reserved_qty(row.item_code, row.warehouse)
    else:
        if row.material_request_item:
            qty_dict["indented_qty"] = get_indented_qty(row.item_code, row.warehouse)

        qty_dict["ordered_qty"] = get_ordered_qty(row.item_code, row.warehouse)

    if row.warehouse:
        update_bin_qty(row.item_code, row.warehouse, qty_dict)
103
accounts_controller.py
Python
erpnext/controllers/accounts_controller.py
c36bd7e1a6fe48c5fff4765e843571a0d6560dd1
erpnext
4
26,686
12
9
4
64
8
1
13
30
test_payment_refund_or_void_no_payment
Fix payment flow (#9504) * Do not capture payment again when it should be refunded or voided * Do not create order when then is ongoing refund
https://github.com/saleor/saleor.git
def test_payment_refund_or_void_no_payment(refund_mock, void_mock):
    # when
    gateway.payment_refund_or_void(None, get_plugins_manager(), None)

    # then
    refund_mock.assert_not_called()
    void_mock.assert_not_called()


@patch("saleor.payment.gateway.refund")
@patch("saleor.payment.gateway.refund")
30
test_gateway.py
Python
saleor/payment/tests/test_gateway.py
0881beec1ac02dfa97525c5173687defb356d85c
saleor
1
100,933
61
16
31
387
41
0
82
587
_multi_option_control
Core updates - Change loss loading mechanism - Autosize tooltips based on content size - Random linting + code modernisation
https://github.com/deepfakes/faceswap.git
def _multi_option_control(self, option_type):
    logger.debug("Adding %s group: %s", option_type, self.option.name)
    help_intro, help_items = self._get_multi_help_items(self.option.helptext)
    ctl = ttk.LabelFrame(self.frame,
                         text=self.option.title,
                         name=f"{option_type}_labelframe",
                         style=f"{self._style}Group.TLabelframe")
    holder = AutoFillContainer(ctl,
                               self.option_columns,
                               self.option_columns,
                               style=f"{self._style}Group.")
    for choice in self.option.choices:
        if option_type == "radio":
            ctl = ttk.Radiobutton
            style = f"{self._style}Group.TRadiobutton"
        else:
            ctl = MultiOption
            style = f"{self._style}Group.TCheckbutton"

        ctl = ctl(holder.subframe,
                  text=choice.replace("_", " ").title(),
                  value=choice,
                  variable=self.option.tk_var,
                  style=style)
        if choice.lower() in help_items:
            self.helpset = True
            helptext = help_items[choice.lower()]
            helptext = f"{helptext}\n\n - {help_intro}"
            _get_tooltip(ctl, text=helptext)
        ctl.pack(anchor=tk.W, fill=tk.X)
        logger.debug("Added %s option %s", option_type, choice)
    return holder.parent
220
control_helper.py
Python
lib/gui/control_helper.py
bad5025aea1adb9126580e14e064e6c99089243d
faceswap
4
247,267
70
17
29
347
19
0
89
400
test_include_context
Add type hints to `tests/rest/client` (#12108) * Add type hints to `tests/rest/client` * newsfile * fix imports * add `test_account.py` * Remove one type hint in `test_report_event.py` * change `on_create_room` to `async` * update new functions in `test_third_party_rules.py` * Add `test_filter.py` * add `test_rooms.py` * change to `assertEquals` to `assertEqual` * lint
https://github.com/matrix-org/synapse.git
def test_include_context(self) -> None:
    # The other user sends some messages
    self.helper.send(self.room, body="Hi!", tok=self.other_access_token)
    self.helper.send(self.room, body="There!", tok=self.other_access_token)

    channel = self.make_request(
        "POST",
        "/search?access_token=%s" % (self.access_token,),
        {
            "search_categories": {
                "room_events": {
                    "keys": ["content.body"],
                    "search_term": "Hi",
                    "event_context": {"include_profile": True},
                }
            }
        },
    )

    # Check we get the results we expect -- one search result, of the sent
    # messages
    self.assertEqual(channel.code, 200)
    results = channel.json_body["search_categories"]["room_events"]
    self.assertEqual(results["count"], 1)
    self.assertEqual(results["results"][0]["result"]["content"]["body"], "Hi!")

    # We should get context info, like the two users, and the display names.
    context = results["results"][0]["context"]
    self.assertEqual(len(context["profile_info"].keys()), 2)
    self.assertEqual(
        context["profile_info"][self.other_user_id]["displayname"], "otheruser"
    )
199
test_rooms.py
Python
tests/rest/client/test_rooms.py
2ffaf30803f93273a4d8a65c9e6c3110c8433488
synapse
1
308,633
33
14
23
171
16
0
40
236
as_dict
Add strict typing to `core.py` (2) - State (#63240)
https://github.com/home-assistant/core.git
def as_dict(self) -> dict[str, Collection[Any]]:
    if not self._as_dict:
        last_changed_isoformat = self.last_changed.isoformat()
        if self.last_changed == self.last_updated:
            last_updated_isoformat = last_changed_isoformat
        else:
            last_updated_isoformat = self.last_updated.isoformat()
        self._as_dict = {
            "entity_id": self.entity_id,
            "state": self.state,
            "attributes": dict(self.attributes),
            "last_changed": last_changed_isoformat,
            "last_updated": last_updated_isoformat,
            "context": self.context.as_dict(),
        }
    return self._as_dict
103
core.py
Python
homeassistant/core.py
3a32fe9a344f831029e5ab9c6237b44cdad6d3af
core
3
130,955
66
9
17
191
15
1
104
189
test_replicas_delayed_startup
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def test_replicas_delayed_startup():
    config = AutoscalingConfig(
        min_replicas=1,
        max_replicas=200,
        target_num_ongoing_requests_per_replica=1,
        upscale_delay_s=0,
        downscale_delay_s=100000,
    )

    policy = BasicAutoscalingPolicy(config)

    new_num_replicas = policy.get_decision_num_replicas([100], 1)
    assert new_num_replicas == 100

    # New target is 100, but no new replicas finished spinning up during this
    # timestep.
    new_num_replicas = policy.get_decision_num_replicas([100], 100)
    assert new_num_replicas == 100

    # Two new replicas spun up during this timestep.
    new_num_replicas = policy.get_decision_num_replicas([100, 20, 3], 100)
    assert new_num_replicas == 123

    # A lot of queries got drained and a lot of replicas started up, but
    # new_num_replicas should not decrease, because of the downscale delay.
    new_num_replicas = policy.get_decision_num_replicas([6, 2, 1, 1], 123)
    assert new_num_replicas == 123


@pytest.mark.parametrize("delay_s", [30.0, 0.0])
@pytest.mark.parametrize("delay_s", [30.0, 0.0])
110
test_autoscaling_policy.py
Python
python/ray/serve/tests/test_autoscaling_policy.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
1
272,207
82
13
28
288
42
1
110
284
_train_with_recompute
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _train_with_recompute(n_steps):
    img_dim, n_channels, batch_size = 256, 1, 4
    x, y = _get_dummy_data(img_dim, n_channels, batch_size)
    # This model is the same model as _get_big_cnn_model but split into 3 parts.
    models = _get_split_cnn_model(
        img_dim, n_channels, num_partitions=3, blocks_per_partition=2
    )
    model1, model2, model3 = models
    # Apply gradient checkpointing to the submodels using tf.recompute_grad.
    model1_re = tf.recompute_grad(model1)
    model2_re = tf.recompute_grad(model2)
    model3_re = tf.recompute_grad(model3)
    optimizer = optimizers.SGD()
    tr_vars = (
        model1.trainable_variables
        + model2.trainable_variables
        + model3.trainable_variables
    )
    losses = []
    for _ in range(n_steps):
        with tf.GradientTape() as tape:
            logits1 = model1_re(x)
            logits2 = model2_re(logits1)
            logits3 = model3_re(logits2)
            loss = _compute_loss(logits3, y)
            losses.append(loss)
            grads = tape.gradient(loss, tr_vars)  # tr_vars
            optimizer.apply_gradients(zip(grads, tr_vars))
            del grads
    return losses


@tf_test_utils.with_eager_op_as_function
@tf_test_utils.with_eager_op_as_function
176
gradient_checkpoint_test.py
Python
keras/integration_test/gradient_checkpoint_test.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
2
153,774
31
12
8
71
10
0
31
108
update_df
FEAT-#4412: Add Batch Pipeline API to Modin (#4452) Co-authored-by: Yaroslav Igoshev <Poolliver868@mail.ru> Co-authored-by: Mahesh Vashishtha <mvashishtha@users.noreply.github.com> Signed-off-by: Rehan Durrani <rehan@ponder.io>
https://github.com/modin-project/modin.git
def update_df(self, df):
    if get_current_execution() != "PandasOnRay" or (
        not isinstance(df._query_compiler._modin_frame, PandasOnRayDataframe)
    ):  # pragma: no cover
        ErrorMessage.not_implemented(
            "Batch Pipeline API is only implemented for `PandasOnRay` execution."
        )
    self.df = df
40
pipeline.py
Python
modin/experimental/batch/pipeline.py
3d4404e9d9a9b2a3327f8aee664a8e71ac1f18b8
modin
3
268,865
14
11
6
65
10
0
15
27
find_modules
Add a keras doctest modeled on tensorflow doctest PiperOrigin-RevId: 424672415
https://github.com/keras-team/keras.git
def find_modules():
    tf_modules = []
    for name, module in sys.modules.items():
        if name.startswith(PACKAGE):
            tf_modules.append(module)
    return tf_modules
38
keras_doctest.py
Python
keras/tests/keras_doctest.py
a449efe29b092e658a29cd847e0494979a47d252
keras
3
304,020
12
9
5
45
10
0
12
44
async_all_discovered_devices
Rework bluetooth to support scans from multiple sources (#76900)
https://github.com/home-assistant/core.git
def async_all_discovered_devices(self) -> Iterable[BLEDevice]:
    return itertools.chain.from_iterable(
        scanner.discovered_devices for scanner in self._scanners
    )
28
manager.py
Python
homeassistant/components/bluetooth/manager.py
3bcc274dfa90d7d3c01ace83137c46a0898c107f
core
2
210,799
72
19
44
581
33
0
189
736
__call__
Scale frames before fight action recognition (#6170) * Scale frames before fight action recognition * put short_size = self.cfg["VIDEO_ACTION"]["short_size"] scale = Scale(short_size) out of while * change class name from Scale to ShortSizeScale
https://github.com/PaddlePaddle/PaddleDetection.git
def __call__(self, img):
    result_img = None
    if isinstance(img, np.ndarray):
        h, w, _ = img.shape
    elif isinstance(img, Image.Image):
        w, h = img.size
    else:
        raise NotImplementedError
    if w <= h:
        ow = self.short_size
        if self.fixed_ratio:  # default is True
            oh = int(self.short_size * 4.0 / 3.0)
        elif not self.keep_ratio:  # no
            oh = self.short_size
        else:
            scale_factor = self.short_size / w
            oh = int(h * float(scale_factor) + 0.5) if self.do_round else int(h * self.short_size / w)
            ow = int(w * float(scale_factor) + 0.5) if self.do_round else int(w * self.short_size / h)
    else:
        oh = self.short_size
        if self.fixed_ratio:
            ow = int(self.short_size * 4.0 / 3.0)
        elif not self.keep_ratio:  # no
            ow = self.short_size
        else:
            scale_factor = self.short_size / h
            oh = int(h * float(scale_factor) + 0.5) if self.do_round else int(h * self.short_size / w)
            ow = int(w * float(scale_factor) + 0.5) if self.do_round else int(w * self.short_size / h)
    if type(img) == np.ndarray:
        img = Image.fromarray(img, mode='RGB')
    if self.backend == 'pillow':
        result_img = img.resize((ow, oh), Image.BILINEAR)
    elif self.backend == 'cv2' and (self.keep_ratio is not None):
        result_img = cv2.resize(
            img, (ow, oh), interpolation=cv2.INTER_LINEAR)
    else:
        result_img = Image.fromarray(
            cv2.resize(
                np.asarray(img), (ow, oh), interpolation=cv2.INTER_LINEAR))
    return result_img
385
preprocess.py
Python
deploy/python/preprocess.py
1c4da10b6c836f7f0b74c0847afcbca6d0c3ef30
PaddleDetection
16
281,151
46
16
30
256
28
0
71
342
futures_command
Bot logging fix (#1105) * Write bot logs to stdout instead of a file Heroku's logging uses the stdout and has problems with files * Send "you snooze you lose" only if debug flag is enabled * Replace print statements with logger entries in the economy menu * Add logging to bot menu command calls * Silence bandit warnings about the REPLACE_ME token * Organize imports and update logging in economy menu * Organize imports and update logging in dps menu * Organize imports and update logging in dd menu * Organize imports and update logging in gov menu * Organize imports and update logging in options menu * Organize imports and update logging in screener menu * Organize imports and update logging in ta menu * Revert automatic import sorting * Add logging to the options reaction helper
https://github.com/OpenBB-finance/OpenBBTerminal.git
async def futures_command(ctx):
    try:
        # Retrieve data
        df_data = wsj_model.top_commodities()

        # Debug user output
        if cfg.DEBUG:
            logger.debug(df_data.to_string())

        # Output data
        if df_data.empty:
            df_data_str = "No futures/commodities data available"
        else:
            df_data_str = "```" + df_data.to_string(index=False) + "```"

        embed = discord.Embed(
            title="Economy: [WSJ] Futures/Commodities",
            description=df_data_str,
            colour=cfg.COLOR,
        )
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )

        await ctx.send(embed=embed)

    except Exception as e:
        embed = discord.Embed(
            title="ERROR Economy: [WSJ] Futures/Commodities",
            colour=cfg.COLOR,
            description=e,
        )
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )

        await ctx.send(embed=embed)
153
futures.py
Python
discordbot/economy/futures.py
f40ba0d256a78ab2b8461f0df3a9a52ca7dc5704
OpenBBTerminal
4
42,069
8
8
3
46
8
0
8
17
set_style
Convert docs to pydata-sphinx-theme and add new material (#2842) * Do basic conversion of site to pydata_sphinx_theme * Remove some pae structure customizations we no longer need * Add some custom CSS * Tweak a few more colors * Remove vestigial div closing tag * Reorganize release notes into hierarchical pages * Rebuild full docs and fix some resulting issues * Make release note doc refs absolute * Convert homepage to use sphinx-design instead of hand-crafted html * Remove original custom css * Simplify header and put archive switcher in footer * Streamline API docs for objects * Play around with templates to fix shrinking content (not perfect yet) * Improve use of horizontal space without sidebars * Various tweaks * Convert tutorial homepage source to native sphinx-design directives * Move intro page into tutorial * More tweaks * Tweak theme colors and footer * Remove reference to navbar version * Note that error bar tutorial demonstrates new features as of v0.12 * Update layout customization for new theme features * Various layout and CSS tweaks * Narrow support guidance to StackOverflow * Run all notebooks * Adapt to new dropdown navbar in pydata theme * Separate tutorial source and outputs * Separate dostring source and outputs * Add scale API template * Update API docs * Fix requirements * Add new objects * Point doc requirements at v0.10 RC for theme
https://github.com/mwaskom/seaborn.git
def set_style(style=None, rc=None):
    style_object = axes_style(style, rc)
    mpl.rcParams.update(style_object)
28
rcmod.py
Python
seaborn/rcmod.py
34662f4be5c364e7518f9c1118c9b362038ee5dd
seaborn
1
177,163
22
12
5
80
10
0
27
73
compute_v_structures
[ENH] Find and verify a minimal D-separating set in DAG (#5898) * Ran black * Add unit tests * Rename and fix citation * Black * Fix unite tests * Isort * Add algo description * Update networkx/algorithms/tests/test_d_separation.py * Update networkx/algorithms/traversal/breadth_first_search.py * Address dans comments * Fix unit tests * Update networkx/algorithms/tests/test_d_separation.py Co-authored-by: Dan Schult <dschult@colgate.edu> * Apply suggestions from code review Co-authored-by: Dan Schult <dschult@colgate.edu> * Update networkx/algorithms/dag.py Co-authored-by: Dan Schult <dschult@colgate.edu> * Update networkx/algorithms/dag.py Co-authored-by: Dan Schult <dschult@colgate.edu> * Fix comments * Clean up the docs a bit more * Merge Co-authored-by: Dan Schult <dschult@colgate.edu>
https://github.com/networkx/networkx.git
def compute_v_structures(G):
    for collider, preds in G.pred.items():
        for common_parents in combinations(preds, r=2):
            # ensure that the colliders are the same
            common_parents = sorted(common_parents)
            yield (common_parents[0], collider, common_parents[1])
51
dag.py
Python
networkx/algorithms/dag.py
df9a128f4171d95671e5d9f5460970cc4bf8e3b3
networkx
3
288,780
11
9
3
39
7
0
11
25
assumed_state
Add support for Snooz BLE devices (#78790) Co-authored-by: J. Nick Koston <nick@koston.org>
https://github.com/home-assistant/core.git
def assumed_state(self) -> bool:
    return not self._device.is_connected or self._device.state is UnknownSnoozState
23
fan.py
Python
homeassistant/components/snooz/fan.py
7d097d18b0c6041475080b3c400e37b25185faba
core
2
292,454
6
9
3
36
5
0
6
20
udn
Add dlna_dms integration to support DLNA Digital Media Servers (#66437)
https://github.com/home-assistant/core.git
def udn(self) -> str:
    return self.usn.partition("::")[0]
20
dms.py
Python
homeassistant/components/dlna_dms/dms.py
b19bf9b147f4321e89d1f7f01e68337f2102f460
core
1
190,649
55
15
20
155
18
0
67
221
make_unique_parameterset_ids
Refactor idmaker functions into class IdMaker This commit only refactors, it does not change or add functionality yet. Public API is retained. Reason or refactoring: User provided parameter IDs (e.g. Metafunc.parametrize(ids=...)) had so far only been used to calculate a unique test ID for each test invocation. That test ID was a joined string where each parameter contributed some partial ID. We're soon going to reuse functionality to generate parameter keys for reorder_items and FixtureDef cache. We will be interested in the partial IDs, and only if they originate from explicit user information. Refactoring makes logic and data accessible for reuse, and increases cohesion in general.
https://github.com/pytest-dev/pytest.git
def make_unique_parameterset_ids(self) -> List[str]:
    resolved_ids = list(self._resolve_ids())
    # All IDs must be unique!
    if len(resolved_ids) != len(set(resolved_ids)):
        # Record the number of occurrences of each ID.
        id_counts = Counter(resolved_ids)
        # Map the ID to its next suffix.
        id_suffixes: Dict[str, int] = defaultdict(int)
        # Suffix non-unique IDs to make them unique.
        for index, id in enumerate(resolved_ids):
            if id_counts[id] > 1:
                resolved_ids[index] = f"{id}{id_suffixes[id]}"
                id_suffixes[id] += 1
    return resolved_ids
87
python.py
Python
src/_pytest/python.py
b21b008118fc8cf65b4bcd9b059f1cd704e05c68
pytest
4
127,599
22
12
8
90
9
0
23
95
get_next
[tune] Fix trial cleanup after x seconds, set default to 600 (#28449) This currently does not work in three places: 1) We need to kill the actor as garbage collection will not work with futures in flight, 2) We need to trigger the _stop_actor method after clearing the futures, as it will create a new future, 3) the future was not fetched correctly. We also set the default cleanup time to 10 minutes, which should suffice for most cases and avoids deadlocks in long-running tasks. Signed-off-by: Kai Fricke <kai@anyscale.com>
https://github.com/ray-project/ray.git
def get_next(self):
    if len(self._future_to_insert_time) > 0 and (
        self._future_to_insert_time[0][1] + self._force_cleanup < time.time()
    ):
        future, _time = self._future_to_insert_time.popleft()
        return future
    else:
        return None
55
ray_trial_executor.py
Python
python/ray/tune/execution/ray_trial_executor.py
2d8ce2516fe13f47838358b336f3732ff2cd4fe7
ray
3
313,498
7
7
3
42
6
1
7
12
device_traits
Update more nest tests to use common fixtures (#73303) Update nest tests to use fixtures
https://github.com/home-assistant/core.git
def device_traits() -> list[str]:
    return ["sdm.devices.traits.DoorbellChime"]


@pytest.fixture(autouse=True)
@pytest.fixture(autouse=True)
14
test_events.py
Python
tests/components/nest/test_events.py
7a5fa8eb58f49282e73f454826472ba54cd37a30
core
1
156,654
73
13
23
258
31
0
107
269
_split_partition
``shuffle_group()``: avoid converting to arrays (#9157)
https://github.com/dask/dask.git
def _split_partition(df, on, nsplits):
    if isinstance(on, bytes):
        on = pickle.loads(on)

    if isinstance(on, str) or pd.api.types.is_list_like(on):
        # If `on` is a column name or list of column names, we
        # can hash/split by those columns.
        on = [on] if isinstance(on, str) else list(on)
        nset = set(on)
        if nset.intersection(set(df.columns)) == nset:
            ind = hash_object_dispatch(df[on], index=False)
            ind = ind % nsplits
            return group_split_dispatch(df, ind, nsplits, ignore_index=False)

    # We are not joining (purely) on columns. Need to
    # add a "_partitions" column to perform the split.
    if not isinstance(on, _Frame):
        on = _select_columns_or_index(df, on)
    partitions = partitioning_index(on, nsplits)
    df2 = df.assign(_partitions=partitions)
    return shuffle_group(
        df2,
        ["_partitions"],
        0,
        nsplits,
        nsplits,
        False,
        nsplits,
    )
169
multi.py
Python
dask/dataframe/multi.py
0ce193b782a4e8d0cc6f62099c2fe6b94bd10e16
dask
7
176,290
82
17
43
506
24
0
218
754
shortest_path_length
DOC: Update documentation to include callables for weight argument (#5307) Update docs to include functions as valid input for weight argument.
https://github.com/networkx/networkx.git
def shortest_path_length(G, source=None, target=None, weight=None, method="dijkstra"):
    if method not in ("dijkstra", "bellman-ford"):
        # so we don't need to check in each branch later
        raise ValueError(f"method not supported: {method}")
    method = "unweighted" if weight is None else method
    if source is None:
        if target is None:
            # Find paths between all pairs.
            if method == "unweighted":
                paths = nx.all_pairs_shortest_path_length(G)
            elif method == "dijkstra":
                paths = nx.all_pairs_dijkstra_path_length(G, weight=weight)
            else:  # method == 'bellman-ford':
                paths = nx.all_pairs_bellman_ford_path_length(G, weight=weight)
        else:
            # Find paths from all nodes co-accessible to the target.
            if G.is_directed():
                G = G.reverse(copy=False)
            if method == "unweighted":
                path_length = nx.single_source_shortest_path_length
                paths = path_length(G, target)
            elif method == "dijkstra":
                path_length = nx.single_source_dijkstra_path_length
                paths = path_length(G, target, weight=weight)
            else:  # method == 'bellman-ford':
                path_length = nx.single_source_bellman_ford_path_length
                paths = path_length(G, target, weight=weight)
    else:
        if target is None:
            # Find paths to all nodes accessible from the source.
            if method == "unweighted":
                paths = nx.single_source_shortest_path_length(G, source)
            elif method == "dijkstra":
                path_length = nx.single_source_dijkstra_path_length
                paths = path_length(G, source, weight=weight)
            else:  # method == 'bellman-ford':
                path_length = nx.single_source_bellman_ford_path_length
                paths = path_length(G, source, weight=weight)
        else:
            # Find shortest source-target path.
            if method == "unweighted":
                p = nx.bidirectional_shortest_path(G, source, target)
                paths = len(p) - 1
            elif method == "dijkstra":
                paths = nx.dijkstra_path_length(G, source, target, weight)
            else:  # method == 'bellman-ford':
                paths = nx.bellman_ford_path_length(G, source, target, weight)
    return paths
306
generic.py
Python
networkx/algorithms/shortest_paths/generic.py
b5d41847b8db0c82372faf69cd3a339d11da7ef0
networkx
15
22,218
40
15
19
238
25
0
56
225
get_abstract_dependencies
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
https://github.com/pypa/pipenv.git
def get_abstract_dependencies(reqs, sources=None, parent=None):
    deps = []
    from .requirements import Requirement

    for req in reqs:
        if isinstance(req, shims.InstallRequirement):
            requirement = Requirement.from_line("{0}{1}".format(req.name, req.specifier))
            if req.link:
                requirement.req.link = req.link
                requirement.markers = req.markers
                requirement.req.markers = req.markers
                requirement.extras = req.extras
                requirement.req.extras = req.extras
        elif isinstance(req, Requirement):
            requirement = copy.deepcopy(req)
        else:
            requirement = Requirement.from_line(req)
        dep = AbstractDependency.from_requirement(requirement, parent=parent)
        deps.append(dep)
    return deps
149
dependencies.py
Python
pipenv/vendor/requirementslib/models/dependencies.py
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
pipenv
5
91,938
8
9
3
47
9
0
8
29
get_sendable_user_objects
fix(notifications): fix bug where we considered users with only Slack enabled (#35901) fix bug where we considered users with only Slack enabled
https://github.com/getsentry/sentry.git
def get_sendable_user_objects(project):
    recipients_by_provider = NotificationSetting.objects.get_notification_recipients(project)
    return recipients_by_provider.get(ExternalProviders.EMAIL, [])
28
adapter.py
Python
src/sentry/mail/adapter.py
fa384296a65e6c7271fbac1bd425cf1393ef6636
sentry
1
156,025
12
8
3
58
8
0
14
35
argtopk
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
https://github.com/dask/dask.git
def argtopk(self, k, axis=-1, split_every=None):
    from dask.array.reductions import argtopk

    return argtopk(self, k, axis=axis, split_every=split_every)
40
core.py
Python
dask/array/core.py
cccb9d8d8e33a891396b1275c2448c352ef40c27
dask
1
156,014
11
8
3
52
8
0
14
35
to_tiledb
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
https://github.com/dask/dask.git
def to_tiledb(self, uri, *args, **kwargs):
    from dask.array.tiledb_io import to_tiledb

    return to_tiledb(self, uri, *args, **kwargs)
35
core.py
Python
dask/array/core.py
cccb9d8d8e33a891396b1275c2448c352ef40c27
dask
1
126,131
2
6
59
13
2
0
2
5
test_cluster_crash_before_checkpoint
[workflow] http_event_provider and accompanied listener (#26010) ### Why are these changes needed? This PR enhances workflow functionality to receive external events from a Serve based HTTP endpoint. A workflow can then consume events asynchronously as they arrive. ### Design Logic A `workflow.wait_for_event` node subscribes to the endpoint instantiated by a Ray Serve deployment of class `http_event_provider.HTTPEventProvider`. The subscription is made through a helper class `http_event_provider.HTTPListener`. `HTTPListener` implements the methods of `EventListener` to poll from and confirm event checkpointing to `HTTPEventProvider`, before `HTTPEventProvider`acknowledges success or error to the event submitter. ### Architecture Improvement The logic of this enhancement conforms with existing workflow runtime design.
https://github.com/ray-project/ray.git
def test_cluster_crash_before_checkpoint(workflow_start_regular_shared_serve):
314
test_event_resume_after_crash.py
Python
python/ray/workflow/tests/test_event_resume_after_crash.py
659d25a3a9c4794db9dbe8f428ec587470b261b0
ray
11
36,160
11
11
5
49
5
0
12
35
require_scipy
Visual Attention Network (VAN) (#16027) * encoder works * addded files * norm in stage * convertion script * tests * fix copies * make fix-copies * fixed __init__ * make fix-copies * fix * shapiro test needed * make fix-copie * minor changes * make style + quality * minor refactor conversion script * rebase + tests * removed unused variables * updated doc * toctree * CI * doc * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * resolved conversations * make fixup * config passed to modules * config passed to modules * Apply suggestions from code review Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * conversations * conversations * copyrights * normal test * tests Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>
https://github.com/huggingface/transformers.git
def require_scipy(test_case):
    if not is_scipy_available():
        return unittest.skip("test requires Scipy")(test_case)
    else:
        return test_case
26
testing_utils.py
Python
src/transformers/testing_utils.py
0a057201a96565df29984d716f660fd8d634329a
transformers
2
31,146
12
15
2
54
4
0
20
26
bloom_gelu_forward
BLOOM (#17474) * adding template * update model * model update * update conf for debug model * update conversion * update conversion script * update conversion script * fix missing keys check * add tests to test the tokenizer in the local machine * Change variable name * add tests on xnli dataset * add more description * add descriptions + clearer code * clearer code * adding new tests + skipping few tests because of env problems * change comment * add dtype on the configuration * add test embeddings * add hardcoded test * fix dtype issue * adding torch.float16 to config * adding more metrics (min, max, mean) * add sum * now the test passes with almost equal * add files for conversion - test passes on cpu gpu * add final changes * cleaning code * add new args in the docstring * fix one liner function * remove macros * remove forward attention * clean up init funtion * add comments on the issue * rm scale mask softmax * do make style * fix dtype in init * fixing for loop on att probs * fix style with black * fix style + doc error * fix and debug CI errors (docs + style) * some updates - change new operations - finally add scaled softmax - added new args in the config * make use cache working * add changes - save sharded models - final changes on the modeling script * add changes - comment on alibi - add TODO on seq length * test commit - added a text to test the commit Co-authored-by: thomasw21 <24695242+thomasw21@users.noreply.github.com> * final changes - attention mask change - generation works on BS176b Co-authored-by: thomasw21 <24695242+thomasw21@users.noreply.github.com> * changes - model + conversion * move to correct dir * put , * fex fixes * fix tokenizer autodoc * fix minor CI issues * fix minor CI issues * fix minor CI issues * fix style issue * fix minor import issues * fix few issues * remove def main on the test * add require torch * replace decorator with 'with' * fix style * change to bloom * add quick fix tokenizer * fix tokenizer file * fix tokenizer - merge tests - small fixes * fix import issue * add bloom to readme * fix consistency * Update docs/source/en/model_doc/bloom.mdx Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Apply suggestions from code review fix comment issues on file headers Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * fix doc issue * small fix - modeling test * some changes - refactor some code - taking into account reviews - more tests should pass - removed pruning tests * remove useless division * more tests should pass * more tests should pass * more tests should pass * let's try this one -add alibi offset - remove all permutes to make the grad operations work - finger crossed * refactor - refactor code - style changes - add new threshold for test * major changes - change BLOOM to Bloom - add quick doc on bloom.mdx - move embeddings test on modeling test * modify readme * small fixes * small fix - better threshold for a test * remove old test file from fetcher * fix small typo * major change - change BloomLMHead to BloomForCausalLM * remove onnx config * major changes - refactor the code - remove asserts - change tol for test * make style * small change * adding a slow test + commenting old ones for now * make style * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * make style * fix duplicates * cleaning comments on config * clean a bit conversion file * refacor a bit modeling file * refactor tokenizer file * fix tokenization 
test issue * fix tokenization issue #2 * fix tokenization issue second try * fix test issue * make style + add suggestions * change test fetcher * try this one - slow tests should pass - finger crossed * possible final changes * make style * try fix padding side issue * fix side * fix padding issue * fix ko-readme * fix config auto * cleaning modeling file * keep bloom in caps in ko * update config docs * remove pretraining_pp * remove model parallel * update config - add correct config files * fix duplicates * fix fetcher * fix refactor issue - remove divide function * try to remove alibi * small fixes - fix alibi - remove seq length - refactor a bit the code * put correct values - fix bos and eos token ids * fix attention mask loop Co-authored-by: thomasw21 <24695242+thomasw21@users.noreply.github.com> * small fixes: - remove skip bias add * small fixes - fix typo in readme - fix typos in config * small changes - remove a test - add reconstruction test - change config * small changes - change Scaled Softmax to BloomScaledSoftmax * small fixes - fix alibi dtype * major changes - removing explicit dtype when loading modules - fixing test args (torch_dtype=auto) - add dosctring * fix readmes * major changes - now bloom supports alibi shifting - refactor a bit the code - better test tolerance now * refactor a bit * refactor a bit * put correct name on test * change docstring * small changes - fix docstring modeling - fix test tolerance * fix small nit - take dtype from tensors in the conversion script * minor fix - fix mdx issue * minor fix - change config docstring * forward contrib credits from PR14084 * Apply suggestions from code review Co-authored-by: Stas Bekman <stas00@users.noreply.github.com> * apply modifications Co-authored-by: Stas Bekman <stas00@users.noreply.github.com> * resolve softmax upcast * Apply suggestions from code review Co-authored-by: Stas Bekman <stas00@users.noreply.github.com> * Update src/transformers/models/bloom/modeling_bloom.py Co-authored-by: Niklas Muennighoff <n.muennighoff@gmail.com> * final changes modeling Co-authored-by: Stas Bekman <stas00@users.noreply.github.com> * Merge commit 'd156898f3b9b2c990e5963f5030a7143d57921a2' * merge commit * Apply suggestions from code review Co-authored-by: Stas Bekman <stas00@users.noreply.github.com> * apply suggestions Apply suggestions from Stas comments Co-authored-by: Stas Bekman <stas00@users.noreply.github.com> * Fix gradient checkpointing Co-authored-by: Stas Bekman <stas00@users.noreply.github.com> * add slow but exact * add accelerate compatibility Co-authored-by: Nicolas Patry <Narsil@users.noreply.github.com> * forward contrib credits Co-authored-by: thomasw21 <thomasw21@users.noreply.github.com> Co-authored-by: sgugger <sgugger@users.noreply.github.com> Co-authored-by: patrickvonplaten <patrickvonplaten@users.noreply.github.com> Co-authored-by: Niklas Muennighoff <n.muennighoff@gmail.com> Co-authored-by: LysandreJik <LysandreJik@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * fix torch device on tests * make style * Apply suggestions from code review Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * fix nits Co-authored-by: patrickvonplaten<patrickvonplaten@users.noreply.github.com> * remove final nits * fix doc - add more details on the doc - add links to checkpoints * Update src/transformers/__init__.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update 
src/transformers/models/bloom/modeling_bloom.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * apply suggestions Co-authored-by: sgugger <sgugger@users.noreply.github.com> * put test torchscript to false * Update src/transformers/models/bloom/modeling_bloom.py Co-authored-by: justheuristic <justheuristic@gmail.com> * fix alibi - create alibi only once * add small doc * make quality * replace torch.nn * remove token type emb * fix fused op + output bias * add fused op - now can control fused operation from config * remove fused op * make quality * small changes - remove unsed args on config - removed bias gelu file - make the model torchscriptable - add torchscript slow tests * Update src/transformers/models/bloom/modeling_bloom.py * fix slow * make style * add accelerate support * add bloom to deepspeed tests * minor changes * Apply suggestions from code review Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * minor change * slow tests pass * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update docs/source/en/model_doc/bloom.mdx Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * minor changes: - change docstring - add link to paper Co-authored-by: Thomwolf <thomwolf@gmail.com> Co-authored-by: Thomas Wolf <thomas@huggingface.co> Co-authored-by: thomasw21 <24695242+thomasw21@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: sIncerass <sheng.s@berkeley.edu> Co-authored-by: Stas Bekman <stas00@users.noreply.github.com> Co-authored-by: Niklas Muennighoff <n.muennighoff@gmail.com> Co-authored-by: Nicolas Patry <Narsil@users.noreply.github.com> Co-authored-by: thomasw21 <thomasw21@users.noreply.github.com> Co-authored-by: sgugger <sgugger@users.noreply.github.com> Co-authored-by: patrickvonplaten <patrickvonplaten@users.noreply.github.com> Co-authored-by: LysandreJik <LysandreJik@users.noreply.github.com> Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> Co-authored-by: justheuristic <justheuristic@gmail.com> Co-authored-by: Stas Bekman <stas@stason.org>
https://github.com/huggingface/transformers.git
def bloom_gelu_forward(x):
    return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
41
modeling_bloom.py
Python
src/transformers/models/bloom/modeling_bloom.py
ca2a55e9dfb245527b5e1c954fec6ffbb7aef07b
transformers
1
211,431
29
14
7
158
16
1
45
68
weighted_mpjpe
pose3d metro modeling (#6612) * pose3d metro modeling * delete extra comments
https://github.com/PaddlePaddle/PaddleDetection.git
def weighted_mpjpe(pred, gt, has_3d_joints):
    pred, gt = filter_3d_joints(pred, gt, has_3d_joints)
    weight = paddle.linalg.norm(pred, p=2, axis=-1)
    weight = paddle.to_tensor(
        [1.5, 1.3, 1.2, 1.2, 1.3, 1.5, 1.5, 1.3, 1.2, 1.2, 1.3, 1.5, 1., 1.])
    error = (weight * paddle.linalg.norm(pred - gt, p=2, axis=-1)).mean()
    return error


@register
@serializable
@register @serializable
134
pose3d_loss.py
Python
ppdet/modeling/losses/pose3d_loss.py
d4e34fe165c09db65fd00113708be1b711ac957c
PaddleDetection
1
50,990
9
8
2
41
5
0
9
26
__call__
update hand_pose_localization (#1967) * update hand_pose_localization * add clean func
https://github.com/PaddlePaddle/PaddleHub.git
def __call__(self, *input_datas, batch_size=1):
    return self.forward(*input_datas, batch_size=batch_size)

# Function for loading model parameters
25
model.py
Python
modules/image/keypoint_detection/hand_pose_localization/model.py
6b42963d62833925ffed1cdb73400e7d528a5353
PaddleHub
1
280,288
13
8
8
59
8
0
13
89
save
Prepare public API surface for v3 saving. PiperOrigin-RevId: 484397600
https://github.com/keras-team/keras.git
def save(self, filepath, overwrite=True, save_format=None, **kwargs):
    saving_api.save_model(
        self,
        filepath=filepath,
        overwrite=overwrite,
        save_format=save_format,
        **kwargs,
    )
41
training.py
Python
keras/engine/training.py
c9068087d9142bab573e0c300bf9874a957accff
keras
1
250,019
12
12
7
65
7
0
12
33
test_prefilled_cache
Add missing types to tests.util. (#14597) Removes files under tests.util from the ignored by list, then fully types all tests/util/*.py files.
https://github.com/matrix-org/synapse.git
def test_prefilled_cache(self) -> None:
    cache = StreamChangeCache("#test", 1, prefilled_cache={"user@foo.com": 2})
    self.assertTrue(cache.has_entity_changed("user@foo.com", 1))
37
test_stream_change_cache.py
Python
tests/util/test_stream_change_cache.py
acea4d7a2ff61b5beda420b54a8451088060a8cd
synapse
1
273,008
44
16
29
332
21
1
84
410
get_rotation_matrix
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def get_rotation_matrix(angles, image_height, image_width, name=None):
    with backend.name_scope(name or "rotation_matrix"):
        x_offset = (
            (image_width - 1)
            - (
                tf.cos(angles) * (image_width - 1)
                - tf.sin(angles) * (image_height - 1)
            )
        ) / 2.0
        y_offset = (
            (image_height - 1)
            - (
                tf.sin(angles) * (image_width - 1)
                + tf.cos(angles) * (image_height - 1)
            )
        ) / 2.0
        num_angles = tf.shape(angles)[0]
        return tf.concat(
            values=[
                tf.cos(angles)[:, None],
                -tf.sin(angles)[:, None],
                x_offset[:, None],
                tf.sin(angles)[:, None],
                tf.cos(angles)[:, None],
                y_offset[:, None],
                tf.zeros((num_angles, 2), tf.float32),
            ],
            axis=1,
        )


@keras_export(
    "keras.layers.RandomRotation",
    "keras.layers.experimental.preprocessing.RandomRotation",
    v1=[],
)
@keras_export( "keras.layers.RandomRotation", "keras.layers.experimental.preprocessing.RandomRotation", v1=[], )
210
image_preprocessing.py
Python
keras/layers/preprocessing/image_preprocessing.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
2
82,400
64
14
29
534
47
0
84
400
test_copy_langs_no_content
ci: Added codespell (#7355) Co-authored-by: Christian Clauss <cclauss@me.com> * ci: codespell config taken from #7292
https://github.com/django-cms/django-cms.git
def test_copy_langs_no_content(self):
    site = 1
    number_start_plugins = CMSPlugin.objects.all().count()

    out = io.StringIO()
    management.call_command(
        'cms', 'copy', 'lang', '--from-lang=en', '--to-lang=de', '--skip-content',
        interactive=False, stdout=out
    )
    pages = Page.objects.on_site(site).drafts()
    for page in pages:
        self.assertEqual({'en', 'de'}, set(page.get_languages()))

    # These asserts that no orphaned plugin exists
    self.assertEqual(CMSPlugin.objects.all().count(), number_start_plugins)
    self.assertEqual(CMSPlugin.objects.filter(language='en').count(), number_start_plugins)
    self.assertEqual(CMSPlugin.objects.filter(language='de').count(), 0)

    root_page = Page.objects.get_home(site)
    root_plugins = CMSPlugin.objects.filter(
        placeholder=root_page.placeholders.get(slot="body"))

    first_plugin_en, _ = root_plugins.get(language='en', parent=None).get_plugin_instance()
    first_plugin_de = None
    with self.assertRaises(CMSPlugin.DoesNotExist):
        first_plugin_de, _ = root_plugins.get(language='de', parent=None).get_plugin_instance()

    self.assertIsNone(first_plugin_de)

    stack_plugins = CMSPlugin.objects.filter(
        placeholder=StaticPlaceholder.objects.order_by('?')[0].draft)

    stack_text_en, _ = stack_plugins.get(language='en', plugin_type='TextPlugin').get_plugin_instance()
    with self.assertRaises(CMSPlugin.DoesNotExist):
        stack_text_de, _ = stack_plugins.get(language='de', plugin_type='TextPlugin').get_plugin_instance()
315
test_management.py
Python
cms/tests/test_management.py
c1290c9ff89cb00caa5469129fd527e9d82cd820
django-cms
2
336,642
52
16
15
338
22
0
98
227
set_sigmas
[docs sprint] schedulers docs, will update (#376) * init schedulers docs * add some docstrings, fix sidebar formatting * add docstrings * [Type hint] PNDM schedulers (#335) * [Type hint] PNDM Schedulers * ran make style * updated timesteps type hint * apply suggestions from code review * ran make style * removed unused import * [Type hint] scheduling ddim (#343) * [Type hint] scheduling ddim * apply suggestions from code review apply suggestions to also return the return type Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> * make style * update class docstrings * add docstrings * missed merge edit * add general docs page * modify headings for right sidebar Co-authored-by: Partho <parthodas6176@gmail.com> Co-authored-by: Santiago Víquez <santi.viquez@gmail.com> Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
https://github.com/huggingface/diffusers.git
def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
    sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
    sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
    sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
    if self.timesteps is None:
        self.set_timesteps(num_inference_steps, sampling_eps)

    tensor_format = getattr(self, "tensor_format", "pt")
    if tensor_format == "np":
        self.discrete_sigmas = np.exp(np.linspace(np.log(sigma_min), np.log(sigma_max), num_inference_steps))
        self.sigmas = np.array([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    elif tensor_format == "pt":
        self.discrete_sigmas = torch.exp(torch.linspace(np.log(sigma_min), np.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    else:
        raise ValueError(f"`self.tensor_format`: {self.tensor_format} is not valid.")
215
scheduling_sde_ve.py
Python
src/diffusers/schedulers/scheduling_sde_ve.py
e6110f68569c7b620306e678c3a3d9eee1a293e2
diffusers
9
157,027
178
17
75
928
36
0
416
1,062
normalize_chunks
Remove factorization logic from array auto chunking (#9507) * Remove factorization logic from array auto chunking * Temporarily point to distributed PR * Tests * Typo * Fix doctest * Remove temporary changes
https://github.com/dask/dask.git
def normalize_chunks(chunks, shape=None, limit=None, dtype=None, previous_chunks=None):
    if dtype and not isinstance(dtype, np.dtype):
        dtype = np.dtype(dtype)
    if chunks is None:
        raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)
    if isinstance(chunks, list):
        chunks = tuple(chunks)
    if isinstance(chunks, (Number, str)):
        chunks = (chunks,) * len(shape)
    if isinstance(chunks, dict):
        chunks = tuple(chunks.get(i, None) for i in range(len(shape)))
    if isinstance(chunks, np.ndarray):
        chunks = chunks.tolist()
    if not chunks and shape and all(s == 0 for s in shape):
        chunks = ((0,),) * len(shape)

    if (
        shape
        and len(shape) == 1
        and len(chunks) > 1
        and all(isinstance(c, (Number, str)) for c in chunks)
    ):
        chunks = (chunks,)

    if shape and len(chunks) != len(shape):
        raise ValueError(
            "Chunks and shape must be of the same length/dimension. "
            "Got chunks=%s, shape=%s" % (chunks, shape)
        )
    if -1 in chunks or None in chunks:
        chunks = tuple(s if c == -1 or c is None else c for c, s in zip(chunks, shape))

    # If specifying chunk size in bytes, use that value to set the limit.
    # Verify there is only one consistent value of limit or chunk-bytes used.
    for c in chunks:
        if isinstance(c, str) and c != "auto":
            parsed = parse_bytes(c)
            if limit is None:
                limit = parsed
            elif parsed != limit:
                raise ValueError(
                    "Only one consistent value of limit or chunk is allowed."
                    "Used %s != %s" % (parsed, limit)
                )
    # Substitute byte limits with 'auto' now that limit is set.
    chunks = tuple("auto" if isinstance(c, str) and c != "auto" else c for c in chunks)

    if any(c == "auto" for c in chunks):
        chunks = auto_chunks(chunks, shape, limit, dtype, previous_chunks)

    if shape is not None:
        chunks = tuple(c if c not in {None, -1} else s for c, s in zip(chunks, shape))

    if chunks and shape is not None:
        chunks = sum(
            (
                blockdims_from_blockshape((s,), (c,))
                if not isinstance(c, (tuple, list))
                else (c,)
                for s, c in zip(shape, chunks)
            ),
            (),
        )
    for c in chunks:
        if not c:
            raise ValueError(
                "Empty tuples are not allowed in chunks. Express "
                "zero length dimensions with 0(s) in chunks"
            )

    if shape is not None:
        if len(chunks) != len(shape):
            raise ValueError(
                "Input array has %d dimensions but the supplied "
                "chunks has only %d dimensions" % (len(shape), len(chunks))
            )
        if not all(
            c == s or (math.isnan(c) or math.isnan(s))
            for c, s in zip(map(sum, chunks), shape)
        ):
            raise ValueError(
                "Chunks do not add up to shape. "
                "Got chunks=%s, shape=%s" % (chunks, shape)
            )

    return tuple(tuple(int(x) if not math.isnan(x) else x for x in c) for c in chunks)
598
core.py
Python
dask/array/core.py
f382d9e8439f5ed200da6ce66df7b2b33a0fc500
dask
53
80,111
38
18
18
154
17
0
47
229
map_struct_block_value
Add StreamField migration helpers from https://github.com/sandilsranasinghe/wagtail-streamfield-migration-toolkit/
https://github.com/wagtail/wagtail.git
def map_struct_block_value(struct_block_value, block_def, block_path, **kwargs):
    mapped_value = {}
    for key, child_value in struct_block_value.items():

        if not should_alter_block(key, block_path):
            mapped_value[key] = child_value

        else:
            try:
                child_block_def = block_def.child_blocks[key]
            except KeyError:
                raise InvalidBlockDefError("No current block def named {}".format(key))

            altered_child_value = map_block_value(
                child_value,
                block_def=child_block_def,
                block_path=block_path[1:],
                **kwargs,
            )
            mapped_value[key] = altered_child_value

    return mapped_value
98
utils.py
Python
wagtail/blocks/migrations/utils.py
ec6229c23600ebae8ec0d5db6846b095a9468151
wagtail
4
268,291
112
17
37
427
51
0
153
621
_load_included_file
Prevent double failing hosts for includes in loops (#76928) Fixes #23161
https://github.com/ansible/ansible.git
def _load_included_file(self, included_file, iterator, is_handler=False):
    display.debug("loading included file: %s" % included_file._filename)

    try:
        data = self._loader.load_from_file(included_file._filename)
        if data is None:
            return []
        elif not isinstance(data, list):
            raise AnsibleError("included task files must contain a list of tasks")

        ti_copy = self._copy_included_file(included_file)

        block_list = load_list_of_blocks(
            data,
            play=iterator._play,
            parent_block=ti_copy.build_parent_block(),
            role=included_file._task._role,
            use_handlers=is_handler,
            loader=self._loader,
            variable_manager=self._variable_manager,
        )

        # since we skip incrementing the stats when the task result is
        # first processed, we do so now for each host in the list
        for host in included_file._hosts:
            self._tqm._stats.increment('ok', host.name)
    except AnsibleParserError:
        raise
    except AnsibleError as e:
        if isinstance(e, AnsibleFileNotFound):
            reason = "Could not find or access '%s' on the Ansible Controller." % to_text(e.file_name)
        else:
            reason = to_text(e)

        for r in included_file._results:
            r._result['failed'] = True

        for host in included_file._hosts:
            tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=reason))
            self._tqm._stats.increment('failures', host.name)
            self._tqm.send_callback('v2_runner_on_failed', tr)
        raise AnsibleError(reason) from e

    # finally, send the callback and return the list of blocks loaded
    self._tqm.send_callback('v2_playbook_on_include', included_file)
    display.debug("done processing included file")
    return block_list
267
__init__.py
Python
lib/ansible/plugins/strategy/__init__.py
42d8a9daa89907545ebd208f4fd0a9192738c6a6
ansible
9
43,771
83
18
34
336
28
0
115
464
import_local_settings
Speed up creation of DagRun for large DAGs (5k+ tasks) by 25-130% (#20722) * Speed up creation of DagRun for large DAGs (5k+ tasks) by 15-40% This uses the "bulk" operation API of SQLAlchemy to get a big speed up. Due to the `task_instance_mutation_hook` we still need to keep actual TaskInstance objects around. For postgresql we have enabled to "batch operation helpers"[1] which makes it even faster. The default page sizes are chosen somewhat randomly based on the SQLA docs. To make these options configurable I have added (and used here and in KubeConfig) a new `getjson` option to AirflowConfigParser class. Postgresql is over 77% faster with bulk_save_objects: Before: ``` number_of_tis=1 mean=0.004397215199423954 per=0.004397215199423954 times=[0.009390181003254838, 0.002814065999700688, 0.00284132499655243, 0.0036120269942330196, 0.0033284770033787936] number_of_tis=10 mean=0.008078816600027494 per=0.0008078816600027494 times=[0.011014281000825576, 0.008476420000079088, 0.00741832799394615, 0.006857775995740667, 0.006627278009545989] number_of_tis=50 mean=0.01927847799670417 per=0.00038556955993408336 times=[0.02556803499464877, 0.01935569499619305, 0.01662322599440813, 0.01840184700267855, 0.01644358699559234] number_of_tis=100 mean=0.03301511880126782 per=0.00033015118801267817 times=[0.04117956099798903, 0.030890661000739783, 0.03007458901265636, 0.03125198099587578, 0.03167880199907813] number_of_tis=500 mean=0.15320950179593637 per=0.0003064190035918727 times=[0.20054609200451523, 0.14052859699586406, 0.14509809199080337, 0.1365471329918364, 0.1433275949966628] number_of_tis=1000 mean=0.2929377429973101 per=0.0002929377429973101 times=[0.3517978919990128, 0.2807794280088274, 0.2806490379880415, 0.27710555399244186, 0.27435680299822707] number_of_tis=3000 mean=0.9935687056015012 per=0.00033118956853383374 times=[1.2047388390055858, 0.8248025969951414, 0.8685875020019012, 0.9017027500085533, 1.1680118399963249] number_of_tis=5000 mean=1.5349355740036117 per=0.00030698711480072236 times=[1.8663743910001358, 1.5182018500054255, 1.5446484510030132, 1.3932801040064078, 1.3521730740030762] number_of_tis=10000 mean=3.7448632712010292 per=0.0003744863271201029 times=[4.135914924001554, 3.4411147559876554, 3.526543836007477, 3.7195197630062466, 3.9012230770022143] number_of_tis=15000 mean=6.3099766838044165 per=0.00042066511225362775 times=[6.552250057997298, 6.1369703890086384, 6.8749958210100885, 6.067943914007628, 5.917723236998427] number_of_tis=20000 mean=8.317583500797628 per=0.00041587917503988143 times=[8.720249108009739, 8.0188543760014, 8.328030352990027, 8.398350054994808, 8.122433611992165] ``` When using bulk_save_objects: ``` number_of_tis=20000 mean=4.678154367001843 per=0.00023390771835009216 times=[4.465847548010061, 4.571855771995615, 4.749505186002352, 4.724330568002188, 4.8792327609990025] ``` MySQL is only 10-15% faster (and a lot noisier) Before: ``` number_of_tis=1 mean=0.006164804595755413 per=0.006164804595755413 times=[0.013516580002033152, 0.00427598599344492, 0.004508020996581763, 0.004067091998877004, 0.004456343987840228] number_of_tis=10 mean=0.007822793803643435 per=0.0007822793803643434 times=[0.0081135170039488, 0.00719467100861948, 0.009007985994685441, 0.00758794900320936, 0.007209846007754095] number_of_tis=50 mean=0.020377356800599954 per=0.00040754713601199905 times=[0.02612382399092894, 0.018950315003166907, 0.019109474000288174, 0.018008680999628268, 0.019694490008987486] number_of_tis=100 mean=0.040682651600218375 per=0.00040682651600218374 
times=[0.05449078499805182, 0.037430580996442586, 0.039291110006161034, 0.03625023599306587, 0.035950546007370576] number_of_tis=500 mean=0.18646696420037187 per=0.00037293392840074375 times=[0.24278165798750706, 0.17090376401029062, 0.1837275660072919, 0.16893767600413412, 0.1659841569926357] number_of_tis=1000 mean=0.5903461098030676 per=0.0005903461098030675 times=[0.6001852740009781, 0.5642872750031529, 0.686630773008801, 0.5578094649972627, 0.5428177620051429] number_of_tis=3000 mean=1.9076304554007948 per=0.0006358768184669316 times=[2.042052763994434, 2.1137778090051142, 1.7461599689995637, 1.7260139089921722, 1.9101478260126896] number_of_tis=5000 mean=2.9185905692051164 per=0.0005837181138410233 times=[2.9221124830073677, 3.2889883980096783, 2.7569778940087417, 2.973596281008213, 2.651277789991582] number_of_tis=10000 mean=8.880191986600403 per=0.0008880191986600403 times=[7.3548113360011484, 9.13715232499817, 9.568511486999341, 8.80206210000324, 9.538422685000114] number_of_tis=15000 mean=15.426499317999696 per=0.0010284332878666464 times=[14.944712879005237, 15.38737604500784, 15.409629273999599, 15.852925243991194, 15.53785314799461] number_of_tis=20000 mean=20.579332908798825 per=0.0010289666454399414 times=[20.362008597003296, 19.878823954990366, 20.73281196100288, 20.837948996995692, 21.085071034001885] ``` After: ``` number_of_tis=20000 mean=18.36637533060275 per=0.0009183187665301375 times=[17.728908119010157, 18.62269214099797, 18.936747477011522, 17.74613195299753, 18.797396962996572] ``` [1]: https://docs.sqlalchemy.org/en/13/dialects/postgresql.html#psycopg2-batch-mode * Use bulk_insert_mappings for even more speed where possible. It gives us an extra speed up over bulk_save_objects, but we can't use it when the task_instance_mutation_hook does anything, as that hook needs an actual object. So _when_ we know that hook won't do anything we switch in to insert_mappings mode. 
New speeds (vs baseline, not vs bulk_save_objects) when using bulk_insert_mappings PostgreSQL now 130% faster: ``` number_of_tis=1 mean=0.028053103599813767 per=0.028053103599813767 times=[0.03762496300623752, 0.02637488600157667, 0.025065611000172794, 0.024561002996051684, 0.026639054995030165] number_of_tis=10 mean=0.02647183560184203 per=0.002647183560184203 times=[0.02698062499985099, 0.026417658998980187, 0.027347976007149555, 0.025797458001761697, 0.025815460001467727] number_of_tis=50 mean=0.03149963079486042 per=0.0006299926158972085 times=[0.03810671299288515, 0.03055680700344965, 0.029733988994848914, 0.03016914198815357, 0.02893150299496483] number_of_tis=100 mean=0.033998635396710594 per=0.0003399863539671059 times=[0.0351028829900315, 0.03299884400621522, 0.03358584298985079, 0.03295094799250364, 0.03535465900495183] number_of_tis=500 mean=0.07903424859978259 per=0.00015806849719956516 times=[0.08279920800123364, 0.08588568199775182, 0.07312070899934042, 0.07360191999759991, 0.07976372400298715] number_of_tis=1000 mean=0.12571056479937398 per=0.00012571056479937398 times=[0.12573593499837443, 0.12141938100103289, 0.12616568499652203, 0.12907471299695317, 0.12615711000398733] number_of_tis=3000 mean=0.36025245799683037 per=0.00012008415266561012 times=[0.36071603700111154, 0.3470657339930767, 0.3373015969991684, 0.3337128989951452, 0.42246602299564984] number_of_tis=5000 mean=0.6916533229988999 per=0.00013833066459977998 times=[0.9647149289958179, 0.6451378140045563, 0.5970188640058041, 0.5849326960014878, 0.6664623119868338] number_of_tis=10000 mean=2.071472014003666 per=0.00020714720140036663 times=[2.957865878008306, 1.9388906149979448, 1.766649461002089, 1.8647991580073722, 1.8291549580026185] number_of_tis=15000 mean=2.866650845797267 per=0.00019111005638648446 times=[3.3783503199956613, 2.657773957995232, 2.707275656008278, 2.7875704979960574, 2.802283796991105] number_of_tis=20000 mean=3.5886989389982773 per=0.00017943494694991387 times=[3.969436354993377, 3.436962780993781, 3.9078941010084236, 3.6387251569976797, 2.9904763009981252] ``` MySQL is (only) 27% faster: ``` number_of_tis=1 mean=0.035956257799989545 per=0.035956257799989545 times=[0.03932315899874084, 0.03545605999534018, 0.03535486999317072, 0.034727805003058165, 0.03491939500963781] number_of_tis=10 mean=0.036957260797498746 per=0.0036957260797498745 times=[0.040442515004542656, 0.0379129799985094, 0.03494819799379911, 0.03562593398964964, 0.03585667700099293] number_of_tis=50 mean=0.04745422120031435 per=0.0009490844240062871 times=[0.06965546800347511, 0.04221734800375998, 0.04038520700123627, 0.040363031992455944, 0.04465005100064445] number_of_tis=100 mean=0.0528092162014218 per=0.000528092162014218 times=[0.06113427500531543, 0.04883724599494599, 0.05276876600692049, 0.047688748003565706, 0.05361704599636141] number_of_tis=500 mean=0.16223246100416872 per=0.0003244649220083374 times=[0.24469116200634744, 0.1407806619972689, 0.14792052800476085, 0.14703868801007047, 0.13073126500239596] number_of_tis=1000 mean=0.285728433605982 per=0.00028572843360598197 times=[0.3230128890136257, 0.27035739900020417, 0.3003890450054314, 0.2638379510026425, 0.2710448840080062] number_of_tis=3000 mean=1.1824120475997915 per=0.0003941373491999305 times=[1.3103130240051541, 1.286688863998279, 1.1455156929878285, 1.1072918410063721, 1.062250816001324] number_of_tis=5000 mean=1.9416745471942705 per=0.0003883349094388541 times=[2.3746965279860888, 1.9103765429899795, 2.0542518720030785, 1.7706374429981224, 1.598410349994083] 
number_of_tis=10000 mean=5.059874459402636 per=0.0005059874459402636 times=[5.431018351999228, 5.262124675995437, 5.174487816999317, 4.423381198008428, 5.008360254010768] number_of_tis=15000 mean=9.717965700797503 per=0.0006478643800531668 times=[7.884617075993447, 9.466949063993525, 10.005758297003922, 10.105231182998978, 11.127272883997648] number_of_tis=20000 mean=16.2008618004038 per=0.00081004309002019 times=[14.645835625007749, 16.304637463006657, 16.255490412993822, 16.830263861003914, 16.968081640006858] ```
https://github.com/apache/airflow.git
def import_local_settings():
    try:
        import airflow_local_settings

        if hasattr(airflow_local_settings, "__all__"):
            for i in airflow_local_settings.__all__:
                globals()[i] = getattr(airflow_local_settings, i)
        else:
            for k, v in airflow_local_settings.__dict__.items():
                if not k.startswith("__"):
                    globals()[k] = v

        # TODO: Remove once deprecated
        if "policy" in globals() and "task_policy" not in globals():
            warnings.warn(
                "Using `policy` in airflow_local_settings.py is deprecated. "
                "Please rename your `policy` to `task_policy`.",
                DeprecationWarning,
                stacklevel=2,
            )
            globals()["task_policy"] = globals()["policy"]
            del globals()["policy"]

        if not hasattr(task_instance_mutation_hook, 'is_noop'):
            task_instance_mutation_hook.is_noop = False

        log.info("Loaded airflow_local_settings from %s .", airflow_local_settings.__file__)
    except ModuleNotFoundError as e:
        if e.name == "airflow_local_settings":
            log.debug("No airflow_local_settings to import.", exc_info=True)
        else:
            log.critical(
                "Failed to import airflow_local_settings due to a transitive module not found error.",
                exc_info=True,
            )
            raise
    except ImportError:
        log.critical("Failed to import airflow_local_settings.", exc_info=True)
        raise
191
settings.py
Python
airflow/settings.py
f2039b4c9e15b514661d4facbd710791fe0a2ef4
airflow
11
142,491
6
9
3
37
7
0
6
15
ensure_serialization_context
[api] Annotate as public / move ray-core APIs to _private and add enforcement rule (#25695) Enable checking of the ray core module, excluding serve, workflows, and tune, in ./ci/lint/check_api_annotations.py. This required moving many files to ray._private and associated fixes.
https://github.com/ray-project/ray.git
def ensure_serialization_context():
    ctx = StandaloneSerializationContext()
    ray.util.serialization_addons.apply(ctx)
20
utils.py
Python
python/ray/serve/utils.py
43aa2299e6623c8f8c7c4a1b80133459d0aa68b0
ray
1
120,514
34
14
8
136
17
0
49
67
_mask
Add support for padded arrays in QDWH algorithm. This change is in preparation for adding a jit-table QDWH-eig implementation. PiperOrigin-RevId: 448571523
https://github.com/google/jax.git
def _mask(x, dims, alternative=0):
    assert jnp.ndim(x) == len(dims)
    mask = None
    for i, d in enumerate(dims):
        if d is not None:
            mask_dim_i = lax.broadcasted_iota(jnp.int32, x.shape, i) < d
            mask = mask_dim_i if mask is None else (mask & mask_dim_i)
    return x if mask is None else jnp.where(mask, x, alternative)
91
qdwh.py
Python
jax/_src/lax/qdwh.py
db73670ec3fc72f75e6f832351620ac79e9b0c6f
jax
5
295,822
24
13
8
97
14
0
31
95
control_zone_name
Use EntityFeature enum in components (i**) (#69409)
https://github.com/home-assistant/core.git
def control_zone_name(self):
    if self._supported_features & ClimateEntityFeature.TARGET_TEMPERATURE:
        return None
    zone_ctrl = self._controller.zone_ctrl
    zone = next((z for z in self.zones.values() if z.zone_index == zone_ctrl), None)
    if zone is None:
        return None
    return zone.name
61
climate.py
Python
homeassistant/components/izone/climate.py
e6d8aa34fa34d7f8e45280e4cc545d2ba15fd117
core
5
101,196
23
20
13
150
16
0
30
303
xyz_2d
lib.align.aligned_face updates - Typing - Legacy support for pre-aligned faces - Coverage support for pre-aligned faces - Standardized retrieval of sub-crops
https://github.com/deepfakes/faceswap.git
def xyz_2d(self) -> np.ndarray:
    if self._xyz_2d is None:
        xyz = cv2.projectPoints(np.array([[6., 0., -2.3],
                                          [0., 6., -2.3],
                                          [0., 0., 3.7]]).astype("float32"),
                                self._rotation,
                                self._translation,
                                self._camera_matrix,
                                self._distortion_coefficients)[0].squeeze()
        self._xyz_2d = xyz - self._offset["head"]
    return self._xyz_2d
112
aligned_face.py
Python
lib/align/aligned_face.py
a2de4a97985dc62db3b140a924aeac2be733abf8
faceswap
2
271,257
31
9
14
87
12
0
40
132
_build_map
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _build_map(outputs):
    finished_nodes = set()
    nodes_in_progress = set()
    nodes_in_decreasing_depth = []  # nodes from inputs -> outputs.
    layer_indices = {}  # layer -> in traversal order.
    for output in tf.nest.flatten(outputs):
        _build_map_helper(
            output,
            finished_nodes,
            nodes_in_progress,
            nodes_in_decreasing_depth,
            layer_indices,
        )
    return nodes_in_decreasing_depth, layer_indices
53
functional.py
Python
keras/engine/functional.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
2
150,984
6
6
7
26
5
0
6
20
get_producer_pairs
remove data waiting, remove explicit analyzing of external df
https://github.com/freqtrade/freqtrade.git
def get_producer_pairs(self) -> List[str]:
    return self.__producer_pairs
15
dataprovider.py
Python
freqtrade/data/dataprovider.py
510cf4f30507ed4763d13e12a41e12ceb59a6748
freqtrade
1
48,006
21
11
18
120
16
1
21
113
test_check_docker_compose_version_unknown
Unify style of communication with the users for Breeze. (#23311) Fixes: #22906
https://github.com/apache/airflow.git
def test_check_docker_compose_version_unknown(mock_get_console, mock_run_command):
    check_docker_compose_version(verbose=True)
    expected_run_command_calls = [
        call(
            ["docker-compose", "--version"],
            verbose=True,
            no_output_dump_on_exception=True,
            capture_output=True,
            text=True,
        ),
    ]
    mock_run_command.assert_has_calls(expected_run_command_calls)
    mock_get_console.return_value.print.assert_called_with(
    )


@mock.patch('airflow_breeze.utils.docker_command_utils.run_command')
@mock.patch('airflow_breeze.utils.docker_command_utils.get_console')
@mock.patch('airflow_breeze.utils.docker_command_utils.run_command') @mock.patch('airflow_breeze.utils.docker_command_utils.get_console')
59
test_docker_command_utils.py
Python
dev/breeze/tests/test_docker_command_utils.py
505af06303d8160c71f6a7abe4792746f640083d
airflow
1
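A self-contained illustration of the patch/assert_has_calls testing pattern the Breeze test above relies on. The module-level helper and the checked command are invented for the example; only the unittest.mock API usage mirrors the real test.

from unittest import mock
from unittest.mock import call

def run_command(cmd, capture_output=False):
    # Stand-in for the real helper; never executed because the test patches it.
    raise NotImplementedError

def check_version():
    run_command(["docker-compose", "--version"], capture_output=True)

@mock.patch(f"{__name__}.run_command")
def test_check_version(mock_run_command):
    check_version()
    mock_run_command.assert_has_calls(
        [call(["docker-compose", "--version"], capture_output=True)]
    )

test_check_version()
print("patched call recorded as expected")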
257,300
8
8
6
44
8
0
9
23
_dispatch_run
Add `run_batch` method to all nodes and `Pipeline` to allow batch querying (#2481) * Add run_batch methods for batch querying * Update Documentation & Code Style * Fix mypy * Update Documentation & Code Style * Fix mypy * Fix linter * Fix tests * Update Documentation & Code Style * Fix tests * Update Documentation & Code Style * Fix mypy * Fix rest api test * Update Documentation & Code Style * Add Doc strings * Update Documentation & Code Style * Add batch_size as attribute to nodes supporting batching * Adapt error messages * Adapt type of filters in retrievers * Revert change about truncation_warning in summarizer * Unify multiple_doc_lists tests * Use smaller models in extractor tests * Add return types to JoinAnswers and RouteDocuments * Adapt return statements in reader's run_batch method * Allow list of filters * Adapt error messages * Update Documentation & Code Style * Fix tests * Fix mypy * Adapt print_questions * Remove disabling warning about too many public methods * Add flag for pylint to disable warning about too many public methods in pipelines/base.py and document_stores/base.py * Add type check * Update Documentation & Code Style * Adapt tutorial 11 * Update Documentation & Code Style * Add query_batch method for DCDocStore * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
https://github.com/deepset-ai/haystack.git
def _dispatch_run(self, **kwargs) -> Tuple[Dict, str]:
    return self._dispatch_run_general(self.run, **kwargs)
28
base.py
Python
haystack/nodes/base.py
738e008020f146ff9820c290311782f515749c48
haystack
1
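A minimal sketch of the "dispatch through a shared general runner" pattern that `_dispatch_run` above follows. The class and method bodies here are illustrative stand-ins, not Haystack's actual node API.

from typing import Dict, Tuple

class ToyNode:
    def run(self, query: str) -> Tuple[Dict, str]:
        return {"answer": query.upper()}, "output_1"

    def _dispatch_run_general(self, run_method, **kwargs) -> Tuple[Dict, str]:
        # Shared pre/post-processing (parameter validation, debug info, ...) would live here.
        return run_method(**kwargs)

    def _dispatch_run(self, **kwargs) -> Tuple[Dict, str]:
        return self._dispatch_run_general(self.run, **kwargs)

node = ToyNode()
print(node._dispatch_run(query="hello"))  # ({'answer': 'HELLO'}, 'output_1')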
143,545
9
8
3
43
4
0
9
30
seed
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def seed(self, seed=None):
    self.np_random, seed = seeding.np_random(seed)
    return [seed]
26
coin_game_non_vectorized_env.py
Python
rllib/examples/env/coin_game_non_vectorized_env.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
1
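A hedged sketch of the Gym seeding idiom used by the `seed` method above. Gym's seeding API has changed across versions; this follows the classic gym.utils.seeding interface, and the environment class is a placeholder.

from gym.utils import seeding

class TinyEnv:
    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

env = TinyEnv()
print(env.seed(42))               # [42]
print(env.np_random.uniform())    # deterministic given the seed (exact value depends on gym version)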
224,482
5
11
2
41
7
0
5
19
get_file_from_path
Refactor URI handling to not have to deal with backslashes
https://github.com/mkdocs/mkdocs.git
def get_file_from_path(self, path):
    return self.src_uris.get(PurePath(path).as_posix())
24
files.py
Python
mkdocs/structure/files.py
1c50987f9c17b228fdf22456aa369b83bd6b11b9
mkdocs
1
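A small demonstration of the PurePath.as_posix() normalization that `get_file_from_path` relies on, with a toy dict standing in for MkDocs' real src_uris mapping. On Windows, PurePath resolves to PureWindowsPath, which is what makes backslashed input match the forward-slash keys.

from pathlib import PurePath, PureWindowsPath

src_uris = {"docs/index.md": "<File docs/index.md>"}   # illustrative mapping

def get_file_from_path(path):
    # PurePath picks the platform flavour; as_posix() always yields forward slashes.
    return src_uris.get(PurePath(path).as_posix())

print(get_file_from_path("docs/index.md"))           # found on any platform
print(PureWindowsPath("docs\\index.md").as_posix())  # 'docs/index.md' – why backslashed paths match on Windows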
270,214
4
6
2
19
3
0
4
18
master_target
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def master_target(self):
    return self._master_target
10
distribute_coordinator_utils.py
Python
keras/distribute/distribute_coordinator_utils.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
310,911
26
13
8
107
11
0
31
110
_update_callback
Point updates (#64748) Co-authored-by: Franck Nijhof <git@frenck.dev>
https://github.com/home-assistant/core.git
async def _update_callback(self):
    if not self.is_updated:
        return
    if self.device_class == BinarySensorDeviceClass.CONNECTIVITY:
        # connectivity is the other way around.
        self._attr_is_on = not (self._events[0] in self.device.ongoing_events)
    else:
        self._attr_is_on = self._events[0] in self.device.ongoing_events
    self.async_write_ha_state()
64
binary_sensor.py
Python
homeassistant/components/point/binary_sensor.py
f23af3455edee3aae40a60b936bfaf04b8aab8a3
core
3
101,197
7
6
5
31
7
0
7
21
offset
lib.align.aligned_face updates - Typing - Legacy support for pre-aligned faces - Coverage support for pre-aligned faces - Standardized retrieval of sub-crops
https://github.com/deepfakes/faceswap.git
def offset(self) -> Dict[CenteringType, np.ndarray]:
    return self._offset
19
aligned_face.py
Python
lib/align/aligned_face.py
a2de4a97985dc62db3b140a924aeac2be733abf8
faceswap
1
7,853
19
12
6
159
25
0
20
62
process_downloaded_dataset
Add medical no-show appointments dataset (#2387) Co-authored-by: Daniel Treiman <dan.treiman@gmail.com>
https://github.com/ludwig-ai/ludwig.git
def process_downloaded_dataset(self):
    df = pd.read_csv(os.path.join(self.raw_dataset_path, self.csv_filename))
    df[SPLIT] = np.random.choice(3, len(df), p=(0.7, 0.1, 0.2)).astype(np.int8)
    makedirs(self.processed_temp_path, exist_ok=True)
    df.to_csv(os.path.join(self.processed_temp_path, self.csv_filename), index=False)
    rename(self.processed_temp_path, self.processed_dataset_path)
111
__init__.py
Python
ludwig/datasets/noshow_appointments/__init__.py
c13bc2cf1443ce9339cde0eb5e0d2b568af341f7
ludwig
1
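A minimal, self-contained version of the 70/10/20 split-column trick used in `process_downloaded_dataset` above. The column name and frame contents are invented for the example.

import numpy as np
import pandas as pd

df = pd.DataFrame({"patient_id": range(1000)})
# 0 = train, 1 = validation, 2 = test, assigned at random with the given probabilities.
df["split"] = np.random.choice(3, len(df), p=(0.7, 0.1, 0.2)).astype(np.int8)
print(df["split"].value_counts(normalize=True))  # roughly 0.7 / 0.1 / 0.2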
266,089
49
9
15
215
17
0
62
122
change_logging
Use context vars instead of thread-local storage for change logging
https://github.com/netbox-community/netbox.git
def change_logging(request):
    current_request.set(request)
    webhooks_queue.set([])

    # Connect our receivers to the post_save and post_delete signals.
    post_save.connect(handle_changed_object, dispatch_uid='handle_changed_object')
    m2m_changed.connect(handle_changed_object, dispatch_uid='handle_changed_object')
    pre_delete.connect(handle_deleted_object, dispatch_uid='handle_deleted_object')
    clear_webhooks.connect(clear_webhook_queue, dispatch_uid='clear_webhook_queue')

    yield

    # Disconnect change logging signals. This is necessary to avoid recording any errant
    # changes during test cleanup.
    post_save.disconnect(handle_changed_object, dispatch_uid='handle_changed_object')
    m2m_changed.disconnect(handle_changed_object, dispatch_uid='handle_changed_object')
    pre_delete.disconnect(handle_deleted_object, dispatch_uid='handle_deleted_object')
    clear_webhooks.disconnect(clear_webhook_queue, dispatch_uid='clear_webhook_queue')

    # Flush queued webhooks to RQ
    flush_webhooks(webhooks_queue.get())

    # Clear context vars
    current_request.set(None)
    webhooks_queue.set([])
121
context_managers.py
Python
netbox/extras/context_managers.py
cd8943144bec52ff608ddad3db5d0155832a4a23
netbox
1
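A sketch of why context variables suit the `current_request` / `webhooks_queue` pattern above: unlike thread-locals, a ContextVar's value is isolated per copied context (and per asyncio task). The variable and handler names below are illustrative, not NetBox's.

import contextvars
from contextvars import ContextVar

current_request: ContextVar = ContextVar("current_request", default=None)

def handle():
    return current_request.get()

current_request.set("request-A")
ctx = contextvars.copy_context()   # snapshot of the values at this point
current_request.set("request-B")

print(handle())          # 'request-B' as seen by the live context
print(ctx.run(handle))   # 'request-A' as seen by the copied context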
275,290
4
6
2
19
3
0
4
18
iterations
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def iterations(self):
    return self._iterations
10
optimizer.py
Python
keras/optimizers/optimizer_experimental/optimizer.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
101,220
10
7
4
32
4
0
11
32
interpolator
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
https://github.com/deepfakes/faceswap.git
def interpolator(self) -> int:
    assert self._interpolator is not None
    return self._interpolator
19
detected_face.py
Python
lib/align/detected_face.py
5e73437be47f2410439a3c6716de96354e6a0c94
faceswap
1
278,589
23
11
8
91
15
0
23
110
back_up
Add step granularity for BackupAndRestore PiperOrigin-RevId: 452124032
https://github.com/keras-team/keras.git
def back_up(self, epoch, batch=0):
    # Save the model plus CKPT_SAVED_EPOCH and CKPT_SAVED_BATCH variable.
    backend.set_value(self._ckpt_saved_epoch, epoch)
    backend.set_value(self._ckpt_saved_batch, batch)
    if self.write_checkpoint_manager.save():
        distributed_file_utils.remove_temp_dirpath(
            self.write_checkpoint_manager.directory,
            self._model.distribute_strategy,
        )
58
worker_training_state.py
Python
keras/distribute/worker_training_state.py
896c8d1a2bfb9351611a86282acf0b2257c54a55
keras
2
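A hedged sketch of how the step-granular backup is exposed to users through the BackupAndRestore callback (recent Keras/TF releases accept an integer step count for save_freq in addition to "epoch"); the model, data, and backup directory below are placeholders.

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="sgd", loss="mse")

# save_freq=5: checkpoint every 5 training batches instead of once per epoch (assumed API).
backup = tf.keras.callbacks.BackupAndRestore(backup_dir="/tmp/backup", save_freq=5)

x = tf.random.normal((256, 4))
y = tf.random.normal((256, 1))
model.fit(x, y, epochs=2, callbacks=[backup])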
136,365
53
11
13
189
21
0
92
148
test_delete_job
[Jobs] Add DELETE endpoint (#30056) Adds a DELETE endpoint to the REST API, SDK and CLI for Ray Jobs. Before this, there was no way to delete the job info from the cluster, so the memory could potentially grow without bound. Deleting a job immediately deletes all its associated info (status, metadata) from memory. We only support deleting jobs that are in a terminal state. We don't delete the log files of a deleted job.
https://github.com/ray-project/ray.git
def test_delete_job(job_sdk_client, capsys):
    client: JobSubmissionClient = job_sdk_client

    job_id = client.submit_job(entrypoint="sleep 300 && echo hello")
    with pytest.raises(Exception, match="but it is in a non-terminal state"):
        # This should fail because the job is not in a terminal state.
        client.delete_job(job_id)

    # Check that the job appears in list_jobs
    jobs = client.list_jobs()
    assert job_id in [job.submission_id for job in jobs]

    finished_job_id = client.submit_job(entrypoint="echo hello")
    wait_for_condition(_check_job_succeeded, client=client, job_id=finished_job_id)
    deleted = client.delete_job(finished_job_id)
    assert deleted is True

    # Check that the job no longer appears in list_jobs
    jobs = client.list_jobs()
    assert finished_job_id not in [job.submission_id for job in jobs]
114
test_http_job_server.py
Python
dashboard/modules/job/tests/test_http_job_server.py
1639914ba5a37ed56a644fdb4c712f6da064746c
ray
3
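A hedged sketch of the "only delete terminal jobs" guard that the test above exercises. The real check lives in Ray's job manager on the server side; the enum and function here are simplified stand-ins for illustration.

from enum import Enum

class JobStatus(Enum):
    RUNNING = "RUNNING"
    SUCCEEDED = "SUCCEEDED"
    FAILED = "FAILED"
    STOPPED = "STOPPED"

    def is_terminal(self) -> bool:
        return self in (JobStatus.SUCCEEDED, JobStatus.FAILED, JobStatus.STOPPED)

def delete_job(status: JobStatus) -> bool:
    if not status.is_terminal():
        raise RuntimeError("but it is in a non-terminal state")
    return True

print(delete_job(JobStatus.SUCCEEDED))   # True
try:
    delete_job(JobStatus.RUNNING)
except RuntimeError as err:
    print(err)                           # matches the error fragment asserted in the test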