ast_errors stringlengths 0-3.2k | d_id int64 44-121k | id int64 70-338k | n_whitespaces int64 3-14k | path stringlengths 8-134 | n_words int64 4-4.82k | n_identifiers int64 1-131 | random_cut stringlengths 16-15.8k | commit_message stringlengths 2-15.3k | fun_name stringlengths 1-84 | commit_id stringlengths 40-40 | repo stringlengths 3-28 | file_name stringlengths 5-79 | ast_levels int64 6-31 | nloc int64 1-548 | url stringlengths 31-59 | complexity int64 1-66 | token_counts int64 6-2.13k | n_ast_errors int64 0-28 | vocab_size int64 4-1.11k | n_ast_nodes int64 15-19.2k | language stringclasses 1 value | documentation dict | code stringlengths 101-62.2k |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
@receiver(post_save, sender=Annotation) | 42,524 | 177,853 | 220 | label_studio/tasks/models.py | 67 | 22 | def delete_project_summary_annotations_before_updating_annotation(sender, instance, **kwargs):
try:
old_annotation = sender.objects.get(id=instance.id)
except Annotation.DoesNotExist:
# annotation just created - do nothing
return
old_annotation.decrease_project_summary_counters()
# update task counters if annotation changes its was_cancelled status
task = instance.task
if old_annotation.was_cancelled != instance.was_cancelled:
if instance.was_cancelled:
task.cancelled_annotations = task.cancelled_annotations + 1
task.total_annotations = task.total_annotations - 1
else:
task.cancelled_annotations = task.cancelled_annotations - 1
task.total_annotations = task.total_annotations + 1
task.update_is_labeled()
Task.objects.filter(id=instance.task.id).update(
is_labeled=task.is_labeled,
total_annot | fix: DEV-2406: Fix counters for skipped annotations (#2364)
* fix: DEV-2406: Fix counters for skipped annotations
* Fixes
Co-authored-by: makseq-ubnt <makseq@gmail.com> | delete_project_summary_annotations_before_updating_annotation | 50583d9ed6bfd2a837d0168e1690529a31efa2f7 | label-studio | models.py | 14 | 20 | https://github.com/heartexlabs/label-studio.git | 4 | 135 | 1 | 44 | 231 | Python | {
"docstring": "Before updating annotation fields - ensure previous info removed from project.summary",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def delete_project_summary_annotations_before_updating_annotation(sender, instance, **kwargs):
try:
old_annotation = sender.objects.get(id=instance.id)
except Annotation.DoesNotExist:
# annotation just created - do nothing
return
old_annotation.decrease_project_summary_counters()
# update task counters if annotation changes its was_cancelled status
task = instance.task
if old_annotation.was_cancelled != instance.was_cancelled:
if instance.was_cancelled:
task.cancelled_annotations = task.cancelled_annotations + 1
task.total_annotations = task.total_annotations - 1
else:
task.cancelled_annotations = task.cancelled_annotations - 1
task.total_annotations = task.total_annotations + 1
task.update_is_labeled()
Task.objects.filter(id=instance.task.id).update(
is_labeled=task.is_labeled,
total_annotations=task.total_annotations,
cancelled_annotations=task.cancelled_annotations
)
@receiver(post_save, sender=Annotation) |
10,250 | 50,991 | 699 | modules/image/keypoint_detection/hand_pose_localization/model.py | 151 | 28 | def load_config(self, modelpath, use_gpu, gpu_id, use_mkldnn, cpu_threads):
# Configure the execution device
if use_gpu:
| update hand_pose_localization (#1967)
* update hand_pose_localization
* add clean func | load_config | 6b42963d62833925ffed1cdb73400e7d528a5353 | PaddleHub | model.py | 16 | 40 | https://github.com/PaddlePaddle/PaddleHub.git | 11 | 291 | 0 | 76 | 496 | Python | {
"docstring": "\r\n load the model config\r\n modelpath: inference model path\r\n use_gpu: use gpu or not\r\n use_mkldnn: use mkldnn or not\r\n Error! Unable to use GPU. Please set the environment variables \"CUDA_VISIBLE_DEVICES=GPU_id\" to use GPU. Now switch to CPU to continue...",
"language": "en",
"n_whitespaces": 73,
"n_words": 38,
"vocab_size": 27
} | def load_config(self, modelpath, use_gpu, gpu_id, use_mkldnn, cpu_threads):
# 对运行位置进行配置
if use_gpu:
try:
int(os.environ.get('CUDA_VISIBLE_DEVICES'))
except Exception:
print(
)
use_gpu = False
if os.path.isdir(modelpath):
if os.path.exists(os.path.join(modelpath, "__params__")):
# __model__ + __params__
model = os.path.join(modelpath, "__model__")
params = os.path.join(modelpath, "__params__")
config = Config(model, params)
elif os.path.exists(os.path.join(modelpath, "params")):
# model + params
model = os.path.join(modelpath, "model")
params = os.path.join(modelpath, "params")
config = Config(model, params)
elif os.path.exists(os.path.join(modelpath, "__model__")):
# __model__ + others
config = Config(modelpath)
else:
raise Exception(
"Error! Can\'t find the model in: %s. Please check your model path." % os.path.abspath(modelpath))
elif os.path.exists(modelpath + ".pdmodel"):
# *.pdmodel + *.pdiparams
model = modelpath + ".pdmodel"
params = modelpath + ".pdiparams"
config = Config(model, params)
elif isinstance(modelpath, Config):
config = modelpath
else:
raise Exception(
"Error! Can\'t find the model in: %s. Please check your model path." % os.path.abspath(modelpath))
# Set parameters
if use_gpu:
config.enable_use_gpu(100, gpu_id)
else:
config.disable_gpu()
config.set_cpu_math_library_num_threads(cpu_threads)
if use_mkldnn:
config.enable_mkldnn()
config.disable_glog_info()
# Return the config
return config
# Predictor creation function
|
|
18,399 | 88,553 | 256 | src/sentry/lang/javascript/processor_smcache.py | 139 | 10 | def trim_line(line, column=0):
line = line.strip("\n")
ll = len(line)
if ll <= 150:
return line
if column > ll:
column = ll
start = max(column - 60, 0)
# Round down if it brings us close to the edge
if start < 5:
start = 0
end = min(start + 140, ll)
# Round up to the end if it's close
if end > ll - 5:
end = ll
# If we are bumped all the way to the end,
# make sure we still get a full 140 characters in the line
if end == ll:
start = max(end - 140, 0)
line = line[start:end]
if end < ll:
# we've snipped from the end
line += " {snip}"
if start > 0: | feat(processor): Use JavaScriptSmCacheStacktraceProcessor by default for internal projects (#41390)
This PR builds on top of https://github.com/getsentry/sentry/pull/40951/
and prepares us for gradual rollout. | trim_line | 67c8215ba3f246937fd7e1fbb02f33050a1c0456 | sentry | processor_smcache.py | 11 | 21 | https://github.com/getsentry/sentry.git | 8 | 120 | 0 | 68 | 204 | Python | {
"docstring": "\n Trims a line down to a goal of 140 characters, with a little\n wiggle room to be sensible and tries to trim around the given\n `column`. So it tries to extract 60 characters before and after\n the provided `column` and yield a better context.\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 44,
"vocab_size": 34
} | def trim_line(line, column=0):
line = line.strip("\n")
ll = len(line)
if ll <= 150:
return line
if column > ll:
column = ll
start = max(column - 60, 0)
# Round down if it brings us close to the edge
if start < 5:
start = 0
end = min(start + 140, ll)
# Round up to the end if it's close
if end > ll - 5:
end = ll
# If we are bumped all the way to the end,
# make sure we still get a full 140 characters in the line
if end == ll:
start = max(end - 140, 0)
line = line[start:end]
if end < ll:
# we've snipped from the end
line += " {snip}"
if start > 0:
# we've snipped from the beginning
line = "{snip} " + line
return line
|
|
71,747 | 247,569 | 189 | tests/storage/test_background_update.py | 45 | 17 | def test_background_update_default_batch_set_by_config(self):
self.get_success(
self.store.db_pool.simple_insert(
"background_updates",
values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
)
)
self.update_handler.side_effect = self.update
self.update_handler.reset_mock()
res = self.get_success(
self.updates.do_next_background_update(False),
by=0. | Add config settings for background update parameters (#11980) | test_background_update_default_batch_set_by_config | ef3619e61d84493d98470eb2a69131d15eb1166b | synapse | test_background_update.py | 13 | 15 | https://github.com/matrix-org/synapse.git | 1 | 92 | 0 | 39 | 154 | Python | {
"docstring": "\n Test that the background update is run with the default_batch_size set by the config\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 14,
"vocab_size": 12
} | def test_background_update_default_batch_set_by_config(self):
self.get_success(
self.store.db_pool.simple_insert(
"background_updates",
values={"update_name": "test_update", "progress_json": '{"my_key": 1}'},
)
)
self.update_handler.side_effect = self.update
self.update_handler.reset_mock()
res = self.get_success(
self.updates.do_next_background_update(False),
by=0.01,
)
self.assertFalse(res)
# on the first call, we should get run with the default background update size specified in the config
self.update_handler.assert_called_once_with({"my_key": 1}, 20)
|
|
50,689 | 204,304 | 54 | django/contrib/sessions/backends/file.py | 11 | 9 | def _expiry_date(self, session_data):
return session_data.get("_session_expiry") or (
self._last_modification()
+ datetime.timedelta(seconds=self.get_session_cookie_ag | Refs #33476 -- Reformatted code with Black. | _expiry_date | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | file.py | 13 | 5 | https://github.com/django/django.git | 2 | 36 | 0 | 11 | 62 | Python | {
"docstring": "\n Return the expiry time of the file storing the session's content.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 11,
"vocab_size": 9
} | def _expiry_date(self, session_data):
return session_data.get("_session_expiry") or (
self._last_modification()
+ datetime.timedelta(seconds=self.get_session_cookie_age())
)
|
|
5,973 | 32,709 | 283 | src/transformers/models/trocr/processing_trocr.py | 89 | 15 | def __call__(self, *args, **kwargs):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*args, **kwargs)
images = kwargs.pop("images", None)
text = kwargs.pop("text", None)
if len(args) > 0:
images = args[0]
args = args[1:]
if images is None and text is None:
raise ValueError("Yo | Change audio kwarg to images in TROCR processor (#18421)
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com> | __call__ | 0b8c1b6994082950044452a670e8417a5ebc2db0 | transformers | processing_trocr.py | 11 | 21 | https://github.com/huggingface/transformers.git | 9 | 147 | 0 | 50 | 240 | Python | {
"docstring": "\n When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor's\n [`~AutoFeatureExtractor.__call__`] and returns its output. If used in the context\n [`~TrOCRProcessor.as_target_processor`] this method forwards all its arguments to TrOCRTokenizer's\n [`~TrOCRTokenizer.__call__`]. Please refer to the doctsring of the above two methods for more information.\n ",
"language": "en",
"n_whitespaces": 82,
"n_words": 46,
"vocab_size": 33
} | def __call__(self, *args, **kwargs):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*args, **kwargs)
images = kwargs.pop("images", None)
text = kwargs.pop("text", None)
if len(args) > 0:
images = args[0]
args = args[1:]
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process.")
if images is not None:
inputs = self.feature_extractor(images, *args, **kwargs)
if text is not None:
encodings = self.tokenizer(text, **kwargs)
if text is None:
return inputs
elif images is None:
return encodings
else:
inputs["labels"] = encodings["input_ids"]
return inputs
|
|
78,158 | 265,638 | 61 | netbox/extras/reports.py | 26 | 8 | def get_report(module_name, report_name):
reports = get_reports()
module = reports.get(module_name)
if module is None:
return None
report = module.get(report_name)
if report is None:
return None
return report
| Allow reports to be nested in submodules | get_report | 356ff457be08d5527920c617eb598f24a6edbc3d | netbox | reports.py | 8 | 9 | https://github.com/netbox-community/netbox.git | 3 | 45 | 0 | 15 | 75 | Python | {
"docstring": "\n Return a specific report from within a module.\n ",
"language": "en",
"n_whitespaces": 15,
"n_words": 8,
"vocab_size": 7
} | def get_report(module_name, report_name):
reports = get_reports()
module = reports.get(module_name)
if module is None:
return None
report = module.get(report_name)
if report is None:
return None
return report
|
|
72,159 | 248,221 | 243 | tests/config/test_workers.py | 32 | 12 | def test_worker_duty_configs(self) -> None:
worker1_config = self._make_worker_config(
worker_app="synapse.app.generic_worker",
worker_name="worker1",
extras={
"notify_appservice | Add the `update_user_directory_from_worker` configuration option (superseding `update_user_directory`) to allow a generic worker to be designated as the worker to update the user directory. (#12654)
Co-authored-by: Shay <hillerys@element.io> | test_worker_duty_configs | 699192fc1a1055a4bec2345bc80f120f28470c73 | synapse | test_workers.py | 12 | 24 | https://github.com/matrix-org/synapse.git | 1 | 96 | 0 | 22 | 170 | Python | {
"docstring": "\n Additional tests for the worker duties\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 6,
"vocab_size": 6
} | def test_worker_duty_configs(self) -> None:
worker1_config = self._make_worker_config(
worker_app="synapse.app.generic_worker",
worker_name="worker1",
extras={
"notify_appservices_from_worker": "worker2",
"update_user_directory_from_worker": "worker1",
},
)
self.assertFalse(worker1_config.should_notify_appservices)
self.assertTrue(worker1_config.should_update_user_directory)
worker2_config = self._make_worker_config(
worker_app="synapse.app.generic_worker",
worker_name="worker2",
extras={
"notify_appservices_from_worker": "worker2",
"update_user_directory_from_worker": "worker1",
},
)
self.assertTrue(worker2_config.should_notify_appservices)
self.assertFalse(worker2_config.should_update_user_directory)
|
|
29,383 | 130,818 | 107 | python/ray/runtime_context.py | 38 | 7 | def actor_id(self):
# only worker mode has actor_id
assert (
self.worker.mode == ray.worker. | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | actor_id | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | runtime_context.py | 9 | 7 | https://github.com/ray-project/ray.git | 2 | 41 | 0 | 34 | 78 | Python | {
"docstring": "Get the current actor ID in this worker.\n\n ID of the actor of the current process.\n This shouldn't be used in a driver process.\n\n Returns:\n The current actor id in this worker. None if there's no actor id.\n ",
"language": "en",
"n_whitespaces": 77,
"n_words": 38,
"vocab_size": 24
} | def actor_id(self):
# only worker mode has actor_id
assert (
self.worker.mode == ray.worker.WORKER_MODE
), f"This method is only available when the process is a\
worker. Current mode: {self.worker.mode}"
actor_id = self.worker.actor_id
return actor_id if not actor_id.is_nil() else None
|
|
16,032 | 73,502 | 117 | wagtail/contrib/settings/tests/test_admin.py | 28 | 17 | def test_redirect_to_current(self):
start_url = reverse("wagtailsettings | Reformat with black | test_redirect_to_current | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_admin.py | 12 | 11 | https://github.com/wagtail/wagtail.git | 1 | 78 | 0 | 23 | 127 | Python | {
"docstring": "\n Should redirect to the setting for the current site taken from the URL,\n by default\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 15,
"vocab_size": 13
} | def test_redirect_to_current(self):
start_url = reverse("wagtailsettings:edit", args=["tests", "testsetting"])
dest_url = reverse(
"wagtailsettings:edit", args=["tests", "testsetting", self.other_site.pk]
)
response = self.client.get(
start_url, follow=True, HTTP_HOST=self.other_site.hostname
)
self.assertRedirects(
response, dest_url, status_code=302, fetch_redirect_response=False
)
|
|
35,052 | 151,576 | 407 | freqtrade/freqai/base_models/FreqaiMultiOutputClassifier.py | 114 | 27 | def fit(self, X, y, sample_weight=None, fit_params=None):
if not hasattr(self.estimator, "fit"):
raise ValueError("The base estimator should implement a fit method")
y = self._validate_data(X="no_validation", y=y, multi_output=True)
if is_classifier(self):
check_classification_targets(y)
if y.ndim == 1:
raise ValueError(
"y must have at least two dimensions for "
"multi-output regression but has only one."
)
if sample_weight is not None and not has_fit_parameter(
self.estimator, "sample_weight"
):
raise ValueError("Underlying estimator does not support sample weights.")
if not fit_params:
fit_params = [None] * y.shape[1]
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_estimator)(
self.estimator, X, y[:, i], sample_weight, **fit_params[i]
)
| add strat and config for testing on PR | fit | 217add70bd010cae584db5aa13a7d5e76011e2bd | freqtrade | FreqaiMultiOutputClassifier.py | 13 | 31 | https://github.com/freqtrade/freqtrade.git | 11 | 239 | 0 | 87 | 379 | Python | {
"docstring": "Fit the model to data, separately for each output variable.\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input data.\n y : {array-like, sparse matrix} of shape (n_samples, n_outputs)\n Multi-output targets. An indicator matrix turns on multilabel\n estimation.\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If `None`, then samples are equally weighted.\n Only supported if the underlying classifier supports sample\n weights.\n fit_params : A list of dicts for the fit_params\n Parameters passed to the ``estimator.fit`` method of each step.\n Each dict may contain same or different values (e.g. different\n eval_sets or init_models)\n .. versionadded:: 0.23\n Returns\n -------\n self : object\n Returns a fitted instance.\n ",
"language": "en",
"n_whitespaces": 301,
"n_words": 110,
"vocab_size": 84
} | def fit(self, X, y, sample_weight=None, fit_params=None):
if not hasattr(self.estimator, "fit"):
raise ValueError("The base estimator should implement a fit method")
y = self._validate_data(X="no_validation", y=y, multi_output=True)
if is_classifier(self):
check_classification_targets(y)
if y.ndim == 1:
raise ValueError(
"y must have at least two dimensions for "
"multi-output regression but has only one."
)
if sample_weight is not None and not has_fit_parameter(
self.estimator, "sample_weight"
):
raise ValueError("Underlying estimator does not support sample weights.")
if not fit_params:
fit_params = [None] * y.shape[1]
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_estimator)(
self.estimator, X, y[:, i], sample_weight, **fit_params[i]
)
for i in range(y.shape[1])
)
self.classes_ = []
for estimator in self.estimators_:
self.classes_.extend(estimator.classes_)
if hasattr(self.estimators_[0], "n_features_in_"):
self.n_features_in_ = self.estimators_[0].n_features_in_
if hasattr(self.estimators_[0], "feature_names_in_"):
self.feature_names_in_ = self.estimators_[0].feature_names_in_
return self
|
|
24,472 | 111,721 | 54 | nni/retiarii/nn/pytorch/api.py | 14 | 7 | def inner_choices(self) -> Iterable['ValueChoice']:
for arg in self.arguments:
if isinstance(arg, ValueChoiceX):
yield from arg.inner_choices()
| Composition of `ValueChoice` (#4435) | inner_choices | a36dc07e8d39ec4438fd660c98f6f4551ff5f4a6 | nni | api.py | 12 | 9 | https://github.com/microsoft/nni.git | 3 | 33 | 0 | 14 | 56 | Python | {
"docstring": "\n Return an iterable of all leaf value choices.\n Useful for composition of value choices.\n No deduplication on labels. Mutators should take care.\n ",
"language": "en",
"n_whitespaces": 51,
"n_words": 22,
"vocab_size": 19
} | def inner_choices(self) -> Iterable['ValueChoice']:
for arg in self.arguments:
if isinstance(arg, ValueChoiceX):
yield from arg.inner_choices()
|
|
49,661 | 200,455 | 451 | sympy/tensor/index_methods.py | 152 | 29 | def get_indices(expr):
# We call ourself recursively to d | Fix various typos
Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet` | get_indices | 24f1e7730119fe958cc8e28411f790c9a5ec04eb | sympy | index_methods.py | 16 | 30 | https://github.com/sympy/sympy.git | 13 | 186 | 0 | 102 | 311 | Python | {
"docstring": "Determine the outer indices of expression ``expr``\n\n By *outer* we mean indices that are not summation indices. Returns a set\n and a dict. The set contains outer indices and the dict contains\n information about index symmetries.\n\n Examples\n ========\n\n >>> from sympy.tensor.index_methods import get_indices\n >>> from sympy import symbols\n >>> from sympy.tensor import IndexedBase\n >>> x, y, A = map(IndexedBase, ['x', 'y', 'A'])\n >>> i, j, a, z = symbols('i j a z', integer=True)\n\n The indices of the total expression is determined, Repeated indices imply a\n summation, for instance the trace of a matrix A:\n\n >>> get_indices(A[i, i])\n (set(), {})\n\n In the case of many terms, the terms are required to have identical\n outer indices. Else an IndexConformanceException is raised.\n\n >>> get_indices(x[i] + A[i, j]*y[j])\n ({i}, {})\n\n :Exceptions:\n\n An IndexConformanceException means that the terms ar not compatible, e.g.\n\n >>> get_indices(x[i] + y[j]) #doctest: +SKIP\n (...)\n IndexConformanceException: Indices are not consistent: x(i) + y(j)\n\n .. warning::\n The concept of *outer* indices applies recursively, starting on the deepest\n level. This implies that dummies inside parenthesis are assumed to be\n summed first, so that the following expression is handled gracefully:\n\n >>> get_indices((x[i] + A[i, j]*y[j])*x[j])\n ({i, j}, {})\n\n This is correct and may appear convenient, but you need to be careful\n with this as SymPy will happily .expand() the product, if requested. The\n resulting expression would mix the outer ``j`` with the dummies inside\n the parenthesis, which makes it a different expression. To be on the\n safe side, it is best to avoid such ambiguities by using unique indices\n for all contractions that should be held separate.\n\n ",
"language": "en",
"n_whitespaces": 433,
"n_words": 263,
"vocab_size": 172
} | def get_indices(expr):
# We call ourself recursively to determine indices of sub expressions.
# break recursion
if isinstance(expr, Indexed):
c = expr.indices
inds, dummies = _remove_repeated(c)
return inds, {}
elif expr is None:
return set(), {}
elif isinstance(expr, Idx):
return {expr}, {}
elif expr.is_Atom:
return set(), {}
# recurse via specialized functions
else:
if expr.is_Mul:
return _get_indices_Mul(expr)
elif expr.is_Add:
return _get_indices_Add(expr)
elif expr.is_Pow or isinstance(expr, exp):
return _get_indices_Pow(expr)
elif isinstance(expr, Piecewise):
# FIXME: No support for Piecewise yet
return set(), {}
elif isinstance(expr, Function):
# Support ufunc like behaviour by returning indices from arguments.
# Functions do not interpret repeated indices across arguments
# as summation
ind0 = set()
for arg in expr.args:
ind, sym = get_indices(arg)
ind0 |= ind
return ind0, sym
# this test is expensive, so it should be at the end
elif not expr.has(Indexed):
return set(), {}
raise NotImplementedError(
"FIXME: No specialized handling of type %s" % type(expr))
|
|
12,478 | 61,265 | 38 | .venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py | 16 | 6 | def dist_location(dist):
# type: (Distribution) -> str
egg_link = egg_link_path(dist)
if egg_link:
return normalize_path(egg_lin | upd; format | dist_location | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | misc.py | 9 | 5 | https://github.com/jindongwang/transferlearning.git | 2 | 27 | 0 | 15 | 48 | Python | {
"docstring": "\n Get the site-packages location of this distribution. Generally\n this is dist.location, except in the case of develop-installed\n packages, where dist.location is the source code location, and we\n want to know where the egg-link file is.\n\n The returned location is normalized (in particular, with symlinks removed).\n ",
"language": "en",
"n_whitespaces": 64,
"n_words": 45,
"vocab_size": 36
} | def dist_location(dist):
# type: (Distribution) -> str
egg_link = egg_link_path(dist)
if egg_link:
return normalize_path(egg_link)
return normalize_path(dist.location)
|
|
39,618 | 164,908 | 119 | pandas/_testing/_io.py | 41 | 8 | def can_connect(url, error_classes=None):
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url, time | TST: Check network URL statuses in tests (#45949) | can_connect | 4bc68b39511fdf1dffe91bd315ffee9565b90d1a | pandas | _io.py | 13 | 11 | https://github.com/pandas-dev/pandas.git | 4 | 52 | 0 | 33 | 92 | Python | {
"docstring": "\n Try to connect to the given url. True if succeeds, False if OSError\n raised\n\n Parameters\n ----------\n url : basestring\n The URL to try to connect to\n\n Returns\n -------\n connectable : bool\n Return True if no OSError (unable to connect) or URLError (bad url) was\n raised\n ",
"language": "en",
"n_whitespaces": 94,
"n_words": 45,
"vocab_size": 33
} | def can_connect(url, error_classes=None):
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url, timeout=20) as response:
# Timeout just in case rate-limiting is applied
if response.status != 200:
return False
except error_classes:
return False
else:
return True
# ------------------------------------------------------------------
# File-IO
|
|
19,247 | 95,842 | 448 | tests/sentry/incidents/endpoints/test_serializers.py | 73 | 46 | def test_valid_slack_channel_id(self):
integration = Integration.objects.create(
external_id="1",
provider="slack",
metadata={"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"},
)
integration.add_organization(self.organization, self.user)
base_params = self.valid_params.copy()
base_params.update(
{
"type": AlertRuleTriggerAction.get_registered_type(
AlertRuleTriggerAction.Type.SLACK
).slug,
"targetType": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.SPECIFIC
],
"targetIdentifier": "merp",
"integration": str(integration.id),
}
)
context = self.context.copy()
context.update({"input_channel_id": "CSVK0921"})
responses.add(
method=responses.GET,
url="https:// | ref(serializers): Split up large file (#31359) | test_valid_slack_channel_id | efb962b72c649c18c466afae41722384d111824b | sentry | test_serializers.py | 15 | 36 | https://github.com/getsentry/sentry.git | 1 | 210 | 0 | 63 | 360 | Python | {
"docstring": "\n Test that when a valid Slack channel ID is provided, we look up the channel name and validate it against the targetIdentifier.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 22,
"vocab_size": 20
} | def test_valid_slack_channel_id(self):
integration = Integration.objects.create(
external_id="1",
provider="slack",
metadata={"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"},
)
integration.add_organization(self.organization, self.user)
base_params = self.valid_params.copy()
base_params.update(
{
"type": AlertRuleTriggerAction.get_registered_type(
AlertRuleTriggerAction.Type.SLACK
).slug,
"targetType": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.SPECIFIC
],
"targetIdentifier": "merp",
"integration": str(integration.id),
}
)
context = self.context.copy()
context.update({"input_channel_id": "CSVK0921"})
responses.add(
method=responses.GET,
url="https://slack.com/api/conversations.info",
status=200,
content_type="application/json",
body=json.dumps({"ok": "true", "channel": {"name": "merp", "id": "CSVK0921"}}),
)
serializer = AlertRuleTriggerActionSerializer(context=context, data=base_params)
assert serializer.is_valid()
serializer.save()
# # Make sure the action was created.
alert_rule_trigger_actions = list(
AlertRuleTriggerAction.objects.filter(integration=integration)
)
assert len(alert_rule_trigger_actions) == 1
|
|
56,520 | 221,804 | 194 | python3.10.4/Lib/ctypes/_aix.py | 77 | 9 | def get_legacy(members):
if AIX_ABI == 64:
# AIX 64-bit member is | add python 3.10.4 for windows | get_legacy | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _aix.py | 15 | 12 | https://github.com/XX-net/XX-Net.git | 5 | 59 | 0 | 54 | 103 | Python | {
"docstring": "\n This routine provides historical aka legacy naming schemes started\n in AIX4 shared library support for library members names.\n e.g., in /usr/lib/libc.a the member name shr.o for 32-bit binary and\n shr_64.o for 64-bit binary.\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 33,
"vocab_size": 29
} | def get_legacy(members):
if AIX_ABI == 64:
# AIX 64-bit member is one of shr64.o, shr_64.o, or shr4_64.o
expr = r'shr4?_?64\.o'
member = get_one_match(expr, members)
if member:
return member
else:
# 32-bit legacy names - both shr.o and shr4.o exist.
# shr.o is the preferred name so we look for shr.o first
# i.e., shr4.o is returned only when shr.o does not exist
for name in ['shr.o', 'shr4.o']:
member = get_one_match(re.escape(name), members)
if member:
return member
return None
|
|
54,425 | 216,136 | 542 | salt/states/iptables.py | 176 | 12 | def set_policy(name, table="filter", family="ipv4", **kwargs):
ret = {"name": name, "changes": {}, "result": None, "comment": ""}
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
if (
__salt__["iptables.get_policy"](table, kwargs["chain"], family)
== kwargs["policy"]
):
ret["result"] = True
ret[
"comment"
] = "iptables default policy for chain {} on table {} for {} already set to {}".format(
kwargs["chain"], table, family, kwargs["policy"]
)
return ret
if __opts__["test"]:
ret["comment"] = (
"iptables default policy for chain {} on table {} for {} needs to be set"
" to {}".format(kwargs["chain"], table, family, kwargs["policy"])
)
return ret
if not __salt__["iptables.set_policy"](
table, kwargs["chain"], kwargs["policy"], family
):
ret["changes"] = {"locale": name}
ret["result"] = True
ret | salt.states.iptables: Document the save parameter
The examples mention this, but the reference documentation did not,
and it isn't obvious from the example that minimal installations of
some operating systems (in particular Debian) don't have all the
necessary packages to make it effective, even if iptables itself is
installed.
Signed-off-by: Simon McVittie <smcv@collabora.com> | set_policy | 497ebde11325333cf8e1c0e4eeec185a55fb6ace | salt | iptables.py | 14 | 46 | https://github.com/saltstack/salt.git | 9 | 281 | 0 | 83 | 491 | Python | {
"docstring": "\n .. versionadded:: 2014.1.0\n\n Sets the default policy for iptables firewall tables\n\n table\n The table that owns the chain that should be modified\n\n family\n Networking family, either ipv4 or ipv6\n\n policy\n The requested table policy\n\n save\n If set to a true value, the new iptables rules for the given family\n will be saved to a file. See the ``append`` state for more details.\n\n ",
"language": "en",
"n_whitespaces": 119,
"n_words": 62,
"vocab_size": 45
} | def set_policy(name, table="filter", family="ipv4", **kwargs):
ret = {"name": name, "changes": {}, "result": None, "comment": ""}
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
if (
__salt__["iptables.get_policy"](table, kwargs["chain"], family)
== kwargs["policy"]
):
ret["result"] = True
ret[
"comment"
] = "iptables default policy for chain {} on table {} for {} already set to {}".format(
kwargs["chain"], table, family, kwargs["policy"]
)
return ret
if __opts__["test"]:
ret["comment"] = (
"iptables default policy for chain {} on table {} for {} needs to be set"
" to {}".format(kwargs["chain"], table, family, kwargs["policy"])
)
return ret
if not __salt__["iptables.set_policy"](
table, kwargs["chain"], kwargs["policy"], family
):
ret["changes"] = {"locale": name}
ret["result"] = True
ret["comment"] = "Set default policy for {} to {} family {}".format(
kwargs["chain"], kwargs["policy"], family
)
if "save" in kwargs and kwargs["save"]:
if kwargs["save"] is not True:
filename = kwargs["save"]
else:
filename = None
__salt__["iptables.save"](filename=filename, family=family)
ret[
"comment"
] = "Set and saved default policy for {} to {} family {}".format(
kwargs["chain"], kwargs["policy"], family
)
return ret
else:
ret["result"] = False
ret["comment"] = "Failed to set iptables default policy"
return ret
|
|
117,938 | 321,839 | 99 | tests/end2end/fixtures/quteprocess.py | 20 | 12 | def _after_start(self):
delay = self.request.config.getoption('--qute-delay-start')
if delay:
with self.disable_capturing():
print(f"- waiting {delay}ms for quteprocess "
f"(PID: {self.proc.processId()})...")
time. | quteprocess: Add --qute-delay-start
Allows for some rudimentary debugging of subprocesses. | _after_start | 496c14bc9e0afb6c6787a0a167a1cb623ce5e2ff | qutebrowser | quteprocess.py | 17 | 7 | https://github.com/qutebrowser/qutebrowser.git | 2 | 43 | 0 | 20 | 97 | Python | {
"docstring": "Wait before continuing if requested, e.g. for debugger attachment.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def _after_start(self):
delay = self.request.config.getoption('--qute-delay-start')
if delay:
with self.disable_capturing():
print(f"- waiting {delay}ms for quteprocess "
f"(PID: {self.proc.processId()})...")
time.sleep(delay / 1000)
|
|
54,326 | 216,018 | 192 | salt/modules/vault.py | 66 | 19 | def list_secrets(path, default=None):
if default is None:
default = CommandExecutionError
log.debug("Listing vault secret keys for %s in %s", __grains__["id"], path)
version2 = __utils__["vault.is_v2"](path)
if version2["v2"]:
path = version2["metadata"]
try:
url = "v1/{}".format(path)
response = __utils__["vault.make_request"]("LIST", url)
if response.status_code != 200:
response | Don't pass Error as default value in vault.py | list_secrets | 681ea37f212619266424f00173d0affa27002071 | salt | vault.py | 17 | 19 | https://github.com/saltstack/salt.git | 6 | 123 | 0 | 54 | 215 | Python | {
"docstring": "\n .. versionchanged:: 3001\n The ``default`` argument has been added. When the path or path/key\n combination is not found, an exception will be raised, unless a default\n is provided.\n\n List secret keys at the path in vault. The vault policy used must allow this.\n The path should end with a trailing slash.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' vault.list_secrets \"secret/my/\"\n ",
"language": "en",
"n_whitespaces": 111,
"n_words": 60,
"vocab_size": 52
} | def list_secrets(path, default=None):
if default is None:
default = CommandExecutionError
log.debug("Listing vault secret keys for %s in %s", __grains__["id"], path)
version2 = __utils__["vault.is_v2"](path)
if version2["v2"]:
path = version2["metadata"]
try:
url = "v1/{}".format(path)
response = __utils__["vault.make_request"]("LIST", url)
if response.status_code != 200:
response.raise_for_status()
return response.json()["data"]
except Exception as err: # pylint: disable=broad-except
if default is CommandExecutionError:
raise CommandExecutionError(
"Failed to list secrets! {}: {}".format(type(err).__name__, err)
)
return default
|
|
@RunIf(skip_windows=True, fairscale=True)
@mock.patch("pytorch_lightning.strategies.DDPShardedStrategy._wrap_optimizers", autospec=True)
@pytest.mark.parametrize(["params", "expected_buffer_size"], [(dict(), 0), (dict(reduce_buffer_size=128), 128)])
@pytest.mark.parametrize("num_nodes", [1, 2]) | 69,609 | 241,584 | 79 | tests/strategies/test_sharded_strategy.py | 49 | 29 | def test_custom_kwargs_sharded(tmpdir, cls):
strategy = cls(reduce_fp16=True)
strategy.model = Mock(spec=LightningModule)
strategy.model.trainer = Mock()
class_name = "sharded" if isinstance(strategy, DDPShardedStrategy) else "sharded_spawn"
with mock.patch(f"pytorch_lightning.strategies.{class_name}.ShardedDataParallel", autosp | Rename training plugin test files & names to strategy (#11303) | test_custom_kwargs_sharded | 650c710efacd633fa283955145342bb64063c883 | lightning | test_sharded_strategy.py | 12 | 10 | https://github.com/Lightning-AI/lightning.git | 2 | 83 | 1 | 42 | 257 | Python | {
"docstring": "Tests to ensure that if custom kwargs are passed, they are set correctly.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 12
} | def test_custom_kwargs_sharded(tmpdir, cls):
strategy = cls(reduce_fp16=True)
strategy.model = Mock(spec=LightningModule)
strategy.model.trainer = Mock()
class_name = "sharded" if isinstance(strategy, DDPShardedStrategy) else "sharded_spawn"
with mock.patch(f"pytorch_lightning.strategies.{class_name}.ShardedDataParallel", autospec=True) as mock_sharded:
strategy.configure_ddp()
args, kwargs = mock_sharded.call_args
assert "reduce_fp16" in kwargs
assert kwargs["reduce_fp16"]
@RunIf(skip_windows=True, fairscale=True)
@mock.patch("pytorch_lightning.strategies.DDPShardedStrategy._wrap_optimizers", autospec=True)
@pytest.mark.parametrize(["params", "expected_buffer_size"], [(dict(), 0), (dict(reduce_buffer_size=128), 128)])
@pytest.mark.parametrize("num_nodes", [1, 2]) |
"""
# This file was generated by 'versioneer.py' (0.21) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import | 47,323 | 195,608 | 170 | versioneer.py | 58 | 21 | def versions_from_parentdir(parentdir_prefix, root, verbose):
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parent | add auto tag | versions_from_parentdir | f0194812568c83585ff09488fe7f67df300938cc | rembg | versioneer.py | 15 | 14 | https://github.com/danielgatis/rembg.git | 4 | 106 | 1 | 51 | 199 | Python | {
"docstring": "Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. We will also support searching up\n two directory levels for an appropriately named parent directory\n \n# This file was generated by 'versioneer.py' (0.21) from\n# revision-control system data, or from the parent directory name of an\n# unpacked source archive. Distribution tarballs contain a pre-generated copy\n# of this file.\n\nimport json\n\nversion_json = # END VERSION_JSON\n\n",
"language": "en",
"n_whitespaces": 92,
"n_words": 84,
"vocab_size": 62
} | def versions_from_parentdir(parentdir_prefix, root, verbose):
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY =
%s
|
50,354 | 203,405 | 1,211 | django/contrib/admin/options.py | 248 | 51 | def response_add(self, request, obj, post_url_continue=None):
opts = obj._meta
preserved_filters = self.get_preserved_filters(request)
obj_url = reverse(
"admin:%s_%s_change" % (opts.app_label, opts.model_name),
args=(quote(obj.pk),),
current_app=self.admin_site.name,
)
# Add a link to the object's change form if the user can edit the obj.
if self.has_change_permission(request, obj):
obj_repr = format_html('<a href="{}">{}</a>', urlquote(obj_url), obj)
else:
obj_repr = str(obj)
msg_dict = {
"name": opts.verbose_name,
"obj": obj_repr,
}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
if to_field:
attr = str(to_field)
else:
attr = obj._meta.pk.attname
value = obj.serializable_value(attr)
popup_response_data = json.dumps(
{
"value": str(value),
"obj": str(obj),
}
)
return TemplateResponse(
request,
self.popup_response_template
or [
"admin/%s/%s/popup_response.html"
% (opt | Refs #33476 -- Reformatted code with Black. | response_add | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | options.py | 15 | 77 | https://github.com/django/django.git | 12 | 410 | 0 | 152 | 665 | Python | {
"docstring": "\n Determine the HttpResponse for the add_view stage.\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 7,
"vocab_size": 6
} | def response_add(self, request, obj, post_url_continue=None):
opts = obj._meta
preserved_filters = self.get_preserved_filters(request)
obj_url = reverse(
"admin:%s_%s_change" % (opts.app_label, opts.model_name),
args=(quote(obj.pk),),
current_app=self.admin_site.name,
)
# Add a link to the object's change form if the user can edit the obj.
if self.has_change_permission(request, obj):
obj_repr = format_html('<a href="{}">{}</a>', urlquote(obj_url), obj)
else:
obj_repr = str(obj)
msg_dict = {
"name": opts.verbose_name,
"obj": obj_repr,
}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
if to_field:
attr = str(to_field)
else:
attr = obj._meta.pk.attname
value = obj.serializable_value(attr)
popup_response_data = json.dumps(
{
"value": str(value),
"obj": str(obj),
}
)
return TemplateResponse(
request,
self.popup_response_template
or [
"admin/%s/%s/popup_response.html"
% (opts.app_label, opts.model_name),
"admin/%s/popup_response.html" % opts.app_label,
"admin/popup_response.html",
],
{
"popup_response_data": popup_response_data,
},
)
elif "_continue" in request.POST or (
# Redirecting after "Save as new".
"_saveasnew" in request.POST
and self.save_as_continue
and self.has_change_permission(request, obj)
):
msg = _("The {name} “{obj}” was added successfully.")
if self.has_change_permission(request, obj):
msg += " " + _("You may edit it again below.")
self.message_user(request, format_html(msg, **msg_dict), messages.SUCCESS)
if post_url_continue is None:
post_url_continue = obj_url
post_url_continue = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts},
post_url_continue,
)
return HttpResponseRedirect(post_url_continue)
elif "_addanother" in request.POST:
msg = format_html(
_(
"The {name} “{obj}” was added successfully. You may add another {name} below."
),
**msg_dict,
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, redirect_url
)
return HttpResponseRedirect(redirect_url)
else:
msg = format_html(
_("The {name} “{obj}” was added successfully."), **msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_add(request, obj)
|
|
117,110 | 320,280 | 61 | src/paperless_mail/tests/test_parsers.py | 26 | 8 | def test_tika_parse_unreachable(self):
html = '<html><he | add test comments | test_tika_parse_unreachable | 4aa318598fd0dc6c5d4e08dd2a13e7bf614511ec | paperless-ngx | test_parsers.py | 9 | 4 | https://github.com/paperless-ngx/paperless-ngx.git | 1 | 30 | 0 | 25 | 54 | Python | {
"docstring": "\n GIVEN:\n - Fresh start\n WHEN:\n - tika parsing is called but tika is not available\n THEN:\n - a ParseError Exception is thrown\n ",
"language": "en",
"n_whitespaces": 84,
"n_words": 22,
"vocab_size": 17
} | def test_tika_parse_unreachable(self):
html = '<html><head><meta http-equiv="content-type" content="text/html; charset=UTF-8"></head><body><p>Some Text</p></body></html>'
# Check if exception is raised when Tika cannot be reached.
self.parser.tika_server = ""
self.assertRaises(ParseError, self.parser.tika_parse, html)
|
|
43,543 | 181,757 | 17 | tests/tpot_tests.py | 8 | 6 | def test_read_config_file_2():
tpot_obj = TPOTRegressor()
assert_raises(ValueError, tpot | Revert "Deployed 7ccda9a with MkDocs version: 1.3.0"
This reverts commit bd9629c40e01241766197119b581a99409b07068. | test_read_config_file_2 | 388616b6247ca4ea8de4e2f340d6206aee523541 | tpot | tpot_tests.py | 8 | 3 | https://github.com/EpistasisLab/tpot.git | 1 | 20 | 0 | 8 | 37 | Python | {
"docstring": "Assert that _read_config_file rasies ValueError with wrong dictionary format",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def test_read_config_file_2():
tpot_obj = TPOTRegressor()
assert_raises(ValueError, tpot_obj._read_config_file, "tests/test_config.py.bad")
|
|
78,295 | 266,105 | 70 | netbox/netbox/staging.py | 28 | 17 | def pre_delete_handler(self, sender, instance, **kwargs):
key = self.get_key_for_instance(instance)
object_type = instance._meta.verbose_name
# Delete an existing object
logger.debug(f"[{self.branch}] Staging deletion of {object_type} {instance} (PK: {instance.pk})")
self.queue[key] = (ChangeActionChoices.ACTION_DELETE, Non | Closes #10851: New staging mechanism (#10890)
* WIP
* Convert checkout() context manager to a class
* Misc cleanup
* Drop unique constraint from Change model
* Extend staging tests
* Misc cleanup
* Incorporate M2M changes
* Don't cancel wipe out creation records when an object is deleted
* Rename Change to StagedChange
* Add documentation for change staging | pre_delete_handler | a5308ea28e851a4ddb65a4e7ca2297b641e5891f | netbox | staging.py | 10 | 5 | https://github.com/netbox-community/netbox.git | 1 | 49 | 0 | 26 | 100 | Python | {
"docstring": "\n Hooks to the pre_delete signal when a branch is active to queue delete actions.\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 14,
"vocab_size": 13
} | def pre_delete_handler(self, sender, instance, **kwargs):
key = self.get_key_for_instance(instance)
object_type = instance._meta.verbose_name
# Delete an existing object
logger.debug(f"[{self.branch}] Staging deletion of {object_type} {instance} (PK: {instance.pk})")
self.queue[key] = (ChangeActionChoices.ACTION_DELETE, None)
|
|
28,421 | 127,346 | 98 | python/ray/serve/experimental/gradio_visualize_graph.py | 42 | 11 | def _reset_state(self):
self.cache = {}
| [serve] Visualize Deployment Graph with Gradio (#27897) | _reset_state | 4c970cc88247f7cfa7351297b8b5050f2372742e | ray | gradio_visualize_graph.py | 8 | 6 | https://github.com/ray-project/ray.git | 1 | 48 | 0 | 27 | 79 | Python | {
"docstring": "Resets state for each new RayServeHandle representing a new DAG.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def _reset_state(self):
self.cache = {}
self.resolved_nodes = 0
self.finished_last_inference = True
# maps DAGNode uuid to unique instance of a gradio block
self.node_to_block: Dict[DAGNode, Any] = {}
# maps InputAttributeNodes to unique instance of interactive gradio block
self.input_key_to_blocks: Dict[int, Any] = {}
|
|
7,318 | 40,109 | 20 | dash/testing/browser.py | 6 | 6 | def find_element(self, selector):
| :hocho: deprecated find_element(s)_by_css_selector | find_element | 5dfa6b0782803cb0635119ee1dcf8775dd76c8a7 | dash | browser.py | 8 | 2 | https://github.com/plotly/dash.git | 1 | 21 | 0 | 6 | 34 | Python | {
"docstring": "find_element returns the first found element by the css `selector`\n shortcut to `driver.find_element(By.CSS_SELECTOR, ...)`.",
"language": "en",
"n_whitespaces": 20,
"n_words": 14,
"vocab_size": 13
} | def find_element(self, selector):
return self.driver.find_element(By.CSS_SELECTOR, selector)
|
|
26,265 | 118,518 | 20 | lib/tests/streamlit/caching/memo_test.py | 6 | 5 | def test_bad_persist_value(self):
with self.assertRaises(StreamlitAPIException) as e:
| st.memo/singleton: cache-specific clear() functionality (#4184)
Gives `@st.memo` and `@st.singleton` a per-cache `clear()` function, similar to Python's [functools.lru_cache API](https://docs.python.org/3/library/functools.html#functools.lru_cache).
You can use it like this:
```python
@st.experimental_memo
def foo(val):
return val
foo(1), foo(2), foo(3)
foo.clear() # Clear foo's cache *only*
```
(This PR also bundles in a few very minor cleanups and missing docstrings for memo + singleton). | test_bad_persist_value | b7f417f86ed4ca12c522d8ae5c147f932ffc7d40 | streamlit | memo_test.py | 10 | 8 | https://github.com/streamlit/streamlit.git | 1 | 41 | 0 | 6 | 32 | Python | {
"docstring": "Throw an error if an invalid value is passed to 'persist'.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 10
} | def test_bad_persist_value(self):
with self.assertRaises(StreamlitAPIException) as e:
|
|
42,847 | 178,863 | 2,090 | nuitka/OptionParsing.py | 859 | 33 | def _getDataFileTagsOptionHelp():
return % ", ".join(
"'%s' (%s)" % d for d in data_files_tags
)
data_file_tags_option = data_group.add_option(
"--data-file-tags",
action="append",
dest="data_file_tags",
metavar="DATA_TAGS",
default=[],
)
parser.add_option_group(data_group)
execute_group = OptionGroup(parser, "Immediate execution after compilation")
execute_group.add_option(
"--run",
action="store_true",
dest="immediate_execution",
default=is_nuitka_run,
help=
% ("on" if is_nuitka_run else "off"),
)
execute_group.add_option(
"--debugger",
"--gdb",
action="store_true",
dest="debugger",
default=False,
help=,
)
execute_group.add_option(
"--execute-with-pythonpath",
action="store_true",
dest="keep_pythonpath",
default=False,
help=,
)
parser.add_option_group(execute_group)
dump_group = OptionGroup(parser, "Dump options for internal tree")
dump_group.add_option(
"--xml",
action="store_true",
dest="dump_xml",
default=False,
help="Dump the final result of optimization as XML, then exit.",
)
parser.add_option_group(dump_group)
codegen_group = OptionGroup(parser, "Code generation choices")
codegen_group.add_option(
"--disable-bytecode-cache",
action="store_true",
dest="disable_bytecode_cache",
default=False,
help=,
)
codegen_group.add_option(
"--full-compat",
action="store_false",
dest="improved",
default=True,
help=,
)
codegen_group.add_option(
"--file-reference-choice",
action="store",
dest="file_reference_mode",
metavar="MODE",
choices=("original", "runtime", "frozen"),
default=None,
help=,
)
codegen_group.add_option(
"--module-name-choice",
action="store",
dest="module_name_mode",
metavar="MODE",
choices=("original", "runtime"),
default=None,
help=,
)
parser.add_option_group(codegen_group)
output_group = OptionGroup(parser, "Output choices")
output_group.add_option(
"-o",
action="store",
dest="output_filename",
metavar="FILENAME",
default=None,
help=
% "<program_name>"
+ (".exe" if getOS() == "Windows" else ".bin"),
)
output_group.add_option(
"--output-dir",
action="store",
dest="output_dir",
metavar="DIRECTORY",
default="",
help=,
)
output_group.add_option(
"--remove-output",
action="store_true",
dest="remove_build",
default=False,
help=,
)
output_group.add_option(
"--no-pyi-file",
action="store_false",
dest="pyi_file",
default=True,
help=,
)
parser.add_option_group(output_group)
debug_group = OptionGroup(parser, "Debug features")
debug_group.add_option(
"--debug",
action="store_true",
dest="debug",
default=False,
help=,
)
debug_group.add_option(
"--unstripped",
action="store_true",
dest="unstripped",
default=False,
help=,
)
debug_group.add_option(
"--profile",
action="store_true",
dest="profile",
default=False,
help=,
)
debug_group.add_option(
"--internal-graph",
action="store_true",
dest="graph",
default=False,
help=,
)
debug_group.add_option(
"--trace-execution",
action="store_true",
dest="trace_execution",
default=False,
help=,
)
debug_group.add_option(
"--recompile-c-only",
action="store_true",
dest="recompile_c_only",
default=False,
help=,
)
debug_group.add_option(
"--generate-c-only",
action="store_true",
dest="generate_c_only",
default=False,
help=,
)
debug_group.add_option(
"--experimental",
action="append",
dest="experimental",
metavar="FLAG",
default=[],
help=,
)
debug_group.add_option(
"--explain-imports",
action="store_true",
dest="explain_imports",
default=False,
help=SUPPRESS_HELP,
)
debug_group.add_option(
"--low-memory",
action="store_true",
dest="low_memory",
default=False,
help=,
)
if os.name == "nt":
debug_group.add_option(
"--disable-dll-dependency-cache",
action="store_true",
dest="no_dependency_cache",
default=False,
help=,
)
debug_group.add_option(
"--force-dll-dependency-cache-update",
action="store_true",
dest="update_dependency_cache",
default=False,
help=,
)
# This is for testing framework, "coverage.py" hates to loose the process. And
# we can use it to make sure it's not done unknowingly.
parser.add_option(
"--must-not-re-execute",
action="store_false",
dest="allow_reexecute",
default=True,
help=SUPPRESS_HELP,
)
parser.add_option_group(debug_group)
c_compiler_group = OptionGroup(parser, "Backend C compiler choice")
c_compiler_group.add_option(
"--clang",
action="store_true",
dest="clang",
default=False,
help=,
)
c_compiler_group.add_option(
"--mingw64",
action="store_true",
dest="mingw64",
default=False,
help=,
)
c_compiler_group.add_option(
"--msvc",
action="store",
dest="msvc_version",
default=None,
help=,
)
c_compiler_group.add_option(
"-j",
"--jobs",
action="store",
dest="jobs",
metavar="N",
default=None,
help=,
)
c_compiler_group.add_option(
"--lto",
action="store",
dest="lto",
metavar="choice",
default="auto",
choices=("yes", "no", "auto"),
help=,
)
c_compiler_group.add_option(
"--static-libpython",
action="store",
dest="static_libpython",
metavar="choice",
default="auto",
choices=("yes", "no", "auto"),
help=,
)
c_compiler_group.add_option(
"--disable-ccache",
action="store_true",
dest="disable_ccache",
default=False,
help=,
)
parser.add_option_group(c_compiler_group)
pgo_group = OptionGroup(parser, "PGO compilation choices")
pgo_group.add_option(
"--pgo",
action="store_true",
dest="is_c_pgo",
default=False,
help=,
)
pgo_group.add_option(
"--pgo-python",
action="store_true",
dest="is_python_pgo",
default=False,
help=SUPPRESS_HELP,
)
pgo_group.add_option(
"--pgo-python-input",
action="store",
dest="python_pgo_input",
default=None,
help=SUPPRESS_HELP,
)
pgo_group.add_option(
"--pgo-python-policy-unused-module",
action="store",
dest="python_pgo_policy_unused_module",
choices=("include", "exclude", "bytecode"),
default="include",
help=SUPPRESS_HELP,
)
pgo_group.add_option(
"--pgo-args",
action="store",
dest="pgo_args",
default="",
help=,
)
pgo_group.add_option(
"--pgo-executable",
action="store",
dest="pgo_executable",
default=None,
help=,
)
parser.add_option_group(pgo_group)
tracing_group = OptionGroup(parser, "Tracing features")
tracing_group.add_option(
"--quiet",
action="store_true",
dest="quiet",
default=False,
help=,
)
tracing_group.add_option(
"--show-scons",
action="store_true",
dest="show_scons",
default=False,
help=,
)
tracing_group.add_option(
"--show-progress",
action="store_true",
dest="show_progress",
default=False,
help=,
)
tracing_group.add_option(
"--no-progressbar",
action="store_false",
dest="progress_bar",
default=True,
help=,
)
tracing_group.add_option(
"--show-memory",
action="store_true",
dest="show_memory",
default=False,
help=,
)
tracing_group.add_option(
"--show-modules",
action="store_true",
dest="show_inclusion",
default=False,
help=,
)
tracing_group.add_option(
"--show-modules-output",
action="store",
dest="show_inclusion_output",
metavar="PATH",
default=None,
help=,
)
tracing_group.add_option(
"--report",
action="store",
dest="compilation_report_filename",
default=None,
help=,
)
tracing_group.add_option(
"--verbose",
action="store_true",
dest="verbose",
default=False,
help=,
)
tracing_group.add_option(
"--verbose-output",
action="store",
dest="verbose_output",
metavar="PATH",
default=None,
help=,
)
parser.add_option_group(tracing_group)
windows_group = OptionGroup(parser, "Windows specific controls")
windows_group.add_option(
"--windows-dependency-tool",
action="store",
dest="dependency_tool",
default=None,
help=SUPPRESS_HELP,
)
windows_group.add_option(
"--windows-disable-console",
action="store_true",
dest="disable_console",
default=False,
help=,
)
windows_group.add_option(
"--windows-icon-from-ico",
action="append",
dest="icon_path",
metavar="ICON_PATH",
default=[],
help=,
)
windows_group.add_option(
"--windows-icon-from-exe",
action="store",
dest="icon_exe_path",
metavar="ICON_EXE_PATH",
default=None,
help="Copy executable icons from this existing executable (Windows only).",
)
windows_group.add_option(
"--onefile-windows-splash-screen-image",
action="store",
dest="splash_screen_image",
default=None,
help=,
)
windows_group.add_option(
"--windows-uac-admin",
action="store_true",
dest="windows_uac_admin",
metavar="WINDOWS_UAC_ADMIN",
default=False,
help="Request Windows User Control, to grant admin rights on execution. (Windows only). Defaults to off.",
)
windows_group.add_option(
"--windows-uac-uiaccess",
action="store_true",
dest="windows_uac_uiaccess",
metavar="WINDOWS_UAC_UIACCESS",
default=False,
help=,
)
windows_group.add_option(
"--windows-company-name",
action="store",
dest="windows_company_name",
metavar="WINDOWS_COMPANY_NAME",
default=None,
help=,
)
windows_group.add_option(
"--windows-product-name",
action="store",
dest="windows_product_name",
metavar="WINDOWS_PRODUCT_NAME",
default=None,
help=,
)
windows_group.add_option(
"--windows-file-version",
action="store",
dest="windows_file_version",
metavar="WINDOWS_FILE_VERSION",
default=None,
help=,
)
windows_group.add_option(
"--windows-product-version",
action="store",
dest="windows_product_version",
metavar="WINDOWS_PRODUCT_VERSION",
default=None,
help=,
)
windows_group.add_option(
"--windows-file-description",
action="store",
dest="windows_ | Plugins: Added ability to provide data file tags | _getDataFileTagsOptionHelp | e940705671f341139487d79b0eb0b6b9104f0a71 | Nuitka | OptionParsing.py | 12 | 9 | https://github.com/Nuitka/Nuitka.git | 2 | 19 | 0 | 399 | 3,971 | Python | {
"docstring": "\\\nFor included data files, special handlings can be chosen. With the\ncommercial plugins, e.g. files can be included directly in the\nbinary. The list is completed by some plugins. With the current\nlist of plugins, these are available: %s.\nThe default is empty.\\\nExecute immediately the created binary (or import the compiled module).\nDefaults to %s.\\\nExecute inside a debugger, e.g. \"gdb\" or \"lldb\" to automatically get a stack trace.\nDefaults to off.\\\nWhen immediately executing the created binary (--execute), don't reset\nPYTHONPATH. When all modules are successfully included, you ought to not need\nPYTHONPATH anymore.\\\nDo not reuse dependency analysis results for modules, esp. from standard library,\nthat are included as bytecode.\\\nEnforce absolute compatibility with CPython. Do not even allow minor\ndeviations from CPython behavior, e.g. not having better tracebacks\nor exception messages which are not really incompatible, but only\ndifferent. This is intended for tests only and should not be used\nfor normal use.\\\nSelect what value \"__file__\" is going to be. With \"runtime\" (default for\nstandalone binary mode and module mode), the created binaries and modules,\nuse the location of themselves to deduct the value of \"__file__\". Included\npackages pretend to be in directories below that location. This allows you\nto include data files in deployments. If you merely seek acceleration, it's\nbetter for you to use the \"original\" value, where the source files location\nwill be used. With \"frozen\" a notation \"<frozen module_name>\" is used. For\ncompatibility reasons, the \"__file__\" value will always have \".py\" suffix\nindependent of what it really is.\\\nSelect what value \"__name__\" and \"__package__\" are going to be. With \"runtime\"\n(default for module mode), the created module, it uses the parent package to\ndeduct the value of \"__package__\", to be fully compatible. This allows for more\noptimization to happen, but normally any package can be loaded into another one,\nbut this will raise an import error when it detects that with \"original\" mode.\\\nSpecify how the executable should be named. For extension modules there is no\nchoice, also not for standalone mode and using it will be an error. This may\ninclude path information that needs to exist though. Defaults to '%s' on this\nplatform.\n\\\nSpecify where intermediate and final output files should be put. The DIRECTORY\nwill be populated with C files, object files, etc.\nDefaults to current directory.\n\\\nRemoves the build directory after producing the module or exe file.\nDefaults to off.\\\nDo not create a \".pyi\" file for extension modules created by Nuitka. This is\nused to detect implicit imports.\nDefaults to off.\\\nExecuting all self checks possible to find errors in Nuitka, do not use for\nproduction. Defaults to off.\\\nKeep debug info in the resulting object file for better debugger interaction.\nDefaults to off.\\\nEnable vmprof based profiling of time spent. Not working currently. Defaults to off.\\\nCreate graph of optimization process internals, do not use for whole programs, but only\nfor small test cases. Defaults to off.\\\nTraced execution output, output the line of code before executing it.\nDefaults to off.\\\nThis is not incremental compilation, but for Nuitka development only. Takes\nexisting files and simply compile them as C again. Allows compiling edited\nC files for quick debugging changes to the generated source, e.g. 
to see if\ncode is passed by, values output, etc, Defaults to off. Depends on compiling\nPython source to determine which files it should look at.\\\nGenerate only C source code, and do not compile it to binary or module. This\nis for debugging and code coverage analysis that doesn't waste CPU. Defaults to\noff. Do not think you can use this directly.\\\nUse features declared as 'experimental'. May have no effect if no experimental\nfeatures are present in the code. Uses secret tags (check source) per\nexperimented feature.\\\nAttempt to use less memory, by forking less C compilation jobs and using\noptions that use less memory. For use on embedded machines. Use this in\ncase of out of memory problems. Defaults to off.\\\nDisable the dependency walker cache. Will result in much longer times to create\nthe distribution folder, but might be used in case the cache is suspect to cause\nerrors.\n\\\nFor an update of the dependency walker cache. Will result in much longer times\nto create the distribution folder, but might be used in case the cache is suspect\nto cause errors or known to need an update.\n\\\nEnforce the use of clang. On Windows this requires a working Visual\nStudio version to piggy back on. Defaults to off.\\\nEnforce the use of MinGW64 on Windows. Defaults to off.\\\nEnforce the use of specific MSVC version on Windows. Allowed values\nare e.g. \"14.3\" (MSVC 2022) and other MSVC version numbers, specify\n\"list\" for a list of installed compilers, or use \"latest\".\n\nDefaults to latest MSVC being used if installed, otherwise MinGW64\nis used.\\\nSpecify the allowed number of parallel C compiler jobs. Defaults to the\nsystem CPU count.\\\nUse link time optimizations (MSVC, gcc, clang). Allowed values are\n\"yes\", \"no\", and \"auto\" (when it's known to work). Defaults to\n\"auto\".\\\nUse static link library of Python. Allowed values are \"yes\", \"no\",\nand \"auto\" (when it's known to work). Defaults to \"auto\".\\\nDo not attempt to use ccache (gcc, clang, etc.) or clcache (MSVC, clangcl).\\\nEnables C level profile guided optimization (PGO), by executing a dedicated build first\nfor a profiling run, and then using the result to feedback into the C compilation.\nNote: This is experimental and not working with standalone modes of Nuitka yet.\nDefaults to off.\\\nArguments to be passed in case of profile guided optimization. These are passed to the special\nbuilt executable during the PGO profiling run. Default empty.\\\nCommand to execute when collecting profile information. Use this only, if you need to\nlaunch it through a script that prepares it to run. Default use created program.\\\nDisable all information outputs, but show warnings.\nDefaults to off.\\\nOperate Scons in non-quiet mode, showing the executed commands.\nDefaults to off.Provide progress information and statistics.\nDefaults to off.Disable progress bar outputs (if tqdm is installed).\nDefaults to off.Provide memory information and statistics.\nDefaults to off.\\\nProvide information for included modules and DLLs\nDefaults to off.\\\nWhere to output --show-modules, should be a filename. Default is standard output.\\\nReport module inclusion in an XML output file. Default is off.\\\nOutput details of actions taken, esp. in optimizations. Can become a lot.\nDefaults to off.\\\nWhere to output --verbose, should be a filename. Default is standard output.\\\nWhen compiling for Windows, disable the console window. Defaults to off.\\\nAdd executable icon. 
Can be given multiple times for different resolutions\nor files with multiple icons inside. In the later case, you may also suffix\nwith #<n> where n is an integer index starting from 1, specifying a specific\nicon to be included, and all others to be ignored.\\\nWhen compiling for Windows and onefile, show this while loading the application. Defaults to off.\\\nRequest Windows User Control, to enforce running from a few folders only, remote\ndesktop access. (Windows only). Defaults to off.\\\nName of the company to use in Windows Version information.\n\nOne of file or product version is required, when a version resource needs to be\nadded, e.g. to specify product name, or company name. Defaults to unused.\\\nName of the product to use in Windows Version information. Defaults to base\nfilename of the binary.\\\nFile version to use in Windows Version information. Must be a sequence of\nup to 4 numbers, e.g. 1.0.0.0, only this format is allowed.\nOne of file or product version is required, when a version resource needs to be\nadded, e.g. to specify product name, or company name. Defaults to unused.\\\nProduct version to use in Windows Version information. Must be a sequence of\nup to 4 numbers, e.g. 1.0.0.0, only this format is allowed.\nOne of file or product version is required, when a version resource needs to be\nadded, e.g. to specify product name, or company name. Defaults to unused.\\\nDescription of the file use in Windows Version information.\n\nOne of file or product version is required, when a version resource needs to be\nadded, e.g. to specify product name, or company name. Defaults to nonsense.\\\nUse this as a temporary folder. Defaults to '%TEMP%\\\\onefile_%PID%_%TIME%', i.e. system temporary directory.\\\nForce standard output of the program to go to this location. Useful for programs with\ndisabled console and programs using the Windows Services Plugin of Nuitka. Defaults\nto not active, use e.g. '%PROGRAM%.out.txt', i.e. file near your program.\\\nForce standard error of the program to go to this location. Useful for programs with\ndisabled console and programs using the Windows Services Plugin of Nuitka. Defaults\nto not active, use e.g. '%PROGRAM%.err.txt', i.e. file near your program.\\\nWhat architectures is this to supposed to run on. Default and limit\nis what the running Python allows for. Default is \"native\" which is\nthe architecture the Python is run with.\\\nWhen compiling for macOS, disable the console window and create a GUI\napplication. Defaults to off.\\\nWhen compiling for macOS, create a bundle rather than a plain binary\napplication. Currently experimental and incomplete. Currently this\nis the only way to unlock disabling of console.Defaults to off.\\\nName of the application to use for macOS signing. Follow com.yourcompany.appname naming\nresults for best results, as these have to be globally unique, and will grant protected\nAPI accesses.\\\nName of the product to use in macOS bundle information. Defaults to base\nfilename of the binary.\\\nProduct version to use in macOS bundle information. Defaults to 1.0 if\nnot given.\\\nEnabled plugins. Must be plug-in names. Use --plugin-list to query the\nfull list and exit. Default empty.\\\nDisabled plugins. Must be plug-in names. Use --plugin-list to query the\nfull list and exit. 
Default empty.\\\nPlugins can detect if they might be used, and the you can disable the warning\nvia \"--disable-plugin=plugin-that-warned\", or you can use this option to disable\nthe mechanism entirely, which also speeds up compilation slightly of course as\nthis detection code is run in vain once you are certain of which plugins to\nuse. Defaults to off.\\\nShow list of all available plugins and exit. Defaults to off.\\\nWrite source changes to original Python files. Use with care. May need\npermissions, best for use in a virtualenv to debug if plugin code\nchanges work with standard Python or to benefit from bloat removal\neven with pure Python. Default False.",
"language": "en",
"n_whitespaces": 1563,
"n_words": 1740,
"vocab_size": 655
} | def _getDataFileTagsOptionHelp():
return % ", ".join(
"'%s' (%s)" % d for d in data_files_tags
)
data_file_tags_option = data_group.add_option(
"--data-file-tags",
action="append",
dest="data_file_tags",
metavar="DATA_TAGS",
default=[],
)
parser.add_option_group(data_group)
execute_group = OptionGroup(parser, "Immediate execution after compilation")
execute_group.add_option(
"--run",
action="store_true",
dest="immediate_execution",
default=is_nuitka_run,
help=
% ("on" if is_nuitka_run else "off"),
)
execute_group.add_option(
"--debugger",
"--gdb",
action="store_true",
dest="debugger",
default=False,
help=,
)
execute_group.add_option(
"--execute-with-pythonpath",
action="store_true",
dest="keep_pythonpath",
default=False,
help=,
)
parser.add_option_group(execute_group)
dump_group = OptionGroup(parser, "Dump options for internal tree")
dump_group.add_option(
"--xml",
action="store_true",
dest="dump_xml",
default=False,
help="Dump the final result of optimization as XML, then exit.",
)
parser.add_option_group(dump_group)
codegen_group = OptionGroup(parser, "Code generation choices")
codegen_group.add_option(
"--disable-bytecode-cache",
action="store_true",
dest="disable_bytecode_cache",
default=False,
help=,
)
codegen_group.add_option(
"--full-compat",
action="store_false",
dest="improved",
default=True,
help=,
)
codegen_group.add_option(
"--file-reference-choice",
action="store",
dest="file_reference_mode",
metavar="MODE",
choices=("original", "runtime", "frozen"),
default=None,
help=,
)
codegen_group.add_option(
"--module-name-choice",
action="store",
dest="module_name_mode",
metavar="MODE",
choices=("original", "runtime"),
default=None,
help=,
)
parser.add_option_group(codegen_group)
output_group = OptionGroup(parser, "Output choices")
output_group.add_option(
"-o",
action="store",
dest="output_filename",
metavar="FILENAME",
default=None,
help=
% "<program_name>"
+ (".exe" if getOS() == "Windows" else ".bin"),
)
output_group.add_option(
"--output-dir",
action="store",
dest="output_dir",
metavar="DIRECTORY",
default="",
help=,
)
output_group.add_option(
"--remove-output",
action="store_true",
dest="remove_build",
default=False,
help=,
)
output_group.add_option(
"--no-pyi-file",
action="store_false",
dest="pyi_file",
default=True,
help=,
)
parser.add_option_group(output_group)
debug_group = OptionGroup(parser, "Debug features")
debug_group.add_option(
"--debug",
action="store_true",
dest="debug",
default=False,
help=,
)
debug_group.add_option(
"--unstripped",
action="store_true",
dest="unstripped",
default=False,
help=,
)
debug_group.add_option(
"--profile",
action="store_true",
dest="profile",
default=False,
help=,
)
debug_group.add_option(
"--internal-graph",
action="store_true",
dest="graph",
default=False,
help=,
)
debug_group.add_option(
"--trace-execution",
action="store_true",
dest="trace_execution",
default=False,
help=,
)
debug_group.add_option(
"--recompile-c-only",
action="store_true",
dest="recompile_c_only",
default=False,
help=,
)
debug_group.add_option(
"--generate-c-only",
action="store_true",
dest="generate_c_only",
default=False,
help=,
)
debug_group.add_option(
"--experimental",
action="append",
dest="experimental",
metavar="FLAG",
default=[],
help=,
)
debug_group.add_option(
"--explain-imports",
action="store_true",
dest="explain_imports",
default=False,
help=SUPPRESS_HELP,
)
debug_group.add_option(
"--low-memory",
action="store_true",
dest="low_memory",
default=False,
help=,
)
if os.name == "nt":
debug_group.add_option(
"--disable-dll-dependency-cache",
action="store_true",
dest="no_dependency_cache",
default=False,
help=,
)
debug_group.add_option(
"--force-dll-dependency-cache-update",
action="store_true",
dest="update_dependency_cache",
default=False,
help=,
)
# This is for the testing framework; "coverage.py" hates to lose the process. And
# we can use it to make sure it's not done unknowingly.
parser.add_option(
"--must-not-re-execute",
action="store_false",
dest="allow_reexecute",
default=True,
help=SUPPRESS_HELP,
)
parser.add_option_group(debug_group)
c_compiler_group = OptionGroup(parser, "Backend C compiler choice")
c_compiler_group.add_option(
"--clang",
action="store_true",
dest="clang",
default=False,
help=,
)
c_compiler_group.add_option(
"--mingw64",
action="store_true",
dest="mingw64",
default=False,
help=,
)
c_compiler_group.add_option(
"--msvc",
action="store",
dest="msvc_version",
default=None,
help=,
)
c_compiler_group.add_option(
"-j",
"--jobs",
action="store",
dest="jobs",
metavar="N",
default=None,
help=,
)
c_compiler_group.add_option(
"--lto",
action="store",
dest="lto",
metavar="choice",
default="auto",
choices=("yes", "no", "auto"),
help=,
)
c_compiler_group.add_option(
"--static-libpython",
action="store",
dest="static_libpython",
metavar="choice",
default="auto",
choices=("yes", "no", "auto"),
help=,
)
c_compiler_group.add_option(
"--disable-ccache",
action="store_true",
dest="disable_ccache",
default=False,
help=,
)
parser.add_option_group(c_compiler_group)
pgo_group = OptionGroup(parser, "PGO compilation choices")
pgo_group.add_option(
"--pgo",
action="store_true",
dest="is_c_pgo",
default=False,
help=,
)
pgo_group.add_option(
"--pgo-python",
action="store_true",
dest="is_python_pgo",
default=False,
help=SUPPRESS_HELP,
)
pgo_group.add_option(
"--pgo-python-input",
action="store",
dest="python_pgo_input",
default=None,
help=SUPPRESS_HELP,
)
pgo_group.add_option(
"--pgo-python-policy-unused-module",
action="store",
dest="python_pgo_policy_unused_module",
choices=("include", "exclude", "bytecode"),
default="include",
help=SUPPRESS_HELP,
)
pgo_group.add_option(
"--pgo-args",
action="store",
dest="pgo_args",
default="",
help=,
)
pgo_group.add_option(
"--pgo-executable",
action="store",
dest="pgo_executable",
default=None,
help=,
)
parser.add_option_group(pgo_group)
tracing_group = OptionGroup(parser, "Tracing features")
tracing_group.add_option(
"--quiet",
action="store_true",
dest="quiet",
default=False,
help=,
)
tracing_group.add_option(
"--show-scons",
action="store_true",
dest="show_scons",
default=False,
help=,
)
tracing_group.add_option(
"--show-progress",
action="store_true",
dest="show_progress",
default=False,
help=,
)
tracing_group.add_option(
"--no-progressbar",
action="store_false",
dest="progress_bar",
default=True,
help=,
)
tracing_group.add_option(
"--show-memory",
action="store_true",
dest="show_memory",
default=False,
help=,
)
tracing_group.add_option(
"--show-modules",
action="store_true",
dest="show_inclusion",
default=False,
help=,
)
tracing_group.add_option(
"--show-modules-output",
action="store",
dest="show_inclusion_output",
metavar="PATH",
default=None,
help=,
)
tracing_group.add_option(
"--report",
action="store",
dest="compilation_report_filename",
default=None,
help=,
)
tracing_group.add_option(
"--verbose",
action="store_true",
dest="verbose",
default=False,
help=,
)
tracing_group.add_option(
"--verbose-output",
action="store",
dest="verbose_output",
metavar="PATH",
default=None,
help=,
)
parser.add_option_group(tracing_group)
windows_group = OptionGroup(parser, "Windows specific controls")
windows_group.add_option(
"--windows-dependency-tool",
action="store",
dest="dependency_tool",
default=None,
help=SUPPRESS_HELP,
)
windows_group.add_option(
"--windows-disable-console",
action="store_true",
dest="disable_console",
default=False,
help=,
)
windows_group.add_option(
"--windows-icon-from-ico",
action="append",
dest="icon_path",
metavar="ICON_PATH",
default=[],
help=,
)
windows_group.add_option(
"--windows-icon-from-exe",
action="store",
dest="icon_exe_path",
metavar="ICON_EXE_PATH",
default=None,
help="Copy executable icons from this existing executable (Windows only).",
)
windows_group.add_option(
"--onefile-windows-splash-screen-image",
action="store",
dest="splash_screen_image",
default=None,
help=,
)
windows_group.add_option(
"--windows-uac-admin",
action="store_true",
dest="windows_uac_admin",
metavar="WINDOWS_UAC_ADMIN",
default=False,
help="Request Windows User Control, to grant admin rights on execution. (Windows only). Defaults to off.",
)
windows_group.add_option(
"--windows-uac-uiaccess",
action="store_true",
dest="windows_uac_uiaccess",
metavar="WINDOWS_UAC_UIACCESS",
default=False,
help=,
)
windows_group.add_option(
"--windows-company-name",
action="store",
dest="windows_company_name",
metavar="WINDOWS_COMPANY_NAME",
default=None,
help=,
)
windows_group.add_option(
"--windows-product-name",
action="store",
dest="windows_product_name",
metavar="WINDOWS_PRODUCT_NAME",
default=None,
help=,
)
windows_group.add_option(
"--windows-file-version",
action="store",
dest="windows_file_version",
metavar="WINDOWS_FILE_VERSION",
default=None,
help=,
)
windows_group.add_option(
"--windows-product-version",
action="store",
dest="windows_product_version",
metavar="WINDOWS_PRODUCT_VERSION",
default=None,
help=,
)
windows_group.add_option(
"--windows-file-description",
action="store",
dest="windows_file_description",
metavar="WINDOWS_FILE_DESCRIPTION",
default=None,
help=,
)
windows_group.add_option(
"--windows-onefile-tempdir",
"--onefile-tempdir",
action="store_true",
dest="is_onefile_tempdir",
metavar="ONEFILE_TEMPDIR",
default=False,
help=SUPPRESS_HELP,
)
windows_group.add_option(
"--windows-onefile-tempdir-spec",
"--onefile-tempdir-spec",
action="store",
dest="onefile_tempdir_spec",
metavar="ONEFILE_TEMPDIR_SPEC",
default=None,
help=,
)
windows_group.add_option(
"--windows-force-stdout-spec",
action="store",
dest="force_stdout_spec",
metavar="WINDOWS_FORCE_STDOUT_SPEC",
default=None,
help=,
)
windows_group.add_option(
"--windows-force-stderr-spec",
action="store",
dest="force_stderr_spec",
metavar="WINDOWS_FORCE_STDERR_SPEC",
default=None,
help=,
)
parser.add_option_group(windows_group)
macos_group = OptionGroup(parser, "macOS specific controls")
macos_group.add_option(
"--macos-target-arch",
action="store",
dest="macos_target_arch",
choices=("universal", "arm64", "x86_64"),
metavar="MACOS_TARGET_ARCH",
default=None,
help=,
)
macos_group.add_option(
"--macos-disable-console",
"--disable-console",
action="store_true",
dest="disable_console",
default=False,
help=,
)
macos_group.add_option(
"--macos-create-app-bundle",
action="store_true",
dest="macos_create_bundle",
default=False,
help=,
)
macos_group.add_option(
"--macos-onefile-icon",
action="append",
dest="icon_path",
metavar="ICON_PATH",
default=[],
help="Add executable icon for binary to use. Can be given only one time. Defaults to Python icon if available.",
)
macos_group.add_option(
"--macos-signed-app-name",
action="store",
dest="macos_signed_app_name",
metavar="MACOS_SIGNED_APP_NAME",
default=None,
help=,
)
macos_group.add_option(
"--macos-app-name",
action="store",
dest="macos_app_name",
metavar="MACOS_APP_NAME",
default=None,
help=,
)
macos_group.add_option(
"--macos-app-version",
action="store",
dest="macos_app_version",
metavar="MACOS_APP_VERSION",
default=None,
help=,
)
parser.add_option_group(macos_group)
linux_group = OptionGroup(parser, "Linux specific controls")
linux_group.add_option(
"--linux-onefile-icon",
action="append",
dest="icon_path",
metavar="ICON_PATH",
default=[],
help="Add executable icon for onefile binary to use. Can be given only one time. Defaults to Python icon if available.",
)
linux_group.add_option(
"--linux-onefile-compression",
action="store",
dest="appimage_compression",
choices=("gzip", "xz"),
metavar="COMPRESSION",
default="gzip",
help="Compression method to use for Linux onefile builds. Defaults to gzip for faster decompression",
)
parser.add_option_group(linux_group)
plugin_group = OptionGroup(parser, "Plugin control")
plugin_group.add_option(
"--enable-plugin",
"--plugin-enable",
action="append",
dest="plugins_enabled",
metavar="PLUGIN_NAME",
default=[],
help=,
)
plugin_group.add_option(
"--disable-plugin",
"--plugin-disable",
action="append",
dest="plugins_disabled",
metavar="PLUGIN_NAME",
default=[],
help=,
)
plugin_group.add_option(
"--plugin-no-detection",
action="store_false",
dest="detect_missing_plugins",
default=True,
help=,
)
plugin_group.add_option(
"--plugin-list",
action="store_true",
dest="list_plugins",
default=False,
help=,
)
parser.add_option_group(plugin_group)
plugin_group.add_option(
"--user-plugin",
action="append",
dest="user_plugins",
metavar="PATH",
default=[],
help="The file name of user plugin. Can be given multiple times. Default empty.",
)
plugin_group.add_option(
"--persist-source-changes",
action="store_true",
dest="persist_source_changes",
default=False,
help=,
)
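# Illustration (not Nuitka's actual entry point): a minimal, self-contained sketch of how the
# optparse OptionGroup declarations above behave when parsed. The option shown is taken from
# the declarations in this record; the parser object and sample argv are assumptions.
from optparse import OptionGroup, OptionParser

demo_parser = OptionParser()
demo_tracing = OptionGroup(demo_parser, "Tracing features")
demo_tracing.add_option(
    "--show-progress",
    action="store_true",
    dest="show_progress",
    default=False,
    help="Provide progress information and statistics. Defaults to off.",
)
demo_parser.add_option_group(demo_tracing)

# Grouped options parse exactly like top-level ones; the group only affects --help layout.
demo_options, demo_positional = demo_parser.parse_args(["--show-progress", "program.py"])
print(demo_options.show_progress, demo_positional)  # True ['program.py']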
|
|
15,867 | 72,267 | 236 | wagtail/admin/tests/test_workflows.py | 34 | 16 | def test_collect_workflow_action_data_post(self):
response = self.client.post(
reverse(
"wagtailadmin_pages:collect_workflow_action_data",
args=(
self.page | Reformat with black | test_collect_workflow_action_data_post | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_workflows.py | 15 | 18 | https://github.com/wagtail/wagtail.git | 1 | 94 | 0 | 27 | 159 | Python | {
"docstring": "\n This tests that a POST request to the collect_workflow_action_data view (for the approve action) returns a modal response with the validated data\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 22,
"vocab_size": 19
} | def test_collect_workflow_action_data_post(self):
response = self.client.post(
reverse(
"wagtailadmin_pages:collect_workflow_action_data",
args=(
self.page.id,
"approve",
self.page.current_workflow_task_state.id,
),
),
{"comment": "This is my comment"},
)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json["step"], "success")
self.assertEqual(
response_json["cleaned_data"], {"comment": "This is my comment"}
)
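# Standalone sketch of the same assert-on-JSON pattern without Django/Wagtail; the
# SimpleNamespace object below is only a stand-in for what self.client.post() returns.
import json
from types import SimpleNamespace

fake_response = SimpleNamespace(
    status_code=200,
    content=b'{"step": "success", "cleaned_data": {"comment": "This is my comment"}}',
)
assert fake_response.status_code == 200
payload = json.loads(fake_response.content)
assert payload["step"] == "success"
assert payload["cleaned_data"] == {"comment": "This is my comment"}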
|
|
25,581 | 115,841 | 31 | mindsdb/integrations/handlers/lightwood_handler/tests/test_lightwood_handler.py | 11 | 13 | def test_02_train_predictor(self):
query = f
response = self.handler.native_query(query)
self.assertTrue(response.type == | add more TS tests | test_02_train_predictor | 871793d4fbd99f454c0c1ff14db6ce3c385e656c | mindsdb | test_lightwood_handler.py | 9 | 8 | https://github.com/mindsdb/mindsdb.git | 1 | 31 | 0 | 10 | 69 | Python | {
"docstring": "\n CREATE PREDICTOR {self.test_model_1}\n FROM {PG_HANDLER_NAME} (SELECT * FROM {self.data_table_1} limit 50)\n PREDICT rental_price\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 13,
"vocab_size": 12
} | def test_02_train_predictor(self):
query = f
response = self.handler.native_query(query)
self.assertTrue(response.type == RESPONSE_TYPE.OK)
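# For illustration only: the query text elided from `query = f` above is given in this
# record's "docstring" field; with made-up model/handler/table names substituted, the
# f-string renders to SQL roughly like this.
model_name, handler_name, table_name = "rentals_model", "postgres_db", "demo.home_rentals"
rendered_query = f"""
    CREATE PREDICTOR {model_name}
    FROM {handler_name} (SELECT * FROM {table_name} limit 50)
    PREDICT rental_price
"""
print(rendered_query)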
|
|
@TRANSFORMS.register_module() | 70,426 | 244,549 | 123 | mmdet/datasets/pipelines/loading.py | 36 | 13 | def __call__(self, results):
img = results['img']
if self.to_float32:
img = img.astype(np.float32)
results['img_path'] = None
results['img'] = img
height, width = img.shape[:2]
results['height'] = height
results['width'] = width
results['ori_height'] = height
| Refacter Visualization | __call__ | c71a160c5193b92f6a4f56c113e96b63decf8354 | mmdetection | loading.py | 11 | 12 | https://github.com/open-mmlab/mmdetection.git | 2 | 78 | 1 | 22 | 147 | Python | {
"docstring": "Call functions to add image meta information.\n\n Args:\n results (dict): Result dict with Webcam read image in\n ``results['img']``.\n\n Returns:\n dict: The dict contains loaded image and meta information.\n ",
"language": "en",
"n_whitespaces": 86,
"n_words": 28,
"vocab_size": 23
} | def __call__(self, results):
img = results['img']
if self.to_float32:
img = img.astype(np.float32)
results['img_path'] = None
results['img'] = img
height, width = img.shape[:2]
results['height'] = height
results['width'] = width
results['ori_height'] = height
results['ori_width'] = width
return results
@TRANSFORMS.register_module() |
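# Self-contained restatement of the transform above on a dummy image. Only the thin class
# wrapper and the numpy input are assumptions here; the result-dict field names match the record.
import numpy as np

class _LoadFromWebcamSketch:
    def __init__(self, to_float32=False):
        self.to_float32 = to_float32

    def __call__(self, results):
        img = results['img']
        if self.to_float32:
            img = img.astype(np.float32)
        results['img_path'] = None
        results['img'] = img
        height, width = img.shape[:2]
        results['height'] = height
        results['width'] = width
        results['ori_height'] = height
        results['ori_width'] = width
        return results

out = _LoadFromWebcamSketch(to_float32=True)({'img': np.zeros((480, 640, 3), np.uint8)})
print(out['img'].dtype, out['height'], out['width'])  # float32 480 640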
29,488 | 131,233 | 381 | python/ray/tests/test_advanced_4.py | 114 | 20 | def test_jemalloc_env_var_propagate():
gcs_ptype = ray.ray_constants.PROCESS_TYPE_GCS_SERVER
expected = {}
actual = ray._private.services.propagate_jemalloc_env_var(
jemalloc_path="", jemalloc_conf="", jemalloc_comps=[], process_type=gcs_ptype
)
assert actual == expected
actual = ray._private.services.propagate_jemalloc_env_var(
jemalloc_path=None,
jemalloc_conf="a,b,c",
jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER],
process_type=gcs_ptype,
)
assert actual == expected
library_path = "/abc"
expected = {"LD_PRELOAD": library_path}
actual = ray._private.services.propagate_jemalloc_env_var(
jemalloc_path=library_path,
jemalloc_conf="",
jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER],
process_type=gcs_ptype,
)
assert actual == expect | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | test_jemalloc_env_var_propagate | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | test_advanced_4.py | 12 | 57 | https://github.com/ray-project/ray.git | 1 | 258 | 0 | 52 | 420 | Python | {
"docstring": "Test `propagate_jemalloc_env_var`\n If the shared library path is not specified,\n it should return an empty dict.\n \n When the shared library is specified\n \n When the malloc config is specified\n ",
"language": "en",
"n_whitespaces": 51,
"n_words": 28,
"vocab_size": 20
} | def test_jemalloc_env_var_propagate():
gcs_ptype = ray.ray_constants.PROCESS_TYPE_GCS_SERVER
expected = {}
actual = ray._private.services.propagate_jemalloc_env_var(
jemalloc_path="", jemalloc_conf="", jemalloc_comps=[], process_type=gcs_ptype
)
assert actual == expected
actual = ray._private.services.propagate_jemalloc_env_var(
jemalloc_path=None,
jemalloc_conf="a,b,c",
jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER],
process_type=gcs_ptype,
)
assert actual == expected
library_path = "/abc"
expected = {"LD_PRELOAD": library_path}
actual = ray._private.services.propagate_jemalloc_env_var(
jemalloc_path=library_path,
jemalloc_conf="",
jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER],
process_type=gcs_ptype,
)
assert actual == expected
# comps should be a list type.
with pytest.raises(AssertionError):
ray._private.services.propagate_jemalloc_env_var(
jemalloc_path=library_path,
jemalloc_conf="",
jemalloc_comps="ray.ray_constants.PROCESS_TYPE_GCS_SERVER,",
process_type=gcs_ptype,
)
# When comps don't match the process_type, it should return an empty dict.
expected = {}
actual = ray._private.services.propagate_jemalloc_env_var(
jemalloc_path=library_path,
jemalloc_conf="",
jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_RAYLET],
process_type=gcs_ptype,
)
assert actual == expected
library_path = "/abc"
malloc_conf = "a,b,c"
expected = {"LD_PRELOAD": library_path, "MALLOC_CONF": malloc_conf}
actual = ray._private.services.propagate_jemalloc_env_var(
jemalloc_path=library_path,
jemalloc_conf=malloc_conf,
jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER],
process_type=gcs_ptype,
)
assert actual == expected
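# Standalone sketch (not Ray's implementation) of the behaviour this test pins down:
# env vars are produced only when a library path is given and the process type is in comps.
def propagate_jemalloc_env_var_sketch(jemalloc_path, jemalloc_conf, jemalloc_comps, process_type):
    assert isinstance(jemalloc_comps, list), "comps must be a list"
    if not jemalloc_path or process_type not in jemalloc_comps:
        return {}
    env = {"LD_PRELOAD": jemalloc_path}
    if jemalloc_conf:
        env["MALLOC_CONF"] = jemalloc_conf
    return env

print(propagate_jemalloc_env_var_sketch("/abc", "a,b,c", ["gcs_server"], "gcs_server"))
# {'LD_PRELOAD': '/abc', 'MALLOC_CONF': 'a,b,c'}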
|
|
70,341 | 244,349 | 674 | mmdet/models/dense_heads/dense_test_mixins.py | 171 | 54 | def aug_test_bboxes(self, feats, img_metas, rescale=False):
# check with_nms argument
gb_sig = signature(self.get_results)
gb_args = [p.name for p in gb_sig.parameters.values()]
gbs_sig = signature(self._get_results_single)
gbs_args = [p.name for p in gbs_sig.parameters.values()]
assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \
f'{self.__class__.__name__}' \
' does not support test-time augmentation'
aug_bboxes = []
aug_scores = []
aug_labels = []
for x, img_meta in zip(feats, img_metas):
# only one image in the batch
outs = self.forward(x)
bbox_outputs = self.get_results(
*outs,
img_metas=img_meta,
cfg=self.test_cfg,
rescale=False,
with_nms=False)[0]
aug_bboxes.append(bbox_outputs.bboxes)
aug_scores.append(bbox_outputs.scores)
if len(bbox_outputs) >= 3:
aug_labels.append(bbox_outputs.labels)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = self.merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas)
merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None
if merged_bboxes.numel() == 0:
det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1)
return [
(det_bboxes, merged_labels),
]
det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores,
merged_labels, self.test_cfg.nms)
det_bboxes = det_bboxes[:self.test_cfg.max_per_img]
det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img]
if rescale:
| [Refactor] Refactor dense head outputs to InstanceResults. | aug_test_bboxes | 9a3bf7660e6ced54672741095f96df07919f9ba7 | mmdetection | dense_test_mixins.py | 14 | 46 | https://github.com/open-mmlab/mmdetection.git | 9 | 361 | 0 | 122 | 567 | Python | {
"docstring": "Test det bboxes with test time augmentation, can be applied in\n DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``,\n etc.\n\n Args:\n feats (list[Tensor]): the outer list indicates test-time\n augmentations and inner Tensor should have a shape NxCxHxW,\n which contains features for all images in the batch.\n img_metas (list[list[dict]]): the outer list indicates test-time\n augs (multiscale, flip, etc.) and the inner list indicates\n images in a batch. each dict has image information.\n rescale (bool, optional): Whether to rescale the results.\n Defaults to False.\n\n Returns:\n list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.\n The first item is ``bboxes`` with shape (n, 5),\n where 5 represent (tl_x, tl_y, br_x, br_y, score).\n The shape of the second tensor in the tuple is ``labels``\n with shape (n,). The length of list should always be 1.\n ",
"language": "en",
"n_whitespaces": 345,
"n_words": 131,
"vocab_size": 92
} | def aug_test_bboxes(self, feats, img_metas, rescale=False):
# check with_nms argument
gb_sig = signature(self.get_results)
gb_args = [p.name for p in gb_sig.parameters.values()]
gbs_sig = signature(self._get_results_single)
gbs_args = [p.name for p in gbs_sig.parameters.values()]
assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \
f'{self.__class__.__name__}' \
' does not support test-time augmentation'
aug_bboxes = []
aug_scores = []
aug_labels = []
for x, img_meta in zip(feats, img_metas):
# only one image in the batch
outs = self.forward(x)
bbox_outputs = self.get_results(
*outs,
img_metas=img_meta,
cfg=self.test_cfg,
rescale=False,
with_nms=False)[0]
aug_bboxes.append(bbox_outputs.bboxes)
aug_scores.append(bbox_outputs.scores)
if len(bbox_outputs) >= 3:
aug_labels.append(bbox_outputs.labels)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = self.merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas)
merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None
if merged_bboxes.numel() == 0:
det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1)
return [
(det_bboxes, merged_labels),
]
det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores,
merged_labels, self.test_cfg.nms)
det_bboxes = det_bboxes[:self.test_cfg.max_per_img]
det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img]
if rescale:
_det_bboxes = det_bboxes
else:
_det_bboxes = det_bboxes.clone()
_det_bboxes[:, :4] *= det_bboxes.new_tensor(
img_metas[0][0]['scale_factor'])
results = InstanceData()
results.bboxes = _det_bboxes[:, :4]
results.scores = _det_bboxes[:, 4]
results.labels = det_labels
return [results]
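# Toy numpy illustration (not the mmdet implementation, made-up numbers) of the final
# rescaling step above: box coordinates are multiplied element-wise by a per-axis scale
# factor while the score column is left untouched.
import numpy as np

toy_bboxes = np.array([[10.0, 20.0, 110.0, 220.0, 0.9]])  # x1, y1, x2, y2, score
toy_scale_factor = np.array([0.5, 0.5, 0.5, 0.5])          # assumed scale factor
toy_bboxes[:, :4] *= toy_scale_factor
print(toy_bboxes)  # [[  5.   10.   55.  110.    0.9]]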
|
|
71,136 | 246,294 | 32 | synapse/replication/tcp/protocol.py | 11 | 8 | def pauseProducing(self) -> None:
logger.info("[%s] Pause producing", self.id())
self.state = ConnectionStat | Add missing type hints to synapse.replication. (#11938) | pauseProducing | d0e78af35e519ff76bd23e786007f3e7130d90f7 | synapse | protocol.py | 9 | 10 | https://github.com/matrix-org/synapse.git | 1 | 27 | 0 | 11 | 48 | Python | {
"docstring": "This is called when both the kernel send buffer and the twisted\n tcp connection send buffers have become full.\n\n We don't actually have any control over those sizes, so we buffer some\n commands ourselves before knifing the connection due to the remote\n failing to keep up.\n ",
"language": "en",
"n_whitespaces": 81,
"n_words": 46,
"vocab_size": 38
} | def pauseProducing(self) -> None:
logger.info("[%s] Pause producing", self.id())
self.state = ConnectionStates.PAUSED
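# Toy pause/buffer/resume producer (not Synapse code) illustrating the idea in the docstring:
# while paused, outbound commands are queued instead of written to the saturated transport.
class TinyProducer:
    def __init__(self):
        self.paused = False
        self.pending = []

    def send(self, cmd):
        if self.paused:
            self.pending.append(cmd)   # buffer while the connection cannot keep up
        else:
            print("sent", cmd)

    def pauseProducing(self):
        self.paused = True

    def resumeProducing(self):
        self.paused = False
        while self.pending:
            print("sent", self.pending.pop(0))

producer = TinyProducer()
producer.pauseProducing()
producer.send("RDATA stream 1")   # queued, not sent
producer.resumeProducing()        # flushes the buffered command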
|
|
55,605 | 219,497 | 104 | python3.10.4/Lib/_collections_abc.py | 28 | 6 | def throw(self, typ, val=None, tb=None):
if val is None:
if tb is None:
raise typ
val = typ()
if tb is not None:
va | add python 3.10.4 for windows | throw | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _collections_abc.py | 10 | 8 | https://github.com/XX-net/XX-Net.git | 4 | 49 | 0 | 16 | 78 | Python | {
"docstring": "Raise an exception in the coroutine.\n Return next yielded value or raise StopIteration.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 13,
"vocab_size": 13
} | def throw(self, typ, val=None, tb=None):
if val is None:
if tb is None:
raise typ
val = typ()
if tb is not None:
val = val.with_traceback(tb)
raise val
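# Quick demonstration of the throw() contract on an ordinary generator: the exception is
# raised at the suspension point and the next yielded value is returned to the caller.
def _demo_gen():
    try:
        yield 1
    except ValueError:
        yield "recovered"

g = _demo_gen()
print(next(g))                 # 1
print(g.throw(ValueError()))   # recovered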
|
|
3,497 | 20,715 | 29 | pipenv/patched/notpip/_vendor/rich/console.py | 8 | 4 | def _exit_buffer(self) -> None:
se | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for pip==22.0.4
* Update patches
* exclude pyptoject.toml from black to see if that helps.
* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4 | _exit_buffer | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | console.py | 7 | 4 | https://github.com/pypa/pipenv.git | 1 | 18 | 0 | 8 | 33 | Python | {
"docstring": "Leave buffer context, and render content if required.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def _exit_buffer(self) -> None:
self._buffer_index -= 1
self._check_buffer()
|
|
116,193 | 317,626 | 27 | homeassistant/components/switchbot/coordinator.py | 11 | 2 | def flatten_sensors_data(sensor):
if "temp" in sensor["data"]:
sensor["data"]["temp | Add Switchbot hygrometers (#75325)
* Switchbot add support for hygrometers
* Update CODEOWNERS
* Improve debug
* Remove redundant mention to temp unit
* Adopt FlowResultType
* Modify SwitchBot data within coordinator
* Increase logging for switchbot sensor
* Revert "Increase logging for switchbot sensor"
This reverts commit d8b377429c562fc7044a3c98a6e976e4cd71847e.
Co-authored-by: J. Nick Koston <nick@koston.org> | flatten_sensors_data | 148f96351052b0a4ba31e7d15dad16d7639e1ceb | core | coordinator.py | 12 | 4 | https://github.com/home-assistant/core.git | 2 | 34 | 0 | 11 | 67 | Python | {
"docstring": "Deconstruct SwitchBot library temp object C/Fº readings from dictionary.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def flatten_sensors_data(sensor):
if "temp" in sensor["data"]:
sensor["data"]["temperature"] = sensor["data"]["temp"]["c"]
return sensor
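# Example input/output for the function above; the payload shape is assumed from the
# function body, and the Fahrenheit/humidity values are made up.
sample = {"data": {"temp": {"c": 21.5, "f": 70.7}, "humidity": 45}}
print(flatten_sensors_data(sample)["data"]["temperature"])  # 21.5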
|
|
82,378 | 278,120 | 696 | keras/feature_column/sequence_feature_column_test.py | 115 | 29 | def test_shared_embedding_column_with_non_sequence_categorical(self):
with tf.Graph().as_default():
vocabulary_size = 3
sparse_input_a = tf.compat.v1.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2),
)
sparse_input_b = tf.compat.v1.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2),
)
categorical_column_a = (
tf.feature_column.categorical_column_with_identity(
key="aaa", num_buckets=vocabulary_size
)
)
categorical_column_b = (
tf.feature_column.categorical_column_with_identity(
key="bbb", num_buckets=vocabulary_size
)
)
shared_embedding_columns = tf.feature_column.shared_embeddings(
[categorical_column_a, categorical_column_b], dimension=2
)
sequence_input_layer = ksfc.SequenceFeatures(
shared_embedding_columns
)
with self.assertRaisesRegex(
ValueError,
r"In embedding_column: aaa_shared_embedding\. "
r"categorical_column must "
r"be of type SequenceCategoricalColumn to use "
r"SequenceFeatures\.",
):
_, _ = sequence_input_layer(
{"aaa": sparse_input_a, | resolve line-too-long in feature_column | test_shared_embedding_column_with_non_sequence_categorical | 6fafb567af4e4d9f42974d0b6c55b18bc03e17eb | keras | sequence_feature_column_test.py | 15 | 39 | https://github.com/keras-team/keras.git | 1 | 218 | 0 | 64 | 332 | Python | {
"docstring": "Tests that error is raised for non-sequence shared embedding\n column.",
"language": "en",
"n_whitespaces": 16,
"n_words": 10,
"vocab_size": 10
} | def test_shared_embedding_column_with_non_sequence_categorical(self):
with tf.Graph().as_default():
vocabulary_size = 3
sparse_input_a = tf.compat.v1.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2),
)
sparse_input_b = tf.compat.v1.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2),
)
categorical_column_a = (
tf.feature_column.categorical_column_with_identity(
key="aaa", num_buckets=vocabulary_size
)
)
categorical_column_b = (
tf.feature_column.categorical_column_with_identity(
key="bbb", num_buckets=vocabulary_size
)
)
shared_embedding_columns = tf.feature_column.shared_embeddings(
[categorical_column_a, categorical_column_b], dimension=2
)
sequence_input_layer = ksfc.SequenceFeatures(
shared_embedding_columns
)
with self.assertRaisesRegex(
ValueError,
r"In embedding_column: aaa_shared_embedding\. "
r"categorical_column must "
r"be of type SequenceCategoricalColumn to use "
r"SequenceFeatures\.",
):
_, _ = sequence_input_layer(
{"aaa": sparse_input_a, "bbb": sparse_input_b}
)
|
|
85,472 | 285,879 | 320 | openbb_terminal/helper_funcs.py | 90 | 18 | def get_next_stock_market_days(last_stock_day, n_next_days) -> list:
n_days = 0
l_pred_days = []
years: list = []
holidays: list = []
if isinstance(last_stock_day, datetime):
while n_days < n_next_days:
last_stock_day += timedelta(ho | Forecasting Menu [Work in Progress] (#1933)
* Gave forecasting memory
* Fixed scripts, refactored
* FIxed poetry lock
* edge case check for forecast target
* Improved combine and load functionality
* Cleaned up translations
* Fixed issue with covariates
* Fixed issue checking covariates
* Another covariates check fix
* Ignored regr and linregr warnings
* Fixed covariate issues
* switched from forecasting to forecast
* Finished transition to forecast
* Can add entire dataset with one command
* Improved combine description
* Removed naming covariates
* Created new installation
* typo
* Make plot show dates if available
* Added better handling or users without the menu
* Removed unused file
* Fix
* Better handling for nontraditional datasets
* Fixed black and pylint
* Fixed tests
* Added darts install to main tests
* Working on darts with CI
* Added back test file
* Made large tables print better
* naive baseline
* typo
* Finished naive
* no dollar on prediction
* fixed positive MAPE bug
* quick refactoring
* Fixed two different args for same thing
* added extra patience
* linreg mape fix
* info fix
* Refactored API, bumped to Darts 0.21.0
* Added fixes
* Increased verbosity for wrong column
* Updated dependencies
* Hid warnings
* Fixed importing
* Fixed tests
* Fixed ugly seasonal plotting
* Fixed forecast line color
* Switched chart output to blue
* Simplified lambda_price_prediction_color
* fixed residuals
* Chnage
* Removed darts from CI per Chavi
* Added fixes to tests
* Added knnfix
* Fixed issue where n!= o
* Added changes
* Added changes
* Imrpoved forecast dash
* Added Theo notebook
* Added enhancements to dash
* Added notebook
* Added fix for jupyter lab
* Added debug stuff
* Change
* Updated docs
* Fixed formatting
* Fixed formatting
* Removed prints
* Filtered some info
* Added button to run model
* Improved api
* Added secret feautr (no peeking Martin)
* Cleaned code
* Fixed tests
* Added test fixes
* Added fixes
* Fixes
* FIxes for pres
* Remove bad tests
* Removed knn
* Fixed issues with removing mc
* doc for conda
* Added forecast improvements
* Added streamlit support
* Fixed issues
* fix expo with streamlit due to quantile()
* fixed performance issues with streamlit for now..
* clean up historical forecast with new trainer
* quick fix for regression trainer params
* Added fixes
* quick fix for other fix for regression trainer params
* table formatting for timestamp
* potential fix for inf in feature engineered datasets
* Basic working in new format
* dw
* Trying
* Fixed issues
* Improved graphing
* fixing trainer for LR and formatting
* doge and linting
* page break
* automatic cleaning of datasets
* automatic cleaning of datasets- fix
* Fixed forecast dates
* Made dashboard prettier
* Added fixes
* Added fixes
* Added options
* Fixed error
* remove caching
* adding in spinner
* Added vairable n_predict in streamlit
* Added mypy fix
* renaming and range change
* new index for n predict
* check positive float for window size
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* Update _index.md
* renaming
* reorg files
* Update _index.md
* hidden which command for versions
* Update _index.md
* Update _index.md
* which: ns parser
* hugo for: which
* hugo for: forecasting fix
* formatting black
* update stock controller test
* Lay groundwork for better residual plotting
* improved delete to allow for periods in title
* improved automatic cleaning of inf
* Added new API
* Added new API
* Added new API
* formatting for black
* Updated our testing CI
* Reverted changes
* Added forecast docs
* Fixed mypy issues
* Fixes tests
* Did some refactoring, added a report
* new api in streamlit
* Added integrated tests
* Update _index.md
* improved loading in custom dataset
* menu spacing
* installer fixes
* Added docs fixes
* Adding comments to test if commit working
* Fixed report
* naming conventions
* formatting
* removing unused var
* Made last report imporvements
* Update README.md
* Added fix
* Switched to warning
* Added fixes
* Added fixes
* Added fixes
* Added fixes
* Update economy av view test
* Remove forgotten print statement
* Update depencencies
* Added verbosity to pytest
* Added fixes
* Fixed pylint
* Fixed actions checkout
* Added fixes
Co-authored-by: colin99d <colin99delahunty@gmail.com>
Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>
Co-authored-by: James Simmons <simmonsj330@gmail.com>
Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>
Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com> | get_next_stock_market_days | 7fd72d9ee1e8847717195859bf6d608268a94e2f | OpenBBTerminal | helper_funcs.py | 14 | 24 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 7 | 133 | 0 | 52 | 225 | Python | {
"docstring": "Gets the next stock market day. Checks against weekends and holidays",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def get_next_stock_market_days(last_stock_day, n_next_days) -> list:
n_days = 0
l_pred_days = []
years: list = []
holidays: list = []
if isinstance(last_stock_day, datetime):
while n_days < n_next_days:
last_stock_day += timedelta(hours=24)
year = last_stock_day.date().year
if year not in years:
years.append(year)
holidays += us_market_holidays(year)
# Check if it is a weekend
if last_stock_day.date().weekday() > 4:
continue
# Check if it is a holiday
if last_stock_day.strftime("%Y-%m-%d") in holidays:
continue
# Otherwise stock market is open
n_days += 1
l_pred_days.append(last_stock_day)
else:
while n_days < n_next_days:
l_pred_days.append(last_stock_day + 1 + n_days)
n_days += 1
return l_pred_days
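# Usage sketch with an illustrative date (relies on the module's own imports, i.e.
# datetime/timedelta and us_market_holidays): starting from Friday 2023-12-22, the
# weekend and the Christmas holiday are skipped.
from datetime import datetime

next_days = get_next_stock_market_days(datetime(2023, 12, 22), n_next_days=3)
print([d.date().isoformat() for d in next_days])
# e.g. ['2023-12-26', '2023-12-27', '2023-12-28']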
|
|
@pytest.mark.asyncio
@pytest.mark.parametrize(
"failures",
[
[True, True, True, True, True],
[False, False, False, False, False],
[False, True, False, True, False],
[False, False, False, True, True],
[True, True, False, False, False],
],
) | 27,665 | 124,708 | 225 | dashboard/tests/test_state_head.py | 120 | 21 | async def test_max_concurrent_in_progress_functions(extra_req_num):
max_req = 10
a = A(max_num_call=max_req)
# Run more than allowed concurrent async functions should trigger rate limiting
res_arr = await asyncio.gather(
*[a.fn1() if i % 2 == 0 else a.fn2() for i in range(max_req + extra_req_num)]
)
fail_cnt = 0
for ok in res_arr:
fail_cnt += 0 if ok else 1
expected_fail_cnt = max(0, extra_req_num)
assert fail_cnt == expected_fail_cnt, (
f"{expected_fail_cnt} out of {max_req + extra_req_num} "
f"concurrent runs should fail with max={max_req} but {fail_cnt}."
)
assert a.num_call_ == 0, "All requests should be done"
@pytest.mark.asyncio
@pytest.mark.parametrize(
"failures",
[
[True, True, True, True, True],
[False, False, False, False, False],
[False, True, False, True, False],
[False, False, False, True, True],
[True, True, False, False, False],
],
) | [Core | State Observability] Implement API Server (Dashboard) HTTP Requests Throttling (#26257)
This is to limit the max number of HTTP requests the dashboard (API server) will accept before rejecting more requests.
This will make sure the observability requests do not overload the downstream systems (raylet/gcs) when delegating too many concurrent state observability requests to the cluster. | test_max_concurrent_in_progress_functions | 365ffe21e592589880e3116302705b5e08a5b81f | ray | test_state_head.py | 15 | 15 | https://github.com/ray-project/ray.git | 5 | 96 | 1 | 78 | 270 | Python | {
"docstring": "Test rate limiting for concurrent in-progress requests on StateHead",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | async def test_max_concurrent_in_progress_functions(extra_req_num):
max_req = 10
a = A(max_num_call=max_req)
# Run more than allowed concurrent async functions should trigger rate limiting
res_arr = await asyncio.gather(
*[a.fn1() if i % 2 == 0 else a.fn2() for i in range(max_req + extra_req_num)]
)
fail_cnt = 0
for ok in res_arr:
fail_cnt += 0 if ok else 1
expected_fail_cnt = max(0, extra_req_num)
assert fail_cnt == expected_fail_cnt, (
f"{expected_fail_cnt} out of {max_req + extra_req_num} "
f"concurrent runs should fail with max={max_req} but {fail_cnt}."
)
assert a.num_call_ == 0, "All requests should be done"
@pytest.mark.asyncio
@pytest.mark.parametrize(
"failures",
[
[True, True, True, True, True],
[False, False, False, False, False],
[False, True, False, True, False],
[False, False, False, True, True],
[True, True, False, False, False],
],
) |
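# The helper class `A` used by the test is not part of this record; the sketch below
# approximates what such a guard might look like: reject a call once the number of
# in-flight coroutines reaches max_num_call.
import asyncio

class RateLimitedSketch:
    def __init__(self, max_num_call):
        self.max_num_call = max_num_call
        self.num_call_ = 0

    async def fn1(self):
        if self.num_call_ >= self.max_num_call:
            return False                  # rate limited
        self.num_call_ += 1
        try:
            await asyncio.sleep(0.01)     # simulated request handling
            return True
        finally:
            self.num_call_ -= 1

    fn2 = fn1

async def _demo():
    a = RateLimitedSketch(max_num_call=10)
    results = await asyncio.gather(*[a.fn1() for _ in range(15)])
    print(sum(not ok for ok in results))  # 5 calls rejected; counter back to 0

asyncio.run(_demo())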
37,330 | 158,149 | 132 | d2l/mxnet.py | 87 | 6 | def transpose_qkv(X, num_heads):
# Shape of input `X`:
# (`batch_size`, no. | [PaddlePaddle] Merge master into Paddle branch (#1186)
* change 15.2 title in chinese version (#1109)
change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘
* 修改部分语义表述 (#1105)
* Update r0.17.5 (#1120)
* Bump versions in installation
* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)
* line 313: "bert.mall" -> "bert.small" (#1130)
* fix: update language as native reader (#1114)
* Fix the translation of "stride" (#1115)
* Update index.md (#1118)
修改部分语义表述
* Update self-attention-and-positional-encoding.md (#1133)
依照本书的翻译习惯,将pooling翻译成汇聚
* maybe a comment false (#1149)
* maybe a little false
* maybe a little false
* A minor bug in the rcnn section (Chinese edition) (#1148)
* Update bert.md (#1137)
一个笔误
# 假设batch_size=2,num_pred_positions=3
# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]
* Update calculus.md (#1135)
* fix typo in git documentation (#1106)
* fix: Update the Chinese translation in lr-scheduler.md (#1136)
* Update lr-scheduler.md
* Update chapter_optimization/lr-scheduler.md
Co-authored-by: goldmermaid <goldpiggy@berkeley.edu>
Co-authored-by: goldmermaid <goldpiggy@berkeley.edu>
* fix translation for kaggle-house-price.md (#1107)
* fix translation for kaggle-house-price.md
* fix translation for kaggle-house-price.md
Signed-off-by: sunhaizhou <haizhou.sun@smartmore.com>
* Update weight-decay.md (#1150)
* Update weight-decay.md
关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解
关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。
并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释
解释为何会增加复杂性以及为何需要细粒度工具。
* Update chapter_multilayer-perceptrons/weight-decay.md
yep
Co-authored-by: goldmermaid <goldpiggy@berkeley.edu>
* Update chapter_multilayer-perceptrons/weight-decay.md
yep
Co-authored-by: goldmermaid <goldpiggy@berkeley.edu>
Co-authored-by: goldmermaid <goldpiggy@berkeley.edu>
* Fix a spelling error (#1161)
* Update gru.md (#1152)
The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.
翻译错误
* Unify the function naming (#1113)
Unify naming of the function 'init_xavier()'.
* Update mlp-concise.md (#1166)
* Update mlp-concise.md
语句不通顺
* Update environment.md
语序异常
* Update config.ini
* fix the imprecise description (#1168)
Co-authored-by: yuande <yuande>
* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)
* Fix some typos. (#1163)
* Update batch-norm.md (#1170)
fixing typos u->x in article
* Update linear-regression.md (#1090)
We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that
原译文把who也直接翻译出来了。
* Update mlp.md (#1117)
* Update mlp.md
修改部分语义表述
* Update chapter_multilayer-perceptrons/mlp.md
Co-authored-by: goldmermaid <goldpiggy@berkeley.edu>
* Update chapter_multilayer-perceptrons/mlp.md
Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>
Co-authored-by: goldmermaid <goldpiggy@berkeley.edu>
* Correct a translation error. (#1091)
* Correct a translation error.
* Update chapter_computer-vision/image-augmentation.md
Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>
* Update aws.md (#1121)
* Update aws.md
* Update chapter_appendix-tools-for-deep-learning/aws.md
Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>
* Update image-augmentation.md (#1093)
* Update anchor.md (#1088)
fix a minor issue in code
* Update anchor.md
* Update image-augmentation.md
* fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087)
* Avoid `torch.meshgrid` user warning (#1174)
Avoids the following user warning:
```python
~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
```
* bump to 2.0.0-beta1
* Update sequence.md
* bump beta1 on readme
* Add latex code block background to config
* BLD: Bump python support version 3.9 (#1183)
* BLD: Bump python support version 3.9
* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4
* BLD: Bump torch and tensorflow
* Update Jenkinsfile
* Update chapter_installation/index.md
* Update chapter_installation/index.md
Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>
* Update config.ini
* Update INFO.md
* Update INFO.md
* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)
* resolve the conflicts
* revise from publisher (#1089)
* revise from publisher
* d2l api
* post_latex
* revise from publisher
* revise ch11
* Delete d2l-Copy1.bib
* clear cache
* rm d2lbook clear
* debug anchor
* keep original d2l doc
Co-authored-by: Ubuntu <ubuntu@ip-172-31-12-66.us-west-2.compute.internal>
Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>
Co-authored-by: Aston Zhang <asv325@gmail.com>
* 重复语句 (#1188)
Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>
* Improve expression for chapter_preliminaries/pandas.md (#1184)
* Update pandas.md
* Improve expression
* Improve expression
* Update chapter_preliminaries/pandas.md
Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>
* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)
* Improce expression
* Improve code comments
* Update chapter_preliminaries/linear-algebra.md
* Update chapter_preliminaries/linear-algebra.md
* Update chapter_preliminaries/linear-algebra.md
* Update chapter_preliminaries/linear-algebra.md
Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>
* Fix multibox_detection bugs
* Update d2l to 0.17.5 version
* restore older version
* Upgrade pandas
* change to python3.8
* Test warning log
* relocate warning log
* test logs filtering
* Update gru.md
* Add DeprecationWarning filter
* Test warning log
* Update attention mechanisms & computational performance
* Update multilayer perceptron& linear & convolution networks & computer vision
* Update recurrent&optimition&nlp pretraining & nlp applications
* ignore warnings
* Update index.md
* Update linear networks
* Update multilayer perceptrons&deep learning computation
* Update preliminaries
* Check and Add warning filter
* Update kaggle-cifar10.md
* Update object-detection-dataset.md
* Update ssd.md fcn.md
* Update hybridize.md
* Update hybridize.md
Signed-off-by: sunhaizhou <haizhou.sun@smartmore.com>
Co-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>
Co-authored-by: Xinwei Liu <xinzone@outlook.com>
Co-authored-by: Anirudh Dagar <anirudhdagar6@gmail.com>
Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>
Co-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>
Co-authored-by: gyro永不抽风 <1247006353@qq.com>
Co-authored-by: CanChengZheng <zcc550169544@163.com>
Co-authored-by: linlin <jajupmochi@gmail.com>
Co-authored-by: iuk <liukun0104@gmail.com>
Co-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>
Co-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>
Co-authored-by: Chiyuan Fu <fuchiyuan2019@outlook.com>
Co-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>
Co-authored-by: Haiker Sun <haizhou.uestc2011@gmail.com>
Co-authored-by: Ming Liu <akira.liu@njnu.edu.cn>
Co-authored-by: goldmermaid <goldpiggy@berkeley.edu>
Co-authored-by: silenceZheng66 <13754430639@163.com>
Co-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>
Co-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>
Co-authored-by: Krahets <krahets@163.com>
Co-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>
Co-authored-by: Jameson <miraclecome@gmail.com>
Co-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com>
Co-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>
Co-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>
Co-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>
Co-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>
Co-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>
Co-authored-by: VigourJiang <jiangfuqiang154@163.com>
Co-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>
Co-authored-by: LYF <27893441+liyufan@users.noreply.github.com>
Co-authored-by: Aston Zhang <asv325@gmail.com>
Co-authored-by: xiaotinghe <xiaotih@amazon.com>
Co-authored-by: Ubuntu <ubuntu@ip-172-31-12-66.us-west-2.compute.internal>
Co-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>
Co-authored-by: HinGwenWoong <peterhuang0323@qq.com>
Co-authored-by: Shuai Zhang <cheungdaven@gmail.com> | transpose_qkv | b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2 | d2l-zh | mxnet.py | 10 | 4 | https://github.com/d2l-ai/d2l-zh.git | 1 | 69 | 0 | 37 | 111 | Python | {
"docstring": "Transposition for parallel computation of multiple attention heads.\n\n Defined in :numref:`sec_multihead-attention`",
"language": "en",
"n_whitespaces": 13,
"n_words": 11,
"vocab_size": 11
} | def transpose_qkv(X, num_heads):
# Shape of input `X`:
# (`batch_size`, no. of queries or key-value pairs, `num_hiddens`).
# Shape of output `X`:
# (`batch_size`, no. of queries or key-value pairs, `num_heads`,
# `num_hiddens` / `num_heads`)
X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)
# Shape of output `X`:
# (`batch_size`, `num_heads`, no. of queries or key-value pairs,
# `num_hiddens` / `num_heads`)
X = X.transpose(0, 2, 1, 3)
# Shape of `output`:
# (`batch_size` * `num_heads`, no. of queries or key-value pairs,
# `num_hiddens` / `num_heads`)
return X.reshape(-1, X.shape[2], X.shape[3])
|
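A minimal NumPy sketch of what `transpose_qkv` above does to the shapes (an assumption: the d2l version runs on mxnet ndarrays, but the same `reshape`/`transpose` calls work on plain NumPy arrays):

```python
import numpy as np

batch_size, num_queries, num_hiddens, num_heads = 2, 4, 8, 2
X = np.zeros((batch_size, num_queries, num_hiddens))

# (2, 4, 8) -> (2, 4, 2, 4) -> (2, 2, 4, 4) -> (4, 4, 4)
X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)
X = X.transpose(0, 2, 1, 3)
out = X.reshape(-1, X.shape[2], X.shape[3])
assert out.shape == (batch_size * num_heads, num_queries, num_hiddens // num_heads)
```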
|
9,018 | 46,849 | 131 | airflow/models/taskinstance.py | 21 | 13 | def current_state(self, session=NEW_SESSION) -> str:
return (
session.query(TaskInstance.state)
.filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.ta | No need to load whole ti in current_state (#22764)
Co-authored-by: Jed Cunningham <66968678+jedcunningham@users.noreply.github.com>
Co-authored-by: Tzu-ping Chung <uranusjr@gmail.com> | current_state | 4eaf9bcddfb370222b4386b02975974bb253f614 | airflow | taskinstance.py | 13 | 17 | https://github.com/apache/airflow.git | 1 | 55 | 0 | 18 | 85 | Python | {
"docstring": "\n Get the very latest state from the database, if a session is passed,\n we use and looking up the state becomes part of the session, otherwise\n a new session is used.\n\n :param session: SQLAlchemy ORM Session\n ",
"language": "en",
"n_whitespaces": 72,
"n_words": 36,
"vocab_size": 29
} | def current_state(self, session=NEW_SESSION) -> str:
return (
session.query(TaskInstance.state)
.filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.run_id == self.run_id,
)
.scalar()
)
|
|
41,724 | 176,154 | 71 | networkx/generators/small.py | 28 | 5 | def house_graph(create_using=None):
description = [
"adjacencylist",
"House Graph",
5,
[[2, 3], | Docstrings for the small.py module (#5240)
* added description for the first 5 small graphs
* modified descriptions based on comment and added description for two more functions
* added docstrings to all the functions
* Minor touchups.
Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> | house_graph | dec723f072eb997a497a159dbe8674cd39999ee9 | networkx | small.py | 9 | 9 | https://github.com/networkx/networkx.git | 1 | 64 | 0 | 24 | 90 | Python | {
"docstring": "\n Returns the House graph (square with triangle on top)\n\n The house graph is a simple undirected graph with\n 5 nodes and 6 edges [1]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n House graph in the form of a square with a triangle on top\n\n References\n ----------\n .. [1] https://mathworld.wolfram.com/HouseGraph.html\n ",
"language": "en",
"n_whitespaces": 121,
"n_words": 68,
"vocab_size": 51
} | def house_graph(create_using=None):
description = [
"adjacencylist",
"House Graph",
5,
[[2, 3], [1, 4], [1, 4, 5], [2, 3, 5], [3, 4]],
]
G = make_small_undirected_graph(description, create_using)
return G
|
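A short usage sketch for the generator above, assuming the public `networkx.house_graph` wrapper:

```python
import networkx as nx

# Square with a triangle on top: 5 nodes, 6 edges.
G = nx.house_graph()
assert G.number_of_nodes() == 5
assert G.number_of_edges() == 6
```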
|
4,208 | 22,136 | 114 | pipenv/patched/pip/_vendor/requests/utils.py | 40 | 9 | def check_header_validity(header):
name, value = header
for part in header:
if type(part) not in HEADER_VALIDATORS:
raise InvalidHeader(
f"Header part ({part!r}) | Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir. | check_header_validity | cd5a9683be69c86c8f3adcd13385a9bc5db198ec | pipenv | utils.py | 16 | 10 | https://github.com/pypa/pipenv.git | 3 | 67 | 0 | 37 | 134 | Python | {
"docstring": "Verifies that header parts don't contain leading whitespace\n reserved characters, or return characters.\n\n :param header: tuple, in the format (name, value).\n ",
"language": "en",
"n_whitespaces": 30,
"n_words": 21,
"vocab_size": 21
} | def check_header_validity(header):
name, value = header
for part in header:
if type(part) not in HEADER_VALIDATORS:
raise InvalidHeader(
f"Header part ({part!r}) from {{{name!r}: {value!r}}} must be "
f"of type str or bytes, not {type(part)}"
)
_validate_header_part(name, "name", HEADER_VALIDATORS[type(name)][0])
_validate_header_part(value, "value", HEADER_VALIDATORS[type(value)][1])
|
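A hedged usage sketch for `check_header_validity`; the import path is an assumption, since this is an internal requests utility (shown here vendored inside pipenv):

```python
from requests.exceptions import InvalidHeader
from requests.utils import check_header_validity

check_header_validity(("X-Api-Key", "secret"))  # both parts are str: passes

try:
    check_header_validity(("X-Api-Key", 123))   # value is neither str nor bytes
except InvalidHeader as exc:
    print(exc)
```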
|
5,130 | 27,800 | 330 | saleor/graphql/order/tests/test_order.py | 129 | 45 | def test_orderline_query(staff_api_client, permission_manage_orders, fulfilled_order):
order = fulfilled_order
query =
line = order.lines.first()
metadata_key = "md key"
metadata_value = "md value"
line.store_value_in_private_metadata({metadata_key: metadata_value})
line.store_value_in_metadata({metadata_key: metadata_value})
line.save()
staff_api_client.user.user_permissions.add(permission_manage_orders)
response = staff_api_client.post_graphql(query)
content = get_graphql_content(response)
order_data = content["data"]["orders"]["edges"][0]["node"]
first_order_data_line = order_data["lines"][0]
variant_id = graphene.Node.to_global_id("ProductVariant", line.variant.pk)
assert first_order_data_line["thumbnail"] is None
assert first_order_data_line["variant"]["id"] == variant_id
assert first_order_data_line["quantity"] == line.quantity
assert first_order_data_line["unitPrice"]["currency"] == line.unit_price.currency
assert first_order_data_line["metadata"] == [
{"key": metadata_key, "value": metadata_value}
]
assert first_order_data_line["privateMetadata"] == [
{"key": metadata_key, "value": metadata_value}
]
expected_unit_price = Money(
amount=str(first_order_data_line["unitPrice" | Metadata added to checkout and order lines (#10040)
* Metadata added to checkout and order lines
* CHANGELOG.md update
* Missing tests added | test_orderline_query | a68553e1a55e3a1bd32826cdce294d27f74175e9 | saleor | test_order.py | 15 | 93 | https://github.com/saleor/saleor.git | 1 | 349 | 0 | 78 | 595 | Python | {
"docstring": "\n query OrdersQuery {\n orders(first: 1) {\n edges {\n node {\n lines {\n thumbnail(size: 540) {\n url\n }\n variant {\n id\n }\n quantity\n allocations {\n id\n quantity\n warehouse {\n id\n }\n }\n unitPrice {\n currency\n gross {\n amount\n }\n }\n totalPrice {\n currency\n gross {\n amount\n }\n }\n metadata {\n key\n value\n }\n privateMetadata {\n key\n value\n }\n }\n }\n }\n }\n }\n ",
"language": "en",
"n_whitespaces": 1222,
"n_words": 62,
"vocab_size": 26
} | def test_orderline_query(staff_api_client, permission_manage_orders, fulfilled_order):
order = fulfilled_order
query =
line = order.lines.first()
metadata_key = "md key"
metadata_value = "md value"
line.store_value_in_private_metadata({metadata_key: metadata_value})
line.store_value_in_metadata({metadata_key: metadata_value})
line.save()
staff_api_client.user.user_permissions.add(permission_manage_orders)
response = staff_api_client.post_graphql(query)
content = get_graphql_content(response)
order_data = content["data"]["orders"]["edges"][0]["node"]
first_order_data_line = order_data["lines"][0]
variant_id = graphene.Node.to_global_id("ProductVariant", line.variant.pk)
assert first_order_data_line["thumbnail"] is None
assert first_order_data_line["variant"]["id"] == variant_id
assert first_order_data_line["quantity"] == line.quantity
assert first_order_data_line["unitPrice"]["currency"] == line.unit_price.currency
assert first_order_data_line["metadata"] == [
{"key": metadata_key, "value": metadata_value}
]
assert first_order_data_line["privateMetadata"] == [
{"key": metadata_key, "value": metadata_value}
]
expected_unit_price = Money(
amount=str(first_order_data_line["unitPrice"]["gross"]["amount"]),
currency="USD",
)
assert first_order_data_line["totalPrice"]["currency"] == line.unit_price.currency
assert expected_unit_price == line.unit_price.gross
expected_total_price = Money(
amount=str(first_order_data_line["totalPrice"]["gross"]["amount"]),
currency="USD",
)
assert expected_total_price == line.unit_price.gross * line.quantity
allocation = line.allocations.first()
allocation_id = graphene.Node.to_global_id("Allocation", allocation.pk)
warehouse_id = graphene.Node.to_global_id(
"Warehouse", allocation.stock.warehouse.pk
)
assert first_order_data_line["allocations"] == [
{
"id": allocation_id,
"quantity": allocation.quantity_allocated,
"warehouse": {"id": warehouse_id},
}
]
|
|
52,612 | 209,122 | 370 | scapy/layers/inet.py | 132 | 28 | def in4_pseudoheader(proto, u, plen):
# type: (int, IP, int) -> bytes
if u.len is not None:
if u.ihl is None:
olen = sum(len(x) for x in u.options)
ihl = 5 + olen // 4 + (1 if olen % 4 else 0)
else:
ihl = u.ihl
ln = max(u.len - 4 * ihl, 0)
else:
ln = plen
# Filter out IPOption_LSRR and IPOption_SSRR
sr_options = [opt for opt in u.options if isinstance(opt, IPOption_LSRR) or
isinstance(opt, IPOption_SSRR)]
len_sr_options = len(sr_options)
if len_sr_options == 1 and len(sr_options[0].routers):
# The checksum must be computed using the final
# destination address
u.dst = sr_options[0].routers[-1]
elif len_sr_options > 1:
message = "Found %d Source Routing Options! "
message += "Falling back to IP.dst for checksum computation."
warning(message, len_sr_options)
return struct.pack("!4s4sHH",
inet_pton(socket.AF_IN | Support TCP-MD5 and TCP-AO (#3358)
Support TCP-MD5 and TCP-AO | in4_pseudoheader | 20ac1d00389d0735e6d8cd1347f0a53f478144ba | scapy | inet.py | 14 | 24 | https://github.com/secdev/scapy.git | 10 | 182 | 0 | 95 | 302 | Python | {
"docstring": "IPv4 Pseudo Header as defined in RFC793 as bytes\n\n :param proto: value of upper layer protocol\n :param u: IP layer instance\n :param plen: the length of the upper layer and payload\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 31,
"vocab_size": 23
} | def in4_pseudoheader(proto, u, plen):
# type: (int, IP, int) -> bytes
if u.len is not None:
if u.ihl is None:
olen = sum(len(x) for x in u.options)
ihl = 5 + olen // 4 + (1 if olen % 4 else 0)
else:
ihl = u.ihl
ln = max(u.len - 4 * ihl, 0)
else:
ln = plen
# Filter out IPOption_LSRR and IPOption_SSRR
sr_options = [opt for opt in u.options if isinstance(opt, IPOption_LSRR) or
isinstance(opt, IPOption_SSRR)]
len_sr_options = len(sr_options)
if len_sr_options == 1 and len(sr_options[0].routers):
# The checksum must be computed using the final
# destination address
u.dst = sr_options[0].routers[-1]
elif len_sr_options > 1:
message = "Found %d Source Routing Options! "
message += "Falling back to IP.dst for checksum computation."
warning(message, len_sr_options)
return struct.pack("!4s4sHH",
inet_pton(socket.AF_INET, u.src),
inet_pton(socket.AF_INET, u.dst),
proto,
ln)
|
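A rough sketch of building the 12-byte pseudo header for a TCP checksum (illustrative addresses; assumes the default `IP()` leaves `len` unset so `plen` is used directly):

```python
import socket
from scapy.layers.inet import IP, in4_pseudoheader

ip = IP(src="192.0.2.1", dst="192.0.2.2")
# src (4) + dst (4) + protocol (2) + upper-layer length (2) = 12 bytes
ph = in4_pseudoheader(socket.IPPROTO_TCP, ip, 20)
assert len(ph) == 12
```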
|
35,399 | 153,405 | 270 | modin/core/storage_formats/base/doc_utils.py | 91 | 23 | def doc_resample_fillna(method, refer_to, params=None, overwrite_template_params=False):
action = f"fill missing values in each group in | REFACTOR-#4093: Refactor base to be smaller (#4220)
Signed-off-by: jeffreykennethli <jkli@ponder.io> | doc_resample_fillna | be9d382e35a9b87565499c029056afe1ddce6f37 | modin | doc_utils.py | 13 | 20 | https://github.com/modin-project/modin.git | 3 | 70 | 0 | 62 | 256 | Python | {
"docstring": "\n Build decorator which adds docstring for the resample fillna query compiler method.\n\n Parameters\n ----------\n method : str\n Fillna method name.\n refer_to : str\n Method name in ``modin.pandas.resample.Resampler`` module to refer to for\n more information about parameters and output format.\n params : str, optional\n Method parameters in the NumPy docstyle format to substitute\n to the docstring template.\n overwrite_template_params : bool, default: False\n If `params` is specified indicates whether to overwrite method parameters in\n the docstring template or append then at the end.\n\n Returns\n -------\n callable\n \n Get {prop} for each {dt_type} value.\n {params}\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with the same shape as `self`, where each element is\n {prop} for the corresponding {dt_type} value.\n \n Perform {refer_to} operation on the underlying time-series data to the specified `freq`.\n\n Parameters\n ----------\n freq : str\n ambiguous : {{\"raise\", \"infer\", \"NaT\"}} or bool mask, default: \"raise\"\n nonexistent : {{\"raise\", \"shift_forward\", \"shift_backward\", \"NaT\"}} or timedelta, default: \"raise\"\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with performed {refer_to} operation on every element.\n \n Apply \"{refer_to}\" function to each string value in QueryCompiler.\n {params}\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing the result of execution of the \"{refer_to}\" function\n against each string element.\n ",
"language": "en",
"n_whitespaces": 376,
"n_words": 189,
"vocab_size": 113
} | def doc_resample_fillna(method, refer_to, params=None, overwrite_template_params=False):
action = f"fill missing values in each group independently using {method} method"
params_substitution = "limit : int\n"
if params:
params_substitution = (
params
if overwrite_template_params
else format_string(
"{params}\n{params_substitution}",
params=params,
params_substitution=params_substitution,
)
)
build_rules = "- QueryCompiler contains unsampled data with missing values filled."
return doc_resample(
action=action,
extra_params=params_substitution,
build_rules=build_rules,
refer_to=refer_to,
)
doc_dt = partial(
doc_qc_method,
template=,
one_column_method=True,
refer_to_module_name="Series.dt",
)
doc_dt_timestamp = partial(doc_dt, dt_type="datetime")
doc_dt_interval = partial(doc_dt, dt_type="interval")
doc_dt_period = partial(doc_dt, dt_type="period")
doc_dt_round = partial(
doc_qc_method,
template=,
one_column_method=True,
refer_to_module_name="Series.dt",
)
doc_str_method = partial(
doc_qc_method,
template=,
one_column_method=True,
refer_to_module_name="Series.str",
)
|
|
14,730 | 68,154 | 9 | erpnext/utilities/transaction_base.py | 19 | 9 | def delete_events(ref_type, ref_name):
events = (
frappe.db.sql_list(
,
(ref_type, ref_name),
)
or []
)
if events:
frappe.delete_doc("Event", events, for_reload=True)
| style: format code with black | delete_events | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | transaction_base.py | 11 | 18 | https://github.com/frappe/erpnext.git | 3 | 44 | 0 | 18 | 69 | Python | {
"docstring": " SELECT\n\t\t\tdistinct `tabEvent`.name\n\t\tfrom\n\t\t\t`tabEvent`, `tabEvent Participants`\n\t\twhere\n\t\t\t`tabEvent`.name = `tabEvent Participants`.parent\n\t\t\tand `tabEvent Participants`.reference_doctype = %s\n\t\t\tand `tabEvent Participants`.reference_docname = %s\n\t\t",
"language": "en",
"n_whitespaces": 15,
"n_words": 22,
"vocab_size": 14
} | def delete_events(ref_type, ref_name):
events = (
frappe.db.sql_list(
,
(ref_type, ref_name),
)
or []
)
if events:
frappe.delete_doc("Event", events, for_reload=True)
|
|
31,990 | 140,519 | 115 | python/ray/serve/deployment_state.py | 39 | 9 | def check_started(self) -> ReplicaStartupStatus:
status, version = self._actor.check_ready()
if status == ReplicaStartupStatus.SUCCEEDED:
# Re-assign Depl | Clean up docstyle in python modules and add LINT rule (#25272) | check_started | 905258dbc19753c81039f993477e7ab027960729 | ray | deployment_state.py | 11 | 14 | https://github.com/ray-project/ray.git | 3 | 39 | 0 | 30 | 67 | Python | {
"docstring": "Check if the replica has started. If so, transition to RUNNING.\n\n Should handle the case where the replica has already stopped.\n\n Returns:\n status: Most recent state of replica by\n querying actor obj ref\n ",
"language": "en",
"n_whitespaces": 80,
"n_words": 33,
"vocab_size": 28
} | def check_started(self) -> ReplicaStartupStatus:
status, version = self._actor.check_ready()
if status == ReplicaStartupStatus.SUCCEEDED:
# Re-assign DeploymentVersion if start / update / recover succeeded
# by reading re-computed version in RayServeReplica
if version is not None:
self._version = version
return status
|
|
110,202 | 311,537 | 164 | tests/components/homekit_controller/test_sensor.py | 44 | 15 | async def test_battery_low(hass, utcnow):
helper = await setup_test_component(
hass, create_battery_level_sensor, suffix="battery"
)
state = await helper.async_update( | Improve homekit_controller tests (#65266) | test_battery_low | 58b8c30221a6f6e5acbbe98b7e3298b03fb741f5 | core | test_sensor.py | 12 | 20 | https://github.com/home-assistant/core.git | 1 | 93 | 0 | 26 | 149 | Python | {
"docstring": "Test reading the state of a HomeKit battery's low state.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | async def test_battery_low(hass, utcnow):
helper = await setup_test_component(
hass, create_battery_level_sensor, suffix="battery"
)
state = await helper.async_update(
ServicesTypes.BATTERY_SERVICE,
{
CharacteristicsTypes.BATTERY_LEVEL: 1,
CharacteristicsTypes.STATUS_LO_BATT: 0,
},
)
assert state.attributes["icon"] == "mdi:battery-10"
state = await helper.async_update(
ServicesTypes.BATTERY_SERVICE,
{
CharacteristicsTypes.BATTERY_LEVEL: 1,
CharacteristicsTypes.STATUS_LO_BATT: 1,
},
)
assert state.attributes["icon"] == "mdi:battery-alert"
|
|
20,496 | 101,059 | 59 | lib/model/loss/perceptual_loss_plaid.py | 24 | 13 | def _hyab(self, y_true, y_pred):
delta = y_true | Add Flip Loss Function
- Add Flip for AMD and TF
- Split Perceptual Loss functions to own modules
- Fix allowed input shape for models
- Allow GUI tooltip to display at higher width | _hyab | 582c2ce40c11ef235dd3f9100f70e1e2832f8dd3 | faceswap | perceptual_loss_plaid.py | 14 | 5 | https://github.com/deepfakes/faceswap.git | 1 | 65 | 0 | 20 | 97 | Python | {
"docstring": " Compute the HyAB distance between true and predicted images.\n\n Parameters\n ----------\n y_true: :class:`plaidml.tile.Value`\n The ground truth batch of images in standard or Hunt-adjusted L*A*B* color space\n y_pred: :class:`plaidml.tile.Value`\n The predicted batch of images in in standard or Hunt-adjusted L*A*B* color space\n\n Returns\n -------\n :class:`plaidml.tile.Value`\n image tensor containing the per-pixel HyAB distances between true and predicted images\n ",
"language": "en",
"n_whitespaces": 146,
"n_words": 56,
"vocab_size": 34
} | def _hyab(self, y_true, y_pred):
delta = y_true - y_pred
root = K.sqrt(K.clip(K.pow(delta[..., 0:1], 2), self._epsilon, None))
delta_norm = frobenius_norm(delta[..., 1:3])
return root + delta_norm
|
|
7,862 | 43,199 | 231 | tests/cli/commands/test_db_command.py | 26 | 19 | def test_dry_run(self, run_cleanup_mock, dry_run_arg, expected):
args = self.parser.parse_args(
[
'db',
'clean',
'--clean-before-timestamp',
'2021-01-01',
*dry_run_arg,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dry_run=expected,
clean_before_ti | Don't rely on current ORM structure for db clean command (#23574)
For the DB clean command, by not relying on the ORM models, we will be able to use the command even when the metadatabase is not yet upgraded to the version of Airflow you have installed.
Additionally we archive all rows before deletion. | test_dry_run | 95bd6b71cc9f5da377e272707f7b68000d980939 | airflow | test_db_command.py | 11 | 19 | https://github.com/apache/airflow.git | 1 | 74 | 0 | 25 | 116 | Python | {
"docstring": "\n When tz included in the string then default timezone should not be used.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 13,
"vocab_size": 13
} | def test_dry_run(self, run_cleanup_mock, dry_run_arg, expected):
args = self.parser.parse_args(
[
'db',
'clean',
'--clean-before-timestamp',
'2021-01-01',
*dry_run_arg,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dry_run=expected,
clean_before_timestamp=pendulum.parse('2021-01-01 00:00:00Z'),
verbose=False,
confirm=True,
skip_archive=False,
)
|
|
107,978 | 309,272 | 11 | homeassistant/components/homekit/util.py | 5 | 5 | def async_dismiss_setup_message(hass, entry_id):
| Import persistent notification (part 3) (#63900) | async_dismiss_setup_message | 2eab3c8de1fd80a8f1456fd97389ca687c11ecb7 | core | util.py | 7 | 2 | https://github.com/home-assistant/core.git | 1 | 16 | 0 | 5 | 27 | Python | {
"docstring": "Dismiss persistent notification and remove QR code.",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | def async_dismiss_setup_message(hass, entry_id):
persistent_notification.async_dismiss(hass, entry_id)
|
|
50,294 | 203,309 | 139 | django/apps/registry.py | 33 | 16 | def get_containing_app_config(self, object_name):
self.check_apps_ready()
candidates = []
for app_config in self.app_configs.values():
if object_name.startswith(app_config.name):
subpath = object_name[len(app_config.name) :]
if subpath == "" or subpath[0] == ".":
candidates.append(app_config)
if candidates:
return sorted(candidates, key=lambd | Refs #33476 -- Reformatted code with Black. | get_containing_app_config | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | registry.py | 16 | 10 | https://github.com/django/django.git | 6 | 92 | 0 | 28 | 152 | Python | {
"docstring": "\n Look for an app config containing a given object.\n\n object_name is the dotted Python path to the object.\n\n Return the app config for the inner application in case of nesting.\n Return None if the object isn't in any registered app config.\n ",
"language": "en",
"n_whitespaces": 77,
"n_words": 41,
"vocab_size": 30
} | def get_containing_app_config(self, object_name):
self.check_apps_ready()
candidates = []
for app_config in self.app_configs.values():
if object_name.startswith(app_config.name):
subpath = object_name[len(app_config.name) :]
if subpath == "" or subpath[0] == ".":
candidates.append(app_config)
if candidates:
return sorted(candidates, key=lambda ac: -len(ac.name))[0]
|
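A sketch of how the registry method is typically called; the minimal settings configuration below is an assumption made only so the snippet runs standalone:

```python
import django
from django.conf import settings

settings.configure(INSTALLED_APPS=["django.contrib.contenttypes", "django.contrib.auth"])
django.setup()

from django.apps import apps

# Returns the AppConfig whose name is the longest dotted prefix of the object path.
app_config = apps.get_containing_app_config("django.contrib.auth.models.User")
print(app_config.label if app_config else None)  # "auth"
```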
|
82,926 | 279,328 | 158 | keras/engine/base_layer.py | 28 | 15 | def _track_variables(self, value):
for val in tf.nest.flatten(value):
if isinstance(val, tf.Variable):
self._track_variable(val)
elif tf_utils.is_extension_type(val):
# Manually expand extension types to track resource variables.
nested_vals = tf_utils. | Prepare keras for making ResourceVariables as CompositeTensors.
We are going to let ResourceVariable be a subclass of CompositeTensor. Changes in this CL are necessary to not break existing code.
Specifically, to track resource variables embedded in composite tensors, we will need to manually expand composite tensors layer by layer instead of relying on tf.nest.
Currently resource variables are atoms and considered to have the same structure as tensors, so we could have one branch be a resource variable and the other branch be a tensor. After making resource variables composite tensors, resource variables will be tf.nest sequences instead of atoms. To avoid the type spec mismatch, we replace resource variables with tf.nest atoms just for the purpose of tf.nest.assert_same_structure.
PiperOrigin-RevId: 464573876 | _track_variables | 102ab667f513956d89f55f2f9480b9cdc5372eef | keras | base_layer.py | 15 | 9 | https://github.com/keras-team/keras.git | 4 | 63 | 0 | 27 | 103 | Python | {
"docstring": "Tracks `Variable`s including `Variable`s in `CompositeTensor`s.",
"language": "en",
"n_whitespaces": 5,
"n_words": 6,
"vocab_size": 5
} | def _track_variables(self, value):
for val in tf.nest.flatten(value):
if isinstance(val, tf.Variable):
self._track_variable(val)
elif tf_utils.is_extension_type(val):
# Manually expand extension types to track resource variables.
nested_vals = tf_utils.type_spec_from_value(val)._to_components(
val
)
self._track_variables(nested_vals)
|
|
45,489 | 186,573 | 386 | certbot-apache/certbot_apache/_internal/configurator.py | 111 | 30 | def _create_vhost_v2(self, node):
addrs = set()
for param in node.parameters:
addr = obj.Addr.fromstring(param)
if addr:
addrs.add(addr)
is_ssl = False
# Exclusion to match the behavior in get_virtual_hosts_v2
sslengine = node.find_directives("SSLEngine", exclude=False)
if sslengine:
for directive in sslengine:
if directive.parameters[0].lower() == "on":
is_ssl = True
break
# "SSLEngine on" might be set outside of <VirtualHost>
# Treat vhosts with port 443 as ssl vhosts
for addr in addrs:
if addr.get_port() == "443":
is_ssl = True
enabled = apache_uti | Fully type certbot-nginx module (#9124)
* Work in progress
* Fix type
* Work in progress
* Work in progress
* Work in progress
* Work in progress
* Work in progress
* Oups.
* Fix typing in UnspacedList
* Fix logic
* Finish typing
* List certbot-nginx as fully typed in tox
* Fix lint
* Fix checks
* Organize imports
* Fix typing for Python 3.6
* Fix checks
* Fix lint
* Update certbot-nginx/certbot_nginx/_internal/configurator.py
Co-authored-by: alexzorin <alex@zor.io>
* Update certbot-nginx/certbot_nginx/_internal/configurator.py
Co-authored-by: alexzorin <alex@zor.io>
* Fix signature of deploy_cert regarding the installer interface
* Update certbot-nginx/certbot_nginx/_internal/obj.py
Co-authored-by: alexzorin <alex@zor.io>
* Fix types
* Update certbot-nginx/certbot_nginx/_internal/parser.py
Co-authored-by: alexzorin <alex@zor.io>
* Precise type
* Precise _coerce possible inputs/outputs
* Fix type
* Update certbot-nginx/certbot_nginx/_internal/http_01.py
Co-authored-by: ohemorange <ebportnoy@gmail.com>
* Fix type
* Remove an undesirable implementation.
* Fix type
Co-authored-by: alexzorin <alex@zor.io>
Co-authored-by: ohemorange <ebportnoy@gmail.com> | _create_vhost_v2 | 16aad35d31a887dab157f9d4f5e0fe9218d06064 | certbot | configurator.py | 14 | 25 | https://github.com/certbot/certbot.git | 9 | 159 | 0 | 76 | 259 | Python | {
"docstring": "Used by get_virtual_hosts_v2 to create vhost objects using ParserNode\n interfaces.\n :param interfaces.BlockNode node: The BlockNode object of VirtualHost block\n :returns: newly created vhost\n :rtype: :class:`~certbot_apache.obj.VirtualHost`\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 25,
"vocab_size": 24
} | def _create_vhost_v2(self, node):
addrs = set()
for param in node.parameters:
addr = obj.Addr.fromstring(param)
if addr:
addrs.add(addr)
is_ssl = False
# Exclusion to match the behavior in get_virtual_hosts_v2
sslengine = node.find_directives("SSLEngine", exclude=False)
if sslengine:
for directive in sslengine:
if directive.parameters[0].lower() == "on":
is_ssl = True
break
# "SSLEngine on" might be set outside of <VirtualHost>
# Treat vhosts with port 443 as ssl vhosts
for addr in addrs:
if addr.get_port() == "443":
is_ssl = True
enabled = apache_util.included_in_paths(node.filepath, self.parsed_paths)
macro = False
# Check if the VirtualHost is contained in a mod_macro block
if node.find_ancestors("Macro"):
macro = True
vhost = obj.VirtualHost(
node.filepath, None, addrs, is_ssl, enabled, modmacro=macro, node=node
)
self._populate_vhost_names_v2(vhost)
return vhost
|
|
13,203 | 63,204 | 710 | .venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py | 154 | 24 | def insert_on(self, path, loc=None, replace=False):
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
fo | upd; format | insert_on | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | __init__.py | 14 | 38 | https://github.com/jindongwang/transferlearning.git | 18 | 210 | 0 | 96 | 343 | Python | {
"docstring": "Ensure self.location is on path\n\n If replace=False (default):\n - If location is already in path anywhere, do nothing.\n - Else:\n - If it's an egg and its parent directory is on path,\n insert just ahead of the parent.\n - Else: add to the end of path.\n If replace=True:\n - If location is already on path anywhere (not eggs)\n or higher priority than its parent (eggs)\n do nothing.\n - Else:\n - If it's an egg and its parent directory is on path,\n insert just ahead of the parent,\n removing any lower-priority entries.\n - Else: add it to the front of path.\n ",
"language": "en",
"n_whitespaces": 288,
"n_words": 100,
"vocab_size": 50
} | def insert_on(self, path, loc=None, replace=False):
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if
# found and not replace
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
|
|
@add_start_docstrings(
"""
The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
""",
XGLM_START_DOCSTRING,
) | 6,060 | 33,107 | 187 | src/transformers/models/xglm/modeling_tf_xglm.py | 47 | 22 | def serving_output(self, output):
pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
cross_attns = (
tf.convert_to_tensor(output.cross_attentions)
if self.config.output_attent | Add TF implementation of `XGLMModel` (#16543)
* Add TFXGLM models
* Add todo: self.supports_xla_generation = False
Co-authored-by: Daniel Stancl <stancld@Daniels-MacBook-Pro.local>
Co-authored-by: Daniel Stancl <stancld@daniels-mbp.home>
Co-authored-by: Joao Gante <joaofranciscocardosogante@gmail.com>
Co-authored-by: Daniel <daniel.stancl@rossum.ai>
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> | serving_output | c72d7d91bf4899760725793421eff9da640c8527 | transformers | modeling_tf_xglm.py | 11 | 16 | https://github.com/huggingface/transformers.git | 6 | 113 | 1 | 32 | 180 | Python | {
"docstring": "\n The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ",
"language": "en",
"n_whitespaces": 30,
"n_words": 20,
"vocab_size": 19
} | def serving_output(self, output):
pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
cross_attns = (
tf.convert_to_tensor(output.cross_attentions)
if self.config.output_attentions and self.config.add_cross_attention
else None
)
return TFBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=output.hidden_states,
past_key_values=pkv,
hidden_states=hs,
attentions=attns,
cross_attentions=cross_attns,
)
@add_start_docstrings(
,
XGLM_START_DOCSTRING,
) |
36,047 | 154,524 | 124 | modin/core/execution/ray/implementations/cudf_on_ray/partitioning/partition_manager.py | 30 | 15 | def _apply_func_to_list_of_partitions(cls, func, partitions, **kwargs):
preprocessed_map_func = cls.preprocess_func(func)
key_futures = RayWrapper.materialize(
[
partition.apply(preprocessed_map_func, **kwargs)
for partition in partitions
]
)
gpu_managers = [partition.get_gpu_manager() | REFACTOR-#5009: use RayWrapper.materialize instead of ray.get (#5010)
Signed-off-by: Myachev <anatoly.myachev@intel.com> | _apply_func_to_list_of_partitions | 1dc16415333bf2428ee2b1f4d31ff94e66b9a0a6 | modin | partition_manager.py | 12 | 10 | https://github.com/modin-project/modin.git | 3 | 65 | 0 | 25 | 100 | Python | {
"docstring": "\n Apply `func` to a list of remote partitions from `partitions`.\n\n Parameters\n ----------\n func : callable\n The function to apply.\n partitions : np.ndarray\n NumPy array with partitions.\n **kwargs : dict\n Additional keywords arguments to be passed in `func`.\n\n Returns\n -------\n np.ndarray\n A NumPy array of ``cuDFOnRayDataframePartition`` objects.\n\n Notes\n -----\n This preprocesses the `func` first before applying it to the partitions.\n ",
"language": "en",
"n_whitespaces": 195,
"n_words": 59,
"vocab_size": 46
} | def _apply_func_to_list_of_partitions(cls, func, partitions, **kwargs):
preprocessed_map_func = cls.preprocess_func(func)
key_futures = RayWrapper.materialize(
[
partition.apply(preprocessed_map_func, **kwargs)
for partition in partitions
]
)
gpu_managers = [partition.get_gpu_manager() for partition in partitions]
return cls._create_partitions(key_futures, gpu_managers)
|
|
31,884 | 140,169 | 43 | python/ray/serve/deployment_function_executor_node.py | 11 | 9 | def _execute_impl(self, *args, **kwargs) -> ObjectRef:
return self._deployment_function_handle.remote(
* | [Serve][Deployment Graph][Perf] Add minimal executor DAGNode (#24754)
closes #24475
The current deployment graph has big perf issues compared with using a plain deployment handle, mostly because of the overhead of the DAGNode traversal mechanism. We need this mechanism to power the DAG API, especially for deeply nested objects in args where we rely on pickling; but meanwhile each execution ends up re-creating and replacing every `DAGNode` instance involved, which incurs overhead.
Some overhead is inevitable due to pickling and executing DAGNode Python code, but it can be kept quite minimal. As profiled earlier, pickling itself is quite fast for our benchmarks, on the order of microseconds.
Meanwhile, the elephant in the room is that DeploymentNode and its relatives do far more work in their constructors than necessary, slowing everything down. So the fix is as simple as:
1) Introduce a new set of executor dag node types that contains absolute minimal information that only preserves the DAG structure with traversal mechanism, and ability to call relevant deployment handles.
2) Add a simple new pass in our build() that generates and replaces nodes with executor dag to produce a final executor dag to run the graph.
The current ray dag -> serve dag conversion mixes in a lot of logic related to deployment generation and init args; in the longer term we should remove it, but our correctness currently depends on it, so I'd rather leave that for a separate PR.
### Current 10 node chain with deployment graph `.bind()`
```
chain_length: 10, num_clients: 1
latency_mean_ms: 41.05, latency_std_ms: 15.18
throughput_mean_tps: 27.5, throughput_std_tps: 3.2
```
### Using raw deployment handle without dag overhead
```
chain_length: 10, num_clients: 1
latency_mean_ms: 20.39, latency_std_ms: 4.57
throughput_mean_tps: 51.9, throughput_std_tps: 1.04
```
### After this PR:
```
chain_length: 10, num_clients: 1
latency_mean_ms: 20.35, latency_std_ms: 0.87
throughput_mean_tps: 48.4, throughput_std_tps: 1.43
``` | _execute_impl | f27e85cd7df5ca2873ef6231200a1530e16ac35d | ray | deployment_function_executor_node.py | 9 | 10 | https://github.com/ray-project/ray.git | 1 | 31 | 0 | 11 | 50 | Python | {
"docstring": "Executor of DeploymentNode getting called each time on dag.execute.\n\n The execute implementation is recursive, that is, the method nodes will\n receive whatever this method returns. We return a handle here so method\n node can directly call upon.\n ",
"language": "en",
"n_whitespaces": 65,
"n_words": 37,
"vocab_size": 35
} | def _execute_impl(self, *args, **kwargs) -> ObjectRef:
return self._deployment_function_handle.remote(
*self._bound_args, **self._bound_kwargs
)
|
|
@keras_export('keras.utils.load_img', 'keras.preprocessing.image.load_img') | 79,779 | 268,948 | 81 | keras/preprocessing/image.py | 51 | 18 | def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):
if data_format is None:
data_format = backend.image_data_format()
img = array_to_img(x, data_format=data_format, scale=scale)
if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'):
warnings.warn('The JPG format does not support '
'RGBA images, converting to RGB.')
img = img.conve | Copy image utils from keras_preprocessing directly into core keras
This is not new code, we are just moving these utilities directly
into keras from keras-preprocessing.
For the library code, just fixed linting errors.
For the test code, we had to make more substantial changes to port from pytest, but
hopefully any errors have been caught by the tests themselves.
PiperOrigin-RevId: 427274651 | save_img | 373ad97c72ed1ac4b6898e85b2cfd7b016e4b469 | keras | image.py | 11 | 9 | https://github.com/keras-team/keras.git | 5 | 94 | 1 | 44 | 171 | Python | {
"docstring": "Saves an image stored as a Numpy array to a path or file object.\n\n Args:\n path: Path or file object.\n x: Numpy array.\n data_format: Image data format, either \"channels_first\" or\n \"channels_last\".\n file_format: Optional file format override. If omitted, the format to use\n is determined from the filename extension. If a file object was used\n instead of a filename, this parameter should always be used.\n scale: Whether to rescale image values to be within `[0, 255]`.\n **kwargs: Additional keyword arguments passed to `PIL.Image.save()`.\n ",
"language": "en",
"n_whitespaces": 135,
"n_words": 82,
"vocab_size": 63
} | def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):
if data_format is None:
data_format = backend.image_data_format()
img = array_to_img(x, data_format=data_format, scale=scale)
if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'):
warnings.warn('The JPG format does not support '
'RGBA images, converting to RGB.')
img = img.convert('RGB')
img.save(path, format=file_format, **kwargs)
@keras_export('keras.utils.load_img', 'keras.preprocessing.image.load_img') |
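A small usage sketch for `save_img`, assuming the `tf.keras.utils.save_img` export and that Pillow is installed for PNG writing:

```python
import numpy as np
import tensorflow as tf

img = np.random.randint(0, 255, size=(64, 64, 3)).astype("float32")
tf.keras.utils.save_img("example.png", img, data_format="channels_last", scale=True)
```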
15,900 | 72,483 | 239 | wagtail/admin/views/pages/edit.py | 61 | 14 | def log_commenting_changes(self, changes, revision):
for comment in changes["new_comments"]:
comment.log_create(page_revision=revision, user=self.request.user)
for comment in changes["edited_comments"]:
comment.log_edit(page_revision=revision, user=self.request.user)
for comment in changes["resolved_comments"]:
comment.log_resolve(page_revision=revision, user=self.request.user)
for comment in changes["deleted_comments"]:
comment.log_delete(page_revision=revision, user=self.request.user)
for comment, replies in changes["new_replies"]:
for reply in replies:
reply.log_create(page_revision=revision, user=self.request.user)
for comment, replies in changes["edited_replies"]:
for reply in replies:
reply.log_edit(page_revision=revision, user=self.request.user)
for comment, replies in changes["deleted_replies"]:
for reply in replies:
reply.log_delete(page_revision=revision, user=self.request.user)
| Reformat with black | log_commenting_changes | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | edit.py | 14 | 18 | https://github.com/wagtail/wagtail.git | 11 | 199 | 0 | 26 | 306 | Python | {
"docstring": "\n Generates log entries for any changes made to comments or replies.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 11,
"vocab_size": 11
} | def log_commenting_changes(self, changes, revision):
for comment in changes["new_comments"]:
comment.log_create(page_revision=revision, user=self.request.user)
for comment in changes["edited_comments"]:
comment.log_edit(page_revision=revision, user=self.request.user)
for comment in changes["resolved_comments"]:
comment.log_resolve(page_revision=revision, user=self.request.user)
for comment in changes["deleted_comments"]:
comment.log_delete(page_revision=revision, user=self.request.user)
for comment, replies in changes["new_replies"]:
for reply in replies:
reply.log_create(page_revision=revision, user=self.request.user)
for comment, replies in changes["edited_replies"]:
for reply in replies:
reply.log_edit(page_revision=revision, user=self.request.user)
for comment, replies in changes["deleted_replies"]:
for reply in replies:
reply.log_delete(page_revision=revision, user=self.request.user)
|
|
23,015 | 108,011 | 201 | lib/matplotlib/patches.py | 76 | 20 | def __new__(cls, stylename, **kwargs):
# The "class" should have the _style_list attribute, which is a mapping
# of style names to style classes.
_list = stylename.replace(" ", "").split(",")
_name = _list[0].lower()
try:
_cls = cls._style_list[_name]
except KeyError as err:
raise ValueError(f"Unknown style: {stylename}") from err
try:
| Small style fixes. | __new__ | 075ff0952896f44d7d0b0b3318f0978ae53f84d7 | matplotlib | patches.py | 12 | 13 | https://github.com/matplotlib/matplotlib.git | 5 | 120 | 0 | 59 | 208 | Python | {
"docstring": "Return the instance of the subclass with the given style name.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 9
} | def __new__(cls, stylename, **kwargs):
# The "class" should have the _style_list attribute, which is a mapping
# of style names to style classes.
_list = stylename.replace(" ", "").split(",")
_name = _list[0].lower()
try:
_cls = cls._style_list[_name]
except KeyError as err:
raise ValueError(f"Unknown style: {stylename}") from err
try:
_args_pair = [cs.split("=") for cs in _list[1:]]
_args = {k: float(v) for k, v in _args_pair}
except ValueError as err:
raise ValueError(f"Incorrect style argument: {stylename}") from err
return _cls(**{**_args, **kwargs})
|
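The parsing above is what lets Matplotlib accept style strings such as "round,pad=0.3"; a small sketch with `BoxStyle`, one of the `_Style` subclasses:

```python
from matplotlib.patches import BoxStyle

# "round" selects the style class; "pad=0.3" becomes a float keyword argument.
style = BoxStyle("round, pad=0.3")
print(type(style).__name__)  # Round
```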
|
81,916 | 277,252 | 369 | keras/engine/base_layer.py | 93 | 14 | def losses(self):
collected_losses = []
for layer in self._flatten_layers():
# If any eager losses are present, we assume the model to be part of
# an eager training loop (either a custom one or the one used when
# `run_eagerly=True`) and so we always return just the eager losses.
if layer._eager_losses:
# Filter placeholder losses that may have been added by revived
# layers. (see base_layer_utils for details).
if (
layer._eager_losses[0]
is not base_layer_utils.REVIVED_LOSS_PLACEHOLDER
):
collected_losses.extend(layer._eager_losses)
else:
collected_losses.extend(layer._losses)
for regularizer in layer._callable_losses:
| reduct too long lines | losses | fa6d9107a498f7c2403ff28c7b389a1a0c5cc083 | keras | base_layer.py | 14 | 16 | https://github.com/keras-team/keras.git | 6 | 83 | 0 | 71 | 140 | Python | {
"docstring": "List of losses added using the `add_loss()` API.\n\n Variable regularization tensors are created when this property is\n accessed, so it is eager safe: accessing `losses` under a\n `tf.GradientTape` will propagate gradients back to the corresponding\n variables.\n\n Examples:\n\n >>> class MyLayer(tf.keras.layers.Layer):\n ... def call(self, inputs):\n ... self.add_loss(tf.abs(tf.reduce_mean(inputs)))\n ... return inputs\n >>> l = MyLayer()\n >>> l(np.ones((10, 1)))\n >>> l.losses\n [1.0]\n\n >>> inputs = tf.keras.Input(shape=(10,))\n >>> x = tf.keras.layers.Dense(10)(inputs)\n >>> outputs = tf.keras.layers.Dense(1)(x)\n >>> model = tf.keras.Model(inputs, outputs)\n >>> # Activity regularization.\n >>> len(model.losses)\n 0\n >>> model.add_loss(tf.abs(tf.reduce_mean(x)))\n >>> len(model.losses)\n 1\n\n >>> inputs = tf.keras.Input(shape=(10,))\n >>> d = tf.keras.layers.Dense(10, kernel_initializer='ones')\n >>> x = d(inputs)\n >>> outputs = tf.keras.layers.Dense(1)(x)\n >>> model = tf.keras.Model(inputs, outputs)\n >>> # Weight regularization.\n >>> model.add_loss(lambda: tf.reduce_mean(d.kernel))\n >>> model.losses\n [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>]\n\n Returns:\n A list of tensors.\n ",
"language": "en",
"n_whitespaces": 385,
"n_words": 128,
"vocab_size": 83
} | def losses(self):
collected_losses = []
for layer in self._flatten_layers():
# If any eager losses are present, we assume the model to be part of
# an eager training loop (either a custom one or the one used when
# `run_eagerly=True`) and so we always return just the eager losses.
if layer._eager_losses:
# Filter placeholder losses that may have been added by revived
# layers. (see base_layer_utils for details).
if (
layer._eager_losses[0]
is not base_layer_utils.REVIVED_LOSS_PLACEHOLDER
):
collected_losses.extend(layer._eager_losses)
else:
collected_losses.extend(layer._losses)
for regularizer in layer._callable_losses:
loss_tensor = regularizer()
if loss_tensor is not None:
collected_losses.append(loss_tensor)
return collected_losses
|
|
76,442 | 260,724 | 310 | sklearn/linear_model/_least_angle.py | 71 | 32 | def fit(self, X, y, Xy=None):
self._validate_params()
X, y = self._validate_data(X, y, y_numeric=True, multi_output=True)
_normalize = _deprecate_normalize(
self.normalize, default=True, estimator_name=self.__class__.__name__
)
alpha = getattr(self, "alp | MAINT Parameter Validation for Lars, LarsCV, LassoLars, LassoLarsCV and LassoLarsIC (#24033)
Co-authored-by: jeremie du boisberranger <jeremiedbb@yahoo.fr> | fit | 6c0e0b2e4723d11e29057635c7061a36bc1a8512 | scikit-learn | _least_angle.py | 13 | 26 | https://github.com/scikit-learn/scikit-learn.git | 3 | 169 | 0 | 52 | 251 | Python | {
"docstring": "Fit the model using X, y as training data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \\\n default=None\n Xy = np.dot(X.T, y) that can be precomputed. It is useful\n only when the Gram matrix is precomputed.\n\n Returns\n -------\n self : object\n Returns an instance of self.\n ",
"language": "en",
"n_whitespaces": 203,
"n_words": 70,
"vocab_size": 49
} | def fit(self, X, y, Xy=None):
self._validate_params()
X, y = self._validate_data(X, y, y_numeric=True, multi_output=True)
_normalize = _deprecate_normalize(
self.normalize, default=True, estimator_name=self.__class__.__name__
)
alpha = getattr(self, "alpha", 0.0)
if hasattr(self, "n_nonzero_coefs"):
alpha = 0.0 # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
if self.jitter is not None:
rng = check_random_state(self.random_state)
noise = rng.uniform(high=self.jitter, size=len(y))
y = y + noise
self._fit(
X,
y,
max_iter=max_iter,
alpha=alpha,
fit_path=self.fit_path,
normalize=_normalize,
Xy=Xy,
)
return self
|
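A minimal sketch of the estimator-level API that this `fit` backs, shown here via `LassoLars`, one of the classes sharing the implementation:

```python
import numpy as np
from sklearn.linear_model import LassoLars

X = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
y = np.array([0.0, 1.0, 2.0, 3.0])

reg = LassoLars(alpha=0.01)
reg.fit(X, y)
print(reg.coef_)
```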
|
10,055 | 50,226 | 363 | modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/efficientnet.py | 73 | 26 | def _decode_block_string(block_string):
assert isinstance(block_string, str)
ops = block_string.split('_')
options = {}
for op in ops:
splits = re.split(r'(\d.*)', op)
if len(splits) >= 2:
key, value = splits[:2]
options[key] = value
# Check stride
cond_1 = ('s' in options and len(options['s']) == 1)
cond_2 = ((len(options['s']) == 2) and (options['s'][0] == options['s | add disco_diffusion_ernievil_base | _decode_block_string | ffcde21305c61d950a9f93e57e6180c9a9665b87 | PaddleHub | efficientnet.py | 14 | 20 | https://github.com/PaddlePaddle/PaddleHub.git | 7 | 213 | 0 | 56 | 348 | Python | {
"docstring": " Gets a block through a string notation of arguments. ",
"language": "en",
"n_whitespaces": 10,
"n_words": 9,
"vocab_size": 8
} | def _decode_block_string(block_string):
assert isinstance(block_string, str)
ops = block_string.split('_')
options = {}
for op in ops:
splits = re.split(r'(\d.*)', op)
if len(splits) >= 2:
key, value = splits[:2]
options[key] = value
# Check stride
cond_1 = ('s' in options and len(options['s']) == 1)
cond_2 = ((len(options['s']) == 2) and (options['s'][0] == options['s'][1]))
assert (cond_1 or cond_2)
return BlockArgs(kernel_size=int(options['k']),
num_repeat=int(options['r']),
input_filters=int(options['i']),
output_filters=int(options['o']),
expand_ratio=int(options['e']),
id_skip=('noskip' not in block_string),
se_ratio=float(options['se']) if 'se' in options else None,
stride=[int(options['s'][0])])
|
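The block string follows the usual EfficientNet notation (e.g. `r1_k3_s11_e1_i32_o16_se0.25`). A standalone sketch — deliberately not importing the module's private helper — showing what each field encodes:

```python
import re

block_string = "r1_k3_s11_e1_i32_o16_se0.25"  # hypothetical block spec
options = {}
for op in block_string.split("_"):
    splits = re.split(r"(\d.*)", op)
    if len(splits) >= 2:
        key, value = splits[:2]
        options[key] = value

# r=num_repeat, k=kernel_size, s=stride, e=expand_ratio,
# i=input_filters, o=output_filters, se=squeeze-excitation ratio
print(options)  # {'r': '1', 'k': '3', 's': '11', 'e': '1', 'i': '32', 'o': '16', 'se': '0.25'}
```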
|
38,543 | 160,171 | 17 | numpy/lib/function_base.py | 12 | 5 | def copy(a, order='K', subok=False):
return | Improve documentation formatting | copy | 0307f89d48368a39ed97a252f9faed3c7bf64446 | numpy | function_base.py | 8 | 2 | https://github.com/numpy/numpy.git | 1 | 31 | 0 | 12 | 49 | Python | {
"docstring": "\n Return an array copy of the given object.\n\n Parameters\n ----------\n a : array_like\n Input data.\n order : {'C', 'F', 'A', 'K'}, optional\n Controls the memory layout of the copy. 'C' means C-order,\n 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,\n 'C' otherwise. 'K' means match the layout of `a` as closely\n as possible. (Note that this function and :meth:`ndarray.copy` are very\n similar, but have different default values for their order=\n arguments.)\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise the\n returned array will be forced to be a base-class array (defaults to False).\n\n .. versionadded:: 1.19.0\n\n Returns\n -------\n arr : ndarray\n Array interpretation of `a`.\n\n See Also\n --------\n ndarray.copy : Preferred method for creating an array copy\n\n Notes\n -----\n This is equivalent to:\n\n >>> np.array(a, copy=True) #doctest: +SKIP\n\n Examples\n --------\n Create an array x, with a reference y and a copy z:\n\n >>> x = np.array([1, 2, 3])\n >>> y = x\n >>> z = np.copy(x)\n\n Note that, when we modify x, y changes, but not z:\n\n >>> x[0] = 10\n >>> x[0] == y[0]\n True\n >>> x[0] == z[0]\n False\n\n Note that, np.copy clears previously set WRITEABLE=False flag.\n\n >>> a = np.array([1, 2, 3])\n >>> a.flags[\"WRITEABLE\"] = False\n >>> b = np.copy(a)\n >>> b.flags[\"WRITEABLE\"]\n True\n >>> b[0] = 3\n >>> b\n array([3, 2, 3])\n\n Note that np.copy is a shallow copy and will not copy object\n elements within arrays. This is mainly important for arrays\n containing Python objects. The new array will contain the\n same object which may lead to surprises if that object can\n be modified (is mutable):\n\n >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object)\n >>> b = np.copy(a)\n >>> b[2][0] = 10\n >>> a\n array([1, 'm', list([10, 3, 4])], dtype=object)\n\n To ensure all elements within an ``object`` array are copied,\n use `copy.deepcopy`:\n\n >>> import copy\n >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object)\n >>> c = copy.deepcopy(a)\n >>> c[2][0] = 10\n >>> c\n array([1, 'm', list([10, 3, 4])], dtype=object)\n >>> a\n array([1, 'm', list([2, 3, 4])], dtype=object)\n\n ",
"language": "en",
"n_whitespaces": 593,
"n_words": 340,
"vocab_size": 188
} | def copy(a, order='K', subok=False):
return array(a, order=order, subok=subok, copy=True)
# Basic operations
|
|
31,769 | 139,754 | 13 | python/ray/data/tests/test_context_propagation.py | 7 | 4 | def test_context_placement_group():
driver_code =
proc = run_string_as_driver_no | [Datasets] Add explicit resource allocation option via a top-level scheduling strategy (#24438)
Instead of letting Datasets implicitly use cluster resources in the margins of explicit allocations of other libraries, such as Tune, Datasets should provide an option for explicitly allocating resources for a Datasets workload for users that want to box Datasets in. This PR adds such an explicit resource allocation option, via exposing a top-level scheduling strategy on the DatasetContext with which a placement group can be given. | test_context_placement_group | 68d4dd3a8b2defa5549cfa70e59aa26f2d4825a3 | ray | test_context_propagation.py | 8 | 30 | https://github.com/ray-project/ray.git | 1 | 23 | 0 | 6 | 27 | Python | {
"docstring": "\nimport ray\nfrom ray.data.context import DatasetContext\nfrom ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy\nfrom ray._private.test_utils import placement_group_assert_no_leak\n\nray.init(num_cpus=1)\n\ncontext = DatasetContext.get_current()\n# This placement group will take up all cores of the local cluster.\nplacement_group = ray.util.placement_group(\n name=\"core_hog\",\n strategy=\"SPREAD\",\n bundles=[\n {\"CPU\": 1},\n ],\n)\nray.get(placement_group.ready())\ncontext.scheduling_strategy = PlacementGroupSchedulingStrategy(placement_group)\npipe = ray.data.range(100, parallelism=2) \\\n .window(blocks_per_window=1) \\\n .map(lambda x: x + 1)\nassert pipe.take_all() == list(range(1, 101))\nplacement_group_assert_no_leak([placement_group])\nray.shutdown()\n ",
"language": "en",
"n_whitespaces": 78,
"n_words": 64,
"vocab_size": 55
} | def test_context_placement_group():
driver_code =
proc = run_string_as_driver_nonblocking(driver_code)
|
|
40,576 | 170,562 | 83 | pandas/core/arrays/categorical.py | 25 | 9 | def reorder_categories(self, new_categories, ordered=None):
if set(self.dtype.categories) != set(new_categories):
raise ValueError(
"items in new_categories are not the same as in old categories"
)
return self.set_categories(new_categories, ordered=ordered)
| DEPR: remove inplace arg in Categorical methods (#49321)
* deprecate inplace arg in categorical methods
* fix tests
* add back test
* doc fix
* doc fixes
* avoid constructing new objects on every iteration
* cleanup | reorder_categories | ab6562a20bd894d02fb28675809698d5be0436f9 | pandas | categorical.py | 10 | 6 | https://github.com/pandas-dev/pandas.git | 2 | 43 | 0 | 24 | 70 | Python | {
"docstring": "\n Reorder categories as specified in new_categories.\n\n `new_categories` need to include all old categories and no new category\n items.\n\n Parameters\n ----------\n new_categories : Index-like\n The categories in new order.\n ordered : bool, optional\n Whether or not the categorical is treated as a ordered categorical.\n If not given, do not change the ordered information.\n\n Returns\n -------\n cat : Categorical\n Categorical with reordered categories.\n\n Raises\n ------\n ValueError\n If the new categories do not contain all old category items or any\n new ones\n\n See Also\n --------\n rename_categories : Rename categories.\n add_categories : Add new categories.\n remove_categories : Remove the specified categories.\n remove_unused_categories : Remove categories which are not used.\n set_categories : Set the categories to the specified ones.\n ",
"language": "en",
"n_whitespaces": 325,
"n_words": 114,
"vocab_size": 71
} | def reorder_categories(self, new_categories, ordered=None):
if set(self.dtype.categories) != set(new_categories):
raise ValueError(
"items in new_categories are not the same as in old categories"
)
return self.set_categories(new_categories, ordered=ordered)
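# Hedged usage sketch of the public API above (the category values below are made up;
# assumes a recent pandas where the method returns a new Categorical):
import pandas as pd

cat = pd.Categorical(["a", "b", "a", "c"], categories=["a", "b", "c"])
reordered = cat.reorder_categories(["c", "b", "a"], ordered=True)
print(reordered.categories.tolist())  # ['c', 'b', 'a']
print(reordered.ordered)              # True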
|
|
71,391 | 246,887 | 361 | tests/rest/client/test_rooms.py | 150 | 17 | def test_get_member_list_no_permission_former_member_with_at_token(self):
# create a room, invite the user and the user joins
room_id = self.helper.create_room_as("@alice:red")
self.helper.invite(room_id, "@alice:red", self.user_id)
self.helper.join(room_id, self.user_id)
# sync to get an at token
channel = self.make_request("GET", "/sync")
self.assertEqual(200, channel.code)
sync_token = channel.json_body["next_batch"]
# check that the user can see the member list to start with
channel = self.make_request(
"GET", "/rooms/%s/members?at=%s" % (room_id, sync_token)
)
se | Replace assertEquals and friends with non-deprecated versions. (#12092) | test_get_member_list_no_permission_former_member_with_at_token | 02d708568b476f2f7716000b35c0adfa4cbd31b3 | synapse | test_rooms.py | 10 | 21 | https://github.com/matrix-org/synapse.git | 1 | 206 | 0 | 78 | 351 | Python | {
"docstring": "\n Tests that a former member of the room can not get the member list\n (in the case that they use an at token).\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 23,
"vocab_size": 19
} | def test_get_member_list_no_permission_former_member_with_at_token(self):
# create a room, invite the user and the user joins
room_id = self.helper.create_room_as("@alice:red")
self.helper.invite(room_id, "@alice:red", self.user_id)
self.helper.join(room_id, self.user_id)
# sync to get an at token
channel = self.make_request("GET", "/sync")
self.assertEqual(200, channel.code)
sync_token = channel.json_body["next_batch"]
# check that the user can see the member list to start with
channel = self.make_request(
"GET", "/rooms/%s/members?at=%s" % (room_id, sync_token)
)
self.assertEqual(200, channel.code, msg=channel.result["body"])
# ban the user (Note: the user is actually allowed to see this event and
# state so that they know they're banned!)
self.helper.change_membership(room_id, "@alice:red", self.user_id, "ban")
# invite a third user and let them join
self.helper.invite(room_id, "@alice:red", "@bob:red")
self.helper.join(room_id, "@bob:red")
# now, with the original user, sync again to get a new at token
channel = self.make_request("GET", "/sync")
self.assertEqual(200, channel.code)
sync_token = channel.json_body["next_batch"]
# check the user can no longer see the updated member list
channel = self.make_request(
"GET", "/rooms/%s/members?at=%s" % (room_id, sync_token)
)
self.assertEqual(403, channel.code, msg=channel.result["body"])
|
|
56,599 | 222,501 | 45 | python3.10.4/Lib/difflib.py | 22 | 8 | def _keep_original_ws(s, tag_s):
return ''.join(
c if tag_c == " " and c.isspace() else tag_c
for c, tag_c in zip(s, tag_s)
)
| add python 3.10.4 for windows | _keep_original_ws | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | difflib.py | 11 | 5 | https://github.com/XX-net/XX-Net.git | 4 | 38 | 0 | 19 | 63 | Python | {
"docstring": "Replace whitespace with the original whitespace characters in `s`",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 8
} | def _keep_original_ws(s, tag_s):
return ''.join(
c if tag_c == " " and c.isspace() else tag_c
for c, tag_c in zip(s, tag_s)
)
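# Small illustration of the helper above (assuming it is in scope): plain spaces in the
# tag line are swapped back for the original whitespace, so intraline markers stay
# aligned under tabs. The sample line and tags are invented.
line = "\tvalue = 10"
tags = "        ^^^"
print(repr(_keep_original_ws(line, tags)))  # '\t       ^^^' -- leading tab preserved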
|
|
23,150 | 108,343 | 21 | lib/matplotlib/cm.py | 9 | 6 | def unregister_cmap(name):
cmap = _colormaps.get(name, None)
_colormaps.unregister(name)
return cmap
| MNT: Remove cmap_d colormap access | unregister_cmap | fb902f735995372f345a8333804f5c6052f29770 | matplotlib | cm.py | 8 | 4 | https://github.com/matplotlib/matplotlib.git | 1 | 24 | 0 | 8 | 41 | Python | {
"docstring": "\n Remove a colormap recognized by :func:`get_cmap`.\n\n You may not remove built-in colormaps.\n\n If the named colormap is not registered, returns with no error, raises\n if you try to de-register a default colormap.\n\n .. warning::\n\n Colormap names are currently a shared namespace that may be used\n by multiple packages. Use `unregister_cmap` only if you know you\n have registered that name before. In particular, do not\n unregister just in case to clean the name before registering a\n new colormap.\n\n Parameters\n ----------\n name : str\n The name of the colormap to be un-registered\n\n Returns\n -------\n ColorMap or None\n If the colormap was registered, return it if not return `None`\n\n Raises\n ------\n ValueError\n If you try to de-register a default built-in colormap.\n ",
"language": "en",
"n_whitespaces": 209,
"n_words": 118,
"vocab_size": 80
} | def unregister_cmap(name):
cmap = _colormaps.get(name, None)
_colormaps.unregister(name)
return cmap
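# Hedged round-trip sketch for the registration helpers; the colormap name is made up,
# and cm.register_cmap/unregister_cmap assume a Matplotlib version where they exist
# (newer releases prefer matplotlib.colormaps.register/unregister).
from matplotlib import cm
from matplotlib.colors import ListedColormap

demo = ListedColormap(["black", "white"], name="demo_binary")
cm.register_cmap(cmap=demo)
cm.get_cmap("demo_binary")             # resolvable once registered
removed = cm.unregister_cmap("demo_binary")
print(removed.name)                    # demo_binary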
|
|
22,017 | 104,902 | 116 | src/datasets/utils/streaming_download_manager.py | 53 | 15 | def _get_extraction_protocol_with_magic_number(f) -> Optional[str]:
magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH)
f.seek(0)
for i in range(MAGIC_NUMBER_MAX_LENGTH):
compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
if compression is not None: # TODO(QL): raise an error for .tar.gz files as | don't check f.loc in _get_extraction_protocol_with_magic_number (#4318) | _get_extraction_protocol_with_magic_number | 17fd2ea68cf75b36369a9f018497875e292db26a | datasets | streaming_download_manager.py | 13 | 11 | https://github.com/huggingface/datasets.git | 4 | 81 | 0 | 36 | 135 | Python | {
"docstring": "read the magic number from a file-like object and return the compression protocol",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 12
} | def _get_extraction_protocol_with_magic_number(f) -> Optional[str]:
magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH)
f.seek(0)
for i in range(MAGIC_NUMBER_MAX_LENGTH):
compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
if compression is not None: # TODO(QL): raise an error for .tar.gz files as in _get_extraction_protocol
return compression
compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
if compression is not None:
raise NotImplementedError(f"Compression protocol '{compression}' not implemented.")
|
|
54,275 | 215,953 | 322 | salt/modules/lxc.py | 118 | 22 | def _get_veths(net_data):
if isinstance(net_data, dict):
net_data = list(net_data.items())
nics = salt.utils.odict.OrderedDict()
current_nic = salt.utils.odict.OrderedDict()
no_names = True
for item in net_data:
if item and isinstance(item, dict):
item = list(item.items())[0]
# skip LXC configuration comment lines, and play only with tuples conf
elif isinstance(item, str):
# deal with reflection of commented lxc config | Update to latest ``pyupgrade`` hook. Stop skipping it on CI.
Signed-off-by: Pedro Algarvio <palgarvio@vmware.com> | _get_veths | f2a783643de61cac1ff3288b40241e5ce6e1ddc8 | salt | lxc.py | 20 | 24 | https://github.com/saltstack/salt.git | 14 | 206 | 0 | 74 | 342 | Python | {
"docstring": "\n Parse the nic setup inside lxc conf tuples back to a dictionary indexed by\n network interface\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 16,
"vocab_size": 16
} | def _get_veths(net_data):
if isinstance(net_data, dict):
net_data = list(net_data.items())
nics = salt.utils.odict.OrderedDict()
current_nic = salt.utils.odict.OrderedDict()
no_names = True
for item in net_data:
if item and isinstance(item, dict):
item = list(item.items())[0]
# skip LXC configuration comment lines, and play only with tuples conf
elif isinstance(item, str):
# deal with reflection of commented lxc configs
sitem = item.strip()
if sitem.startswith("#") or not sitem:
continue
elif "=" in item:
item = tuple(a.strip() for a in item.split("=", 1))
if item[0] == "lxc.network.type":
current_nic = salt.utils.odict.OrderedDict()
if item[0] == "lxc.network.name":
no_names = False
nics[item[1].strip()] = current_nic
current_nic[item[0].strip()] = item[1].strip()
    # if no ethernet card name has been collected, assume we collected
    # data for eth0
if no_names and current_nic:
nics[DEFAULT_NIC] = current_nic
return nics
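# Stripped-down sketch of the same parsing idea on plain dicts. The config lines are
# invented, and the real function additionally handles tuple/dict input and salt's
# ordered dicts.
lines = [
    "lxc.network.type = veth",
    "lxc.network.name = eth0",
    "lxc.network.flags = up",
    "# a comment that should be ignored",
    "lxc.network.type = veth",
    "lxc.network.name = eth1",
]

nics, current = {}, {}
for raw in lines:
    entry = raw.strip()
    if not entry or entry.startswith("#") or "=" not in entry:
        continue
    key, value = (part.strip() for part in entry.split("=", 1))
    if key == "lxc.network.type":
        current = {}
    if key == "lxc.network.name":
        nics[value] = current
    current[key] = value

print(list(nics))                         # ['eth0', 'eth1']
print(nics["eth0"]["lxc.network.flags"])  # up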
|
|
24,799 | 112,957 | 42 | nni/runtime/log.py | 17 | 10 | def start_stdout_logging() -> None:
if '_stdout_' in _handlers:
return
| Logging refactor (step 1) - experiment handlers (#4792) | start_stdout_logging | 4feab0e34b490500b06efd6e7e8a34d686702c2f | nni | log.py | 9 | 14 | https://github.com/microsoft/nni.git | 2 | 41 | 0 | 15 | 75 | Python | {
"docstring": "\n Register the stdout handler.\n\n This function should be invoked on importing nni.\n\n It is safe to call it multiple times.\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 20,
"vocab_size": 20
} | def start_stdout_logging() -> None:
if '_stdout_' in _handlers:
return
handler = StreamHandler(sys.stdout)
handler.setFormatter(_StdoutFormatter())
_handlers['_stdout_'] = handler
_root_logger.addHandler(handler)
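# The same idempotent-registration pattern with only the standard logging module; the
# logger name and format string below are made up.
import logging
import sys

_demo_handlers = {}
_demo_logger = logging.getLogger("demo")

def start_demo_stdout_logging() -> None:
    # Safe to call repeatedly: at most one stdout handler is ever attached.
    if "_stdout_" in _demo_handlers:
        return
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter("[%(levelname)s] %(message)s"))
    _demo_handlers["_stdout_"] = handler
    _demo_logger.addHandler(handler)

start_demo_stdout_logging()
start_demo_stdout_logging()        # second call is a no-op
print(len(_demo_logger.handlers))  # 1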
|
|
@frappe.whitelist() | 14,109 | 66,146 | 14 | erpnext/hr/doctype/job_offer/job_offer.py | 22 | 12 | def get_staffing_plan_detail(designation, company, offer_date):
detail = frappe.db.sql(
,
(designation, company, offer_date),
as_dict=1,
)
return frappe._dict(detail[0]) if (detail and detail[0].parent) else None
@frappe.whitelist() | style: format code with black | get_staffing_plan_detail | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | job_offer.py | 10 | 21 | https://github.com/frappe/erpnext.git | 3 | 55 | 1 | 21 | 90 | Python | {
"docstring": "\n\t\tSELECT DISTINCT spd.parent,\n\t\t\tsp.from_date as from_date,\n\t\t\tsp.to_date as to_date,\n\t\t\tsp.name,\n\t\t\tsum(spd.vacancies) as vacancies,\n\t\t\tspd.designation\n\t\tFROM `tabStaffing Plan Detail` spd, `tabStaffing Plan` sp\n\t\tWHERE\n\t\t\tsp.docstatus=1\n\t\t\tAND spd.designation=%s\n\t\t\tAND sp.company=%s\n\t\t\tAND spd.parent = sp.name\n\t\t\tAND %s between sp.from_date and sp.to_date\n\t",
"language": "en",
"n_whitespaces": 25,
"n_words": 38,
"vocab_size": 30
} | def get_staffing_plan_detail(designation, company, offer_date):
detail = frappe.db.sql(
,
(designation, company, offer_date),
as_dict=1,
)
return frappe._dict(detail[0]) if (detail and detail[0].parent) else None
@frappe.whitelist() |
71,912 | 247,777 | 217 | tests/push/test_push_rule_evaluator.py | 94 | 8 | def test_display_name(self) -> None:
evaluator = self._get_evaluator({"body": "foo bar baz"})
condition = {
"kind": "contains_display_name",
}
# Blank names are skipped.
self.assertFalse(evaluator.matches(condition, "@user:test", ""))
# Check a display name that doesn't match.
self.assertFalse(evaluator.matches(condition, "@user:test", "not found"))
# Check a display name which matches.
self.assertTrue(evaluator.matches(condition, "@user:test", "foo"))
# A display name that matches, but not a full word does not result in a match.
self.assertFalse(evaluator.matches(condition, "@user:test", "ba"))
# A display name should not be interpreted as a regular expression.
self.assertFalse(evaluator.matches(condition, "@user:test", | Add type hints to tests files. (#12256) | test_display_name | 9d21ecf7ceab55bc19c4457b8b07401b0b1623a7 | synapse | test_push_rule_evaluator.py | 11 | 12 | https://github.com/matrix-org/synapse.git | 1 | 118 | 0 | 58 | 217 | Python | {
"docstring": "Check for a matching display name in the body of the event.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | def test_display_name(self) -> None:
evaluator = self._get_evaluator({"body": "foo bar baz"})
condition = {
"kind": "contains_display_name",
}
# Blank names are skipped.
self.assertFalse(evaluator.matches(condition, "@user:test", ""))
# Check a display name that doesn't match.
self.assertFalse(evaluator.matches(condition, "@user:test", "not found"))
# Check a display name which matches.
self.assertTrue(evaluator.matches(condition, "@user:test", "foo"))
# A display name that matches, but not a full word does not result in a match.
self.assertFalse(evaluator.matches(condition, "@user:test", "ba"))
# A display name should not be interpreted as a regular expression.
self.assertFalse(evaluator.matches(condition, "@user:test", "ba[rz]"))
# A display name with spaces should work fine.
self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar"))
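# Rough standalone sketch of the behaviour the test asserts -- literal (non-regex),
# whole-word matching of a display name. This is not Synapse's actual matcher.
import re

def contains_display_name(body: str, display_name: str) -> bool:
    if not display_name:
        return False
    # Escape the name so it is matched literally, and require word boundaries.
    pattern = re.compile(r"(^|\W)" + re.escape(display_name) + r"(\W|$)", re.IGNORECASE)
    return bool(pattern.search(body))

assert contains_display_name("foo bar baz", "foo")
assert not contains_display_name("foo bar baz", "ba")
assert not contains_display_name("foo bar baz", "ba[rz]")
assert contains_display_name("foo bar baz", "foo bar")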
|
|
54,176 | 215,784 | 33 | tests/pytests/functional/modules/file/test_readlink.py | 17 | 11 | def test_readlink_not_a_link(file, source):
with pytest.raises(Salt | Add some funtional tests
Add functional tests for the following:
- file.readlink
- file.replace
- file.symlink
Remove unit tests for file.replace as they are duplicated in the added
functional test | test_readlink_not_a_link | a35b29b2651bf33c5d5b45e64bc7765ffde4aff4 | salt | test_readlink.py | 10 | 4 | https://github.com/saltstack/salt.git | 1 | 34 | 0 | 17 | 61 | Python | {
"docstring": "\n Test readlink where the path is not a link\n Should throw a SaltInvocationError\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 13,
"vocab_size": 12
} | def test_readlink_not_a_link(file, source):
with pytest.raises(SaltInvocationError) as exc:
file.readlink(path=source)
assert "A valid link was not specified" in exc.value.message
|
|
14,660 | 67,910 | 76 | erpnext/stock/report/stock_analytics/stock_analytics.py | 106 | 21 | def get_periodic_data(entry, filters):
periodic_data = {}
for d in entry:
period = get_period(d.posting_date, filters)
bal_qty = 0
# if period against item does not exist yet, instantiate it
# insert existing balance dict against period, and add/subtract to it
if periodic_data.get(d.item_code) and not periodic_data.get(d.item_code).get(period):
previous_balance = periodic_data[d.item_code]["balance"].copy()
periodic_data[d.item_code][period] = previous_balance
if d.voucher_type == "Stock Reconciliation":
if periodic_data.get(d.item_code) and periodic_data.get(d.item_code).get("balance").get(
d.warehouse
):
bal_qty = periodic_data[d.item_code]["balance"][d.warehouse]
qty_diff = d.qty_after_transaction - bal_qty
else:
qty_diff = d.actual_qty
if filters["value_quantity"] == "Quantity":
value = qty_diff
else:
v | style: format code with black | get_periodic_data | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | stock_analytics.py | 17 | 27 | https://github.com/frappe/erpnext.git | 8 | 274 | 0 | 67 | 435 | Python | {
"docstring": "Structured as:\n\tItem 1\n\t - Balance (updated and carried forward):\n\t - Warehouse A : bal_qty/value\n\t - Warehouse B : bal_qty/value\n\t - Jun 2021 (sum of warehouse quantities used in report)\n\t - Warehouse A : bal_qty/value\n\t - Warehouse B : bal_qty/value\n\t - Jul 2021 (sum of warehouse quantities used in report)\n\t - Warehouse A : bal_qty/value\n\t - Warehouse B : bal_qty/value\n\tItem 2\n\t - Balance (updated and carried forward):\n\t - Warehouse A : bal_qty/value\n\t - Warehouse B : bal_qty/value\n\t - Jun 2021 (sum of warehouse quantities used in report)\n\t - Warehouse A : bal_qty/value\n\t - Warehouse B : bal_qty/value\n\t - Jul 2021 (sum of warehouse quantities used in report)\n\t - Warehouse A : bal_qty/value\n\t - Warehouse B : bal_qty/value\n\t",
"language": "en",
"n_whitespaces": 433,
"n_words": 118,
"vocab_size": 26
} | def get_periodic_data(entry, filters):
periodic_data = {}
for d in entry:
period = get_period(d.posting_date, filters)
bal_qty = 0
# if period against item does not exist yet, instantiate it
# insert existing balance dict against period, and add/subtract to it
if periodic_data.get(d.item_code) and not periodic_data.get(d.item_code).get(period):
previous_balance = periodic_data[d.item_code]["balance"].copy()
periodic_data[d.item_code][period] = previous_balance
if d.voucher_type == "Stock Reconciliation":
if periodic_data.get(d.item_code) and periodic_data.get(d.item_code).get("balance").get(
d.warehouse
):
bal_qty = periodic_data[d.item_code]["balance"][d.warehouse]
qty_diff = d.qty_after_transaction - bal_qty
else:
qty_diff = d.actual_qty
if filters["value_quantity"] == "Quantity":
value = qty_diff
else:
value = d.stock_value_difference
# period-warehouse wise balance
periodic_data.setdefault(d.item_code, {}).setdefault("balance", {}).setdefault(d.warehouse, 0.0)
periodic_data.setdefault(d.item_code, {}).setdefault(period, {}).setdefault(d.warehouse, 0.0)
periodic_data[d.item_code]["balance"][d.warehouse] += value
periodic_data[d.item_code][period][d.warehouse] = periodic_data[d.item_code]["balance"][
d.warehouse
]
return periodic_data
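# The carry-forward bookkeeping in isolation, with plain dicts. Item, warehouse and
# period labels are invented.
from collections import namedtuple

Entry = namedtuple("Entry", "item_code warehouse period qty")
entries = [
    Entry("ITEM-A", "WH-1", "Jun 2021", 5),
    Entry("ITEM-A", "WH-1", "Jul 2021", -2),
]

periodic = {}
for e in entries:
    periodic.setdefault(e.item_code, {}).setdefault("balance", {}).setdefault(e.warehouse, 0.0)
    periodic.setdefault(e.item_code, {}).setdefault(e.period, {}).setdefault(e.warehouse, 0.0)
    periodic[e.item_code]["balance"][e.warehouse] += e.qty
    periodic[e.item_code][e.period][e.warehouse] = periodic[e.item_code]["balance"][e.warehouse]

print(periodic["ITEM-A"]["Jun 2021"]["WH-1"])  # 5.0
print(periodic["ITEM-A"]["Jul 2021"]["WH-1"])  # 3.0 -- balance carried forward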
|
|
76,516 | 260,817 | 27 | sklearn/utils/__init__.py | 11 | 6 | def shuffle(*arrays, random_state=None, n_samples=None):
return resample(
*arrays, replace=False, n_samples=n_samples, random_state=random_state
)
| DOC ensures sklearn.utils.shuffle passes numpydoc validation (#24367)
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com> | shuffle | 49279c3267c0c54cdba80a571820c46f25fbe883 | scikit-learn | __init__.py | 8 | 4 | https://github.com/scikit-learn/scikit-learn.git | 1 | 33 | 0 | 11 | 50 | Python | {
"docstring": "Shuffle arrays or sparse matrices in a consistent way.\n\n This is a convenience alias to ``resample(*arrays, replace=False)`` to do\n random permutations of the collections.\n\n Parameters\n ----------\n *arrays : sequence of indexable data-structures\n Indexable data-structures can be arrays, lists, dataframes or scipy\n sparse matrices with consistent first dimension.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for shuffling\n the data.\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n n_samples : int, default=None\n Number of samples to generate. If left to None this is\n automatically set to the first dimension of the arrays. It should\n not be larger than the length of arrays.\n\n Returns\n -------\n shuffled_arrays : sequence of indexable data-structures\n Sequence of shuffled copies of the collections. The original arrays\n are not impacted.\n\n See Also\n --------\n resample : Resample arrays or sparse matrices in a consistent way.\n\n Examples\n --------\n It is possible to mix sparse and dense arrays in the same run::\n\n >>> import numpy as np\n >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])\n >>> y = np.array([0, 1, 2])\n\n >>> from scipy.sparse import coo_matrix\n >>> X_sparse = coo_matrix(X)\n\n >>> from sklearn.utils import shuffle\n >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)\n >>> X\n array([[0., 0.],\n [2., 1.],\n [1., 0.]])\n\n >>> X_sparse\n <3x2 sparse matrix of type '<... 'numpy.float64'>'\n with 3 stored elements in Compressed Sparse Row format>\n\n >>> X_sparse.toarray()\n array([[0., 0.],\n [2., 1.],\n [1., 0.]])\n\n >>> y\n array([2, 1, 0])\n\n >>> shuffle(y, n_samples=2, random_state=0)\n array([0, 1])\n ",
"language": "en",
"n_whitespaces": 519,
"n_words": 248,
"vocab_size": 152
} | def shuffle(*arrays, random_state=None, n_samples=None):
return resample(
*arrays, replace=False, n_samples=n_samples, random_state=random_state
)
|
|
78,567 | 266,763 | 483 | test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py | 118 | 45 | def check_changes(self, args, results): # type: (SanityConfig, Results) -> None
integration_targets = list(walk_integration_targets())
module_targets = list(walk_module_targets())
integration_targets_by_name = dict((target.name, target) for target in integration_targets)
module_names_by_path = dict((target.path, target.module) for target in module_targets)
disabled_targets = []
unstable_targets = []
unsupported_targets = []
for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]:
| ansible-test - Code cleanup and refactoring. (#77169)
* Remove unnecessary PyCharm ignores.
* Ignore intentional undefined attribute usage.
* Add missing type hints. Fix existing type hints.
* Fix docstrings and comments.
* Use function to register completion handler.
* Pass strings to display functions.
* Fix CompositeAction handling of dest argument.
* Use consistent types in expressions/assignments.
* Use custom function to keep linters happy.
* Add missing raise for custom exception.
* Clean up key/value type handling in cloud plugins.
* Use dataclass instead of dict for results.
* Add custom type_guard function to check lists.
* Ignore return type that can't be checked (yet).
* Avoid changing types on local variables. | check_changes | a06fa496d3f837cca3c437ab6e9858525633d147 | ansible | integration_aliases.py | 14 | 36 | https://github.com/ansible/ansible.git | 14 | 298 | 0 | 76 | 456 | Python | {
"docstring": "Check changes and store results in the provided result dictionary.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def check_changes(self, args, results): # type: (SanityConfig, Results) -> None
integration_targets = list(walk_integration_targets())
module_targets = list(walk_module_targets())
integration_targets_by_name = dict((target.name, target) for target in integration_targets)
module_names_by_path = dict((target.path, target.module) for target in module_targets)
disabled_targets = []
unstable_targets = []
unsupported_targets = []
for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]:
for target in args.metadata.change_description.focused_command_targets[command]:
if self.DISABLED in integration_targets_by_name[target].aliases:
disabled_targets.append(target)
elif self.UNSTABLE in integration_targets_by_name[target].aliases:
unstable_targets.append(target)
elif self.UNSUPPORTED in integration_targets_by_name[target].aliases:
unsupported_targets.append(target)
untested_modules = []
for path in args.metadata.change_description.no_integration_paths:
module = module_names_by_path.get(path)
if module:
untested_modules.append(module)
comments = [
self.format_comment(self.TEMPLATE_DISABLED, disabled_targets),
self.format_comment(self.TEMPLATE_UNSTABLE, unstable_targets),
self.format_comment(self.TEMPLATE_UNSUPPORTED, unsupported_targets),
self.format_comment(self.TEMPLATE_UNTESTED, untested_modules),
]
comments = [comment for comment in comments if comment]
labels = dict(
needs_tests=bool(untested_modules),
disabled_tests=bool(disabled_targets),
unstable_tests=bool(unstable_targets),
unsupported_tests=bool(unsupported_targets),
)
results.comments += comments
results.labels.update(labels)
|
|
42,449 | 177,582 | 377 | label_studio/tests/test_next_task.py | 122 | 42 | def test_overlap_first(business_client, setup_before_upload, show_overlap_first):
c = business_client
config = dict(
title='test_overlap_first',
| fix: DEV-1348: Fix _rearrange_overlap_cohort filter condition for overlap bulk update with concurrent import (#1844)
* [fix] Rearrange overlap depending in annotations count
* Fix next task test for not random overlap assignment
* Delete unused method
* Rename rearrange method to have back compatibility
* Refactor to Q_finished_annotations from tasks.models
* Fix filter for tasks with max annotations
* Change filter for tasks with max annotations
* Change project stats recalculation condition
* Fix rearrange during import from storage
* Change _rearrange_overlap_cohort filter condition
* Switching to bulk_update in _rearrange_overlap_cohort
* Stylize code
* Add is_labeled on import
* Fix tests
* Fix tests
* Fix tests more
Co-authored-by: nik <nik@heartex.net>
Co-authored-by: Sergei Ivashchenko <triklozoid@gmail.com>
Co-authored-by: niklub <lubimov.nicolas@gmail.com>
Co-authored-by: Max Tkachenko <makseq@gmail.com> | test_overlap_first | 35125cca12ba1e8703c4284894e4e2db44ce7009 | label-studio | test_next_task.py | 17 | 63 | https://github.com/heartexlabs/label-studio.git | 8 | 396 | 0 | 84 | 474 | Python | {
"docstring": "\n <View>\n <Text name=\"text\" value=\"$text\"></Text>\n <Choices name=\"text_class\" choice=\"single\">\n <Choice value=\"class_A\"></Choice>\n <Choice value=\"class_B\"></Choice>\n </Choices>\n </View>",
"language": "en",
"n_whitespaces": 104,
"n_words": 13,
"vocab_size": 12
} | def test_overlap_first(business_client, setup_before_upload, show_overlap_first):
c = business_client
config = dict(
title='test_overlap_first',
is_published=True,
maximum_annotations=1,
show_overlap_first=show_overlap_first,
sampling="Uniform sampling",
label_config=
)
project = make_project(config, business_client.user)
annotation_result = json.dumps([{
'from_name': 'text_class',
'to_name': 'text',
'type': 'choices',
'value': {'choices': ['class_A']}
}])
num_tasks = 1000
overlap_cohort_percentage = 1
# set up tasks overlap
setup_after_upload = True
if setup_before_upload:
r = c.patch(
f'/api/projects/{project.id}/',
data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}),
content_type='application/json'
)
assert r.status_code == 200
setup_after_upload = False
# create tasks
tasks = []
for i in range(num_tasks):
tasks.append({'data': {'text': f'this is {str(i)}'}})
r = business_client.post(
f'/api/projects/{project.id}/tasks/bulk/', data=json.dumps(tasks), content_type='application/json')
assert r.status_code == 201
if setup_after_upload:
r = c.patch(
f'/api/projects/{project.id}/',
data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}),
content_type='application/json'
)
assert r.status_code == 200
expected_tasks_with_overlap = int(overlap_cohort_percentage / 100. * num_tasks)
assert Task.objects.filter(Q(project_id=project.id) & Q(overlap__gt=1)).count() == expected_tasks_with_overlap
|
|
76,710 | 261,252 | 136 | sklearn/utils/extmath.py | 54 | 15 | def svd_flip(u, v, u_based_decision=True):
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, range(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[r | DOC Ensures that svd_flip passes numpydoc validation (#24581)
Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> | svd_flip | 97057d329da1786aa03206251aab68bf51312390 | scikit-learn | extmath.py | 16 | 12 | https://github.com/scikit-learn/scikit-learn.git | 2 | 127 | 0 | 30 | 191 | Python | {
"docstring": "Sign correction to ensure deterministic output from SVD.\n\n Adjusts the columns of u and the rows of v such that the loadings in the\n columns in u that are largest in absolute value are always positive.\n\n Parameters\n ----------\n u : ndarray\n Parameters u and v are the output of `linalg.svd` or\n :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner\n dimensions so one can compute `np.dot(u * s, v)`.\n\n v : ndarray\n Parameters u and v are the output of `linalg.svd` or\n :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner\n dimensions so one can compute `np.dot(u * s, v)`.\n The input v should really be called vt to be consistent with scipy's\n output.\n\n u_based_decision : bool, default=True\n If True, use the columns of u as the basis for sign flipping.\n Otherwise, use the rows of v. The choice of which variable to base the\n decision on is generally algorithm dependent.\n\n Returns\n -------\n u_adjusted : ndarray\n Array u with adjusted columns and the same dimensions as u.\n\n v_adjusted : ndarray\n Array v with adjusted rows and the same dimensions as v.\n ",
"language": "en",
"n_whitespaces": 298,
"n_words": 171,
"vocab_size": 86
} | def svd_flip(u, v, u_based_decision=True):
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, range(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[range(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
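# Quick numpy check of the idea: flipping signs column-wise leaves the reconstruction
# unchanged while making the decomposition's signs deterministic. The random matrix is
# only for illustration.
import numpy as np

rng = np.random.RandomState(0)
A = rng.normal(size=(6, 4))
u, s, vt = np.linalg.svd(A, full_matrices=False)

max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, range(u.shape[1])])
u_flipped = u * signs
vt_flipped = vt * signs[:, np.newaxis]

assert np.allclose((u_flipped * s) @ vt_flipped, A)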
|
|
31,802 | 139,905 | 118 | rllib/policy/tf_policy.py | 36 | 13 | def extra_action_out_fn(self) -> Dict[str, TensorType]:
extra_fetches = {}
# Action-logp and action-prob.
if self._sampled_action_logp is not None:
extra_fetches | [RLlib] Migrate MAML, MB-MPO, MARWIL, and BC to use Policy sub-classing implementation. (#24914) | extra_action_out_fn | d5a6d46049d0ea0490c90366a081de79a87d0fac | ray | tf_policy.py | 10 | 17 | https://github.com/ray-project/ray.git | 3 | 65 | 0 | 25 | 103 | Python | {
"docstring": "Extra values to fetch and return from compute_actions().\n\n By default we return action probability/log-likelihood info\n and action distribution inputs (if present).\n\n Returns:\n Dict[str, TensorType]: An extra fetch-dict to be passed to and\n returned from the compute_actions() call.\n ",
"language": "en",
"n_whitespaces": 92,
"n_words": 37,
"vocab_size": 30
} | def extra_action_out_fn(self) -> Dict[str, TensorType]:
extra_fetches = {}
# Action-logp and action-prob.
if self._sampled_action_logp is not None:
extra_fetches[SampleBatch.ACTION_PROB] = self._sampled_action_prob
extra_fetches[SampleBatch.ACTION_LOGP] = self._sampled_action_logp
# Action-dist inputs.
if self._dist_inputs is not None:
extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = self._dist_inputs
return extra_fetches
|
|
8,562 | 45,432 | 733 | airflow/jobs/triggerer_job.py | 162 | 24 | async def cleanup_finished_triggers(self):
for trigger_id, details in list(self.triggers.items()):
if details["task"].done():
# Check to see if it exited for | Log traceback in trigger excs (#21213) | cleanup_finished_triggers | 4ad21f5f7c2d416cf813a860564bc2bf3e161d46 | airflow | triggerer_job.py | 18 | 25 | https://github.com/apache/airflow.git | 7 | 160 | 0 | 116 | 275 | Python | {
"docstring": "\n Go through all trigger tasks (coroutines) and clean up entries for\n ones that have exited, optionally warning users if the exit was\n not normal.\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 24,
"vocab_size": 24
} | async def cleanup_finished_triggers(self):
for trigger_id, details in list(self.triggers.items()):
if details["task"].done():
# Check to see if it exited for good reasons
saved_exc = None
try:
result = details["task"].result()
except (asyncio.CancelledError, SystemExit, KeyboardInterrupt):
# These are "expected" exceptions and we stop processing here
# If we don't, then the system requesting a trigger be removed -
# which turns into CancelledError - results in a failure.
del self.triggers[trigger_id]
continue
except BaseException as e:
# This is potentially bad, so log it.
self.log.exception("Trigger %s exited with error %s", details["name"], e)
saved_exc = e
else:
# See if they foolishly returned a TriggerEvent
if isinstance(result, TriggerEvent):
self.log.error(
"Trigger %s returned a TriggerEvent rather than yielding it", details["name"]
)
# See if this exited without sending an event, in which case
# any task instances depending on it need to be failed
if details["events"] == 0:
self.log.error(
"Trigger %s exited without sending an event. Dependent tasks will be failed.",
details["name"],
)
self.failed_triggers.append((trigger_id, saved_exc))
del self.triggers[trigger_id]
await asyncio.sleep(0)
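# The done-task sweep sketched with plain asyncio; the task name and failing coroutine
# are made up, and this is not Airflow's actual code.
import asyncio

async def failing_trigger():
    raise RuntimeError("boom")

async def demo():
    tasks = {"t1": asyncio.create_task(failing_trigger())}
    await asyncio.sleep(0)  # give the task a chance to run (and fail)
    for name, task in list(tasks.items()):
        if task.done():
            try:
                task.result()
            except asyncio.CancelledError:
                pass  # expected when a trigger is deliberately removed
            except BaseException as exc:
                print(f"trigger {name} exited with error: {exc!r}")
            del tasks[name]

asyncio.run(demo())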
|
|
30,145 | 133,910 | 89 | rllib/contrib/sumo/utils.py | 26 | 8 | def get_global_travel_time(self):
gtt = 0
for entity in self.tripinfo:
gtt += self.get_duration(entity, default=0.0 | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | get_global_travel_time | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | utils.py | 11 | 7 | https://github.com/ray-project/ray.git | 3 | 53 | 0 | 17 | 79 | Python | {
"docstring": "\n Returns the global travel time computed from SUMO tripinfo data.\n\n The functions process_tripinfo_file() needs to be called in advance\n to initialize the data structures required.\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 25,
"vocab_size": 23
} | def get_global_travel_time(self):
gtt = 0
for entity in self.tripinfo:
gtt += self.get_duration(entity, default=0.0)
for entity in self.personinfo:
gtt += self.get_duration(entity, default=0.0)
return gtt
###########################################################################
# ROUTING
|
|
4,190 | 22,114 | 26 | pipenv/patched/pip/_vendor/requests/sessions.py | 13 | 7 | def post(self, url, data=None, json=None, **kwargs):
r
re | Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir. | post | cd5a9683be69c86c8f3adcd13385a9bc5db198ec | pipenv | sessions.py | 8 | 11 | https://github.com/pypa/pipenv.git | 1 | 40 | 0 | 12 | 58 | Python | {
"docstring": "Sends a POST request. Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param data: (optional) Dictionary, list of tuples, bytes, or file-like\n object to send in the body of the :class:`Request`.\n :param json: (optional) json to send in the body of the :class:`Request`.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n :rtype: requests.Response\n ",
"language": "en",
"n_whitespaces": 108,
"n_words": 55,
"vocab_size": 39
} | def post(self, url, data=None, json=None, **kwargs):
r
return self.request("POST", url, data=data, json=json, **kwargs)
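# Typical call through the module-level shortcut; httpbin.org is a public echo service,
# so this snippet needs network access and is only illustrative.
import requests

resp = requests.post("https://httpbin.org/post", json={"name": "demo"}, timeout=10)
resp.raise_for_status()
print(resp.json()["json"])  # {'name': 'demo'}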
|
|
52,723 | 209,531 | 162 | scapy/contrib/http2.py | 44 | 10 | def __getitem__(self, idx):
# type: (int) -> HPackHdrEntry
assert idx >= 0
if idx > type(self)._static_entries_last_idx:
idx -= type(self)._static_entries_last_idx + 1
if idx >= len(self._dynamic_table):
raise KeyError(
'EINVAL: idx: out-of-bound read: {}; maximum index: {}'.format(idx, len(self._dynamic_tab | E275 - Missing whitespace after keyword (#3711)
Co-authored-by: Alexander Aring <alex.aring@gmail.com>
Co-authored-by: Anmol Sarma <me@anmolsarma.in>
Co-authored-by: antoine.torre <torreantoine1@gmail.com>
Co-authored-by: Antoine Vacher <devel@tigre-bleu.net>
Co-authored-by: Arnaud Ebalard <arno@natisbad.org>
Co-authored-by: atlowl <86038305+atlowl@users.noreply.github.com>
Co-authored-by: Brian Bienvenu <brian@bienvenu.id.au>
Co-authored-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
Co-authored-by: CQ <cq674350529@163.com>
Co-authored-by: Daniel Collins <kinap@users.noreply.github.com>
Co-authored-by: Federico Maggi <federico.maggi@gmail.com>
Co-authored-by: Florian Maury <florian.maury@ssi.gouv.fr>
Co-authored-by: _Frky <3105926+Frky@users.noreply.github.com>
Co-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com>
Co-authored-by: gpotter2 <gabriel@potter.fr>
Co-authored-by: Guillaume Valadon <guillaume@valadon.net>
Co-authored-by: Hao Zheng <haozheng10@gmail.com>
Co-authored-by: Haresh Khandelwal <hareshkhandelwal@gmail.com>
Co-authored-by: Harri Hämäläinen <hhamalai@iki.fi>
Co-authored-by: hecke <hecke@naberius.de>
Co-authored-by: Jan Romann <jan.romann@gmail.com>
Co-authored-by: Jan Sebechlebsky <sebechlebskyjan@gmail.com>
Co-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com>
Co-authored-by: jockque <38525640+jockque@users.noreply.github.com>
Co-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com>
Co-authored-by: Keith Scott <kscott@mitre.org>
Co-authored-by: Kfir Gollan <kfir@drivenets.com>
Co-authored-by: Lars Munch <lars@segv.dk>
Co-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com>
Co-authored-by: Leonard Crestez <cdleonard@gmail.com>
Co-authored-by: Marcel Patzlaff <mpatzlaff@benocs.com>
Co-authored-by: Martijn Thé <martijnthe@users.noreply.github.com>
Co-authored-by: Martine Lenders <authmillenon@gmail.com>
Co-authored-by: Michael Farrell <micolous+git@gmail.com>
Co-authored-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
Co-authored-by: mkaliszan <mkaliszan@benocs.com>
Co-authored-by: mtury <maxence.tury@ssi.gouv.fr>
Co-authored-by: Neale Ranns <nranns@cisco.com>
Co-authored-by: Octavian Toader <Octavian.Toader@belden.com>
Co-authored-by: Peter Eisenlohr <peter@eisenlohr.org>
Co-authored-by: Phil <phil@secdev.org>
Co-authored-by: Pierre Lalet <pierre@droids-corp.org>
Co-authored-by: Pierre Lorinquer <pierre.lorinquer@ssi.gouv.fr>
Co-authored-by: piersoh <42040737+piersoh@users.noreply.github.com>
Co-authored-by: plorinquer <pierre.lorinquer@ssi.gouv.fr>
Co-authored-by: pvinci <pvinci@users.noreply.github.com>
Co-authored-by: Rahul Jadhav <nyrahul@gmail.com>
Co-authored-by: Robin Jarry <robin.jarry@6wind.com>
Co-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com>
Co-authored-by: rperez <rperez@debian>
Co-authored-by: Sabrina Dubroca <sd@queasysnail.net>
Co-authored-by: Sebastian Baar <sebastian.baar@gmx.de>
Co-authored-by: sebastien mainand <sebastien.mainand@ssi.gouv.fr>
Co-authored-by: smehner1 <smehner1@gmail.com>
Co-authored-by: speakinghedge <hecke@naberius.de>
Co-authored-by: Steven Van Acker <steven@singularity.be>
Co-authored-by: Thomas Faivre <thomas.faivre@6wind.com>
Co-authored-by: Tran Tien Dat <peter.trantiendat@gmail.com>
Co-authored-by: Wael Mahlous <wael.mahlous@gmail.com>
Co-authored-by: waeva <74464394+waeva@users.noreply.github.com>
Co-authored-by: Alexander Aring <alex.aring@gmail.com>
Co-authored-by: Anmol Sarma <me@anmolsarma.in>
Co-authored-by: antoine.torre <torreantoine1@gmail.com>
Co-authored-by: Antoine Vacher <devel@tigre-bleu.net>
Co-authored-by: Arnaud Ebalard <arno@natisbad.org>
Co-authored-by: atlowl <86038305+atlowl@users.noreply.github.com>
Co-authored-by: Brian Bienvenu <brian@bienvenu.id.au>
Co-authored-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
Co-authored-by: CQ <cq674350529@163.com>
Co-authored-by: Daniel Collins <kinap@users.noreply.github.com>
Co-authored-by: Federico Maggi <federico.maggi@gmail.com>
Co-authored-by: Florian Maury <florian.maury@ssi.gouv.fr>
Co-authored-by: _Frky <3105926+Frky@users.noreply.github.com>
Co-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com>
Co-authored-by: gpotter2 <gabriel@potter.fr>
Co-authored-by: Guillaume Valadon <guillaume@valadon.net>
Co-authored-by: Hao Zheng <haozheng10@gmail.com>
Co-authored-by: Haresh Khandelwal <hareshkhandelwal@gmail.com>
Co-authored-by: Harri Hämäläinen <hhamalai@iki.fi>
Co-authored-by: hecke <hecke@naberius.de>
Co-authored-by: Jan Romann <jan.romann@gmail.com>
Co-authored-by: Jan Sebechlebsky <sebechlebskyjan@gmail.com>
Co-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com>
Co-authored-by: jockque <38525640+jockque@users.noreply.github.com>
Co-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com>
Co-authored-by: Keith Scott <kscott@mitre.org>
Co-authored-by: Kfir Gollan <kfir@drivenets.com>
Co-authored-by: Lars Munch <lars@segv.dk>
Co-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com>
Co-authored-by: Leonard Crestez <cdleonard@gmail.com>
Co-authored-by: Marcel Patzlaff <mpatzlaff@benocs.com>
Co-authored-by: Martijn Thé <martijnthe@users.noreply.github.com>
Co-authored-by: Martine Lenders <authmillenon@gmail.com>
Co-authored-by: Michael Farrell <micolous+git@gmail.com>
Co-authored-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
Co-authored-by: mkaliszan <mkaliszan@benocs.com>
Co-authored-by: mtury <maxence.tury@ssi.gouv.fr>
Co-authored-by: Neale Ranns <nranns@cisco.com>
Co-authored-by: Octavian Toader <Octavian.Toader@belden.com>
Co-authored-by: Peter Eisenlohr <peter@eisenlohr.org>
Co-authored-by: Phil <phil@secdev.org>
Co-authored-by: Pierre Lalet <pierre@droids-corp.org>
Co-authored-by: Pierre Lorinquer <pierre.lorinquer@ssi.gouv.fr>
Co-authored-by: piersoh <42040737+piersoh@users.noreply.github.com>
Co-authored-by: pvinci <pvinci@users.noreply.github.com>
Co-authored-by: Rahul Jadhav <nyrahul@gmail.com>
Co-authored-by: Robin Jarry <robin.jarry@6wind.com>
Co-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com>
Co-authored-by: rperez <rperez@debian>
Co-authored-by: Sabrina Dubroca <sd@queasysnail.net>
Co-authored-by: Sebastian Baar <sebastian.baar@gmx.de>
Co-authored-by: sebastien mainand <sebastien.mainand@ssi.gouv.fr>
Co-authored-by: smehner1 <smehner1@gmail.com>
Co-authored-by: Steven Van Acker <steven@singularity.be>
Co-authored-by: Thomas Faivre <thomas.faivre@6wind.com>
Co-authored-by: Tran Tien Dat <peter.trantiendat@gmail.com>
Co-authored-by: Wael Mahlous <wael.mahlous@gmail.com>
Co-authored-by: waeva <74464394+waeva@users.noreply.github.com> | __getitem__ | 08b1f9d67c8e716fd44036a027bdc90dcb9fcfdf | scapy | http2.py | 16 | 10 | https://github.com/secdev/scapy.git | 3 | 76 | 0 | 37 | 124 | Python | {
"docstring": "Gets an element from the header tables (static or dynamic indifferently)\n\n :param int idx: the index number of the entry to retrieve. If the index\n value is superior to the last index of the static entry table, then the\n dynamic entry type is requested, following the procedure described in\n RFC 7541 par2.3.3\n :return: HPackHdrEntry: the entry defined at this requested index. If the entry does not exist, KeyError is # noqa: E501\n raised\n :raises: KeyError, AssertionError\n ",
"language": "en",
"n_whitespaces": 135,
"n_words": 76,
"vocab_size": 55
} | def __getitem__(self, idx):
# type: (int) -> HPackHdrEntry
assert idx >= 0
if idx > type(self)._static_entries_last_idx:
idx -= type(self)._static_entries_last_idx + 1
if idx >= len(self._dynamic_table):
raise KeyError(
'EINVAL: idx: out-of-bound read: {}; maximum index: {}'.format(idx, len(self._dynamic_table)) # noqa: E501
)
return self._dynamic_table[idx]
return type(self)._static_entries[idx]
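# Toy model of the combined static/dynamic lookup; the table entries are invented and
# far simpler than real HPACK header entries.
class DemoHeaderTable:
    static = [":authority", ":method GET", ":method POST"]

    def __init__(self):
        self.dynamic = []

    def __getitem__(self, idx):
        assert idx >= 0
        last_static_idx = len(self.static) - 1
        if idx > last_static_idx:
            idx -= last_static_idx + 1
            if idx >= len(self.dynamic):
                raise KeyError(f"out-of-bound read: {idx}")
            return self.dynamic[idx]
        return self.static[idx]

table = DemoHeaderTable()
table.dynamic.append("x-custom-header: demo")
print(table[0])  # :authority
print(table[3])  # x-custom-header: demo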
|
|
51,819 | 206,977 | 208 | tests/admin_changelist/tests.py | 67 | 27 | def test_pagination(self):
parent = Parent.objects.create(name="anything")
for i in range(1, 31):
Child.objects.create(name="name %s" % i, parent=parent)
Child.objects.create(name="filtered %s" % i, parent=parent)
request = self.factory.get("/child/")
request.user = self.superuser
# Test default queryset
m = ChildAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 60)
self.assertEqual(cl.paginator.count, 60)
self.assertEqual(list( | Refs #33476 -- Reformatted code with Black. | test_pagination | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 12 | 17 | https://github.com/django/django.git | 2 | 209 | 0 | 43 | 327 | Python | {
"docstring": "\n Regression tests for #12893: Pagination in admins changelist doesn't\n use queryset set by modeladmin.\n ",
"language": "en",
"n_whitespaces": 36,
"n_words": 14,
"vocab_size": 14
} | def test_pagination(self):
parent = Parent.objects.create(name="anything")
for i in range(1, 31):
Child.objects.create(name="name %s" % i, parent=parent)
Child.objects.create(name="filtered %s" % i, parent=parent)
request = self.factory.get("/child/")
request.user = self.superuser
# Test default queryset
m = ChildAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 60)
self.assertEqual(cl.paginator.count, 60)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
# Test custom queryset
m = FilteredChildAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 30)
self.assertEqual(cl.paginator.count, 30)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
|
|
75,274 | 258,522 | 155 | sklearn/discriminant_analysis.py | 47 | 14 | def transform(self, X):
if self.solver == "lsqr":
raise NotImplementedError(
"transform not implemented for 'lsqr' solver (use 'svd' or 'eigen')."
)
check_is_fitted(self)
X = self._validate_data(X, reset=False)
if self.solver == "svd":
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == "eigen":
X_new = np.dot(X, self.scalings_)
return X_new[:, : self._max_components]
| DOC Add documentation on output shape of LDA.transform (#22238) | transform | ab08e4dba5f1f87b8c3395f32469a6ddb5e34f89 | scikit-learn | discriminant_analysis.py | 12 | 12 | https://github.com/scikit-learn/scikit-learn.git | 4 | 88 | 0 | 38 | 147 | Python | {
"docstring": "Project data to maximize class separation.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components) or \\\n (n_samples, min(rank, n_components))\n Transformed data. In the case of the 'svd' solver, the shape\n is (n_samples, min(rank, n_components)).\n ",
"language": "en",
"n_whitespaces": 139,
"n_words": 46,
"vocab_size": 34
} | def transform(self, X):
if self.solver == "lsqr":
raise NotImplementedError(
"transform not implemented for 'lsqr' solver (use 'svd' or 'eigen')."
)
check_is_fitted(self)
X = self._validate_data(X, reset=False)
if self.solver == "svd":
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == "eigen":
X_new = np.dot(X, self.scalings_)
return X_new[:, : self._max_components]
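# Hedged end-to-end usage on a toy dataset (iris); with three classes at most two
# discriminant components are available.
from sklearn.datasets import load_iris
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X, y = load_iris(return_X_y=True)
lda = LinearDiscriminantAnalysis(solver="svd", n_components=2).fit(X, y)
X_new = lda.transform(X)
print(X_new.shape)  # (150, 2)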
|
|
110,121 | 311,456 | 301 | tests/components/homekit_controller/test_climate.py | 101 | 25 | async def test_heater_cooler_hvac_mode_vs_hvac_action(hass, utcnow):
helper = await setup_test_component(hass, create_heater_cooler_service)
# Simulate that current temperature is above target temp
# Heating might be on, but hvac_action currently 'off'
await helper.async_update(
ServicesTypes.HEATER_COOLER,
{
CharacteristicsTypes.TEMPERATURE_CURRENT: 22,
CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD: 21,
CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.IDLE,
CharacteristicsTypes.TARGET_HEATER_COOLER_STATE: TargetHeaterCoolerStateValues.HEAT,
CharacteristicsTypes.SWING_MODE: SwingModeValues.DISABLED,
},
)
state = await helper.poll_and_get_state()
assert state | Improve homekit_controller tests (#65266) | test_heater_cooler_hvac_mode_vs_hvac_action | 58b8c30221a6f6e5acbbe98b7e3298b03fb741f5 | core | test_climate.py | 11 | 28 | https://github.com/home-assistant/core.git | 1 | 161 | 0 | 56 | 256 | Python | {
"docstring": "Check that we haven't conflated hvac_mode and hvac_action.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | async def test_heater_cooler_hvac_mode_vs_hvac_action(hass, utcnow):
helper = await setup_test_component(hass, create_heater_cooler_service)
# Simulate that current temperature is above target temp
# Heating might be on, but hvac_action currently 'off'
await helper.async_update(
ServicesTypes.HEATER_COOLER,
{
CharacteristicsTypes.TEMPERATURE_CURRENT: 22,
CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD: 21,
CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.IDLE,
CharacteristicsTypes.TARGET_HEATER_COOLER_STATE: TargetHeaterCoolerStateValues.HEAT,
CharacteristicsTypes.SWING_MODE: SwingModeValues.DISABLED,
},
)
state = await helper.poll_and_get_state()
assert state.state == "heat"
assert state.attributes["hvac_action"] == "idle"
# Simulate that current temperature is below target temp
# Heating might be on and hvac_action currently 'heat'
await helper.async_update(
ServicesTypes.HEATER_COOLER,
{
CharacteristicsTypes.TEMPERATURE_CURRENT: 19,
CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD: 21,
CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.HEATING,
CharacteristicsTypes.TARGET_HEATER_COOLER_STATE: TargetHeaterCoolerStateValues.HEAT,
CharacteristicsTypes.SWING_MODE: SwingModeValues.DISABLED,
},
)
state = await helper.poll_and_get_state()
assert state.state == "heat"
assert state.attributes["hvac_action"] == "heating"
|
|
74,072 | 253,415 | 74 | examples/contrib/webscanner_helper/watchdog.py | 20 | 5 | def not_in_timeout(cls, last_triggered, timeout):
return (
last_triggered is None
or timeout is None
or (tim | [autofix.ci] apply automated fixes | not_in_timeout | 8c2428c9d355ca5fbc3dd90e9820ceb1cc795837 | mitmproxy | watchdog.py | 12 | 6 | https://github.com/mitmproxy/mitmproxy.git | 3 | 32 | 0 | 16 | 51 | Python | {
"docstring": "Checks if current error lies not in timeout after last trigger (potential reset of connection).",
"language": "en",
"n_whitespaces": 14,
"n_words": 15,
"vocab_size": 15
} | def not_in_timeout(cls, last_triggered, timeout):
return (
last_triggered is None
or timeout is None
or (time.time() - last_triggered > timeout)
)
|
|
47,049 | 194,733 | 909 | parlai/core/torch_generator_agent.py | 201 | 42 | def get_rescored_finished(self, n_best=None):
# if we never actually finished, force one
if not self.finished:
self.outputs[-1][0] = self.eos
self.finished.append(
_HypothesisTail(
timestep=len(self.outputs) - 1,
hypid=0,
score=self.all_scores[-1][0],
tokenid=self.outputs[-1][0],
token_score=self.token_scores[0, -1]
if self.token_scores is not None
else None,
token_rank=self.token_ranks[0, -1]
if self.token_ranks is not None
else None,
)
)
rescored_finished = []
for finished_item in self.finished:
current_length = finished_item.timestep + 1
# these weights are from Google NMT paper
length_penalty = math.pow((1 + current_length) / 6, self.length_penalty)
rescored_finished.append(
_HypothesisTail(
timestep=finished_item.timestep,
h | Logging token level losses at inference time (#4169) | get_rescored_finished | daa85bf085c9e275cc65d0b03758d1f70742b57f | ParlAI | torch_generator_agent.py | 16 | 51 | https://github.com/facebookresearch/ParlAI.git | 9 | 336 | 0 | 140 | 525 | Python | {
"docstring": "\n Return finished hypotheses according to adjusted scores.\n\n Score adjustment is done according to the Google NMT paper, which\n penalizes long utterances.\n\n :param n_best:\n number of finalized hypotheses to return\n\n :return:\n list of (tokens, score, token_metadata) 3-tuples, in sorted order, where:\n - tokens is a tensor of token ids\n - score is the adjusted log probability of the entire utterance\n - token_metadata dictionary:\n token_logprobs -> a tensor of conditional log probabilities of tokens\n token_ranks -> a tensor of ranks of tokens in vocabulator, by probability, when sampled\n ",
"language": "en",
"n_whitespaces": 228,
"n_words": 86,
"vocab_size": 59
} | def get_rescored_finished(self, n_best=None):
# if we never actually finished, force one
if not self.finished:
self.outputs[-1][0] = self.eos
self.finished.append(
_HypothesisTail(
timestep=len(self.outputs) - 1,
hypid=0,
score=self.all_scores[-1][0],
tokenid=self.outputs[-1][0],
token_score=self.token_scores[0, -1]
if self.token_scores is not None
else None,
token_rank=self.token_ranks[0, -1]
if self.token_ranks is not None
else None,
)
)
rescored_finished = []
for finished_item in self.finished:
current_length = finished_item.timestep + 1
# these weights are from Google NMT paper
length_penalty = math.pow((1 + current_length) / 6, self.length_penalty)
rescored_finished.append(
_HypothesisTail(
timestep=finished_item.timestep,
hypid=finished_item.hypid,
score=finished_item.score / length_penalty,
tokenid=finished_item.tokenid,
token_score=finished_item.token_score,
token_rank=finished_item.token_rank,
)
)
# Note: beam size is almost always pretty small, so sorting is cheap enough
srted = sorted(rescored_finished, key=attrgetter('score'), reverse=True)
if n_best is not None:
srted = srted[:n_best]
n_best_list = []
for hyp in srted:
hyp_data = self._get_hyp_from_finished(hyp)
token_ids = self._get_pretty_hypothesis(hyp_data)
token_metadata = (
self._get_pretty_token_metadata(hyp_data) if self.verbose else None
)
n_best_list.append((token_ids, hyp.score, token_metadata))
# check that there is at least one finished candidate
# and assert that each of them contains only one EOS
assert (
len(n_best_list) >= 1
), f'TreeSearch returned {len(n_best_list)} candidates, must be >= 1'
for (pred, score, _) in n_best_list:
assert (pred == self.eos).sum() == 1, (
f'TreeSearch returned a finalized hypo with multiple end tokens '
f'with score {score.item():.2f}'
)
return n_best_list
|
|
39,870 | 166,924 | 22 | pandas/core/resample.py | 8 | 5 | def quantile(self, q=0.5, **kwargs):
return self._do | DEPR: numeric_only default in resampler ops (#47177) | quantile | 62b6d25551d006758422c20e7f931858e23054a9 | pandas | resample.py | 8 | 2 | https://github.com/pandas-dev/pandas.git | 1 | 29 | 0 | 8 | 44 | Python | {
"docstring": "\n Return value at the given quantile.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n\n Returns\n -------\n DataFrame or Series\n Quantile of values within each group.\n\n See Also\n --------\n Series.quantile\n Return a series, where the index is q and the values are the quantiles.\n DataFrame.quantile\n Return a DataFrame, where the columns are the columns of self,\n and the values are the quantiles.\n DataFrameGroupBy.quantile\n Return a DataFrame, where the coulmns are groupby columns,\n and the values are its quantiles.\n ",
"language": "en",
"n_whitespaces": 238,
"n_words": 80,
"vocab_size": 48
} | def quantile(self, q=0.5, **kwargs):
return self._downsample("quantile", q=q, **kwargs)
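# Hedged usage sketch: per-window medians over made-up hourly data (frequency aliases
# may differ slightly across pandas versions).
import pandas as pd

idx = pd.date_range("2023-01-01", periods=6, freq="H")
s = pd.Series([1, 2, 3, 10, 20, 30], index=idx)
print(s.resample("3H").quantile(0.5))
# 2023-01-01 00:00:00     2.0
# 2023-01-01 03:00:00    20.0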
|
|
45,933 | 188,795 | 25 | src/calibre/gui2/preferences/create_custom_column.py | 11 | 5 | def current_columns(self):
return copy.deepcopy(self.custcols) #de | Yet another version of CreateNewCustomColumn.
My apologies for the multiple commits. I have been working with @davidfor and we cycled a few times. I hope this is the last, barring bugs. | current_columns | 7b9bb6e62424e4b3c960e9e25c45a6946988959c | calibre | create_custom_column.py | 8 | 2 | https://github.com/kovidgoyal/calibre.git | 1 | 15 | 0 | 11 | 28 | Python | {
"docstring": "\n Return the currently defined custom columns\n\n Return the currently defined custom columns including the ones that haven't\n yet been created. It is a dict of dicts defined as follows:\n custcols[lookup_name] = {\n 'label': lookup_name,\n 'name': column_heading,\n 'datatype': datatype,\n 'display': display,\n 'normalized': None,\n 'colnum': an integer used internally,\n 'is_multiple': is_multiple,\n }\n Columns that already exist will have additional attributes that this class\n doesn't use. See calibre.library.field_metadata.add_custom_field() for the\n complete list.\n ",
"language": "en",
"n_whitespaces": 278,
"n_words": 69,
"vocab_size": 58
} | def current_columns(self):
return copy.deepcopy(self.custcols) #deepcopy to prevent users from changing it
|
|
16,345 | 75,054 | 59 | wagtail/images/image_operations.py | 16 | 8 | def transform_vector(self, vector):
return Vector(
(vector | Reformat with black | transform_vector | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | image_operations.py | 12 | 5 | https://github.com/wagtail/wagtail.git | 1 | 52 | 0 | 14 | 78 | Python | {
"docstring": "\n Transforms the given vector into the coordinate space of the final image.\n\n Use this to find out where a point on the source image would end up in the\n final image after cropping/resizing has been performed.\n\n Returns a new vector.\n ",
"language": "en",
"n_whitespaces": 76,
"n_words": 40,
"vocab_size": 33
} | def transform_vector(self, vector):
return Vector(
(vector.x + self.offset[0]) * self.scale[0],
(vector.y + self.offset[1]) * self.scale[1],
)
|
|
17,347 | 82,306 | 60 | cms/utils/conf.py | 27 | 8 | def _load_from_file(module_path):
from imp import PY_SOURCE, load_module
imported = None
if module_path:
with open(module_path, 'r') as openfile:
imported = load_module("mod", openfile, module_path, ('imp | Enabled isort workflow (#7200)
* Ran isort
* Enabled isort workflow
Co-authored-by: Vinit Kumar <mail@vinitkumar.me> | _load_from_file | a3110e1ff24085373898c7d2a85f628abeb8518d | django-cms | conf.py | 14 | 7 | https://github.com/django-cms/django-cms.git | 2 | 48 | 0 | 24 | 85 | Python | {
"docstring": "\n Load a python module from its absolute filesystem path\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 9,
"vocab_size": 9
} | def _load_from_file(module_path):
from imp import PY_SOURCE, load_module
imported = None
if module_path:
with open(module_path, 'r') as openfile:
imported = load_module("mod", openfile, module_path, ('imported', 'r', PY_SOURCE))
return imported
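# The imp module is deprecated (removed in Python 3.12); a rough importlib-based
# equivalent of the helper above might look like this. The module name "mod" mirrors
# the original.
import importlib.util

def load_from_file(module_path):
    if not module_path:
        return None
    spec = importlib.util.spec_from_file_location("mod", module_path)
    if spec is None or spec.loader is None:
        return None
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module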
|
|
50,562 | 203,858 | 284 | django/contrib/gis/db/backends/postgis/schema.py | 60 | 15 | def _alter_column_type_sql(self, table, old_field, new_field, new_type):
if not hasattr(old_field, "dim") or not hasattr(new_field, "dim"):
return super()._alter_column_type_sql(table, old_field, new_field, new_type)
if old_field.dim == 2 and new_field.dim == 3:
sql_alter = self.sql_alter_column_to_3d
elif old_field.dim == 3 and new_field.dim == 2:
sql_alter = self.sql_alter_co | Refs #33476 -- Reformatted code with Black. | _alter_column_type_sql | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | schema.py | 13 | 20 | https://github.com/django/django.git | 7 | 121 | 0 | 42 | 189 | Python | {
"docstring": "\n Special case when dimension changed.\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 5,
"vocab_size": 5
} | def _alter_column_type_sql(self, table, old_field, new_field, new_type):
if not hasattr(old_field, "dim") or not hasattr(new_field, "dim"):
return super()._alter_column_type_sql(table, old_field, new_field, new_type)
if old_field.dim == 2 and new_field.dim == 3:
sql_alter = self.sql_alter_column_to_3d
elif old_field.dim == 3 and new_field.dim == 2:
sql_alter = self.sql_alter_column_to_2d
else:
sql_alter = self.sql_alter_column_type
return (
(
sql_alter
% {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
),
[],
)
|