Dataset schema (column name, dtype, observed min/max — for string columns the min/max are string lengths):

    column           dtype           min      max
    complexity       int64           1        56
    n_identifiers    int64           1        114
    code             stringlengths   19       12.7k
    path             stringlengths   8        134
    n_ast_nodes      int64           12       2.35k
    ast_errors       stringlengths   0        4.01k
    repo             stringlengths   3        28
    documentation    dict            -        -
    n_words          int64           2        866
    language         stringclasses   1 value
    vocab_size       int64           2        323
    commit_id        stringlengths   40       40
    file_name        stringlengths   5        79
    id               int64           243      338k
    nloc             int64           1        228
    token_counts     int64           5        1.4k
    fun_name         stringlengths   1        77
    url              stringlengths   31       60
    commit_message   stringlengths   3        15.3k
    n_whitespaces    int64           1        3.23k
    n_ast_errors     int64           0        20
    d_id             int64           74       121k
    ast_levels       int64           4        29

The records below list their field values in this same column order; fields whose value is empty (typically ast_errors) are simply omitted.
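Each record pairs a Python function's source (code, documentation, fun_name, path) with AST statistics (n_ast_nodes, ast_levels, complexity) and the commit it came from (repo, commit_id, commit_message, url). As a rough sketch of how a dataset with these columns might be loaded and inspected — assuming it is published on the Hugging Face Hub; the dataset identifier below is a hypothetical placeholder, not given by this page:

# Minimal sketch, assuming the `datasets` library and a hypothetical dataset id.
from datasets import load_dataset

ds = load_dataset("someuser/python-code-metrics", split="train")  # placeholder id

row = ds[0]
# Each record carries the function source plus AST and commit metadata.
print(row["fun_name"], row["repo"], row["complexity"], row["nloc"])
print(row["code"])

# Example use: keep only simple, error-free functions.
simple = ds.filter(lambda r: r["complexity"] <= 3 and r["n_ast_errors"] == 0)
print(len(simple), "functions with complexity <= 3")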
1
9
def dist_from_wheel_url(name, url, session):
    # type: (str, str, PipSession) -> Distribution
    with LazyZipOverHTTP(url, session) as wheel:
        # For read-only ZIP files, ZipFile only needs methods read,
        # seek, seekable and tell, not the whole IO protocol.
        zip_file = ZipFile(wheel)  # type: ignore
        # After context manager exit, wheel.name
        # is an invalid file by intention.
        return pkg_resources_distribution_for_wheel(zip_file, name, wheel.name)
.venv/lib/python3.8/site-packages/pip/_internal/network/lazy_wheel.py
66
transferlearning
{ "docstring": "Return a pkg_resources.Distribution from the given wheel URL.\n\n This uses HTTP range requests to only fetch the potion of the wheel\n containing metadata, just enough for the object to be constructed.\n If such requests are not supported, HTTPRangeRequestUnsupported\n is raised.\n ", "language": "en", "n_whitespaces": 55, "n_words": 40, "vocab_size": 34 }
59
Python
53
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
lazy_wheel.py
60,904
4
37
dist_from_wheel_url
https://github.com/jindongwang/transferlearning.git
upd; format
111
0
12,335
10
3
6
def _configure_experiment_defaults(self):
    for option in self._exclude_autolog:
        if not self.experiment_kwargs.get(option):
            self.experiment_kwargs[option] = False
python/ray/tune/integration/comet.py
53
ray
{ "docstring": "Disable the specific autologging features that cause throttling.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
12
Python
12
3d79815cd08c1be8e56c245e662f34366523847e
comet.py
129,598
4
32
_configure_experiment_defaults
https://github.com/ray-project/ray.git
Comet Integration (#20766) This PR adds a `CometLoggerCallback` to the Tune Integrations, allowing users to log runs from Ray to [Comet](https://www.comet.ml/site/). Co-authored-by: Michael Cullan <mjcullan@gmail.com> Co-authored-by: Antoni Baum <antoni.baum@protonmail.com>
52
0
28,986
12
2
15
def add_group_component(self, components, name, group):
    new_comp = components[components['component'].isin(set(group))].copy()
    group_cols = new_comp['col'].unique()
    if len(group_cols) > 0:
        new_comp = pd.DataFrame({'col': group_cols, 'component': name})
        components = pd.concat([components, new_comp])
    return components
python/prophet/forecaster.py
134
prophet
{ "docstring": "Adds a component with given name that contains all of the components\n in group.\n\n Parameters\n ----------\n components: Dataframe with components.\n name: Name of new group component.\n group: List of components that form the group.\n\n Returns\n -------\n Dataframe with components.\n ", "language": "en", "n_whitespaces": 109, "n_words": 39, "vocab_size": 29 }
27
Python
22
10310ceb2da05837a198db6714d658a1e0a32478
forecaster.py
3,286
7
81
add_group_component
https://github.com/facebook/prophet.git
Change deprecated `append` to `concat` (#2114)
84
0
429
13
3
15
def permission_denied(request, exception, template_name=ERROR_403_TEMPLATE_NAME):
    try:
        template = loader.get_template(template_name)
    except TemplateDoesNotExist:
        if template_name != ERROR_403_TEMPLATE_NAME:
            # Reraise if it's a missing custom template.
            raise
        return HttpResponseForbidden(
            ERROR_PAGE_TEMPLATE % {"title": "403 Forbidden", "details": ""},
            content_type="text/html",
        )
    return HttpResponseForbidden(
        template.render(request=request, context={"exception": str(exception)})
    )
django/views/defaults.py
128
django
{ "docstring": "\n Permission denied (403) handler.\n\n Templates: :template:`403.html`\n Context:\n exception\n The message from the exception which triggered the 403 (if one was\n supplied).\n\n If the template does not exist, an Http403 response containing the text\n \"403 Forbidden\" (as per RFC 7231) will be returned.\n ", "language": "en", "n_whitespaces": 90, "n_words": 42, "vocab_size": 38 }
40
Python
36
9c19aff7c7561e3a82978a272ecdaad40dda5c00
defaults.py
206,834
13
74
permission_denied
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
134
0
51,740
14
1
6
def transpose_qkv(X, num_heads):
    # Shape of input `X`:
    # (`batch_size`, no. of queries or key-value pairs, `num_hiddens`).
    # Shape of output `X`:
    # (`batch_size`, no. of queries or key-value pairs, `num_heads`,
    # `num_hiddens` / `num_heads`)
    X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)

    # Shape of output `X`:
    # (`batch_size`, `num_heads`, no. of queries or key-value pairs,
    # `num_hiddens` / `num_heads`)
    X = X.transpose(0, 2, 1, 3)

    # Shape of `output`:
    # (`batch_size` * `num_heads`, no. of queries or key-value pairs,
    # `num_hiddens` / `num_heads`)
    return X.reshape(-1, X.shape[2], X.shape[3])
d2l/mxnet.py
111
d2l-zh
{ "docstring": "Transposition for parallel computation of multiple attention heads.\n\n Defined in :numref:`sec_multihead-attention`", "language": "en", "n_whitespaces": 13, "n_words": 11, "vocab_size": 11 }
87
Python
37
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
mxnet.py
158,149
4
69
transpose_qkv
https://github.com/d2l-ai/d2l-zh.git
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <haizhou.sun@smartmore.com> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <ubuntu@ip-172-31-12-66.us-west-2.compute.internal> Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> Co-authored-by: Aston Zhang <asv325@gmail.com> * 重复语句 (#1188) Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <haizhou.sun@smartmore.com> Co-authored-by: 
zhou201505013 <39976863+zhou201505013@users.noreply.github.com> Co-authored-by: Xinwei Liu <xinzone@outlook.com> Co-authored-by: Anirudh Dagar <anirudhdagar6@gmail.com> Co-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com> Co-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com> Co-authored-by: gyro永不抽风 <1247006353@qq.com> Co-authored-by: CanChengZheng <zcc550169544@163.com> Co-authored-by: linlin <jajupmochi@gmail.com> Co-authored-by: iuk <liukun0104@gmail.com> Co-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com> Co-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com> Co-authored-by: Chiyuan Fu <fuchiyuan2019@outlook.com> Co-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com> Co-authored-by: Haiker Sun <haizhou.uestc2011@gmail.com> Co-authored-by: Ming Liu <akira.liu@njnu.edu.cn> Co-authored-by: goldmermaid <goldpiggy@berkeley.edu> Co-authored-by: silenceZheng66 <13754430639@163.com> Co-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com> Co-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com> Co-authored-by: Krahets <krahets@163.com> Co-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com> Co-authored-by: Jameson <miraclecome@gmail.com> Co-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com> Co-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com> Co-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com> Co-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com> Co-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com> Co-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com> Co-authored-by: VigourJiang <jiangfuqiang154@163.com> Co-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com> Co-authored-by: LYF <27893441+liyufan@users.noreply.github.com> Co-authored-by: Aston Zhang <asv325@gmail.com> Co-authored-by: xiaotinghe <xiaotih@amazon.com> Co-authored-by: Ubuntu <ubuntu@ip-172-31-12-66.us-west-2.compute.internal> Co-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com> Co-authored-by: HinGwenWoong <peterhuang0323@qq.com> Co-authored-by: Shuai Zhang <cheungdaven@gmail.com>
132
0
37,330
10
1
14
async def test_device_diagnostics_error(hass, integration):
    dev_reg = async_get(hass)
    device = dev_reg.async_get_or_create(
        config_entry_id=integration.entry_id, identifiers={("test", "test")}
    )
    with pytest.raises(ValueError):
        await async_get_device_diagnostics(hass, integration, device)
tests/components/zwave_js/test_diagnostics.py
90
core
{ "docstring": "Test the device diagnostics raises exception when an invalid device is used.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
20
Python
19
11d0dcf7ac4ddc2638f403ef0ee6b796ac5bbceb
test_diagnostics.py
310,240
7
53
test_device_diagnostics_error
https://github.com/home-assistant/core.git
Add zwave_js device diagnostics (#64504) * Add zwave_js device diagnostics * Add diagnostics as a dependency in manifest * Add failure scenario test * fix device diagnostics helper and remove dependency * tweak
49
0
108,926
12
1
4
def set_runtime_library_dirs(self, dirs):
    self.runtime_library_dirs = dirs[:]
python3.10.4/Lib/distutils/ccompiler.py
28
XX-Net
{ "docstring": "Set the list of directories to search for shared libraries at\n runtime to 'dirs' (a list of strings). This does not affect any\n standard search path that the runtime linker may search by\n default.\n ", "language": "en", "n_whitespaces": 63, "n_words": 34, "vocab_size": 27 }
6
Python
6
8198943edd73a363c266633e1aa5b2a9e9c9f526
ccompiler.py
222,570
2
16
set_runtime_library_dirs
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
20
0
56,644
8
1
3
def exec(self):
    raise NotImplementedError
ppg2mel/train/solver.py
16
MockingBird
{ "docstring": "\n Called by main to execute training/inference\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
4
Python
4
b617a87ee40ab384767a27335313c2c65ee094ec
solver.py
161,034
2
8
exec
https://github.com/babysor/MockingBird.git
Init ppg extractor and ppg2mel (#375) * Init ppg extractor and ppg2mel * add preprocess and training * FIx known issues * Update __init__.py Allow to gen audio * Fix length issue * Fix bug of preparing fid * Fix sample issues * Add UI usage of PPG-vc
18
0
38,858
6
2
16
def _gen_md5_for_arraylike_obj(md5_gen, data):
    import numpy as np

    len_bytes = _hash_uint64_ndarray_as_bytes(np.array([len(data)], dtype="uint64"))
    md5_gen.update(len_bytes)
    if len(data) < EvaluationDataset.NUM_SAMPLE_ROWS_FOR_HASH * 2:
        md5_gen.update(_hash_array_like_obj_as_bytes(data))
    else:
        head_rows = data[: EvaluationDataset.NUM_SAMPLE_ROWS_FOR_HASH]
        tail_rows = data[-EvaluationDataset.NUM_SAMPLE_ROWS_FOR_HASH :]
        md5_gen.update(_hash_array_like_obj_as_bytes(head_rows))
        md5_gen.update(_hash_array_like_obj_as_bytes(tail_rows))
mlflow/models/evaluation/base.py
163
mlflow
{ "docstring": "\n Helper method to generate MD5 hash array-like object, the MD5 will calculate over:\n - array length\n - first NUM_SAMPLE_ROWS_FOR_HASH rows content\n - last NUM_SAMPLE_ROWS_FOR_HASH rows content\n ", "language": "en", "n_whitespaces": 45, "n_words": 26, "vocab_size": 20 }
30
Python
28
964f5ab75098c55f028f8acfeeae05df35ea68d5
base.py
19,026
11
98
_gen_md5_for_arraylike_obj
https://github.com/mlflow/mlflow.git
Evaluation Default evaluator (#5092) * init Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * rename module Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * revert black change Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * change module path Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * refactor Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * lazy load pyspark Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * revert export Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix curcit import Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix conftest.py Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * Revert "fix conftest.py" This reverts commit 2ea29c62bfffc5461bf77f3da15b5c00f51de19b. 
* fix tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * default evaluator Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update import Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update hash algo Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update import Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comment Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix lint Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add more tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix lint Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update shap explainer Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * remove scikitplot dep Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add pr curve Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add shap.summary_plot Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * log explainer Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * improve explainer code Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update shap init Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update explainer creating Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update predict_proba Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * refactor Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add multi-class metrics artifacts Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add log_loss metric Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * lazy load pyspark Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * address ben comments Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix 
Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * prevent show shap logo, add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * support spark model Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add shap version check Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update docs, loose classifier label limit Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * multiclass classifier merge metrics/plots Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * zfill feature name Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add config max_num_classes_threshold_logging_roc_pr_curve_for_multiclass_classifier Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * refactor Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * improve label handling Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * refactor Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * add tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * black Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * increase plot dpi Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix test fixture Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix pylint Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update doc Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * use matplot rc_context Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix shap import Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * refactor EvaluationDataset Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * limit user specify shap algos Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * clean Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * update evaluation dataset Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * use svg fig Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * revert svg Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * curve dashline, legend display ap/roc, legend move out Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * linewidth 1 Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * keyword arguments for evaluate, fix tests Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * mark abc.abstractmethod, kw args for ModelEvaluator methods Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix pylint Signed-off-by: Weichen Xu <weichen.xu@databricks.com> * fix pylint Signed-off-by: Weichen Xu <weichen.xu@databricks.com>
83
0
2,874
13
18
35
def fit(self, X, y, sample_weight=None):
    allowed_strategies = ("mean", "median", "quantile", "constant")
    if self.strategy not in allowed_strategies:
        raise ValueError(
            "Unknown strategy type: %s, expected one of %s."
            % (self.strategy, allowed_strategies)
        )

    y = check_array(y, ensure_2d=False, input_name="y")
    if len(y) == 0:
        raise ValueError("y must not be empty.")

    if y.ndim == 1:
        y = np.reshape(y, (-1, 1))
    self.n_outputs_ = y.shape[1]

    check_consistent_length(X, y, sample_weight)

    if sample_weight is not None:
        sample_weight = _check_sample_weight(sample_weight, X)

    if self.strategy == "mean":
        self.constant_ = np.average(y, axis=0, weights=sample_weight)

    elif self.strategy == "median":
        if sample_weight is None:
            self.constant_ = np.median(y, axis=0)
        else:
            self.constant_ = [
                _weighted_percentile(y[:, k], sample_weight, percentile=50.0)
                for k in range(self.n_outputs_)
            ]

    elif self.strategy == "quantile":
        if self.quantile is None or not np.isscalar(self.quantile):
            raise ValueError(
                "Quantile must be a scalar in the range [0.0, 1.0], but got %s."
                % self.quantile
            )

        percentile = self.quantile * 100.0
        if sample_weight is None:
            self.constant_ = np.percentile(y, axis=0, q=percentile)
        else:
            self.constant_ = [
                _weighted_percentile(y[:, k], sample_weight, percentile=percentile)
                for k in range(self.n_outputs_)
            ]

    elif self.strategy == "constant":
        if self.constant is None:
            raise TypeError(
                "Constant target value has to be specified "
                "when the constant strategy is used."
            )

        self.constant_ = check_array(
            self.constant,
            accept_sparse=["csr", "csc", "coo"],
            ensure_2d=False,
            ensure_min_samples=0,
        )

        if self.n_outputs_ != 1 and self.constant_.shape[0] != y.shape[1]:
            raise ValueError(
                "Constant target value should have shape (%d, 1)." % y.shape[1]
            )

    self.constant_ = np.reshape(self.constant_, (1, -1))
    return self
sklearn/dummy.py
646
scikit-learn
{ "docstring": "Fit the random regressor.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n self : object\n Fitted estimator.\n ", "language": "en", "n_whitespaces": 149, "n_words": 42, "vocab_size": 32 }
222
Python
128
b28c5bba66529217ceedd497201a684e5d35b73c
dummy.py
258,973
58
414
fit
https://github.com/scikit-learn/scikit-learn.git
FIX DummyRegressor overriding constant (#22486)
944
0
75,502
17
6
15
def _normalize_mode(im):
    if im.mode in RAWMODE:
        im.load()
        return im
    if Image.getmodebase(im.mode) == "RGB":
        im = im.convert("P", palette=Image.Palette.ADAPTIVE)
        if im.palette.mode == "RGBA":
            for rgba in im.palette.colors.keys():
                if rgba[3] == 0:
                    im.info["transparency"] = im.palette.colors[rgba]
                    break
        return im
    return im.convert("L")
src/PIL/GifImagePlugin.py
173
Pillow
{ "docstring": "\n Takes an image (or frame), returns an image in a mode that is appropriate\n for saving in a Gif.\n\n It may return the original image, or it may return an image converted to\n palette or 'L' mode.\n\n :param im: Image object\n :returns: Image object\n ", "language": "en", "n_whitespaces": 66, "n_words": 44, "vocab_size": 33 }
37
Python
26
76871795f787756ab1978772b53237948bec377a
GifImagePlugin.py
242,620
13
103
_normalize_mode
https://github.com/python-pillow/Pillow.git
Resolved UNDONE by removing code
148
0
69,883
17
6
14
def iter_tree_files(root, on_error=None, follow_links=None):
    if on_error is not None and not callable(on_error):
        raise TypeError("on_error:{!r} is not callable.".format(on_error))
    if follow_links is None:
        follow_links = True
    for entry in _iter_tree_entries_next(
        os.path.abspath(root), "", {}, on_error, follow_links
    ):
        if not entry.is_dir(follow_links):
            yield entry.path


# Alias `iter_tree_files()` as `iter_tree()`.
iter_tree = iter_tree_files
python/ray/_private/thirdparty/pathspec/util.py
136
ray
{ "docstring": "\n Walks the specified directory for all files.\n\n *root* (:class:`str`) is the root directory to search for files.\n\n *on_error* (:class:`~collections.abc.Callable` or :data:`None`)\n optionally is the error handler for file-system exceptions. It will be\n called with the exception (:exc:`OSError`). Reraise the exception to\n abort the walk. Default is :data:`None` to ignore file-system\n exceptions.\n\n *follow_links* (:class:`bool` or :data:`None`) optionally is whether\n to walk symbolic links that resolve to directories. Default is\n :data:`None` for :data:`True`.\n\n Raises :exc:`RecursionError` if recursion is detected.\n\n Returns an :class:`~collections.abc.Iterable` yielding the path to\n each file (:class:`str`) relative to *root*.\n ", "language": "en", "n_whitespaces": 133, "n_words": 90, "vocab_size": 59 }
47
Python
37
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
util.py
130,275
10
81
iter_tree_files
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
99
0
29,200
12
1
2
def insidetextorientation(self):
    return self["insidetextorientation"]
packages/python/plotly/plotly/graph_objs/_pie.py
22
plotly.py
{ "docstring": "\n Controls the orientation of the text inside chart sectors. When\n set to \"auto\", text may be oriented in any direction in order\n to be as big as possible in the middle of a sector. The\n \"horizontal\" option orients text to be parallel with the bottom\n of the chart, and may make text smaller in order to achieve\n that goal. The \"radial\" option orients text along the radius of\n the sector. The \"tangential\" option orients text perpendicular\n to the radius of the sector.\n\n The 'insidetextorientation' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['horizontal', 'radial', 'tangential', 'auto']\n\n Returns\n -------\n Any\n ", "language": "en", "n_whitespaces": 223, "n_words": 107, "vocab_size": 62 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_pie.py
227,576
2
11
insidetextorientation
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
59,249
7
34
11
def forgeHeaders(items=None, base=None):
    items = items or {}

    for _ in list(items.keys()):
        if items[_] is None:
            del items[_]

    headers = OrderedDict(conf.httpHeaders if base is None else base)
    headers.update(items.items())
lib/request/basic.py
107
sqlmap
{ "docstring": "\n Prepare HTTP Cookie, HTTP User-Agent and HTTP Referer headers to use when performing\n the HTTP requests\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 13 }
28
Python
23
216565fb05166d4bcf80b35a4f8f381e9f6b3d08
basic.py
123,858
53
506
forgeHeaders
https://github.com/sqlmapproject/sqlmap.git
Fixes #5275
61
0
27,460
10
15
35
def update(self, value=None, values=None, disabled=None, visible=None, size=(None, None)):
    if not self._widget_was_created():  # if widget hasn't been created yet, then don't allow
        return
    if values is not None:
        self.Values = values
        self.TKOptionMenu['menu'].delete(0, 'end')

        # Insert list of new options (tk._setit hooks them up to var)
        # self.TKStringVar.set(self.Values[0])
        for new_value in self.Values:
            self.TKOptionMenu['menu'].add_command(label=new_value, command=tk._setit(self.TKStringVar, new_value))
        if value is None:
            self.TKStringVar.set('')

        if size == (None, None):
            max_line_len = max([len(str(l)) for l in self.Values]) if len(self.Values) else 0
            if self.AutoSizeText is False:
                width = self.Size[0]
            else:
                width = max_line_len + 1
            self.TKOptionMenu.configure(width=width)
        else:
            self.TKOptionMenu.configure(width=size[0])

    if value is not None:
        self.DefaultValue = value
        self.TKStringVar.set(value)

    if disabled == True:
        self.TKOptionMenu['state'] = 'disabled'
    elif disabled == False:
        self.TKOptionMenu['state'] = 'normal'
    if visible is False:
        self._pack_forget_save_settings()
        # self.TKOptionMenu.pack_forget()
    elif visible is True:
        self._pack_restore_settings()
        # self.TKOptionMenu.pack(padx=self.pad_used[0], pady=self.pad_used[1])
    if visible is not None:
        self._visible = visible


Update = update
# -------------------------  OPTION MENU Element lazy functions  ------------------------- #
InputOptionMenu = OptionMenu


# ---------------------------------------------------------------------- #
#                                 Listbox                                 #
# ---------------------------------------------------------------------- #
PySimpleGUI.py
452
PySimpleGUI
{ "docstring": "\n Changes some of the settings for the OptionMenu Element. Must call `Window.Read` or `Window.Finalize` prior\n\n Changes will not be visible in your window until you call window.read or window.refresh.\n\n If you change visibility, your element may MOVE. If you want it to remain stationary, use the \"layout helper\"\n function \"pin\" to ensure your element is \"pinned\" to that location in your layout so that it returns there\n when made visible.\n\n :param value: the value to choose by default\n :type value: (Any)\n :param values: Values to be displayed\n :type values: List[Any]\n :param disabled: disable or enable state of the element\n :type disabled: (bool)\n :param visible: control visibility of element\n :type visible: (bool)\n :param size: (width, height) size in characters (wide), height is ignored and present to be consistent with other elements\n :type size: (int, int) (width, UNUSED)\n ", "language": "en", "n_whitespaces": 274, "n_words": 136, "vocab_size": 90 }
160
Python
96
ed2bc288ff17344f6406c49623036620f18e65bb
PySimpleGUI.py
212,851
52
270
update
https://github.com/PySimpleGUI/PySimpleGUI.git
Completed switching all elements over to the new way of handling visiblity
628
0
53,459
18
1
7
def hermite_prob_poly(n, x=None, polys=False):
    r
    return named_poly(n, dup_hermite_prob, ZZ,
            "probabilist's Hermite polynomial", (x,), polys)
sympy/polys/orthopolys.py
47
sympy
{ "docstring": "Generates the probabilist's Hermite polynomial `He_n(x)`.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "language": "en", "n_whitespaces": 64, "n_words": 32, "vocab_size": 27 }
14
Python
14
5c9a4787c032d39abb80aae106030b177263a7cc
orthopolys.py
199,733
14
33
hermite_prob_poly
https://github.com/sympy/sympy.git
Probabilist's Hermite polynomials The plain or physicist's Hermite polynomials have leading coefficient 2^n, which leads to orthogonality with respect to the simplest possible form of the weight function exp(-x^2) and is the specific normalisation appearing in the solution to the Schrödinger equation for the quantum harmonic oscillator, but leads to unnecessary complications everywhere else. Removing the twos in the 3-term recurrence relation leads to the monic probabilist's version; its weight function of exp(-x^2/2) becomes the standard normal distribution after normalising. This version also forms the sign-alternated matching polynomial for the complete graph, a highly interesting connection to combinatorics.
30
0
49,380
8
2
10
def dijkstra_predecessor_and_distance(G, source, cutoff=None, weight="weight"):
    if source not in G:
        raise nx.NodeNotFound(f"Node {source} is not found in the graph")
    weight = _weight_function(G, weight)
    pred = {source: []}  # dictionary of predecessors
    return (pred, _dijkstra(G, source, weight, pred=pred, cutoff=cutoff))
networkx/algorithms/shortest_paths/weighted.py
106
networkx
{ "docstring": "Compute weighted shortest path length and predecessors.\n\n Uses Dijkstra's Method to obtain the shortest weighted paths\n and return dictionaries of predecessors for each node and\n distance for each node from the `source`.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source : node label\n Starting node for path\n\n cutoff : integer or float, optional\n Length (sum of edge weights) at which the search is stopped.\n If cutoff is provided, only return paths with summed weight <= cutoff.\n\n weight : string or function\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number.\n\n Returns\n -------\n pred, distance : dictionaries\n Returns two dictionaries representing a list of predecessors\n of a node and the distance to each node.\n\n Raises\n ------\n NodeNotFound\n If `source` is not in `G`.\n\n Notes\n -----\n Edge weight attributes must be numerical.\n Distances are calculated as sums of weighted edges traversed.\n\n The list of predecessors contains more than one element only when\n there are more than one shortest paths to the key node.\n\n Examples\n --------\n >>> G = nx.path_graph(5, create_using=nx.DiGraph())\n >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0)\n >>> sorted(pred.items())\n [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]\n >>> sorted(dist.items())\n [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]\n\n >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0, 1)\n >>> sorted(pred.items())\n [(0, []), (1, [0])]\n >>> sorted(dist.items())\n [(0, 0), (1, 1)]\n ", "language": "en", "n_whitespaces": 519, "n_words": 302, "vocab_size": 161 }
38
Python
34
b7d65ffc7183e9a01cdc07b79f8f06403cc4dda4
weighted.py
176,660
6
67
dijkstra_predecessor_and_distance
https://github.com/networkx/networkx.git
DOC: remove note re: non-existant param (#5648)
61
0
42,028
11
2
12
def _chunk_positional_ranges(self) -> tuple[tuple[int, int], ...]:
    ranges = []
    stop = 0
    for c in self._data.iterchunks():
        start, stop = stop, stop + len(c)
        ranges.append((start, stop))
    return tuple(ranges)
pandas/core/arrays/_mixins.py
99
pandas
{ "docstring": "\n Return a tuple of tuples each containing the left (inclusive)\n and right (exclusive) positional bounds of each chunk's values\n within the underlying ChunkedArray.\n\n Returns\n -------\n tuple[tuple]\n ", "language": "en", "n_whitespaces": 76, "n_words": 26, "vocab_size": 23 }
27
Python
23
2d6a2c3e981208bf67bdd36cca726e8a399e487c
_mixins.py
165,733
16
63
_chunk_positional_ranges
https://github.com/pandas-dev/pandas.git
REF: move ArrowStringArray.__setitem__ and related methods to ArrowExtensionArray (#46439)
84
0
39,697
12
4
24
def _mesh(self):
    y, extendlen = self._proportional_y()
    # Use the vmin and vmax of the colorbar, which may not be the same
    # as the norm. There are situations where the colormap has a
    # narrower range than the colorbar and we want to accommodate the
    # extra contours.
    if (isinstance(self.norm, (colors.BoundaryNorm, colors.NoNorm))
            or self.boundaries is not None):
        # not using a norm.
        y = y * (self.vmax - self.vmin) + self.vmin
    else:
        # Update the norm values in a context manager as it is only
        # a temporary change and we don't want to propagate any signals
        # attached to the norm (callbacks.blocked).
        with self.norm.callbacks.blocked(), \
                cbook._setattr_cm(self.norm,
                                  vmin=self.vmin,
                                  vmax=self.vmax):
            y = self.norm.inverse(y)
    self._y = y
    X, Y = np.meshgrid([0., 1.], y)
    if self.orientation == 'vertical':
        return (X, Y, extendlen)
    else:
        return (Y, X, extendlen)
lib/matplotlib/colorbar.py
239
matplotlib
{ "docstring": "\n Return the coordinate arrays for the colorbar pcolormesh/patches.\n\n These are scaled between vmin and vmax, and already handle colorbar\n orientation.\n ", "language": "en", "n_whitespaces": 49, "n_words": 20, "vocab_size": 17 }
134
Python
92
fdb4ad372ab66177d99e478385c807e5843d6a0f
colorbar.py
107,725
17
152
_mesh
https://github.com/matplotlib/matplotlib.git
MNT: Use a context manager to change the norm in colorbar code This removes a deepcopy of the norm in Colorbar, instead updating the vmin/vmax via the context manager and ignoring any callback updates in the process.
429
0
22,892
14
5
20
def try_to_replace(self, provider, other, problems):
    rlist = self.reqts[other]
    unmatched = set()
    for s in rlist:
        matcher = self.get_matcher(s)
        if not matcher.match(provider.version):
            unmatched.add(s)
    if unmatched:
        # can't replace other with provider
        problems.add(('cantreplace', provider, other,
                      frozenset(unmatched)))
        result = False
    else:
        # can replace other with provider
        self.remove_distribution(other)
        del self.reqts[other]
        for s in rlist:
            self.reqts.setdefault(provider, set()).add(s)
        self.add_distribution(provider)
        result = True
    return result
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/locators.py
202
transferlearning
{ "docstring": "\n Attempt to replace one provider with another. This is typically used\n when resolving dependencies from multiple sources, e.g. A requires\n (B >= 1.0) while C requires (B >= 1.1).\n\n For successful replacement, ``provider`` must meet all the requirements\n which ``other`` fulfills.\n\n :param provider: The provider we are trying to replace with.\n :param other: The provider we're trying to replace.\n :param problems: If False is returned, this will contain what\n problems prevented replacement. This is currently\n a tuple of the literal string 'cantreplace',\n ``provider``, ``other`` and the set of requirements\n that ``provider`` couldn't fulfill.\n :return: True if we can replace ``other`` with ``provider``, else\n False.\n ", "language": "en", "n_whitespaces": 288, "n_words": 104, "vocab_size": 78 }
59
Python
40
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
locators.py
62,006
19
126
try_to_replace
https://github.com/jindongwang/transferlearning.git
upd; format
284
0
12,817
16
11
19
def set_choices(self, choices, choices_names=None, none_choice_name=None):
    self.unselect()

    # Validate choices
    if choices is None:
        raise ValueError('Choices cannot be None.')
    if not isinstance(choices, Iterable):
        raise ValueError('Choices must be Iterable')

    if choices_names is None:
        choices_names = tuple(str(c) for c in choices)
    elif isinstance(choices_names, (list,tuple)):
        if len(choices_names) != len(choices):
            raise ValueError('mismatch len of choices and choices names')
    elif isinstance(choices_names, dict):
        choices_names = [ choices_names[x] for x in choices ]
    else:
        raise ValueError('unsupported type of choices_names')

    if not all( isinstance(x, str) for x in choices_names ):
        raise ValueError('all values in choices_names must be a str')

    choices = tuple(choices)
    self._set_choices(choices, choices_names, none_choice_name)
    self._send_choices()
xlib/mp/csw/DynamicSingleSwitch.py
260
DeepFaceLive
{ "docstring": "\n set choices, and optional choices_names.\n\n choices_names list/dict/None if list, should match the len of choices\n if dict, should return a str by key of choice\n if None, choices will be stringfied\n\n none_choice_name('') str/None if not None, shows None choice with name,\n by default empty string\n ", "language": "en", "n_whitespaces": 230, "n_words": 45, "vocab_size": 36 }
97
Python
62
ae8a1e0ff4b13e6e6a0155e346864805b2ca81dd
DynamicSingleSwitch.py
179,085
20
160
set_choices
https://github.com/iperov/DeepFaceLive.git
added Face Animator module
364
0
42,898
13
2
14
def get_batches_by_oldest(item_code, warehouse):
    batches = get_batch_qty(item_code=item_code, warehouse=warehouse)
    batches_dates = [
        [batch, frappe.get_value("Batch", batch.batch_no, "expiry_date")] for batch in batches
    ]
    batches_dates.sort(key=lambda tup: tup[1])
    return batches_dates


@frappe.whitelist()
erpnext/stock/doctype/batch/batch.py
106
@frappe.whitelist()
erpnext
{ "docstring": "Returns the oldest batch and qty for the given item_code and warehouse", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 10 }
25
Python
22
494bd9ef78313436f0424b918f200dab8fc7c20b
batch.py
67,582
7
60
get_batches_by_oldest
https://github.com/frappe/erpnext.git
style: format code with black
17
1
14,565
11
3
11
def recover_args(flattened_args):
    assert (
        len(flattened_args) % 2 == 0
    ), "Flattened arguments need to be even-numbered. See `flatten_args`."
    args = []
    kwargs = {}
    for name_index in range(0, len(flattened_args), 2):
        name, arg = flattened_args[name_index], flattened_args[name_index + 1]
        if name == DUMMY_TYPE:
            args.append(arg)
        else:
            kwargs[name] = arg
    return args, kwargs
python/ray/_private/signature.py
128
ray
{ "docstring": "Recreates `args` and `kwargs` from the flattened arg list.\n\n Args:\n flattened_args: List of args and kwargs. This should be the output of\n `flatten_args`.\n\n Returns:\n args: The non-keyword arguments passed into the function.\n kwargs: The keyword arguments passed into the function.\n ", "language": "en", "n_whitespaces": 81, "n_words": 40, "vocab_size": 30 }
49
Python
43
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
signature.py
130,222
13
80
recover_args
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
120
0
29,164
12
2
8
def captured_output(stream_name):
    # type: (str) -> Iterator[StreamWrapper]
    orig_stdout = getattr(sys, stream_name)
    setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
    try:
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, orig_stdout)
.venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py
75
transferlearning
{ "docstring": "Return a context manager used by captured_stdout/stdin/stderr\n that temporarily replaces the sys stream *stream_name* with a StringIO.\n\n Taken from Lib/support/__init__.py in the CPython repo.\n ", "language": "en", "n_whitespaces": 33, "n_words": 24, "vocab_size": 22 }
22
Python
18
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
misc.py
61,271
7
46
captured_output
https://github.com/jindongwang/transferlearning.git
upd; format
54
0
12,482
10
1
2
def _recreate_cm(self):
    return self
python3.10.4/Lib/contextlib.py
16
XX-Net
{ "docstring": "Return a recreated instance of self.\n\n Allows an otherwise one-shot context manager like\n _GeneratorContextManager to support use as\n a decorator via implicit recreation.\n\n This is a private interface just for _GeneratorContextManager.\n See issue #11647 for details.\n ", "language": "en", "n_whitespaces": 78, "n_words": 36, "vocab_size": 33 }
4
Python
4
8198943edd73a363c266633e1aa5b2a9e9c9f526
contextlib.py
221,732
2
8
_recreate_cm
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
18
0
56,495
6
24
40
def _fill_disk_filename(conn, vm_name, disk, hypervisor, pool_caps):
    # Compute the filename without extension since it may not make sense for some pool types
    disk["filename"] = "{}_{}".format(vm_name, disk["name"])

    # Compute the source file path
    base_dir = disk.get("pool", None)
    if hypervisor in ["qemu", "kvm", "xen"]:
        # Compute the base directory from the pool property. We may have either a path
        # or a libvirt pool name there.
        if not base_dir:
            base_dir = _get_images_dir()

        # If the pool is a known libvirt one, skip the filename since a libvirt volume will be created later
        if base_dir not in conn.listStoragePools():
            # For path-based disks, keep the qcow2 default format
            if not disk.get("format"):
                disk["format"] = "qcow2"
            disk["filename"] = "{}.{}".format(disk["filename"], disk["format"])
            disk["source_file"] = os.path.join(base_dir, disk["filename"])
        else:
            if "pool" not in disk:
                disk["pool"] = base_dir
            pool_obj = conn.storagePoolLookupByName(base_dir)
            pool_xml = ElementTree.fromstring(pool_obj.XMLDesc())
            pool_type = pool_xml.get("type")

            # Disk pools volume names are partition names, they need to be named based on the device name
            if pool_type == "disk":
                device = pool_xml.find("./source/device").get("path")
                all_volumes = pool_obj.listVolumes()
                if disk.get("source_file") not in all_volumes:
                    indexes = [
                        int(re.sub("[a-z]+", "", vol_name)) for vol_name in all_volumes
                    ] or [0]
                    index = min(
                        idx for idx in range(1, max(indexes) + 2) if idx not in indexes
                    )
                    disk["filename"] = "{}{}".format(os.path.basename(device), index)

            # Is the user wanting to reuse an existing volume?
            if disk.get("source_file"):
                if not disk.get("source_file") in pool_obj.listVolumes():
                    raise SaltInvocationError(
                        "{} volume doesn't exist in pool {}".format(
                            disk.get("source_file"), base_dir
                        )
                    )
                disk["filename"] = disk["source_file"]
                del disk["source_file"]

            # Get the default format from the pool capabilities
            if not disk.get("format"):
                volume_options = (
                    [
                        type_caps.get("options", {}).get("volume", {})
                        for type_caps in pool_caps.get("pool_types")
                        if type_caps["name"] == pool_type
                    ]
                    or [{}]
                )[0]
                # Still prefer qcow2 if possible
                if "qcow2" in volume_options.get("targetFormatType", []):
                    disk["format"] = "qcow2"
                else:
                    disk["format"] = volume_options.get("default_format", None)

    elif hypervisor == "bhyve" and vm_name:
        disk["filename"] = "{}.{}".format(vm_name, disk["name"])
        disk["source_file"] = os.path.join(
            "/dev/zvol", base_dir or "", disk["filename"]
        )
    elif hypervisor in ["esxi", "vmware"]:
        if not base_dir:
            base_dir = __salt__["config.get"]("virt:storagepool", "[0] ")
        disk["filename"] = "{}.{}".format(disk["filename"], disk["format"])
        disk["source_file"] = "{}{}".format(base_dir, disk["filename"])
salt/modules/virt.py
924
salt
{ "docstring": "\n Compute the disk file name and update it in the disk value.\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 10 }
328
Python
174
f2a783643de61cac1ff3288b40241e5ce6e1ddc8
virt.py
215,962
60
518
_fill_disk_filename
https://github.com/saltstack/salt.git
Update to latest ``pyupgrade`` hook. Stop skipping it on CI. Signed-off-by: Pedro Algarvio <palgarvio@vmware.com>
1,234
0
54,284
23
1
13
def test_auto_create_auto_join_remote_room(self) -> None:
    # Register a first user; this should call _create_and_join_rooms
    self.get_success(self.handler.register_user(localpart="jeff"))

    _, kwargs = self.room_member_handler.update_membership.call_args

    self.assertEqual(kwargs["room_id"], self.room_id)
    self.assertEqual(kwargs["action"], "join")
    self.assertEqual(kwargs["remote_room_hosts"], ["remotetest"])

    # Register a second user; this should call _join_rooms
    self.get_success(self.handler.register_user(localpart="jeff2"))

    _, kwargs = self.room_member_handler.update_membership.call_args
    self.assertEqual(kwargs["room_id"], self.room_id)
    self.assertEqual(kwargs["action"], "join")
    self.assertEqual(kwargs["remote_room_hosts"], ["remotetest"])
tests/handlers/test_register.py
232
synapse
{ "docstring": "Tests that we don't attempt to create remote rooms, and that we don't attempt\n to invite ourselves to rooms we're not in.", "language": "en", "n_whitespaces": 28, "n_words": 22, "vocab_size": 16 }
44
Python
27
652d1669c5a103b1c20478770c4aaf18849c09a3
test_register.py
250,388
13
134
test_auto_create_auto_join_remote_room
https://github.com/matrix-org/synapse.git
Add missing type hints to tests.handlers. (#14680) And do not allow untyped defs in tests.handlers.
135
0
73,411
11
1
12
def logsumexp(x, axis=None, keepdims=False):
    return tf.reduce_logsumexp(x, axis, keepdims)


@keras_export("keras.backend.round")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
keras/backend.py
67
@keras_export("keras.backend.round") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
keras
{ "docstring": "Computes log(sum(exp(elements across dimensions of a tensor))).\n\n This function is more numerically stable than log(sum(exp(x))).\n It avoids overflows caused by taking the exp of large inputs and\n underflows caused by taking the log of small inputs.\n\n Args:\n x: A tensor or variable.\n axis: An integer, the axis to reduce over.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. If `keepdims` is `True`, the reduced dimension is\n retained with length 1.\n\n Returns:\n The reduced tensor.\n ", "language": "en", "n_whitespaces": 168, "n_words": 89, "vocab_size": 62 }
11
Python
11
84afc5193d38057e2e2badf9c889ea87d80d8fbf
backend.py
269,469
2
25
logsumexp
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
14
1
80,104
7
1
22
def test_patch_bot_owner_of_bot_with_can_create_users(self) -> None:
    cordelia = self.example_user("cordelia")

    self.login("hamlet")
    self.create_bot()

    bot_realm = get_realm("zulip")
    bot_email = "hambot-bot@zulip.testserver"
    bot_user = get_user(bot_email, bot_realm)

    do_change_can_create_users(bot_user, True)

    self.logout()
    # iago is an ordinary organization administrator, and thus doesn't have
    # sufficient permissions to change ownership of this bot.
    self.login("iago")
    bot_info = {
        "bot_owner_id": cordelia.id,
    }
    result = self.client_patch(f"/json/bots/{bot_user.id}", bot_info)
    self.assert_json_error(
        result,
        "Must be an organization owner",
    )

    self.logout()
    # desdemona is the organization owner and should be allowed to change the bot's ownership.
    self.login("desdemona")
    result = self.client_patch(f"/json/bots/{bot_user.id}", bot_info)
    self.assert_json_success(result)

    bot_user.refresh_from_db()
    self.assertEqual(bot_user.bot_owner, cordelia)
zerver/tests/test_bots.py
251
zulip
{ "docstring": "\n can_create_users is granted to organizations upon approval, and thus\n should be thought of as something that only organization owners should\n have control over.\n ", "language": "en", "n_whitespaces": 52, "n_words": 23, "vocab_size": 22 }
86
Python
65
af5d0d6f5e5444332f9f8e565d97f4acdceaa72f
test_bots.py
83,535
29
134
test_patch_bot_owner_of_bot_with_can_create_users
https://github.com/zulip/zulip.git
bots: Don't allow admins to change owner of bot with can_create_users. Ordinary organization administrators shouldn't be allowed to change ownership of a bot with the can_create_users permission. This is a special permission that is granted manually by server administrators to an organization (to a UserProfile of the org owners' choice) after approval by a server administator. The code comments provide more detail about why this is sensitive.
287
0
17,679
11
2
5
def percentile_fn(data, percentile):
    return data[int((len(data) - 1) * percentile)] if len(data) > 0 else None
src/sentry/api/endpoints/project_dynamic_sampling.py
54
sentry
{ "docstring": "\n Returns the nth percentile from a sorted list\n\n :param percentile: A value between 0 and 1\n :param data: Sorted list of values\n ", "language": "en", "n_whitespaces": 35, "n_words": 22, "vocab_size": 20 }
15
Python
15
923658b395545abc1b7f7a39cf64d198c9feea74
project_dynamic_sampling.py
92,239
2
34
percentile_fn
https://github.com/getsentry/sentry.git
feat(dynamic-sampling): Adds endpoint that returns onboarding flow trace info [TET-176] (#36113) This PR adds an endpoint for the dynamic sampling onboarding flow that: - Does a query to the transactions table to fetch a random sampleSize over the last passed statsPeriod date range. - If distrubutedTracing mode is enabled, then it runs a subsequent query to fetch the project breakdown in the traces from the first query - Calculates distribution function values like p50, p90, p95, p99, avg, max, min on the client side sample rates returned from the first query - Returns the percentage of transactions that did not have a sample rate
21
0
18,892
14
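A small usage sketch for the percentile_fn helper shown in this row (the sorted sample rates below are invented for illustration):

def percentile_fn(data, percentile):
    return data[int((len(data) - 1) * percentile)] if len(data) > 0 else None

sample_rates = [0.1, 0.25, 0.5, 0.5, 0.75, 0.9, 1.0]  # already sorted, as the docstring requires
print(percentile_fn(sample_rates, 0.5))   # index int(6 * 0.5) = 3 -> 0.5 (median)
print(percentile_fn(sample_rates, 0.95))  # index int(6 * 0.95) = 5 -> 0.9
print(percentile_fn([], 0.5))             # empty input -> None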
7
12
def equals(self, other): if not isinstance(other, PermutationGroup): return False set_self_gens = set(self.generators) set_other_gens = set(other.generators) # before reaching the general case there are also certain # optimisation and obvious cases requiring less or no actual # computation. if set_self_gens == set_other_gens: return True # in the most general case it will check that each generator of # one group belongs to the other PermutationGroup and vice-versa for gen1 in set_self_gens: if not other.contains(gen1): return False for gen2 in set_other_gens: if not self.contains(gen2): return False return True
sympy/combinatorics/perm_groups.py
127
sympy
{ "docstring": "Return ``True`` if PermutationGroup generated by elements in the\n group are same i.e they represent the same PermutationGroup.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation, PermutationGroup\n >>> p = Permutation(0, 1, 2, 3, 4, 5)\n >>> G = PermutationGroup([p, p**2])\n >>> H = PermutationGroup([p**2, p])\n >>> G.generators == H.generators\n False\n >>> G.equals(H)\n True\n\n ", "language": "en", "n_whitespaces": 137, "n_words": 53, "vocab_size": 43 }
86
Python
59
3e167a67bde4b4817666de48bf98d247bed86e2d
perm_groups.py
196,343
14
76
equals
https://github.com/sympy/sympy.git
Update sympy/combinatorics/perm_groups.py
251
0
47,843
10
1
1
def set_vars(): return {}
lib/gui/display_page.py
17
faceswap
{ "docstring": " Override to return a dict of page specific variables ", "language": "en", "n_whitespaces": 10, "n_words": 9, "vocab_size": 9 }
4
Python
4
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
display_page.py
100,334
2
8
set_vars
https://github.com/deepfakes/faceswap.git
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
18
0
19,830
6
4
24
async def get_tasks(self) -> dict: replies = await asyncio.gather( *[ self._client.get_task_info(node_id, timeout=DEFAULT_RPC_TIMEOUT) for node_id in self._client.get_all_registered_raylet_ids() ] ) result = defaultdict(dict) for reply in replies: tasks = reply.task_info_entries for task in tasks: data = self._message_to_dict( message=task, fields_to_decode=["task_id"], ) data = filter_fields(data, TaskState) result[data["task_id"]] = data return result
dashboard/state_aggregator.py
157
ray
{ "docstring": "List all task information from the cluster.\n\n Returns:\n {task_id -> task_data_in_dict}\n task_data_in_dict's schema is in TaskState\n ", "language": "en", "n_whitespaces": 52, "n_words": 16, "vocab_size": 16 }
47
Python
34
30ab5458a7e4ba2351d5e1beef8c8797b5946493
state_aggregator.py
138,400
24
98
get_tasks
https://github.com/ray-project/ray.git
[State Observability] Tasks and Objects API (#23912) This PR implements ray list tasks and ray list objects APIs. NOTE: You can ignore the merge conflict for now. It is because the first PR was reverted. There's a fix PR open now.
261
0
31,408
15
1
2
def measure(self): return self["measure"]
packages/python/plotly/plotly/graph_objs/_waterfall.py
22
plotly.py
{ "docstring": "\n An array containing types of values. By default the values are\n considered as 'relative'. However; it is possible to use\n 'total' to compute the sums. Also 'absolute' could be applied\n to reset the computed total or to declare an initial value\n where needed.\n\n The 'measure' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ", "language": "en", "n_whitespaces": 143, "n_words": 65, "vocab_size": 54 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_waterfall.py
228,605
2
11
measure
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
60,278
7
1
3
def disabled_train(self, mode=True): return self
ldm/models/diffusion/ddpm.py
21
stablediffusion
{ "docstring": "Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.", "language": "en", "n_whitespaces": 16, "n_words": 14, "vocab_size": 14 }
5
Python
5
ca86da3a30c4e080d4db8c25fca73de843663cb4
ddpm.py
157,333
2
12
disabled_train
https://github.com/Stability-AI/stablediffusion.git
release more models
11
0
36,899
6
1
11
def test_no_auth(self) -> None: channel = self.make_request("GET", self.url, b"{}") self.assertEqual( 401, channel.code, msg=channel.json_body, ) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
tests/rest/admin/test_device.py
88
synapse
{ "docstring": "\n Try to list devices of an user without authentication.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
16
Python
16
1595052b2681fb86c1c1b9a6028c1bc0d38a2e4b
test_device.py
249,234
11
55
test_no_auth
https://github.com/matrix-org/synapse.git
Use literals in place of `HTTPStatus` constants in tests (#13479) Replace - `HTTPStatus.NOT_FOUND` - `HTTPStatus.FORBIDDEN` - `HTTPStatus.UNAUTHORIZED` - `HTTPStatus.CONFLICT` - `HTTPStatus.CREATED` Signed-off-by: Dirk Klimpel <dirk@klimpel.org>
84
0
72,738
9
1
19
def test_image_crafter_index(encoder_doc_array, tmpdir): create_test_img(path=str(tmpdir), file_name='1.jpg') with Flow().add(uses=ImageCrafter) as f: res = f.index(inputs=encoder_doc_array) assert len(res) == 1 doc = res[0] assert doc.mime_type == 'image/jpeg' assert doc.tensor is not None
tests/unit/helloworld/multimodal/test_executors.py
120
jina
{ "docstring": "In this test, we input one ``DocumentArray`` with one ``Document``,\n and the `craft` method in the ``ImageCrafter`` returns chunks.\n In the ``ImageCrafter``, we filtered out all the modalities and only kept `image/jpeg`.\n So the 2 chunks should left only 1 chunk.\n And the tensor value of the ``Document`` is not empty once we finished crafting since\n we converted image uri/datauri to tensor.\n ", "language": "en", "n_whitespaces": 80, "n_words": 62, "vocab_size": 49 }
28
Python
24
217a11bb8dc613ed1136b8b541a68e6d53ca4fc1
test_executors.py
11,747
8
71
test_image_crafter_index
https://github.com/jina-ai/jina.git
test: fix tests failing after new docarray patch (#4449) * test: fix tests failing after new docarray patch * test: fix failing tests
56
0
2,104
12
1
9
def orthographic_projection(X, camera): camera = camera.reshape((-1, 1, 3)) X_trans = X[:, :, :2] + camera[:, :, 1:] shape = paddle.shape(X_trans) X_2d = (camera[:, :, 0] * X_trans.reshape((shape[0], -1))).reshape(shape) return X_2d @register
ppdet/modeling/architectures/pose3d_metro.py
137
@register
PaddleDetection
{ "docstring": "Perform orthographic projection of 3D points X using the camera parameters\n Args:\n X: size = [B, N, 3]\n camera: size = [B, 3]\n Returns:\n Projected 2D points -- size = [B, N, 2]\n ", "language": "en", "n_whitespaces": 63, "n_words": 33, "vocab_size": 24 }
31
Python
25
d4e34fe165c09db65fd00113708be1b711ac957c
pose3d_metro.py
211,415
6
86
orthographic_projection
https://github.com/PaddlePaddle/PaddleDetection.git
pose3d metro modeling (#6612) * pose3d metro modeling * delete extra comments
48
1
53,089
14
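A rough NumPy equivalent of the orthographic_projection helper above, assuming camera holds [scale, tx, ty] per batch item as the Paddle code implies (a sketch for illustration, not the recorded implementation):

import numpy as np

def orthographic_projection_np(X, camera):
    # X: (B, N, 3) 3D points; camera: (B, 3) = [scale, tx, ty] per batch item.
    camera = camera.reshape((-1, 1, 3))
    X_trans = X[:, :, :2] + camera[:, :, 1:]   # translate the x/y coordinates
    return camera[:, :, 0:1] * X_trans          # scale; the depth (z) column is dropped

points = np.array([[[1.0, 2.0, 5.0], [3.0, 4.0, -1.0]]])   # shape (1, 2, 3)
cam = np.array([[2.0, 0.5, -0.5]])                          # scale=2, tx=0.5, ty=-0.5
print(orthographic_projection_np(points, cam))               # [[[3. 3.] [7. 7.]]]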
7
26
def to_euler(self, seq): extrinsic = _check_sequence(seq) i, j, k = seq.lower() i = _elementary_axis_index(i) j = _elementary_axis_index(j) k = _elementary_axis_index(k) if not extrinsic: i, k = k, i # check if sequence is symmetric symmetric = i == k if symmetric: k = 6 - i - j # parity of the permutation sign = (i - j) * (j - k) * (k - i) // 2 # permutate elements elements = [self.a, self.b, self.c, self.d] a = elements[0] b = elements[i] c = elements[j] d = elements[k] * sign if not symmetric: a, b, c, d = a - c, b + d, c + a, d - b # calculate angles half_sum = atan2(b, a) half_diff = atan2(d, c) angle_2 = 2*atan2(sqrt(c*c + d*d), sqrt(a*a + b*b)) # alternatively, we can use this to avoid the square root: # angle_2 = acos(2*(a*a + b*b)/(a*a + b*b + c*c + d*d) - 1) angle_1 = half_sum + half_diff angle_3 = half_sum - half_diff if extrinsic: angle_1, angle_3 = angle_3, angle_1 # for Tait-Bryan angles if not symmetric: angle_2 -= pi / 2 if extrinsic: angle_3 *= sign else: angle_1 *= sign return Matrix([angle_1, angle_2, angle_3])
sympy/algebras/quaternion.py
404
sympy
{ "docstring": "Returns Euler angles representing same in the sequence given by\n `seq`.\n\n Parameters\n ==========\n\n seq : string of length 3\n Represents the sequence of rotations.\n For intrinsic rotations, seq but be all lowercase and its elements\n must be from the set `['x', 'y', 'z']`\n For extrinsic rotations, seq but be all uppercase and its elements\n must be from the set `['X', 'Y', 'Z']`\n\n Returns\n =======\n\n Matrix\n The Euler angles calculated from the quaternion\n\n Examples\n ========\n\n >>> from sympy import Quaternion\n >>> from sympy.abc import a, b, c, d\n >>> euler = Quaternion(a, b, c, d).to_euler('zyz')\n >>> euler\n Matrix([[-atan2(-b, c) + atan2(d, a)],\n [2*atan2(sqrt(b**2 + c**2), sqrt(a**2 + d**2))],\n [atan2(-b, c) + atan2(d, a)]])\n\n ", "language": "en", "n_whitespaces": 313, "n_words": 112, "vocab_size": 73 }
197
Python
104
1d8576449e7ab757f13f49a1d33faed602aa88fb
quaternion.py
200,596
33
258
to_euler
https://github.com/sympy/sympy.git
implemented to_euler and from_euler
521
0
49,725
13
1
16
def test_jupyter_file_output(tmp_path): scene_name = "SimpleScene" current_renderer = config.renderer with tempconfig({"scene_names": [scene_name], "renderer": "opengl"}): file_name = _generate_file_name() actual_path = tmp_path.with_name(file_name) with actual_path.open("w") as outfile: outfile.write("") assert actual_path.exists() assert actual_path.is_file() # needs manually set back to avoid issues across tests config.renderer = current_renderer
tests/opengl/test_ipython_magic_opengl.py
144
manim
{ "docstring": "Check the jupyter file naming is valid and can be created", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
41
Python
33
206db54af53a87985c0d243d75304ea620dad520
test_ipython_magic_opengl.py
190,005
11
76
test_jupyter_file_output
https://github.com/ManimCommunity/manim.git
Migrate more `os.path` to `pathlib` in tests (#2991) * Migrate more `os.path` to `pathlib` in tests * Convert test fixtures to pathlib * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix mypy errors in tests * migrate another pathlib instance Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Benjamin Hackl <devel@benjamin-hackl.at>
113
0
46,287
13
17
5
async def test_statistic_during_period_hole(recorder_mock, hass, hass_ws_client): id = 1
tests/components/recorder/test_websocket_api.py
23
core
{ "docstring": "Test statistic_during_period when there are holes in the data.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
8
Python
8
9b8f94363c0b4ecd1434ac1ac3bb82febd3889d0
test_websocket_api.py
290,687
137
900
test_statistic_during_period_hole
https://github.com/home-assistant/core.git
Fix statistic_during_period for data with holes (#81847)
14
0
89,801
6
1
32
async def test_valve_set_state(hass, hk_driver, events): entity_id = "switch.valve_test" hass.states.async_set(entity_id, None) await hass.async_block_till_done() acc = Valve(hass, hk_driver, "Valve", entity_id, 2, {CONF_TYPE: TYPE_FAUCET}) await acc.run() await hass.async_block_till_done() assert acc.category == 29 # Faucet assert acc.char_valve_type.value == 3 # Water faucet acc = Valve(hass, hk_driver, "Valve", entity_id, 3, {CONF_TYPE: TYPE_SHOWER}) await acc.run() await hass.async_block_till_done() assert acc.category == 30 # Shower assert acc.char_valve_type.value == 2 # Shower head acc = Valve(hass, hk_driver, "Valve", entity_id, 4, {CONF_TYPE: TYPE_SPRINKLER}) await acc.run() await hass.async_block_till_done() assert acc.category == 28 # Sprinkler assert acc.char_valve_type.value == 1 # Irrigation acc = Valve(hass, hk_driver, "Valve", entity_id, 5, {CONF_TYPE: TYPE_VALVE}) await acc.run() await hass.async_block_till_done() assert acc.aid == 5 assert acc.category == 29 # Faucet assert acc.char_active.value == 0 assert acc.char_in_use.value == 0 assert acc.char_valve_type.value == 0 # Generic Valve hass.states.async_set(entity_id, STATE_ON) await hass.async_block_till_done() assert acc.char_active.value == 1 assert acc.char_in_use.value == 1 hass.states.async_set(entity_id, STATE_OFF) await hass.async_block_till_done() assert acc.char_active.value == 0 assert acc.char_in_use.value == 0 # Set from HomeKit call_turn_on = async_mock_service(hass, "switch", "turn_on") call_turn_off = async_mock_service(hass, "switch", "turn_off") acc.char_active.client_update_value(1) await hass.async_block_till_done() assert acc.char_in_use.value == 1 assert call_turn_on assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id assert len(events) == 1 assert events[-1].data[ATTR_VALUE] is None acc.char_active.client_update_value(0) await hass.async_block_till_done() assert acc.char_in_use.value == 0 assert call_turn_off assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id assert len(events) == 2 assert events[-1].data[ATTR_VALUE] is None
tests/components/homekit/test_type_switches.py
685
core
{ "docstring": "Test if Valve accessory and HA are updated accordingly.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
207
Python
70
3b33e0d832b238b40360383099391e2093ea05cb
test_type_switches.py
289,146
51
431
test_valve_set_state
https://github.com/home-assistant/core.git
Add support for restoring HomeKit IIDs (#79913)
371
0
88,293
10
7
30
def deal_bb(result_token): # find out <thead></thead> parts. thead_pattern = '<thead>(.*?)</thead>' if re.search(thead_pattern, result_token) is None: return result_token thead_part = re.search(thead_pattern, result_token).group() origin_thead_part = copy.deepcopy(thead_part) # check "rowspan" or "colspan" occur in <thead></thead> parts or not . span_pattern = "<td rowspan=\"(\d)+\" colspan=\"(\d)+\">|<td colspan=\"(\d)+\" rowspan=\"(\d)+\">|<td rowspan=\"(\d)+\">|<td colspan=\"(\d)+\">" span_iter = re.finditer(span_pattern, thead_part) span_list = [s.group() for s in span_iter] has_span_in_head = True if len(span_list) > 0 else False if not has_span_in_head: # <thead></thead> not include "rowspan" or "colspan" branch 1. # 1. replace <td> to <td><b>, and </td> to </b></td> # 2. it is possible to predict text include <b> or </b> by Text-line recognition, # so we replace <b><b> to <b>, and </b></b> to </b> thead_part = thead_part.replace('<td>', '<td><b>')\ .replace('</td>', '</b></td>')\ .replace('<b><b>', '<b>')\ .replace('</b></b>', '</b>') else: # <thead></thead> include "rowspan" or "colspan" branch 2. # Firstly, we deal rowspan or colspan cases. # 1. replace > to ><b> # 2. replace </td> to </b></td> # 3. it is possible to predict text include <b> or </b> by Text-line recognition, # so we replace <b><b> to <b>, and </b><b> to </b> # Secondly, deal ordinary cases like branch 1 # replace ">" to "<b>" replaced_span_list = [] for sp in span_list: replaced_span_list.append(sp.replace('>', '><b>')) for sp, rsp in zip(span_list, replaced_span_list): thead_part = thead_part.replace(sp, rsp) # replace "</td>" to "</b></td>" thead_part = thead_part.replace('</td>', '</b></td>') # remove duplicated <b> by re.sub mb_pattern = "(<b>)+" single_b_string = "<b>" thead_part = re.sub(mb_pattern, single_b_string, thead_part) mgb_pattern = "(</b>)+" single_gb_string = "</b>" thead_part = re.sub(mgb_pattern, single_gb_string, thead_part) # ordinary cases like branch 1 thead_part = thead_part.replace('<td>', '<td><b>').replace('<b><b>', '<b>') # convert <tb><b></b></tb> back to <tb></tb>, empty cell has no <b></b>. # but space cell(<tb> </tb>) is suitable for <td><b> </b></td> thead_part = thead_part.replace('<td><b></b></td>', '<td></td>') # deal with duplicated <b></b> thead_part = deal_duplicate_bb(thead_part) # deal with isolate span tokens, which causes by wrong predict by structure prediction. # eg.PMC5994107_011_00.png thead_part = deal_isolate_span(thead_part) # replace original result with new thead part. result_token = result_token.replace(origin_thead_part, thead_part) return result_token
ppstructure/table/table_master_match.py
484
PaddleOCR
{ "docstring": "\n In our opinion, <b></b> always occurs in <thead></thead> text's context.\n This function will find out all tokens in <thead></thead> and insert <b></b> by manual.\n :param result_token:\n :return:\n ", "language": "en", "n_whitespaces": 43, "n_words": 27, "vocab_size": 24 }
324
Python
170
ddaa2c2552e19635cd6cdf38619f1f176c358f89
table_master_match.py
24,488
35
264
deal_bb
https://github.com/PaddlePaddle/PaddleOCR.git
add SLANet
720
0
4,740
17
6
11
def _convert_other(other, raiseit=False, allow_float=False): if isinstance(other, Decimal): return other if isinstance(other, int): return Decimal(other) if allow_float and isinstance(other, float): return Decimal.from_float(other) if raiseit: raise TypeError("Unable to convert %s to Decimal" % other) return NotImplemented
python3.10.4/Lib/_pydecimal.py
105
XX-Net
{ "docstring": "Convert other to Decimal.\n\n Verifies that it's ok to use in an implicit construction.\n If allow_float is true, allow conversion from float; this\n is used in the comparison methods (__eq__ and friends).\n\n ", "language": "en", "n_whitespaces": 45, "n_words": 32, "vocab_size": 29 }
34
Python
25
8198943edd73a363c266633e1aa5b2a9e9c9f526
_pydecimal.py
219,627
10
66
_convert_other
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
80
0
55,662
11
4
14
def _process_hidden_imports(self): # For each hidden import required by the module being hooked... for import_module_name in self.hiddenimports: try: # Graph node for this module. Do not implicitly create namespace packages for non-existent packages. caller = self.module_graph.find_node(self.module_name, create_nspkg=False) # Manually import this hidden import from this module. self.module_graph.import_hook(import_module_name, caller) # If this hidden import is unimportable, print a non-fatal warning. Hidden imports often become # desynchronized from upstream packages and hence are only "soft" recommendations. except ImportError: if self.warn_on_missing_hiddenimports: logger.warning('Hidden import "%s" not found!', import_module_name) # FIXME: This is pretty... intense. Attempting to cleanly "undo" prior module graph operations is a recipe for # subtle edge cases and difficult-to-debug issues. It would be both safer and simpler to prevent these # imports from being added to the graph in the first place. To do so: # # * Remove the _process_excluded_imports() method below. # * Remove the PostGraphAPI.del_imports() method, which cannot reasonably be supported by the following solution, # appears to be currently broken, and (in any case) is not called anywhere in the PyInstaller codebase. # * Override the ModuleGraph._safe_import_hook() superclass method with a new PyiModuleGraph._safe_import_hook() # subclass method resembling: # # def _safe_import_hook( # self, target_module_name, source_module, fromlist, # level=DEFAULT_IMPORT_LEVEL, attr=None): # # if source_module.identifier in self._module_hook_cache: # for module_hook in self._module_hook_cache[ # source_module.identifier]: # if target_module_name in module_hook.excludedimports: # return [] # # return super()._safe_import_hook( # target_module_name, source_module, fromlist, # level=level, attr=attr)
PyInstaller/depend/imphook.py
121
pyinstaller
{ "docstring": "\n Add all imports listed in this hook script's `hiddenimports` attribute to the module graph as if directly\n imported by this hooked module.\n\n These imports are typically _not_ implicitly detectable by PyInstaller and hence must be explicitly defined\n by hook scripts.\n ", "language": "en", "n_whitespaces": 76, "n_words": 40, "vocab_size": 35 }
234
Python
146
35451d0df77dd4e2c3ad613ee35cb28d99a9421e
imphook.py
263,832
8
57
_process_hidden_imports
https://github.com/pyinstaller/pyinstaller.git
depend: allow hooks to opt out of missing hidden import warnings Implement new standard hook variable, called `warn_on_missing_hiddenimports`. This optional boolean flag allows a hook to opt out from warnings generated by missing hidden imports originating from that hook.
615
0
77,456
15
3
3
def check_install_build_global(options, check_options=None): # type: (Values, Optional[Values]) -> None if check_options is None: check_options = options
.venv/lib/python3.8/site-packages/pip/_internal/cli/cmdoptions.py
32
transferlearning
{ "docstring": "Disable wheels if per-setup.py call options are set.\n\n :param options: The OptionParser options to update.\n :param check_options: The options to check, if not supplied defaults to\n options.\n ", "language": "en", "n_whitespaces": 43, "n_words": 27, "vocab_size": 20 }
16
Python
15
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
cmdoptions.py
60,504
13
62
check_install_build_global
https://github.com/jindongwang/transferlearning.git
upd; format
32
0
12,188
8
1
3
def user_documents_path() -> Path: return PlatformDirs().user_documents_path
pipenv/patched/notpip/_vendor/platformdirs/__init__.py
25
pipenv
{ "docstring": "\n :returns: documents path tied to the user\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
6
Python
6
f3166e673fe8d40277b804d35d77dcdb760fc3b3
__init__.py
20,185
5
13
user_documents_path
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyproject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
12
0
3,239
8
1
8
def _config_changeable_items(self) -> dict: return Config(self._config_section, configfile=self._configfile).changeable_items
plugins/train/model/_base/model.py
38
faceswap
{ "docstring": " dict: The configuration options that can be updated after the model has already been\n created. ", "language": "en", "n_whitespaces": 27, "n_words": 15, "vocab_size": 15 }
7
Python
7
ff6b0209dd5ad57b81b0aca570df7f39a7119bfb
model.py
100,823
4
23
_config_changeable_items
https://github.com/deepfakes/faceswap.git
Refactoring and TravisCI to Github Actions (#1239) * refactor training * travis to actions
21
0
20,274
10
1
5
def _import_module(name): __import__(name) return sys.modules[name]
.venv/lib/python3.8/site-packages/pip/_vendor/six.py
30
transferlearning
{ "docstring": "Import module, returning the module after the last dot.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
5
Python
5
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
six.py
63,730
3
17
_import_module
https://github.com/jindongwang/transferlearning.git
upd; format
14
0
13,488
7
2
31
def keras_model_summary(name, data, step=None): summary_metadata = tf.compat.v1.SummaryMetadata() # Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for # the rationale. summary_metadata.plugin_data.plugin_name = "graph_keras_model" # version number = 1 summary_metadata.plugin_data.content = b"1" try: json_string = data.to_json() except Exception as exc: # pylint: disable=broad-except # An exception should not break a model code. logging.warning( "Model failed to serialize as JSON. Ignoring... %s", exc ) return False with tf.summary.experimental.summary_scope( name, "graph_keras_model", [data, step] ) as (tag, _): with tf.device("cpu:0"): tensor = tf.constant(json_string, dtype=tf.string) return tf.summary.write( tag=tag, tensor=tensor, step=step, metadata=summary_metadata ) @keras_export("keras.callbacks.TensorBoard", v1=[])
keras/callbacks.py
240
@keras_export("keras.callbacks.TensorBoard", v1=[])
keras
{ "docstring": "Writes a Keras model as JSON to as a Summary.\n\n Writing the Keras model configuration allows the TensorBoard graph plugin to\n render a conceptual graph, as opposed to graph of ops. In case the model fails\n to serialize as JSON, it ignores and returns False.\n\n Args:\n name: A name for this summary. The summary tag used for TensorBoard will be\n this name prefixed by any active name scopes.\n data: A Keras Model to write.\n step: Explicit `int64`-castable monotonic step value for this summary. If\n omitted, this defaults to `tf.summary.experimental.get_step()`, which must\n not be None.\n\n Returns:\n True on success, or False if no summary was written because no default\n summary writer was available.\n\n Raises:\n ValueError: if a default writer exists, but no step was provided and\n `tf.summary.experimental.get_step()` is None.\n ", "language": "en", "n_whitespaces": 207, "n_words": 128, "vocab_size": 87 }
90
Python
73
84afc5193d38057e2e2badf9c889ea87d80d8fbf
callbacks.py
269,927
19
133
keras_model_summary
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
219
1
80,336
14
4
20
def test_operator_extra_link_multiple_operators(dag_run, task_2, task_3, viewer_client): response = viewer_client.get( f"{ENDPOINT}?dag_id={task_2.dag_id}&task_id={task_2.task_id}" f"&execution_date={DEFAULT_DATE}&link_name=airflow", follow_redirects=True, ) assert response.status_code == 200 response_str = response.data if isinstance(response.data, bytes): response_str = response_str.decode() assert json.loads(response_str) == {'url': 'https://airflow.apache.org/1.10.5/', 'error': None} response = viewer_client.get( f"{ENDPOINT}?dag_id={task_3.dag_id}&task_id={task_3.task_id}" f"&execution_date={DEFAULT_DATE}&link_name=airflow", follow_redirects=True, ) assert response.status_code == 200 response_str = response.data if isinstance(response.data, bytes): response_str = response_str.decode() assert json.loads(response_str) == {'url': 'https://airflow.apache.org/1.10.5/', 'error': None} # Also check that the other Operator Link defined for this operator exists response = viewer_client.get( f"{ENDPOINT}?dag_id={task_3.dag_id}&task_id={task_3.task_id}" f"&execution_date={DEFAULT_DATE}&link_name=google", follow_redirects=True, ) assert response.status_code == 200 response_str = response.data if isinstance(response.data, bytes): response_str = response_str.decode() assert json.loads(response_str) == {'url': 'https://www.google.com', 'error': None}
tests/www/views/test_views_extra_links.py
386
airflow
{ "docstring": "\n This tests checks if Operator Link (AirflowLink2) defined in\n Airflow Plugin (AirflowLink2) is attached to all the list of\n operators defined in the AirflowLink2().operators property\n\n AirflowLink2 returns 'https://airflow.apache.org/1.10.5/' link\n GoogleLink returns 'https://www.google.com'\n ", "language": "en", "n_whitespaces": 51, "n_words": 32, "vocab_size": 27 }
99
Python
43
08575ddd8a72f96a3439f73e973ee9958188eb83
test_views_extra_links.py
45,549
31
195
test_operator_extra_link_multiple_operators
https://github.com/apache/airflow.git
Change BaseOperatorLink interface to take a ti_key, not a datetime (#21798)
243
0
8,641
12
5
23
def to_json(self) -> dict: return { "id": self.id, "query": self.query, "opCode": self.op_code.name, "authoritativeAnswer": self.authoritative_answer, "truncation": self.truncation, "recursionDesired": self.recursion_desired, "recursionAvailable": self.recursion_available, "responseCode": self.response_code.name, "responseCodeHttpEquiv": self.response_code.http_equiv_status_code, "questions": [{ "name": question.name, "type": question.type.name, "class": question.class_.name, } for question in self.questions], "answers": [rr.to_json() for rr in self.answers], "authorities": [rr.to_json() for rr in self.authorities], "additionals": [rr.to_json() for rr in self.additionals], "size": self.size, "timestamp": self.timestamp, }
mitmproxy/dns.py
271
mitmproxy
{ "docstring": "\n Converts the message into json for the mitmweb.\n Sync with web/src/flow.ts.\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 10 }
60
Python
49
f2f918a17e8d06c638b1bb5b06b3150a786d77a0
dns.py
250,896
26
166
to_json
https://github.com/mitmproxy/mitmproxy.git
[dns] build and improve web UI
302
0
73,565
12
1
4
def no_verify_ssl(self) -> bool: return self.namespace.no_verify_ssl
certbot/certbot/configuration.py
25
certbot
{ "docstring": "Disable verification of the ACME server's certificate.\n\n The root certificates trusted by Certbot can be overriden by setting the\n REQUESTS_CA_BUNDLE environment variable.\n ", "language": "en", "n_whitespaces": 43, "n_words": 22, "vocab_size": 20 }
6
Python
6
ae7967c8aed28a8416a329e5eeac117c1672c878
configuration.py
186,795
7
14
no_verify_ssl
https://github.com/certbot/certbot.git
docs: how to override the trusted CA certificates (#9357) * docs: how to override the trusted CA certificates * Update certbot/docs/using.rst Co-authored-by: ohemorange <ebportnoy@gmail.com> Co-authored-by: ohemorange <ebportnoy@gmail.com>
20
0
45,626
7
2
7
def _dictionary(self): # type: () -> Dict[str, Any] # NOTE: Dictionaries are not populated if not loaded. So, conditionals # are not needed here. retval = {} for variant in OVERRIDE_ORDER: retval.update(self._config[variant]) return retval
.venv/lib/python3.8/site-packages/pip/_internal/configuration.py
50
transferlearning
{ "docstring": "A dictionary representing the loaded configuration.\n ", "language": "en", "n_whitespaces": 13, "n_words": 6, "vocab_size": 6 }
34
Python
28
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
configuration.py
60,671
5
28
_dictionary
https://github.com/jindongwang/transferlearning.git
upd; format
100
0
12,237
11
1
14
def test_api_set_storage_path_not_provided(self): response = self.client.post( "/api/documents/bulk_edit/", json.dumps( { "documents": [self.doc1.id], "method": "set_storage_path", "parameters": {}, }, ), content_type="application/json", ) self.assertEqual(response.status_code, 400) self.async_task.assert_not_called()
src/documents/tests/test_api.py
113
paperless-ngx
{ "docstring": "\n GIVEN:\n - API data to set the storage path of a document\n - API data is missing storage path ID\n WHEN:\n - API is called\n THEN:\n - set_storage_path is called with correct document IDs and storage_path ID\n ", "language": "en", "n_whitespaces": 110, "n_words": 37, "vocab_size": 24 }
21
Python
21
53baed03895f28f24113d376b089e3ef281b34ed
test_api.py
319,780
14
66
test_api_set_storage_path_not_provided
https://github.com/paperless-ngx/paperless-ngx.git
Increases test coverage of storage paths
187
0
116,993
15
4
13
def encrypt_file(self, file, key=0): # precondition assert isinstance(file, str) and isinstance(key, int) try: with open(file, "r") as fin: with open("encrypt.out", "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(line, key)) except: return False return True
XORcipher/XOR_cipher.py
125
Python
{ "docstring": "\n input: filename (str) and a key (int)\n output: returns true if encrypt process was\n successful otherwise false\n if key not passed the method uses the key by the constructor.\n otherwise key = 1\n ", "language": "en", "n_whitespaces": 76, "n_words": 33, "vocab_size": 26 }
37
Python
32
f0af0c43340763724f139fa68aa1e5a9ffe458b4
XOR_cipher.py
22,543
10
72
encrypt_file
https://github.com/geekcomputers/Python.git
refactor: clean code Signed-off-by: slowy07 <slowy.arfy@gmail.com>
177
0
4,359
17
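The encrypt_string helper that encrypt_file relies on is not shown in this row; a minimal standalone sketch of the XOR idea it presumably uses (hypothetical, for illustration only):

def xor_string(content: str, key: int = 1) -> str:
    # XOR each character code with the key; running the same call again restores the input.
    key = key or 1
    return "".join(chr(ord(ch) ^ key) for ch in content)

ciphertext = xor_string("hello world", key=7)
print(ciphertext)                      # obfuscated text
print(xor_string(ciphertext, key=7))   # 'hello world' again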
1
3
def test_finished_signal(qtbot): signal_triggered = False
tests/unit/misc/test_msgbox.py
18
qutebrowser
{ "docstring": "Make sure we can pass a slot to be called when the dialog finished.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
5
Python
5
0877fb0d78635692e481c8bde224fac5ad0dd430
test_msgbox.py
321,418
9
64
test_finished_signal
https://github.com/qutebrowser/qutebrowser.git
Run scripts/dev/rewrite_enums.py
11
0
117,705
6
7
26
def _install_setup_packages(self) -> None: setup_packages = [(pkg.unsafe_name, pkg.specs) for pkg in parse_requirements(_INSTALLER_REQUIREMENTS)] for pkg in setup_packages: if pkg not in self._env.missing_packages: continue self._env.missing_packages.pop(self._env.missing_packages.index(pkg)) pkg_str = self._format_package(*pkg) if self._env.is_conda: cmd = ["conda", "install", "-y"] else: cmd = [sys.executable, "-m", "pip", "install", "--no-cache-dir"] if self._env.is_admin: cmd.append("--user") cmd.append(pkg_str) clean_pkg = pkg_str.replace("\"", "") if self._subproc_installer(cmd, clean_pkg) != 0: logger.error("Unable to install package: %s. Process aborted", clean_pkg) sys.exit(1)
setup.py
269
faceswap
{ "docstring": " Install any packages that are required for the setup.py installer to work. This\n includes the pexpect package if it is not already installed.\n\n Subprocess is used as we do not currently have pexpect\n ", "language": "en", "n_whitespaces": 55, "n_words": 33, "vocab_size": 29 }
63
Python
48
03f6cb4e7e106bc227ad781a515338097fba26f9
setup.py
101,106
24
160
_install_setup_packages
https://github.com/deepfakes/faceswap.git
setup.py: implement logging
306
0
20,537
15
1
7
def attrs_dict(self): return OrderedDict( [ ("src", self.url), ("width", self.width), ("height", self.height), ("alt", self.alt), ] )
wagtail/images/models.py
71
wagtail
{ "docstring": "\n A dict of the src, width, height, and alt attributes for an <img> tag.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 14 }
15
Python
15
d10f15e55806c6944827d801cd9c2d53f5da4186
models.py
75,098
9
44
attrs_dict
https://github.com/wagtail/wagtail.git
Reformat with black
118
0
16,354
10
2
17
def _call_ntimes(self, fun, times): gc.collect(generation=1) mem1 = self._get_mem() for x in range(times): ret = self.call(fun) del x, ret gc.collect(generation=1) mem2 = self._get_mem() self.assertEqual(gc.garbage, []) diff = mem2 - mem1 # can also be negative return diff
psutil/tests/__init__.py
127
psutil
{ "docstring": "Get 2 distinct memory samples, before and after having\n called fun repeatedly, and return the memory difference.\n ", "language": "en", "n_whitespaces": 31, "n_words": 17, "vocab_size": 15 }
36
Python
27
04e7aa604155736cce0abcc15c9b7b63d941b0e9
__init__.py
189,048
11
78
_call_ntimes
https://github.com/giampaolo/psutil.git
docs: fix simple typo, repeadetly -> repeatedly (#2123)
122
0
45,981
10
4
9
def get_filesystem_type(filepath): # We import it locally so that click autocomplete works import psutil root_type = "unknown" for part in psutil.disk_partitions(): if part.mountpoint == '/': root_type = part.fstype continue if filepath.startswith(part.mountpoint): return part.fstype return root_type
dev/breeze/src/airflow_breeze/utils/run_utils.py
87
airflow
{ "docstring": "\n Determine the type of filesystem used - we might want to use different parameters if tmpfs is used.\n :param filepath: path to check\n :return: type of filesystem\n ", "language": "en", "n_whitespaces": 40, "n_words": 27, "vocab_size": 23 }
35
Python
28
4ffd4f09532fceb67675fce4c1f5cd383eff992e
run_utils.py
46,791
10
49
get_filesystem_type
https://github.com/apache/airflow.git
Prepare Breeze2 for prime time :) (#22713) This is a review and clean-up for all the parameters and commands for Breeze2 in order to prepare it for being used by the contributors. There are various small fixes here and there, removal of duplicated code, refactoring and moving code around as well as cleanup and review of all the parameters used for all implemented commands. The parameters, default values and their behaviours were updated to match the "new" life of Breeze rather than the old one. Some improvements are made to the autocomplete and click help messages printed. The full list of choices is always displayed, parameters are grouped according to their target audience, and they were sorted according to importance and frequency of use. Various messages have been colourised according to their meaning - warnings as yellow, errors as red and informational messages as bright_blue. The `dry-run` option has been added to just show what would have been run without actually running some potentially "write" commands (read commands are still executed) so that you can easily verify and manually copy and execute the commands with the option to modify them before. The `dry_run` and `verbose` options are now used for all commands. The "main" command now runs "shell" by default, similarly to the original Breeze. All "shortcut" parameters have been standardized - i.e. common options (verbose/dry run/help) have one and all common flags that are likely to be used often have an assigned shortcut. The "stop" and "cleanup" commands have been added as they are necessary for the average user to complete the regular usage cycle. Documentation for all the important methods has been updated.
100
0
8,995
11
3
8
def input_mask(self): inputs = self.input if isinstance(inputs, list): return [getattr(x, "_keras_mask", None) for x in inputs] else: return getattr(inputs, "_keras_mask", None)
keras/engine/base_layer.py
73
keras
{ "docstring": "Retrieves the input mask tensor(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. if it is connected to one incoming layer.\n\n Returns:\n Input mask tensor (potentially None) or list of input\n mask tensors.\n\n Raises:\n AttributeError: if the layer is connected to\n more than one incoming layers.\n ", "language": "en", "n_whitespaces": 131, "n_words": 52, "vocab_size": 36 }
21
Python
18
84afc5193d38057e2e2badf9c889ea87d80d8fbf
base_layer.py
270,737
6
45
input_mask
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
71
0
80,552
11
1
6
def get_item_details(item_code): return frappe.db.sql( , item_code, as_dict=True, )[0]
erpnext/stock/doctype/serial_no/serial_no.py
37
erpnext
{ "docstring": "select name, has_batch_no, docstatus,\n\t\tis_stock_item, has_serial_no, serial_no_series\n\t\tfrom tabItem where name=%s", "language": "en", "n_whitespaces": 8, "n_words": 11, "vocab_size": 11 }
8
Python
8
494bd9ef78313436f0424b918f200dab8fc7c20b
serial_no.py
67,737
8
24
get_item_details
https://github.com/frappe/erpnext.git
style: format code with black
2
0
14,611
9
6
20
def collect_qtqml_files(self): # No-op if requested Qt-based package is not available. if self.version is None: return [], [] # Not all PyQt5/PySide2 installs have QML files. In this case, location['Qml2ImportsPath'] is empty. # Furthermore, even if location path is provided, the directory itself may not exist. # # https://github.com/pyinstaller/pyinstaller/pull/3229#issuecomment-359735031 # https://github.com/pyinstaller/pyinstaller/issues/3864 # # In Qt 6, Qml2ImportsPath was deprecated in favor of QmlImportsPath. The former is not available in PySide6 # 6.4.0 anymore (but is in PyQt6 6.4.0). Use the new QmlImportsPath if available. if 'QmlImportsPath' in self.location: qml_src_dir = self.location['QmlImportsPath'] else: qml_src_dir = self.location['Qml2ImportsPath'] if not qml_src_dir or not os.path.isdir(qml_src_dir): logger.warning('%s: QML directory %r does not exist. QML files not packaged.', self, qml_src_dir) return [], [] qml_dst_dir = os.path.join(self.qt_rel_dir, 'qml') datas = [(qml_src_dir, qml_dst_dir)] binaries = [ # Produce ``/path/to/Qt/Qml/path_to_qml_binary/qml_binary, PyQt5/Qt/Qml/path_to_qml_binary``. ( qml_plugin_file, os.path.join(qml_dst_dir, os.path.dirname(os.path.relpath(qml_plugin_file, qml_src_dir))) ) for qml_plugin_file in misc.dlls_in_subdirs(qml_src_dir) ] return binaries, datas
PyInstaller/utils/hooks/qt/__init__.py
243
pyinstaller
{ "docstring": "\n Collect additional binaries and data for QtQml module.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
146
Python
99
d789a7daa7712716c89259b987349917a89aece7
__init__.py
264,031
19
144
collect_qtqml_files
https://github.com/pyinstaller/pyinstaller.git
hookutils: reorganize the Qt hook utilities Reorganize the Qt module information to provide information necessary to deal with variations between different python Qt bindings (PySide2, PyQt5, PySide6, and PyQt6). Replace the existing table-like dictionary with list of entries, which is easier to format and document. From this list, we now generate two dictionaries; one that maps Qt module (shared library) names to the module info entries (the same role as the old dictionary), and one that maps python module names to the module info entries. The latter is necessary to accommodate python modules that do not have corresponding Qt shared libraries (header-only Qt modules, such as QtAxContainer; or statically-linked module, such as QSci), but we still need to provide information about plugins or translation files. The new information list is based on manual inspection of source code for Qt 5.15 and 6.3, and should provide comprehensive information about all plugin names and translation file basenames. In addition, most of the helper functions, which take a reference to the `QtLibraryInfo` class as their first argument, have been turned into methods of the `QtLibraryInfo` class. The corresponding hooks have also been adjusted.
397
0
77,570
15
3
8
def pytest_collection_modifyitems(items): for item in items: if inspect.iscoroutinefunction(item.obj): item.add_marker("asyncio") assert not inspect.isasyncgenfunction(item.obj)
IPython/conftest.py
64
ipython
{ "docstring": "This function is automatically run by pytest passing all collected test\n functions.\n\n We use it to add asyncio marker to all async tests and assert we don't use\n test functions that are async generators which wouldn't make sense.\n ", "language": "en", "n_whitespaces": 50, "n_words": 38, "vocab_size": 33 }
12
Python
12
82d1a374575d9785708f144976cf139c76c7acb7
conftest.py
208,416
5
37
pytest_collection_modifyitems
https://github.com/ipython/ipython.git
make sure to run async tests there are some `async def` tests, but they are skipped without `mark("asyncio")`
43
0
52,319
12
1
6
def filter(self, *args, **kwargs): self._not_support_combined_queries("filter") return self._filter_or_exclude(False, args, kwargs)
django/db/models/query.py
48
django
{ "docstring": "\n Return a new QuerySet instance with the args ANDed to the existing\n set.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 12 }
9
Python
9
9c19aff7c7561e3a82978a272ecdaad40dda5c00
query.py
205,772
3
29
filter
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
30
0
51,206
8
2
25
def _output_non_grouped(self) -> None: output_dir = self._args.output_dir os.makedirs(output_dir, exist_ok=True) description = f"{'Copying' if self._args.keep_original else 'Moving'} and renaming" for idx, source in enumerate(tqdm(self._sorter.sorted_filelist, desc=description, file=sys.stdout, leave=False)): dest = os.path.join(output_dir, f"{idx:06d}_{os.path.basename(source)}") self._sort_file(source, dest)
tools/sort/sort.py
165
faceswap
{ "docstring": " Output non-grouped files.\n\n These are files which are sorted but not binned, so just the filename gets updated\n ", "language": "en", "n_whitespaces": 33, "n_words": 18, "vocab_size": 17 }
32
Python
30
98d01760e469fd2108eed8d0b0a1ba6297c3177c
sort.py
101,607
14
81
_output_non_grouped
https://github.com/deepfakes/faceswap.git
Overhaul sort: - Standardize image data reading and writing - Optimize loading (just one pass required) - Make all sort groups binnable (to greater or lesser results) - Add sort by pitch - Deprecate multiple options - linting, docs + locales
212
0
21,015
15
3
4
def insert(self, index, ins_string):
pipenv/patched/notpip/_vendor/pyparsing/results.py
19
""" Inserts new element at location index in the list of parsed tokens. Similar to ``list.insert()``. Example:: numlist = Word(nums)[...] print(numlist.parse_string("0 123 321"))
pipenv
{ "docstring": "\n Inserts new element at location index in the list of parsed tokens.\n\n Similar to ``list.insert()``.\n\n Example::\n\n numlist = Word(nums)[...]\n print(numlist.parse_string(\"0 123 321\")) # -> ['0', '123', '321']\n\n # use a parse action to insert the parse location in the front of the parsed results", "language": "en", "n_whitespaces": 98, "n_words": 44, "vocab_size": 34 }
4
Python
4
f3166e673fe8d40277b804d35d77dcdb760fc3b3
results.py
20,623
7
64
insert
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyproject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
11
1
3,460
4
1
5
def mark_scope_as_experimental(): with configure_scope() as scope: scope.set_tag(EXPERIMENT_TAG, True)
src/sentry/utils/sdk.py
39
sentry
{ "docstring": "\n Set the experimental tag on the SDK scope for outgoing crashes and transactions.\n\n Marking the scope will cause these crashes and transaction to be sent to a separate experimental dsn.\n ", "language": "en", "n_whitespaces": 40, "n_words": 30, "vocab_size": 23 }
8
Python
8
9288539aeeac52990705aa6dd0abaebe0d12da21
sdk.py
92,157
3
20
mark_scope_as_experimental
https://github.com/getsentry/sentry.git
poc(sdk): Add experimental dsn for upcoming perf work (#36000) This adds an experimental dsn to the MultiplexingTransport to intentionally send specific flagged events solely to a separate dsn, which will help us avoid troubles with ingesting random errors into our main Sentry project.
21
0
18,879
10
1
10
async def test_handle_mqtt_on_callback(hass, caplog, mqtt_mock, mqtt_client_mock): # Simulate an ACK for mid == 1, this will call mqtt_mock._mqtt_handle_mid(mid) mqtt_client_mock.on_publish(mqtt_client_mock, None, 1) await hass.async_block_till_done() # Make sure the ACK has been received await hass.async_block_till_done() # Now call publish without call back, this will call _wait_for_mid(msg_info.mid) await mqtt.async_publish(hass, "no_callback/test-topic", "test-payload") # Since the mid event was already set, we should not see any timeout await hass.async_block_till_done() assert ( "Transmitting message on no_callback/test-topic: 'test-payload', mid: 1" in caplog.text ) assert "No ACK from MQTT server" not in caplog.text
tests/components/mqtt/test_init.py
117
core
{ "docstring": "Test receiving an ACK callback before waiting for it.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
85
Python
64
845bf80e725af8c921915906b0f796c7a8164d11
test_init.py
292,654
11
66
test_handle_mqtt_on_callback
https://github.com/home-assistant/core.git
Mqtt improve test coverage (#66279) Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
138
0
91,728
9
2
8
def enhanced_hue_supported(self) -> bool: return ( self.color_capabilities is not None and lighting.Color.ColorCapabilities.Enhanced_hue in self.color_capabilities )
homeassistant/components/zha/core/channels/lighting.py
47
core
{ "docstring": "Return True if the channel supports enhanced hue and saturation.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
15
Python
14
df67a8cd4f8df91a153778009a74be1e3876ca53
lighting.py
303,468
6
29
enhanced_hue_supported
https://github.com/home-assistant/core.git
Fix ZHA light color temp support (#76305)
58
0
102,288
11
1
3
def set_pipeline(self, pipeline): self.pipeline = pipeline
src/sentry/pipeline/provider.py
23
sentry
{ "docstring": "\n Used by the pipeline to give the provider access to the executing pipeline.\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 10 }
6
Python
6
d246d2b6d3e014270941209e54f2f12e09ad9a81
provider.py
97,870
2
13
set_pipeline
https://github.com/getsentry/sentry.git
ref(py): Split up large file (#32862) Co-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com>
20
0
19,496
7
2
25
def _setup_dynamic(self) -> None: display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1) config = self._read_config_template() aci = self._create_ansible_core_ci() response = aci.start() if not self.args.explain: credentials = response['aws']['credentials'] values = dict( ACCESS_KEY=credentials['access_key'], SECRET_KEY=credentials['secret_key'], SECURITY_TOKEN=credentials['session_token'], REGION='us-east-1', ) display.sensitive.add(values['SECRET_KEY']) display.sensitive.add(values['SECURITY_TOKEN']) config = self._populate_config_template(config, values) self._write_config(config)
test/lib/ansible_test/_internal/commands/integration/cloud/aws.py
217
ansible
{ "docstring": "Request AWS credentials through the Ansible Core CI service.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
41
Python
35
3eb0485dd92c88cc92152d3656d94492db44b183
aws.py
267,843
18
128
_setup_dynamic
https://github.com/ansible/ansible.git
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
216
0
79,123
13
1
7
def managed(self): # type: () -> bool return t.cast(bool, self._get_cloud_config(self._MANAGED))
test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py
38
ansible
{ "docstring": "True if resources are managed by ansible-test, otherwise False.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
10
Python
10
a06fa496d3f837cca3c437ab6e9858525633d147
__init__.py
266,742
2
22
managed
https://github.com/ansible/ansible.git
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
25
0
78,553
10
3
22
def test_simple_imputer_constant_keep_empty_features(array_type, keep_empty_features): X = np.array([[np.nan, 2], [np.nan, 3], [np.nan, 6]]) X = _convert_container(X, array_type) fill_value = 10 imputer = SimpleImputer( strategy="constant", fill_value=fill_value, keep_empty_features=keep_empty_features, ) for method in ["fit_transform", "transform"]: X_imputed = getattr(imputer, method)(X) assert X_imputed.shape == X.shape constant_feature = ( X_imputed[:, 0].A if array_type == "sparse" else X_imputed[:, 0] ) assert_array_equal(constant_feature, fill_value) @pytest.mark.parametrize("array_type", ["array", "sparse"]) @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"]) @pytest.mark.parametrize("keep_empty_features", [True, False])
sklearn/impute/tests/test_impute.py
273
@pytest.mark.parametrize("array_type", ["array", "sparse"]) @pytest.mark.parametrize("strategy", ["mean", "median", "most_frequent"]) @pytest.mark.parametrize("keep_empty_features", [True, False])
scikit-learn
{ "docstring": "Check the behaviour of `keep_empty_features` with `strategy='constant'.\n For backward compatibility, a column full of missing values will always be\n fill and never dropped.\n ", "language": "en", "n_whitespaces": 32, "n_words": 23, "vocab_size": 22 }
63
Python
53
d8fa96c29828e3ca79ddd5d7466521ac4d95213c
test_impute.py
261,585
16
125
test_simple_imputer_constant_keep_empty_features
https://github.com/scikit-learn/scikit-learn.git
ENH keep features with all missing values during imputation (#24770) Co-authored-by: Chiara Marmo <cmarmo@users.noreply.github.com> Co-authored-by: Julien Jerphanion <git@jjerphan.xyz> Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com> Co-authored-by: Vitor SRG <vitorssrg@gmail.com> Fixes https://github.com/scikit-learn/scikit-learn/pull/16695 Fixes https://github.com/scikit-learn/scikit-learn/issues/16426 Fixes https://github.com/scikit-learn/scikit-learn/issues/16977
148
1
76,879
13
1
15
def test_copy_with_credential(self): expression = "col1, col2" op = DatabricksCopyIntoOperator( file_location=COPY_FILE_LOCATION, file_format='CSV', table_name='test', task_id=TASK_ID, expression_list=expression, credential={'AZURE_SAS_TOKEN': 'abc'}, ) assert ( op._create_sql_query() == f.strip() )
tests/providers/databricks/operators/test_databricks_sql.py
102
airflow
{ "docstring": "COPY INTO test\nFROM (SELECT {expression} FROM '{COPY_FILE_LOCATION}' WITH (CREDENTIAL (AZURE_SAS_TOKEN = 'abc') ))\nFILEFORMAT = CSV\n", "language": "en", "n_whitespaces": 14, "n_words": 17, "vocab_size": 15 }
23
Python
21
401419432082d222b823e4f2a66f21e5cc3ab28d
test_databricks_sql.py
45,916
17
56
test_copy_with_credential
https://github.com/apache/airflow.git
Add new options to DatabricksCopyIntoOperator (#22076) This includes: * `encryption` - to specify encryption options for a given location * `credential` - to specify authentication options for a given location * `validate` - to control validation of schema & data
145
0
8,743
12
1
11
def _earlystop_notify_tuner(self, data): _logger.debug('Early stop notify tuner data: [%s]', data) data['type'] = MetricType.FINAL data['value'] = dump(data['value']) self.enqueue_command(CommandType.ReportMetricData, data)
nni/runtime/msg_dispatcher.py
80
nni
{ "docstring": "Send last intermediate result as final result to tuner in case the\n trial is early stopped.\n ", "language": "en", "n_whitespaces": 30, "n_words": 16, "vocab_size": 15 }
18
Python
16
98c1a77f61900d486f46d284c49fb65675dbee6a
msg_dispatcher.py
112,868
5
46
_earlystop_notify_tuner
https://github.com/microsoft/nni.git
Support multiple HPO experiments in one process (#4855)
53
0
24,779
10
8
36
def gradient(scalar_field, doit=True): coord_sys = _get_coord_systems(scalar_field) if len(coord_sys) == 0: return Vector.zero elif len(coord_sys) == 1: coord_sys = next(iter(coord_sys)) h1, h2, h3 = coord_sys.lame_coefficients() i, j, k = coord_sys.base_vectors() x, y, z = coord_sys.base_scalars() vx = Derivative(scalar_field, x) / h1 vy = Derivative(scalar_field, y) / h2 vz = Derivative(scalar_field, z) / h3 if doit: return (vx * i + vy * j + vz * k).doit() return vx * i + vy * j + vz * k else: if isinstance(scalar_field, (Add, VectorAdd)): return VectorAdd.fromiter(gradient(i) for i in scalar_field.args) if isinstance(scalar_field, (Mul, VectorMul)): s = _split_mul_args_wrt_coordsys(scalar_field) return VectorAdd.fromiter(scalar_field / i * gradient(i) for i in s) return Gradient(scalar_field)
sympy/vector/operators.py
329
sympy
{ "docstring": "\n Returns the vector gradient of a scalar field computed wrt the\n base scalars of the given coordinate system.\n\n Parameters\n ==========\n\n scalar_field : SymPy Expr\n The scalar field to compute the gradient of\n\n doit : bool\n If True, the result is returned after calling .doit() on\n each component. Else, the returned expression contains\n Derivative instances\n\n Examples\n ========\n\n >>> from sympy.vector import CoordSys3D, gradient\n >>> R = CoordSys3D('R')\n >>> s1 = R.x*R.y*R.z\n >>> gradient(s1)\n R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k\n >>> s2 = 5*R.x**2*R.z\n >>> gradient(s2)\n 10*R.x*R.z*R.i + 5*R.x**2*R.k\n\n ", "language": "en", "n_whitespaces": 166, "n_words": 86, "vocab_size": 64 }
108
Python
60
975df9b627556d176039ba3a0f3a2e3a3df9686c
operators.py
196,453
22
212
gradient
https://github.com/sympy/sympy.git
Fixed removals not fully performed earlier
258
0
47,935
16
3
13
def add_store(source, store, retcode=False, saltenv="base"):
    source = __salt__["cp.cache_file"](source, saltenv)

    # Since we're allowing a path, let's make sure it exists
    if not os.path.exists(source):
        msg = "cert_file not found: ".format(source)
        raise CommandExecutionError(msg)

    cmd = 'certutil.exe -addstore {} "{}"'.format(store, source)
    if retcode:
        return __salt__["cmd.retcode"](cmd)
    else:
        return __salt__["cmd.run"](cmd)
salt/modules/win_certutil.py
144
salt
{ "docstring": "\n Add the given cert into the given Certificate Store\n\n source (str):\n The source certificate file. This is either the path to a local file or\n a file from the file server in the form of ``salt://path/to/file``\n\n store (str):\n The certificate store to add the certificate to\n\n retcode (bool):\n If ``True``, return the retcode instead of stdout. Default is ``False``\n\n saltenv (str):\n The salt environment to use. This is ignored if the path is local\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' certutil.add_store salt://cert.cer TrustedPublisher\n salt '*' certutil.add_store C:\\\\path\\\\to\\\\local.cer TrustedPublisher\n ", "language": "en", "n_whitespaces": 163, "n_words": 89, "vocab_size": 54 }
45
Python
40
53b3ebc92648c2081c58865713b50a2859ae8310
win_certutil.py
215,877
10
82
add_store
https://github.com/saltstack/salt.git
Fix win_certutil module to handle paths with spaces
94
0
54,222
11
1
4
def base_dir(self) -> str:
    return self._base_dir
python/ray/data/datasource/partitioning.py
22
ray
{ "docstring": "Gets the original base directory supplied during object construction.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
6
Python
6
9f4cb9b3c9c27ae21bf7807595973231b6814648
partitioning.py
138,432
3
12
base_dir
https://github.com/ray-project/ray.git
[Datasets] Add Path Partitioning Support for All Content Types (#23624) Adds a content-type-agnostic partition parser with support for filtering files. Also adds some corner-case bug fixes and usability improvements for supporting more robust input path types.
20
0
31,421
6
1
13
def test_stylesheet_apply_takes_final_rule_in_specificity_clash():
    css = ".a {background: red; color: lime;} .b {background: blue;}"
    stylesheet = _make_stylesheet(css)
    node = DOMNode(classes="a b", id="c")
    stylesheet.apply(node)

    assert node.styles.color == Color(0, 255, 0)  # color: lime
    assert node.styles.background == Color(0, 0, 255)  # background: blue
tests/css/test_stylesheet.py
105
textual
{ "docstring": ".a and .b both contain background and have same specificity, so .b wins\n since it was declared last - the background should be blue.", "language": "en", "n_whitespaces": 26, "n_words": 24, "vocab_size": 21 }
39
Python
31
4dd0d9fae43583638f34257f97d5749ca4f2c00c
test_stylesheet.py
183,837
7
62
test_stylesheet_apply_takes_final_rule_in_specificity_clash
https://github.com/Textualize/textual.git
Add various additional tests around CSS specificity
62
0
44,359
10
1
14
async def test_check_loop_async_integration(caplog):
    with pytest.raises(RuntimeError), patch(
        "homeassistant.util.async_.extract_stack",
        return_value=[
            Mock(
                filename="/home/paulus/homeassistant/core.py",
                lineno="23",
                line="do_something()",
            ),
            Mock(
                filename="/home/paulus/homeassistant/components/hue/light.py",
                lineno="23",
                line="self.light.is_on",
            ),
            Mock(
                filename="/home/paulus/aiohue/lights.py",
                lineno="2",
                line="something()",
            ),
        ],
    ):
        hasync.check_loop()
    assert (
        "Detected blocking call inside the event loop. This is causing stability issues. "
        "Please report issue for hue doing blocking calls at "
        "homeassistant/components/hue/light.py, line 23: self.light.is_on" in caplog.text
    )
tests/util/test_async.py
158
core
{ "docstring": "Test check_loop detects and raises when called from event loop from integration context.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
58
Python
51
dc58bc375ae203e3d394225f9c3a5a14d43cb2f3
test_async.py
309,258
28
88
test_check_loop_async_integration
https://github.com/home-assistant/core.git
Warn on`time.sleep` in event loop (#63766) Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
330
0
107,965
15
1
56
def test_callback(self):
    # ensure that we are correctly testing the fallback when "get_extra_attributes"
    # is not implemented.
    mapping_provider = self.provider._user_mapping_provider
    with self.assertRaises(AttributeError):
        _ = mapping_provider.get_extra_attributes

    token = {
        "type": "bearer",
        "id_token": "id_token",
        "access_token": "access_token",
    }
    username = "bar"
    userinfo = {
        "sub": "foo",
        "username": username,
    }
    expected_user_id = "@%s:%s" % (username, self.hs.hostname)
    self.provider._exchange_code = simple_async_mock(return_value=token)
    self.provider._parse_id_token = simple_async_mock(return_value=userinfo)
    self.provider._fetch_userinfo = simple_async_mock(return_value=userinfo)
    auth_handler = self.hs.get_auth_handler()
    auth_handler.complete_sso_login = simple_async_mock()

    code = "code"
    state = "state"
    nonce = "nonce"
    client_redirect_url = "http://client/redirect"
    ip_address = "10.0.0.1"
    session = self._generate_oidc_session_token(state, nonce, client_redirect_url)
    request = _build_callback_request(code, state, session, ip_address=ip_address)

    self.get_success(self.handler.handle_oidc_callback(request))

    auth_handler.complete_sso_login.assert_called_once_with(
        expected_user_id,
        "oidc",
        request,
        client_redirect_url,
        None,
        new_user=True,
        auth_provider_session_id=None,
    )
    self.provider._exchange_code.assert_called_once_with(code)
    self.provider._parse_id_token.assert_called_once_with(token, nonce=nonce)
    self.provider._fetch_userinfo.assert_not_called()
    self.render_error.assert_not_called()

    # Handle mapping errors
    with patch.object(
        self.provider,
        "_remote_id_from_userinfo",
        new=Mock(side_effect=MappingException()),
    ):
        self.get_success(self.handler.handle_oidc_callback(request))
        self.assertRenderedError("mapping_error")

    # Handle ID token errors
    self.provider._parse_id_token = simple_async_mock(raises=Exception())
    self.get_success(self.handler.handle_oidc_callback(request))
    self.assertRenderedError("invalid_token")

    auth_handler.complete_sso_login.reset_mock()
    self.provider._exchange_code.reset_mock()
    self.provider._parse_id_token.reset_mock()
    self.provider._fetch_userinfo.reset_mock()

    # With userinfo fetching
    self.provider._user_profile_method = "userinfo_endpoint"
    token = {
        "type": "bearer",
        "access_token": "access_token",
    }
    self.provider._exchange_code = simple_async_mock(return_value=token)
    self.get_success(self.handler.handle_oidc_callback(request))

    auth_handler.complete_sso_login.assert_called_once_with(
        expected_user_id,
        "oidc",
        request,
        client_redirect_url,
        None,
        new_user=False,
        auth_provider_session_id=None,
    )
    self.provider._exchange_code.assert_called_once_with(code)
    self.provider._parse_id_token.assert_not_called()
    self.provider._fetch_userinfo.assert_called_once_with(token)
    self.render_error.assert_not_called()

    # With an ID token, userinfo fetching and sid in the ID token
    self.provider._user_profile_method = "userinfo_endpoint"
    token = {
        "type": "bearer",
        "access_token": "access_token",
        "id_token": "id_token",
    }
    id_token = {
        "sid": "abcdefgh",
    }
    self.provider._parse_id_token = simple_async_mock(return_value=id_token)
    self.provider._exchange_code = simple_async_mock(return_value=token)
    auth_handler.complete_sso_login.reset_mock()
    self.provider._fetch_userinfo.reset_mock()
    self.get_success(self.handler.handle_oidc_callback(request))

    auth_handler.complete_sso_login.assert_called_once_with(
        expected_user_id,
        "oidc",
        request,
        client_redirect_url,
        None,
        new_user=False,
        auth_provider_session_id=id_token["sid"],
    )
    self.provider._exchange_code.assert_called_once_with(code)
    self.provider._parse_id_token.assert_called_once_with(token, nonce=nonce)
    self.provider._fetch_userinfo.assert_called_once_with(token)
    self.render_error.assert_not_called()

    # Handle userinfo fetching error
    self.provider._fetch_userinfo = simple_async_mock(raises=Exception())
    self.get_success(self.handler.handle_oidc_callback(request))
    self.assertRenderedError("fetch_error")

    # Handle code exchange failure
    from synapse.handlers.oidc import OidcError

    self.provider._exchange_code = simple_async_mock(
        raises=OidcError("invalid_request")
    )
    self.get_success(self.handler.handle_oidc_callback(request))
    self.assertRenderedError("invalid_request")
tests/handlers/test_oidc.py
1,151
synapse
{ "docstring": "Code callback works and display errors if something went wrong.\n\n A lot of scenarios are tested here:\n - when the callback works, with userinfo from ID token\n - when the user mapping fails\n - when ID token verification fails\n - when the callback works, with userinfo fetched from the userinfo endpoint\n - when the userinfo fetching fails\n - when the code exchange fails\n ", "language": "en", "n_whitespaces": 125, "n_words": 63, "vocab_size": 35 }
247
Python
131
a121507cfec0ffce45a89f5a1019034eda5b0c70
test_oidc.py
246,326
111
684
test_callback
https://github.com/matrix-org/synapse.git
Adds misc missing type hints (#11953)
1,236
0
71,155
15
1
6
def bicubic_degradation(x, sf=3):
    x = util.imresize_np(x, scale=1 / sf)
    return x
ldm/modules/image_degradation/bsrgan.py
42
stablediffusion
{ "docstring": "\n Args:\n x: HxWxC image, [0, 1]\n sf: down-scale factor\n Return:\n bicubicly downsampled LR image\n ", "language": "en", "n_whitespaces": 45, "n_words": 14, "vocab_size": 14 }
11
Python
10
ca86da3a30c4e080d4db8c25fca73de843663cb4
bsrgan.py
157,491
3
26
bicubic_degradation
https://github.com/Stability-AI/stablediffusion.git
release more models
20
0
36,943
10
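A short, hedged usage sketch for the helper above. It assumes that `util` in the record refers to the sibling `utils_image` module of the same `image_degradation` package (an assumption about the import, not stated in the record) and that the input is an HxWxC image in [0, 1], as the docstring says:

import numpy as np
# Assumption: `util` is ldm.modules.image_degradation.utils_image.
from ldm.modules.image_degradation import utils_image as util

def bicubic_degradation(x, sf=3):
    # Bicubically downsample an HxWxC image in [0, 1] by factor `sf`.
    x = util.imresize_np(x, scale=1 / sf)
    return x

hr = np.random.rand(96, 96, 3).astype(np.float32)  # toy high-resolution image
lr = bicubic_degradation(hr, sf=3)
print(lr.shape)  # expected (32, 32, 3)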
2
46
def plot(self, ax, box_kws, flier_kws, line_kws):
    self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)
    self.annotate_axes(ax)
    if self.orient == "h":
        ax.invert_yaxis()


_categorical_docs = dict(
    # Shared narrative docs
    categorical_narrative=dedent(),
    new_categorical_narrative=dedent(),
    # Shared function parameters
    input_params=dedent(),
    string_input_params=dedent(),
    categorical_data=dedent(),
    long_form_data=dedent(),
    order_vars=dedent(),
    stat_api_params=dedent(),
    orient=dedent(),
    color=dedent(),
    palette=dedent(),
    hue_norm=dedent(),
    saturation=dedent(),
    capsize=dedent(),
    errwidth=dedent(),
    width=dedent(),
    dodge=dedent(),
    linewidth=dedent(),
    native_scale=dedent(),
    formatter=dedent(),
    legend=dedent(),
    ax_in=dedent(),
    ax_out=dedent(),
    # Shared see also
    boxplot=dedent(),
    violinplot=dedent(),
    stripplot=dedent(),
    swarmplot=dedent(),
    barplot=dedent(),
    countplot=dedent(),
    pointplot=dedent(),
    catplot=dedent(),
    boxenplot=dedent(),
)

_categorical_docs.update(_facet_docs)
seaborn/categorical.py
537
seaborn
{ "docstring": "Make the plot.\\\n .. note::\n This function always treats one of the variables as categorical and\n draws data at ordinal positions (0, 1, ... n) on the relevant axis,\n even when the data has a numeric or date type.\n\n See the :ref:`tutorial <categorical_tutorial>` for more information.\\\n \\\n .. note::\n By default, this function treats one of the variables as categorical\n and draws data at ordinal positions (0, 1, ... n) on the relevant axis.\n This can be disabled with the `native_scale` parameter.\n\n See the :ref:`tutorial <categorical_tutorial>` for more information.\\\n \\\n x, y, hue : names of variables in ``data`` or vector data, optional\n Inputs for plotting long-form data. See examples for interpretation.\\\n \\\n x, y, hue : names of variables in ``data``\n Inputs for plotting long-form data. See examples for interpretation.\\\n \\\n data : DataFrame, array, or list of arrays, optional\n Dataset for plotting. If ``x`` and ``y`` are absent, this is\n interpreted as wide-form. Otherwise it is expected to be long-form.\\\n \\\n data : DataFrame\n Long-form (tidy) dataset for plotting. Each column should correspond\n to a variable, and each row should correspond to an observation.\\\n \\\n order, hue_order : lists of strings, optional\n Order to plot the categorical levels in; otherwise the levels are\n inferred from the data objects.\\\n \\\n estimator : string or callable that maps vector -> scalar, optional\n Statistical function to estimate within each categorical bin.\n errorbar : string, (string, number) tuple, or callable\n Name of errorbar method (either \"ci\", \"pi\", \"se\", or \"sd\"), or a tuple\n with a method name and a level parameter, or a function that maps from a\n vector to a (min, max) interval.\n n_boot : int, optional\n Number of bootstrap samples used to compute confidence intervals.\n units : name of variable in ``data`` or vector data, optional\n Identifier of sampling units, which will be used to perform a\n multilevel bootstrap and account for repeated measures design.\n seed : int, numpy.random.Generator, or numpy.random.RandomState, optional\n Seed or random number generator for reproducible bootstrapping.\\\n \\\n orient : \"v\" | \"h\", optional\n Orientation of the plot (vertical or horizontal). This is usually\n inferred based on the type of the input variables, but it can be used\n to resolve ambiguity when both `x` and `y` are numeric or when\n plotting wide-form data.\\\n \\\n color : matplotlib color, optional\n Single color for the elements in the plot.\\\n \\\n palette : palette name, list, or dict, optional\n Color palette that maps the hue variable. If the palette is a dictionary,\n keys should be names of levels and values should be matplotlib colors.\\\n \\\n hue_norm : tuple or :class:`matplotlib.colors.Normalize` object\n Normalization in data units for colormap applied to the `hue`\n variable when it is numeric. Not relevant if `hue` is categorical.\\\n \\\n saturation : float, optional\n Proportion of the original saturation to draw colors at. 
Large patches\n often look better with slightly desaturated colors, but set this to\n `1` if you want the plot colors to perfectly match the input color.\\\n \\\n capsize : float, optional\n Width of the \"caps\" on error bars./\n \\\n errwidth : float, optional\n Thickness of error bar lines (and caps).\\\n \\\n width : float, optional\n Width of a full element when not using hue nesting, or width of all the\n elements for one level of the major grouping variable.\\\n \\\n dodge : bool, optional\n When hue nesting is used, whether elements should be shifted along the\n categorical axis.\\\n \\\n linewidth : float, optional\n Width of the gray lines that frame the plot elements.\\\n \\\n native_scale : bool, optional\n When True, numeric or datetime values on the categorical axis will maintain\n their original scaling rather than being converted to fixed indices.\\\n \\\n formatter : callable, optional\n Function for converting categorical data into strings. Affects both grouping\n and tick labels.\\\n \\\nlegend : \"auto\", \"brief\", \"full\", or False\n How to draw the legend. If \"brief\", numeric `hue` and `size`\n variables will be represented with a sample of evenly spaced values.\n If \"full\", every group will get an entry in the legend. If \"auto\",\n choose between brief or full representation based on number of levels.\n If `False`, no legend data is added and no legend is drawn.\n \\\n ax : matplotlib Axes, optional\n Axes object to draw the plot onto, otherwise uses the current Axes.\\\n \\\n ax : matplotlib Axes\n Returns the Axes object with the plot drawn onto it.\\\n \\\n boxplot : A traditional box-and-whisker plot with a similar API.\\\n \\\n violinplot : A combination of boxplot and kernel density estimation.\\\n \\\n stripplot : A scatterplot where one variable is categorical. Can be used\n in conjunction with other plots to show each observation.\\\n \\\n swarmplot : A categorical scatterplot where the points do not overlap. Can\n be used with other plots to show each observation.\\\n \\\n barplot : Show point estimates and confidence intervals using bars.\\\n \\\n countplot : Show the counts of observations in each categorical bin.\\\n \\\n pointplot : Show point estimates and confidence intervals using scatterplot\n glyphs.\\\n \\\n catplot : Combine a categorical plot with a :class:`FacetGrid`.\\\n \\\n boxenplot : An enhanced boxplot for larger datasets.\\\n ", "language": "en", "n_whitespaces": 1442, "n_words": 843, "vocab_size": 378 }
65
Python
59
b1db0f72627e9fae8fda261514392d53906384cf
categorical.py
42,167
5
44
plot
https://github.com/mwaskom/seaborn.git
Cleanup and merge #2909 (#2955) * Sorting boxenplot * Boxenplot separate kws Removed `kwargs` which were used to draw the median lines and scatter plot of outliers previously. Added separate kwargs - `box_kws`, `line_kws` (drawing the median lines) and `flier_kws` (for the scatter of outliers). Updated the matching docstring. * In the previous commit most code on the categorical.py file was auto-reformatted. Here it is reverted and only the changes to `seaborn.categorical.boxenplot` and `seaborn.categorical._LVPlotter` are kept. * Reinserted blank lines in docstring. * - Removed redundant indention in `boxenplot` function - Removed commented out code in the `plot` function * Removed default kwargs from `plot` * Removing commented out code * Reverted to ternary expressions * Replaced default kwargs assignment to box_kws Disentangled the nested for loop for default kwargs assignment * Removed remaining `kwargs` item in docstring * Resolved incorrect reference in the box_kws item on the docstring. * Resolved incorrect descriptions for box_kws, line_kws and flier_kws. * Changed line_kws update to source arguments frmo box_kws if there is only a single data point. * Added line_kws test * Added flier_kws test, renamed line_kws test * Tests - further work is required in expanding the tests. Two current issues (a) most are not testing when multiple categories are used on the x-axis, but only a single one. (b) the tests for the box_kws functionality are very slim. * Fix lint issues * Fix pinned tests * Update release notes * Cleanup boxenplot colors test Co-authored-by: EitanHemed <37670372+EitanHemed@users.noreply.github.com>
206
0
7,489
10
1
9
def get_all_transfers(date, shareholder):
    condition = " "
    # if company:
    # 	condition = 'AND company = %(company)s '
    return frappe.db.sql(
        .format(
            condition=condition
        ),
        {"date": date, "shareholder": shareholder},
        as_dict=1,
    )
erpnext/accounts/report/share_ledger/share_ledger.py
70
erpnext
{ "docstring": "SELECT * FROM `tabShare Transfer`\n\t\tWHERE (DATE(date) <= %(date)s AND from_shareholder = %(shareholder)s {condition})\n\t\tOR (DATE(date) <= %(date)s AND to_shareholder = %(shareholder)s {condition})\n\t\tORDER BY date", "language": "en", "n_whitespaces": 22, "n_words": 26, "vocab_size": 19 }
29
Python
24
494bd9ef78313436f0424b918f200dab8fc7c20b
share_ledger.py
65,350
12
41
get_all_transfers
https://github.com/frappe/erpnext.git
style: format code with black
18
0
13,868
10
3
11
def sqeuclidean_row_norms(X, num_threads):
    if X.dtype == np.float64:
        return np.asarray(_sqeuclidean_row_norms64(X, num_threads))
    if X.dtype == np.float32:
        return np.asarray(_sqeuclidean_row_norms32(X, num_threads))

    raise ValueError(
        "Only float64 or float32 datasets are supported at this time, "
        f"got: X.dtype={X.dtype}."
    )
sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py
100
scikit-learn
{ "docstring": "Compute the squared euclidean norm of the rows of X in parallel.\n\n Parameters\n ----------\n X : ndarray or CSR matrix of shape (n_samples, n_features)\n Input data. Must be c-contiguous.\n\n num_threads : int\n The number of OpenMP threads to use.\n\n Returns\n -------\n sqeuclidean_row_norms : ndarray of shape (n_samples,)\n Arrays containing the squared euclidean norm of each row of X.\n ", "language": "en", "n_whitespaces": 103, "n_words": 58, "vocab_size": 42 }
33
Python
28
239e16319116ab7445c0557bb08783ab2d60673d
_dispatcher.py
261,541
9
57
sqeuclidean_row_norms
https://github.com/scikit-learn/scikit-learn.git
MAINT Introduce `MiddleTermComputer`, an abstraction generalizing `GEMMTermComputer` (#24807) Co-authored-by: Julien Jerphanion <git@jjerphan.xyz> Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org>
76
0
76,852
11
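As a quick illustration of what the dispatcher above computes, a plain-NumPy reference for the squared row norms; the scikit-learn version merely parallelises this with dtype-specific Cython kernels:

import numpy as np

# Reference computation: squared Euclidean norm of each row of X.
X = np.asarray(
    [[3.0, 4.0],
     [1.0, 2.0]],
    dtype=np.float64,
)
row_norms = np.einsum("ij,ij->i", X, X)
print(row_norms)  # [25.  5.]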
1
3
def test_groupby_extension_agg(self):
    super().test_groupby_extension_agg()
pandas/tests/extension/json/test_json.py
26
pandas
{ "docstring": "\n This fails when we get to tm.assert_series_equal when left.index\n contains dictionaries, which are not hashable.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 14 }
3
Python
3
24652cf178c12562585639cba39c46d62b95f107
test_json.py
165,739
2
13
test_groupby_extension_agg
https://github.com/pandas-dev/pandas.git
TST: Convert skip -> xfail (#46427)
17
0
39,701
9
1
20
def test_login_deactivate_user_error(self) -> None:
    user_profile = self.example_user("hamlet")
    realm = user_profile.realm
    self.assertTrue(email_auth_enabled(realm))

    url = f"{realm.uri}/login/?" + urlencode({"is_deactivated": user_profile.delivery_email})
    result = self.client_get(url)
    self.assertEqual(result.status_code, 200)
    self.assert_in_response(
        f"Your account {user_profile.delivery_email} has been deactivated.", result
    )

    auth_dict = realm.authentication_methods_dict()
    auth_dict["Email"] = False
    do_set_realm_authentication_methods(realm, auth_dict, acting_user=None)
    result = self.client_get(url)
    self.assertEqual(result.status_code, 200)
    self.assert_in_response(
        f"Your account {user_profile.delivery_email} has been deactivated.", result
    )
zerver/tests/test_signup.py
222
zulip
{ "docstring": "\n This is meant to test whether the error message signaled by the\n is_deactivated is shown independently of whether the Email\n backend is enabled.\n ", "language": "en", "n_whitespaces": 52, "n_words": 23, "vocab_size": 18 }
54
Python
34
61de767967bb6af077165075260eda3293f95970
test_signup.py
84,965
23
122
test_login_deactivate_user_error
https://github.com/zulip/zulip.git
login page: Show form-independent errors even if email auth disabled. These used to only be shown conditional on the {% if password_auth_enabled %} in the template. Meaning that if you had an org with email auth disabled and a deactivated user tried to log in, they wouldn't see the error shown and get confused. This switches the position of where these error will be shown (above the login+password form instead of below it), but it looks fine.
188
0
17,898
12
1
4
def location_name(self) -> str | None:
    return self._location_name
homeassistant/components/mqtt/device_tracker/schema_discovery.py
25
core
{ "docstring": "Return a location name for the current location of the device.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 9 }
8
Python
8
bcae6d604e2967c7475f0caa4b1b5e4e76ab88bf
schema_discovery.py
289,964
3
14
location_name
https://github.com/home-assistant/core.git
Improve MQTT type hints part 8 (#81034) * Improve typing device_tracker discovery * Improve typing device_tracker yaml * Add test source_type attribute * Follow up comment * Initialize at `__init__` not at class level. * Use full name for return variable * Correct import, remove assert * Use AsyncSeeCallback
22
0
89,090
6
1
3
def __await__(self):
    return self.__run().__await__()
gradio/utils.py
30
gradio
{ "docstring": "\n Wrap Request's __await__ magic function to create request calls which are executed in one line.\n ", "language": "en", "n_whitespaces": 30, "n_words": 15, "vocab_size": 15 }
4
Python
4
51c8c34486bfddca5948e46e498de44e21ab6496
utils.py
180,422
2
16
__await__
https://github.com/gradio-app/gradio.git
Async Request Class (#1595) * Implement Request class and its tests. * Add new requirements * Reformat codebase. * Fix formatting. * Add library level requirements. * Convert validated_data property to get_validated_data function. * Fix the client fixture. * Update test/test_utils.py * Update test/test_utils.py * Fix the client fixture. * Add missing initialization for Request._validated_data * Fix async test problem with test_tunneling.py * Update gradio/utils.py * Update gradio/utils.py * Fix formatting. Co-authored-by: Ömer Faruk Özdemir <farukozderim@gmail.com>
18
0
43,164
9
1
10
async def enter_async_context(self, cm):
    _cm_type = type(cm)
    _exit = _cm_type.__aexit__
    result = await _cm_type.__aenter__(cm)
    self._push_async_cm_exit(cm, _exit)
    return result
python3.10.4/Lib/contextlib.py
64
XX-Net
{ "docstring": "Enters the supplied async context manager.\n\n If successful, also pushes its __aexit__ method as a callback and\n returns the result of the __aenter__ method.\n ", "language": "en", "n_whitespaces": 45, "n_words": 24, "vocab_size": 22 }
18
Python
15
8198943edd73a363c266633e1aa5b2a9e9c9f526
contextlib.py
221,717
6
38
enter_async_context
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
60
0
56,485
9
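For context, a small self-contained sketch of how `enter_async_context` is typically used through `contextlib.AsyncExitStack`, the public class this method belongs to:

import asyncio
import contextlib

@contextlib.asynccontextmanager
async def resource(name):
    print(f"open {name}")
    try:
        yield name
    finally:
        print(f"close {name}")

async def main():
    async with contextlib.AsyncExitStack() as stack:
        # enter_async_context awaits each manager's __aenter__ and pushes its
        # __aexit__ onto the stack, so both resources are closed in reverse
        # order when the stack exits.
        a = await stack.enter_async_context(resource("a"))
        b = await stack.enter_async_context(resource("b"))
        print("using", a, b)

asyncio.run(main())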
1
5
def strip_to_restore(self, text):
    text, puncs = self._strip_to_restore(text)
    return text, puncs
TTS/tts/utils/text/punctuation.py
37
TTS
{ "docstring": "Remove punctuations from text to restore them later.\n\n Args:\n text (str): The text to be processed.\n\n Examples ::\n\n \"This is. example !\" -> [[\"This is\", \"example\"], [\".\", \"!\"]]\n\n ", "language": "en", "n_whitespaces": 71, "n_words": 28, "vocab_size": 25 }
10
Python
8
8d85af84cd5f1748f979fddcbc4aab1449f61ecb
punctuation.py
261,975
3
22
strip_to_restore
https://github.com/coqui-ai/TTS.git
Implement Punctuation class
31
0
77,090
8
8
30
def batch_encode_candidates(self, text, **kwargs):
    r
    # Always using a fixed sequence length to encode in order to stack candidates into a batch.
    kwargs["padding"] = PaddingStrategy.MAX_LENGTH

    batch_text = text
    batch_text_pair = kwargs.pop("text_pair", None)
    return_tensors = kwargs.pop("return_tensors", None)

    output_data = {
        "input_ids": [],
        "attention_mask": [],
        "token_type_ids": [],
    }

    for idx, candidate_text in enumerate(batch_text):
        if batch_text_pair is not None:
            candidate_text_pair = batch_text_pair[idx]
        else:
            candidate_text_pair = None

        encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

        encoded_input_ids = encoded_candidates.get("input_ids")
        encoded_attention_mask = encoded_candidates.get("attention_mask")
        encoded_token_type_ids = encoded_candidates.get("token_type_ids")

        if encoded_input_ids is not None:
            output_data["input_ids"].append(encoded_input_ids)
        if encoded_attention_mask is not None:
            output_data["attention_mask"].append(encoded_attention_mask)
        if encoded_token_type_ids is not None:
            output_data["token_type_ids"].append(encoded_token_type_ids)

    output_data = dict((key, item) for key, item in output_data.items() if len(item) != 0)

    return BatchEncoding(output_data, tensor_type=return_tensors)
src/transformers/models/realm/tokenization_realm.py
351
transformers
{ "docstring": "\n Encode a batch of text or text pair. This method is similar to regular __call__ method but has the following\n differences:\n\n 1. Handle additional num_candidate axis. (batch_size, num_candidates, text)\n 2. Always pad the sequences to *max_length*.\n 3. Must specify *max_length* in order to stack packs of candidates into a batch.\n\n - single sequence: `[CLS] X [SEP]`\n - pair of sequences: `[CLS] A [SEP] B [SEP]`\n\n Args:\n text (`List[List[str]]`):\n The batch of sequences to be encoded. Each sequence must be in this format: (batch_size,\n num_candidates, text).\n text_pair (`List[List[str]]`, *optional*):\n The batch of sequences to be encoded. Each sequence must be in this format: (batch_size,\n num_candidates, text).\n **kwargs:\n Keyword arguments of the __call__ method.\n\n Returns:\n [`BatchEncoding`]: Encoded text or text pair.\n\n Example:\n\n ```python\n >>> from transformers import RealmTokenizer\n\n >>> # batch_size = 2, num_candidates = 2\n >>> text = [[\"Hello world!\", \"Nice to meet you!\"], [\"The cute cat.\", \"The adorable dog.\"]]\n\n >>> tokenizer = RealmTokenizer.from_pretrained(\"qqaatw/realm-cc-news-pretrained-encoder\")\n >>> tokenized_text = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors=\"pt\")\n ```", "language": "en", "n_whitespaces": 418, "n_words": 160, "vocab_size": 106 }
112
Python
74
22454ae492eca4bb749fa6d770dffc91d17dab87
tokenization_realm.py
34,115
62
213
batch_encode_candidates
https://github.com/huggingface/transformers.git
Add REALM (#13292) * REALM initial commit * Retriever OK (Update new_gelu). * Encoder prediction score OK * Encoder pretrained model OK * Update retriever comments * Update docs, tests, and imports * Prune unused models * Make embedder as a module `RealmEmbedder` * Add RealmRetrieverOutput * Update tokenization * Pass all tests in test_modeling_realm.py * Prune RealmModel * Update docs * Add training test. * Remove completed TODO * Style & Quality * Prune `RealmModel` * Fixup * Changes: 1. Remove RealmTokenizerFast 2. Update docstrings 3. Add a method to RealmTokenizer to handle candidates tokenization. * Fix up * Style * Add tokenization tests * Update `from_pretrained` tests * Apply suggestions * Style & Quality * Copy BERT model * Fix comment to avoid docstring copying * Make RealmBertModel private * Fix bug * Style * Basic QA * Save * Complete reader logits * Add searcher * Complete searcher & reader * Move block records init to constructor * Fix training bug * Add some outputs to RealmReader * Add finetuned checkpoint variable names parsing * Fix bug * Update REALM config * Add RealmForOpenQA * Update convert_tfrecord logits * Fix bugs * Complete imports * Update docs * Update naming * Add brute-force searcher * Pass realm model tests * Style * Exclude RealmReader from common tests * Fix * Fix * convert docs * up * up * more make style * up * upload * up * Fix * Update src/transformers/__init__.py * adapt testing * change modeling code * fix test * up * up * up * correct more * make retriever work * update * make style * finish main structure * Resolve merge conflict * Make everything work * Style * Fixup * Fixup * Update training test * fix retriever * remove hardcoded path * Fix * Fix modeling test * Update model links * Initial retrieval test * Fix modeling test * Complete retrieval tests * Fix * style * Fix tests * Fix docstring example * Minor fix of retrieval test * Update license headers and docs * Apply suggestions from code review * Style * Apply suggestions from code review * Add an example to RealmEmbedder * Fix Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
395
0
6,202
13
5
12
def weight_intercept(self, coef):
    if not self.base_loss.is_multiclass:
        if self.fit_intercept:
            intercept = coef[-1]
            weights = coef[:-1]
        else:
            intercept = 0.0
            weights = coef
    else:
        # reshape to (n_classes, n_dof)
        if coef.ndim == 1:
            weights = coef.reshape((self.base_loss.n_classes, -1), order="F")
        else:
            weights = coef
        if self.fit_intercept:
            intercept = weights[:, -1]
            weights = weights[:, :-1]
        else:
            intercept = 0.0

    return weights, intercept
sklearn/linear_model/_linear_loss.py
185
scikit-learn
{ "docstring": "Helper function to get coefficients and intercept.\n\n Parameters\n ----------\n coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)\n Coefficients of a linear model.\n If shape (n_classes * n_dof,), the classes of one feature are contiguous,\n i.e. one reconstructs the 2d-array via\n coef.reshape((n_classes, -1), order=\"F\").\n\n Returns\n -------\n weights : ndarray of shape (n_features,) or (n_classes, n_features)\n Coefficients without intercept term.\n intercept : float or ndarray of shape (n_classes,)\n Intercept terms.\n ", "language": "en", "n_whitespaces": 194, "n_words": 72, "vocab_size": 52 }
57
Python
31
ff9344f3d8d11d38fa3a2497199113e5bac9537c
_linear_loss.py
261,401
19
116
weight_intercept
https://github.com/scikit-learn/scikit-learn.git
FEA add (single) Cholesky Newton solver to GLMs (#24637) * FEA add NewtonSolver, CholeskyNewtonSolver and QRCholeskyNewtonSolver * ENH better singular hessian special solve * CLN fix some typos found by reviewer * TST assert ConvergenceWarning is raised * MNT add BaseCholeskyNewtonSolver * WIP colinear design in GLMs * FIX _solve_singular * FIX false unpacking in * TST add tests for unpenalized GLMs * TST fix solutions of glm_dataset * ENH add SVDFallbackSolver * CLN remove SVDFallbackSolver * ENH use gradient step for singular hessians * ENH print iteration number in warnings * TST improve test_linalg_warning_with_newton_solver * CLN LinAlgWarning fron scipy.linalg * ENH more robust hessian * ENH increase maxls for lbfgs to make it more robust * ENH add hessian_warning for too many negative hessian values * CLN some warning messages * ENH add lbfgs_step * ENH use lbfgs_step for hessian_warning * TST make them pass * TST tweek rtol for lbfgs * TST add rigoros test for GLMs * TST improve test_warm_start * ENH improve lbfgs options for better convergence * CLN fix test_warm_start * TST fix assert singular values in datasets * CLN address most review comments * ENH enable more vebosity levels for lbfgs * DOC add whatsnew * CLN remove xfail and clean a bit * CLN docstring about minimum norm * More informative repr for the glm_dataset fixture cases * Forgot to run black * CLN remove unnecessary filterwarnings * CLN address review comments * Trigger [all random seeds] on the following tests: test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * CLN add comment for lbfgs ftol=64 * machine precision * CLN XXX code comment * Trigger [all random seeds] on the following tests: test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * CLN link issue and remove code snippet in comment * Trigger [all random seeds] on the following tests: test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * CLN add catch_warnings * Trigger [all random seeds] on the following tests: test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * Trigger [all random seeds] on the following tests: test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * [all random seeds] test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * Trigger with -Werror [all random seeds] test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * ENH increase maxls to 50 * [all random seeds] test_glm_regression test_glm_regression_hstacked_X 
test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * Revert "Trigger with -Werror [all random seeds]" This reverts commit 99f4cf99ca41b4ad2bdad537ad60f936970e3a88. * TST add catch_warnings to filterwarnings * TST adapt tests for newton solvers * CLN cleaner gradient step with gradient_times_newton * DOC add whatsnew * ENH always use lbfgs as fallback * TST adapt rtol * TST fix test_linalg_warning_with_newton_solver * CLN address some review comments * Improve tests related to convergence warning on collinear data * overfit -> fit * Typo in comment * Apply suggestions from code review * ENH fallback_lbfgs_solve - Do not use lbfgs steps, fall back complete to lbfgs * ENH adapt rtol * Improve test_linalg_warning_with_newton_solver * Better comments * Fixed Hessian casing and improved warning messages * [all random seeds] test_linalg_warning_with_newton_solver * Ignore ConvergenceWarnings for now if convergence is good * CLN remove counting of warnings * ENH fall back to lbfgs if line search did not converge * DOC better comment on performance bottleneck * Update GLM related examples to use the new solver * CLN address reviewer comments * EXA improve some wordings * CLN do not pop "solver in parameter constraints * CLN fix typos * DOC fix docstring * CLN remove solver newton-qr-cholesky * DOC update PR number in whatsnew * CLN address review comments * CLN remove unnecessary catch_warnings * CLN address some review comments * DOC more precise whatsnew * CLN use init_zero_coef * CLN use and test init_zero_coef * CLN address some review comments * CLN mark NewtonSolver as private by leading underscore * CLN exact comments for inner_solve * TST add test_newton_solver_verbosity * TST extend test_newton_solver_verbosity * TST logic in test_glm_regression_unpenalized * TST use count_nonzero * CLN remove super rare line search checks * MNT move Newton solver to new file _newton_solver.py Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> Co-authored-by: Julien Jerphanion <git@jjerphan.xyz>
297
0
76,808
16
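A small worked illustration of the coefficient layout handled above, using plain NumPy; the order="F" reshape means the classes of one feature are contiguous in the flattened vector, as the record's docstring notes:

import numpy as np

n_classes, n_features = 3, 2
# Flattened multinomial coefficients with an intercept column appended,
# stored so that the classes of one feature are contiguous (Fortran order).
coef = np.arange(n_classes * (n_features + 1), dtype=float)

weights = coef.reshape((n_classes, -1), order="F")
intercept = weights[:, -1]
weights = weights[:, :-1]
print(weights.shape, intercept.shape)  # (3, 2) (3,)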
3
27
def _run_executable(self, prog, args, run_from_path, runtime):
    # Run the test in a clean environment to make sure they're really self-contained.
    prog_env = copy.deepcopy(os.environ)
    prog_env['PATH'] = ''
    del prog_env['PATH']
    # For Windows we need to keep minimal PATH for successful running of some tests.
    if is_win:
        # Minimum Windows PATH is in most cases: C:\Windows\system32;C:\Windows
        prog_env['PATH'] = os.pathsep.join(winutils.get_system_path())

    exe_path = prog
    if run_from_path:
        # Run executable in the temp directory. Add the directory containing the executable to $PATH. Basically,
        # pretend we are a shell executing the program from $PATH.
        prog_cwd = str(self._tmpdir)
        prog_name = os.path.basename(prog)
        prog_env['PATH'] = os.pathsep.join([prog_env.get('PATH', ''), os.path.dirname(prog)])
    else:
        # Run executable in the directory where it is.
        prog_cwd = os.path.dirname(prog)
        # The executable will be called with argv[0] as relative not absolute path.
        prog_name = os.path.join(os.curdir, os.path.basename(prog))

    args = [prog_name] + args
    # Using sys.stdout/sys.stderr for subprocess fixes printing messages in Windows command prompt. Py.test is then
    # able to collect stdout/sterr messages and display them if a test fails.
    return self._run_executable_(args, exe_path, prog_env, prog_cwd, runtime)
PyInstaller/utils/conftest.py
275
pyinstaller
{ "docstring": "\n Run executable created by PyInstaller.\n\n :param args: CLI options to pass to the created executable.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 13 }
169
Python
115
be94db4587c16f3f6498d64d563c7ce740e11863
conftest.py
263,985
16
165
_run_executable
https://github.com/pyinstaller/pyinstaller.git
tests: do not attempt to re-run executable after a failed run Remove the re-runs of a failed executable. This functionality was originally intended for executables whose execution timed out (when running in "regular" mode, where we expect the program to exit). However, it ended up subtly broken ever since cf9dfec8018c96511f8c8caffc2b2e965350f2f9 changed the program exit code for unhandled exceptions from -1 to 1 to match the behavior of python interpreter. Consequently, every time that a test executable fails due to unhandled exception, it is re-run again. This unnecessarily prolongs the test time (especially for onefile executables) and bloats/obfuscates the captured test output (since we end up with bootloader log of two runs instead of just one). Remove this built-in re-run functionality altogether, because re-runs usually mask some other issue that should be fixed.
390
0
77,541
14
7
14
def unscale_gradients(self, optimizer=None):
    if self.state.use_fp16 and self.native_amp:
        if optimizer is None:
            # TODO: this unscales all optimizers where we should only unscale the one where parameters are.
            optimizer = self._optimizers
        elif not isinstance(optimizer, (tuple, list)):
            optimizer = [optimizer]
        for opt in optimizer:
            while isinstance(opt, AcceleratedOptimizer):
                opt = opt.optimizer
            self.scaler.unscale_(opt)
src/accelerate/accelerator.py
119
accelerate
{ "docstring": "\n Unscale the gradients in mixed precision training with AMP. This is a noop in all other settings.\n\n Args:\n optimizer (`torch.optim.Optimizer` or `List[torch.optim.Optimizer]`, *optional*):\n The optimizer(s) for which to unscale gradients. If not set, will unscale gradients on all optimizers\n that were passed to [`~Accelerator.prepare`].\n ", "language": "en", "n_whitespaces": 107, "n_words": 44, "vocab_size": 39 }
49
Python
42
fb5ed62c102c0323486b89805e1888495de3db15
accelerator.py
337,281
10
75
unscale_gradients
https://github.com/huggingface/accelerate.git
Convert documentation to the new front (#271) * Main conversion * Doc styling * Style * New front deploy * Fixes * Fixes * Fix new docstrings * Style
190
0
120,995
13
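A hedged sketch of where the method above sits in a typical Accelerate training step when gradients are clipped manually; the surrounding loop is illustrative only, and `unscale_gradients` is a no-op unless mixed precision is active:

import torch
from accelerate import Accelerator

accelerator = Accelerator()  # mixed precision controlled by the launcher/config
model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
model, optimizer = accelerator.prepare(model, optimizer)

x = torch.randn(8, 10, device=accelerator.device)
y = torch.randn(8, 1, device=accelerator.device)
loss = torch.nn.functional.mse_loss(model(x), y)
accelerator.backward(loss)

# Undo the AMP loss scaling before inspecting or clipping raw gradients.
accelerator.unscale_gradients(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()
optimizer.zero_grad()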
1
32
def test_coin_api_load_df_for_ta(get_bitcoin, mocker):
    mock_load = mocker.patch(
        base + "due_diligence.pycoingecko_model.CoinGeckoAPI.get_coin_market_chart_by_id"
    )
    _, symbol = get_bitcoin
    coin_map_df = prepare_all_coins_df().set_index("Symbol").loc[symbol.upper()].iloc[0]

    with open(
        "tests/openbb_terminal/cryptocurrency/json/test_cryptocurrency_helpers/btc_usd_test_data.json",
        encoding="utf8",
    ) as f:
        sample_return = json.load(f)

    mock_load.return_value = sample_return
    mock_return, vs = load_ta_data(
        coin_map_df=coin_map_df,
        source="cg",
        currency="usd",
        days=30,
    )
    assert mock_return.shape == (31, 4)
    assert vs == "usd"


@pytest.mark.record_stdout
@pytest.mark.vcr
tests/openbb_terminal/cryptocurrency/test_cryptocurrency_helpers.py
205
@pytest.mark.record_stdout @pytest.mark.vcr
OpenBBTerminal
{ "docstring": "\n Mock load function through get_coin_market_chart_by_id.\n Mock returns a dict saved as .json\n ", "language": "en", "n_whitespaces": 22, "n_words": 12, "vocab_size": 11 }
50
Python
39
9068ad01249c1e1adaca3ef9a704d70da7e3a17b
test_cryptocurrency_helpers.py
284,452
21
110
test_coin_api_load_df_for_ta
https://github.com/OpenBB-finance/OpenBBTerminal.git
Refactored Crypto Tests (#1743) * Refactored tests * Removed unused command * Added tests * Tests : remove cassettes files + add fixture * Black * Tests : skip tests Co-authored-by: didierlopes.eth <dro.lopes@campus.fct.unl.pt> Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com> Co-authored-by: Chavithra PARANA <chavithra@gmail.com>
147
1
84,723
14
1
7
def choose_parent_view(self, request):
    kwargs = {"model_admin": self}
    view_class = self.choose_parent_view_class
    return view_class.as_view(**kwargs)(request)
wagtail/contrib/modeladmin/options.py
54
wagtail
{ "docstring": "\n Instantiates a class-based view to allows a parent page to be chosen\n for a new object, where the assigned model extends Wagtail's Page\n model, and there is more than one potential parent for new instances.\n The view class used can be overridden by changing the\n 'choose_parent_view_class' attribute.\n ", "language": "en", "n_whitespaces": 90, "n_words": 47, "vocab_size": 38 }
12
Python
11
d10f15e55806c6944827d801cd9c2d53f5da4186
options.py
73,176
4
31
choose_parent_view
https://github.com/wagtail/wagtail.git
Reformat with black
40
0
15,972
9
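A hedged configuration sketch of how the hook above is meant to be customised, following the record's docstring: a ModelAdmin subclass can point `choose_parent_view_class` at its own view. This assumes a configured Wagtail 2.x project, and the import paths and class names below are assumptions/placeholders rather than taken from the record:

from wagtail.contrib.modeladmin.options import ModelAdmin
from wagtail.contrib.modeladmin.views import ChooseParentView
from wagtail.core.models import Page  # a real project would use its own Page subclass

class RestrictedChooseParentView(ChooseParentView):
    # Hypothetical override point, e.g. to narrow down the allowed parent pages.
    pass

class PageAdmin(ModelAdmin):
    model = Page
    choose_parent_view_class = RestrictedChooseParentView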
1
10
def is_within_directory(directory, target):
    # type: (str, str) -> bool
    abs_directory = os.path.abspath(directory)
    abs_target = os.path.abspath(target)

    prefix = os.path.commonprefix([abs_directory, abs_target])
    return prefix == abs_directory
.venv/lib/python3.8/site-packages/pip/_internal/utils/unpacking.py
75
transferlearning
{ "docstring": "\n Return true if the absolute path of target is within the directory\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 11 }
23
Python
19
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
unpacking.py
61,321
5
46
is_within_directory
https://github.com/jindongwang/transferlearning.git
upd; format
41
0
12,507
9
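A small self-contained illustration of the path check above, including the traversal case it is commonly used to guard against when extracting archives (the function body is repeated here only so the snippet runs on its own):

import os

def is_within_directory(directory, target):
    # type: (str, str) -> bool
    abs_directory = os.path.abspath(directory)
    abs_target = os.path.abspath(target)
    prefix = os.path.commonprefix([abs_directory, abs_target])
    return prefix == abs_directory

print(is_within_directory("/tmp/extract", "/tmp/extract/pkg/file.txt"))   # True
print(is_within_directory("/tmp/extract", "/tmp/extract/../etc/passwd"))  # False: path escapes the directory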