| column | type / range |
|---|---|
| hexsha | string, length 40 |
| repo | string, length 5–121 |
| path | string, length 4–227 |
| license | sequence |
| language | string, 1 distinct value (Python) |
| identifier | string, length 1–107 |
| return_type | string, length 2–237, or null (⌀) |
| original_string | string, length 75–13.4k |
| original_docstring | string, length 13–12.9k |
| docstring | string, length 13–2.57k |
| docstring_tokens | sequence |
| code | string, length 23–1.88k |
| code_tokens | sequence |
| short_docstring | string, length 1–1.32k |
| short_docstring_tokens | sequence |
| comment | sequence |
| parameters | list |
| docstring_params | dict |
| code_with_imports | string, length 23–1.88k |
| idxs | int64, 0–611k |
| cluster | int64, 0–1.02k |

Each record below lists these fields in order, separated by `|`.
53dd8a0925cb840f7d65106fc569ddb33735d4c0 | nicoddemus/scancode-toolkit | src/summarycode/score.py | [
"Apache-2.0",
"CC0-1.0"
] | Python | has_spdx_licenses | <not_specific> | def has_spdx_licenses(resource):
"""
Return True if a Resource licenses are all known SPDX licenses.
"""
if resource.scan_errors:
return False
for detected_license in resource.licenses:
if not detected_license.get('spdx_license_key'):
return False
return True |
Return True if a Resource licenses are all known SPDX licenses.
| Return True if a Resource licenses are all known SPDX licenses. | [
"Return",
"True",
"if",
"a",
"Resource",
"licenses",
"are",
"all",
"known",
"SPDX",
"licenses",
"."
] | def has_spdx_licenses(resource):
if resource.scan_errors:
return False
for detected_license in resource.licenses:
if not detected_license.get('spdx_license_key'):
return False
return True | [
"def",
"has_spdx_licenses",
"(",
"resource",
")",
":",
"if",
"resource",
".",
"scan_errors",
":",
"return",
"False",
"for",
"detected_license",
"in",
"resource",
".",
"licenses",
":",
"if",
"not",
"detected_license",
".",
"get",
"(",
"'spdx_license_key'",
")",
":",
"return",
"False",
"return",
"True"
] | Return True if a Resource licenses are all known SPDX licenses. | [
"Return",
"True",
"if",
"a",
"Resource",
"licenses",
"are",
"all",
"known",
"SPDX",
"licenses",
"."
] | [
"\"\"\"\n Return True if a Resource licenses are all known SPDX licenses.\n \"\"\""
] | [
{
"param": "resource",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "resource",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def has_spdx_licenses(resource):
if resource.scan_errors:
return False
for detected_license in resource.licenses:
if not detected_license.get('spdx_license_key'):
return False
return True | 220 | 122 |
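A minimal usage sketch for has_spdx_licenses above. The real scancode-toolkit Resource is a much richer object; SimpleNamespace stands in for it here, and the license dicts are hypothetical.

from types import SimpleNamespace

# SimpleNamespace is a stand-in for scancode-toolkit's Resource; only
# the two attributes the function reads are provided.
resource = SimpleNamespace(
    scan_errors=[],
    licenses=[{"spdx_license_key": "Apache-2.0"},
              {"spdx_license_key": "CC0-1.0"}],
)
print(has_spdx_licenses(resource))   # True

resource.licenses.append({"spdx_license_key": None})
print(has_spdx_licenses(resource))   # False: one detection lacks an SPDX key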
c8fee5216e1c4d71fdfd904795395554a066d348 | wdeconinck/gridtools | pyutils/pyutils/env.py | [
"BSD-3-Clause"
] | Python | hostname | <not_specific> | def hostname():
"""Host name of the current machine.
Example:
>>> hostname()
'keschln-0002'
"""
hostname = platform.node()
return hostname | Host name of the current machine.
Example:
>>> hostname()
'keschln-0002'
| Host name of the current machine. | [
"Host",
"name",
"of",
"the",
"current",
"machine",
"."
] | def hostname():
hostname = platform.node()
return hostname | [
"def",
"hostname",
"(",
")",
":",
"hostname",
"=",
"platform",
".",
"node",
"(",
")",
"return",
"hostname"
] | Host name of the current machine. | [
"Host",
"name",
"of",
"the",
"current",
"machine",
"."
] | [
"\"\"\"Host name of the current machine.\n\n Example:\n >>> hostname()\n 'keschln-0002'\n \"\"\""
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": [
{
"identifier": "examples",
"docstring": null,
"docstring_tokens": [
"None"
]
}
]
} | import platform
def hostname():
hostname = platform.node()
return hostname | 221 | 623 |
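A quick call, assuming the hostname() definition above is in scope. It is a thin wrapper over platform.node(), so output depends entirely on the machine; the docstring's 'keschln-0002' is illustrative.

# Machine-dependent output; on the original author's host this printed
# 'keschln-0002' per the doctest.
print(hostname())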
4e89eb6fba46e5102cb45631747923769feead74 | dcoles/pycurl-requests | pycurl_requests/__init__.py | [
"MIT"
] | Python | patch_requests | null | def patch_requests():
"""Patch Requests library with PycURL Requests"""
import sys
for module in ('requests', 'requests.api', 'requests.cookies', 'requests.exceptions',
'requests.models', 'requests.sessions', 'requests.structures'):
sys.modules[module] = sys.modules[module.replace('requests', __name__, 1)] | Patch Requests library with PycURL Requests | Patch Requests library with PycURL Requests | [
"Patch",
"Requests",
"library",
"with",
"PycURL",
"Requests"
] | def patch_requests():
import sys
for module in ('requests', 'requests.api', 'requests.cookies', 'requests.exceptions',
'requests.models', 'requests.sessions', 'requests.structures'):
sys.modules[module] = sys.modules[module.replace('requests', __name__, 1)] | [
"def",
"patch_requests",
"(",
")",
":",
"import",
"sys",
"for",
"module",
"in",
"(",
"'requests'",
",",
"'requests.api'",
",",
"'requests.cookies'",
",",
"'requests.exceptions'",
",",
"'requests.models'",
",",
"'requests.sessions'",
",",
"'requests.structures'",
")",
":",
"sys",
".",
"modules",
"[",
"module",
"]",
"=",
"sys",
".",
"modules",
"[",
"module",
".",
"replace",
"(",
"'requests'",
",",
"__name__",
",",
"1",
")",
"]"
] | Patch Requests library with PycURL Requests | [
"Patch",
"Requests",
"library",
"with",
"PycURL",
"Requests"
] | [
"\"\"\"Patch Requests library with PycURL Requests\"\"\""
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | import sys
def patch_requests():
import sys
for module in ('requests', 'requests.api', 'requests.cookies', 'requests.exceptions',
'requests.models', 'requests.sessions', 'requests.structures'):
sys.modules[module] = sys.modules[module.replace('requests', __name__, 1)] | 222 | 143 |
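A sketch of the sys.modules aliasing mechanism that patch_requests() relies on, using a made-up alias name rather than the real requests/pycurl_requests pair:

import sys
import json

# After this assignment, importing 'fake_json' (a hypothetical name)
# yields the real json module object, the same trick patch_requests()
# uses to make 'import requests' resolve to this package instead.
sys.modules["fake_json"] = sys.modules["json"]

import fake_json
print(fake_json is json)           # True
print(fake_json.dumps({"ok": 1}))  # {"ok": 1}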
d9a936894ffe6f37df0d87b4949b7732a658c2f7 | emanjavacas/multilemma | multi/dataset.py | [
"MIT"
] | Python | readlines | null | def readlines(path, maxlen=35):
"""
path: str, path to file with data
maxlen: int, maximum sentence length
"""
with open(path) as f:
# skip header
next(f)
sent, tasks = [], collections.defaultdict(list)
for line in f:
line = line.strip()
# new sentence marker
if not line:
yield sent, dict(tasks)
sent, tasks = [], collections.defaultdict(list)
# actual line
else:
token, lemma, pos, *_ = line.split()
# accumulate
sent.append(token)
for task, data in {'lemma': lemma, 'pos': pos}.items():
tasks[task].append(data)
if len(sent) >= maxlen:
yield sent, dict(tasks)
sent, tasks = [], collections.defaultdict(list)
if sent:
yield sent, dict(tasks) |
path: str, path to file with data
maxlen: int, maximum sentence length
| str, path to file with data
maxlen: int, maximum sentence length | [
"str",
"path",
"to",
"file",
"with",
"data",
"maxlen",
":",
"int",
"maximum",
"sentence",
"length"
] | def readlines(path, maxlen=35):
with open(path) as f:
next(f)
sent, tasks = [], collections.defaultdict(list)
for line in f:
line = line.strip()
if not line:
yield sent, dict(tasks)
sent, tasks = [], collections.defaultdict(list)
else:
token, lemma, pos, *_ = line.split()
sent.append(token)
for task, data in {'lemma': lemma, 'pos': pos}.items():
tasks[task].append(data)
if len(sent) >= maxlen:
yield sent, dict(tasks)
sent, tasks = [], collections.defaultdict(list)
if sent:
yield sent, dict(tasks) | [
"def",
"readlines",
"(",
"path",
",",
"maxlen",
"=",
"35",
")",
":",
"with",
"open",
"(",
"path",
")",
"as",
"f",
":",
"next",
"(",
"f",
")",
"sent",
",",
"tasks",
"=",
"[",
"]",
",",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"line",
"in",
"f",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"not",
"line",
":",
"yield",
"sent",
",",
"dict",
"(",
"tasks",
")",
"sent",
",",
"tasks",
"=",
"[",
"]",
",",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"else",
":",
"token",
",",
"lemma",
",",
"pos",
",",
"*",
"_",
"=",
"line",
".",
"split",
"(",
")",
"sent",
".",
"append",
"(",
"token",
")",
"for",
"task",
",",
"data",
"in",
"{",
"'lemma'",
":",
"lemma",
",",
"'pos'",
":",
"pos",
"}",
".",
"items",
"(",
")",
":",
"tasks",
"[",
"task",
"]",
".",
"append",
"(",
"data",
")",
"if",
"len",
"(",
"sent",
")",
">=",
"maxlen",
":",
"yield",
"sent",
",",
"dict",
"(",
"tasks",
")",
"sent",
",",
"tasks",
"=",
"[",
"]",
",",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"if",
"sent",
":",
"yield",
"sent",
",",
"dict",
"(",
"tasks",
")"
] | path: str, path to file with data
maxlen: int, maximum sentence length | [
"path",
":",
"str",
"path",
"to",
"file",
"with",
"data",
"maxlen",
":",
"int",
"maximum",
"sentence",
"length"
] | [
"\"\"\"\n path: str, path to file with data\n maxlen: int, maximum sentence length\n \"\"\"",
"# skip header",
"# new sentence marker",
"# actual line",
"# accumulate"
] | [
{
"param": "path",
"type": null
},
{
"param": "maxlen",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "maxlen",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import collections
def readlines(path, maxlen=35):
with open(path) as f:
next(f)
sent, tasks = [], collections.defaultdict(list)
for line in f:
line = line.strip()
if not line:
yield sent, dict(tasks)
sent, tasks = [], collections.defaultdict(list)
else:
token, lemma, pos, *_ = line.split()
sent.append(token)
for task, data in {'lemma': lemma, 'pos': pos}.items():
tasks[task].append(data)
if len(sent) >= maxlen:
yield sent, dict(tasks)
sent, tasks = [], collections.defaultdict(list)
if sent:
yield sent, dict(tasks) | 223 | 517 |
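A usage sketch for readlines() above, assuming it is in scope. The tiny file follows the layout the code expects (one header line, then token/lemma/POS rows, a blank line between sentences); the sample tokens are invented.

import tempfile

sample = ("token lemma pos\n"
          "The the DET\n"
          "cats cat NOUN\n"
          "\n"
          "sat sit VERB\n")
with tempfile.NamedTemporaryFile("w", delete=False) as f:
    f.write(sample)
    path = f.name

for sent, tasks in readlines(path, maxlen=35):
    print(sent, tasks)
# ['The', 'cats'] {'lemma': ['the', 'cat'], 'pos': ['DET', 'NOUN']}
# ['sat'] {'lemma': ['sit'], 'pos': ['VERB']}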
3fcd4b0ed0f4ba4beb907bb63dba5c8cc7be28c2 | ampledata/aiscot | aiscot/functions.py | [
"Apache-2.0"
] | Python | read_known_craft | list | def read_known_craft(csv_file: str) -> list:
"""Reads the FILTER_CSV file into a `list`"""
all_rows: list = []
with open(csv_file) as csv_fd:
reader = csv.DictReader(csv_fd)
for row in reader:
all_rows.append(row)
return all_rows | Reads the FILTER_CSV file into a `list` | Reads the FILTER_CSV file into a `list` | [
"Reads",
"the",
"FILTER_CSV",
"file",
"into",
"a",
"`",
"list",
"`"
] | def read_known_craft(csv_file: str) -> list:
all_rows: list = []
with open(csv_file) as csv_fd:
reader = csv.DictReader(csv_fd)
for row in reader:
all_rows.append(row)
return all_rows | [
"def",
"read_known_craft",
"(",
"csv_file",
":",
"str",
")",
"->",
"list",
":",
"all_rows",
":",
"list",
"=",
"[",
"]",
"with",
"open",
"(",
"csv_file",
")",
"as",
"csv_fd",
":",
"reader",
"=",
"csv",
".",
"DictReader",
"(",
"csv_fd",
")",
"for",
"row",
"in",
"reader",
":",
"all_rows",
".",
"append",
"(",
"row",
")",
"return",
"all_rows"
] | Reads the FILTER_CSV file into a `list` | [
"Reads",
"the",
"FILTER_CSV",
"file",
"into",
"a",
"`",
"list",
"`"
] | [
"\"\"\"Reads the FILTER_CSV file into a `list`\"\"\""
] | [
{
"param": "csv_file",
"type": "str"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "csv_file",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import csv
def read_known_craft(csv_file: str) -> list:
all_rows: list = []
with open(csv_file) as csv_fd:
reader = csv.DictReader(csv_fd)
for row in reader:
all_rows.append(row)
return all_rows | 224 | 714 |
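A usage sketch for read_known_craft() above. The HEX/NAME columns are made up for illustration; the real FILTER_CSV schema is not shown in this record.

import tempfile

with tempfile.NamedTemporaryFile("w", delete=False, newline="") as f:
    f.write("HEX,NAME\nA1B2C3,Example Craft\n")
    path = f.name

print(read_known_craft(path))
# [{'HEX': 'A1B2C3', 'NAME': 'Example Craft'}] on recent Pythons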
50d2d2af6b067d034ca17b67c67e1e7ee67f4805 | PrateekMunjal/TorchAL | setup.py | [
"MIT"
] | Python | read_requirements | <not_specific> | def read_requirements():
"""Retrieves the list of packages mentioned in requirements.txt"""
req_list = []
with open("requirements.txt", "r") as f:
for line in f.readlines():
line = line.rstrip("\n")
req_list.append(line)
return req_list | Retrieves the list of packages mentioned in requirements.txt | Retrieves the list of packages mentioned in requirements.txt | [
"Retrieves",
"the",
"list",
"of",
"packages",
"mentioned",
"in",
"requirements",
".",
"txt"
] | def read_requirements():
req_list = []
with open("requirements.txt", "r") as f:
for line in f.readlines():
line = line.rstrip("\n")
req_list.append(line)
return req_list | [
"def",
"read_requirements",
"(",
")",
":",
"req_list",
"=",
"[",
"]",
"with",
"open",
"(",
"\"requirements.txt\"",
",",
"\"r\"",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
":",
"line",
"=",
"line",
".",
"rstrip",
"(",
"\"\\n\"",
")",
"req_list",
".",
"append",
"(",
"line",
")",
"return",
"req_list"
] | Retrieves the list of packages mentioned in requirements.txt | [
"Retrieves",
"the",
"list",
"of",
"packages",
"mentioned",
"in",
"requirements",
".",
"txt"
] | [
"\"\"\"Retrieves the list of packages mentioned in requirements.txt\"\"\""
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | def read_requirements():
req_list = []
with open("requirements.txt", "r") as f:
for line in f.readlines():
line = line.rstrip("\n")
req_list.append(line)
return req_list | 225 | 115 |
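read_requirements() above hard-codes the "requirements.txt" filename, so a sketch has to run from a directory containing one; the pinned packages here are hypothetical.

import os
import tempfile

# Create a throwaway requirements.txt and chdir into its directory,
# since the function reads from the current working directory.
tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "requirements.txt"), "w") as f:
    f.write("numpy==1.24.0\ntorch\n")
os.chdir(tmp)
print(read_requirements())  # ['numpy==1.24.0', 'torch']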
e85420853d90d443b439595c4f26148bad52fa6c | fallenpegasus/reconbf | reconbf/modules/test_nginx.py | [
"Apache-2.0"
] | Python | _get_parameters | <not_specific> | def _get_parameters(conf, option):
"""Get a statement where the first token is the same as option."""
for statement in conf:
if statement[0] == option:
return statement[1:]
return None | Get a statement where the first token is the same as option. | Get a statement where the first token is the same as option. | [
"Get",
"a",
"statement",
"where",
"the",
"first",
"token",
"is",
"the",
"same",
"as",
"option",
"."
] | def _get_parameters(conf, option):
for statement in conf:
if statement[0] == option:
return statement[1:]
return None | [
"def",
"_get_parameters",
"(",
"conf",
",",
"option",
")",
":",
"for",
"statement",
"in",
"conf",
":",
"if",
"statement",
"[",
"0",
"]",
"==",
"option",
":",
"return",
"statement",
"[",
"1",
":",
"]",
"return",
"None"
] | Get a statement where the first token is the same as option. | [
"Get",
"a",
"statement",
"where",
"the",
"first",
"token",
"is",
"the",
"same",
"as",
"option",
"."
] | [
"\"\"\"Get a statement where the first token is the same as option.\"\"\""
] | [
{
"param": "conf",
"type": null
},
{
"param": "option",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "conf",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "option",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _get_parameters(conf, option):
for statement in conf:
if statement[0] == option:
return statement[1:]
return None | 227 | 0 |
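A usage sketch for _get_parameters() above. reconbf parses nginx configs into statement token lists; the exact parse shape shown here is an assumption for illustration.

conf = [
    ["worker_processes", "4"],
    ["error_log", "/var/log/nginx/error.log", "warn"],
]
print(_get_parameters(conf, "error_log"))      # ['/var/log/nginx/error.log', 'warn']
print(_get_parameters(conf, "ssl_protocols"))  # None when the option is absent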
056de883e9b742276b81db971a24d069db53b833 | anonymous-sp-submission/korg-update | korg/Construct.py | [
"MIT"
] | Python | innerContents | <not_specific> | def innerContents(singleModelBody):
"""
This method returns the body of the given
model, as an array. The body is between the
two curly brace { }.
We assume no comments in the model at the moment ...
same with in the properties.
"""
if singleModelBody == False:
return False
i, j = 0, len(singleModelBody) - 1
while (singleModelBody[i] != "{" and i < len(singleModelBody)):
i += 1
while (singleModelBody[j] != "}" and j > 0):
j -= 1
if (i >= j or singleModelBody[i] != "{" or singleModelBody[j] != "}"):
return None
return singleModelBody[i+1:j] |
This method returns the body of the given
model, as an array. The body is between the
two curly brace { }.
We assume no comments in the model at the moment ...
same with in the properties.
| This method returns the body of the given
model, as an array. The body is between the
two curly brace { }.
We assume no comments in the model at the moment
same with in the properties. | [
"This",
"method",
"returns",
"the",
"body",
"of",
"the",
"given",
"model",
"as",
"an",
"array",
".",
"The",
"body",
"is",
"between",
"the",
"two",
"curly",
"brace",
"{",
"}",
".",
"We",
"assume",
"no",
"comments",
"in",
"the",
"model",
"at",
"the",
"moment",
"same",
"with",
"in",
"the",
"properties",
"."
] | def innerContents(singleModelBody):
if singleModelBody == False:
return False
i, j = 0, len(singleModelBody) - 1
while (singleModelBody[i] != "{" and i < len(singleModelBody)):
i += 1
while (singleModelBody[j] != "}" and j > 0):
j -= 1
if (i >= j or singleModelBody[i] != "{" or singleModelBody[j] != "}"):
return None
return singleModelBody[i+1:j] | [
"def",
"innerContents",
"(",
"singleModelBody",
")",
":",
"if",
"singleModelBody",
"==",
"False",
":",
"return",
"False",
"i",
",",
"j",
"=",
"0",
",",
"len",
"(",
"singleModelBody",
")",
"-",
"1",
"while",
"(",
"singleModelBody",
"[",
"i",
"]",
"!=",
"\"{\"",
"and",
"i",
"<",
"len",
"(",
"singleModelBody",
")",
")",
":",
"i",
"+=",
"1",
"while",
"(",
"singleModelBody",
"[",
"j",
"]",
"!=",
"\"}\"",
"and",
"j",
">",
"0",
")",
":",
"j",
"-=",
"1",
"if",
"(",
"i",
">=",
"j",
"or",
"singleModelBody",
"[",
"i",
"]",
"!=",
"\"{\"",
"or",
"singleModelBody",
"[",
"j",
"]",
"!=",
"\"}\"",
")",
":",
"return",
"None",
"return",
"singleModelBody",
"[",
"i",
"+",
"1",
":",
"j",
"]"
] | This method returns the body of the given
model, as an array. | [
"This",
"method",
"returns",
"the",
"body",
"of",
"the",
"given",
"model",
"as",
"an",
"array",
"."
] | [
"\"\"\"\n\tThis method returns the body of the given \n\tmodel, as an array. The body is between the \n\ttwo curly brace { }. \n\tWe assume no comments in the model at the moment ... \n\tsame with in the properties.\n\t\"\"\""
] | [
{
"param": "singleModelBody",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "singleModelBody",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def innerContents(singleModelBody):
if singleModelBody == False:
return False
i, j = 0, len(singleModelBody) - 1
while (singleModelBody[i] != "{" and i < len(singleModelBody)):
i += 1
while (singleModelBody[j] != "}" and j > 0):
j -= 1
if (i >= j or singleModelBody[i] != "{" or singleModelBody[j] != "}"):
return None
return singleModelBody[i+1:j] | 230 | 427 |
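A usage sketch for innerContents() above on plain strings (it works on any indexable sequence):

print(innerContents("init { run(); stop(); }"))  # ' run(); stop(); '
print(innerContents("} braces reversed {"))      # None for a malformed body
print(innerContents(False))                      # False is passed through
# Caveat observed in the scan loops: an input containing no '{' at all
# raises IndexError, because the element is read before the bounds check.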
fae6e8ec342dce1f1fde1eb1a74389c73b5dd3ac | SAGES-UCSC/Photometry | geom_utils.py | [
"MIT"
] | Python | intersecting | <not_specific> | def intersecting(b1xmin, b1xmax, b1ymin,
b1ymax, b2xmin, b2xmax,
b2ymin, b2ymax):
"Test two boxes to see if they are intersecting. "
xmin = max(b1xmin, b2xmin)
ymin = max(b1ymin, b2ymin)
xmax = min(b1xmax, b2xmax)
ymax = min(b1ymax, b2ymax)
return (xmin < xmax and ymin < ymax) | Test two boxes to see if they are intersecting. | Test two boxes to see if they are intersecting. | [
"Test",
"two",
"boxes",
"to",
"see",
"if",
"they",
"are",
"intersecting",
"."
] | def intersecting(b1xmin, b1xmax, b1ymin,
b1ymax, b2xmin, b2xmax,
b2ymin, b2ymax):
xmin = max(b1xmin, b2xmin)
ymin = max(b1ymin, b2ymin)
xmax = min(b1xmax, b2xmax)
ymax = min(b1ymax, b2ymax)
return (xmin < xmax and ymin < ymax) | [
"def",
"intersecting",
"(",
"b1xmin",
",",
"b1xmax",
",",
"b1ymin",
",",
"b1ymax",
",",
"b2xmin",
",",
"b2xmax",
",",
"b2ymin",
",",
"b2ymax",
")",
":",
"xmin",
"=",
"max",
"(",
"b1xmin",
",",
"b2xmin",
")",
"ymin",
"=",
"max",
"(",
"b1ymin",
",",
"b2ymin",
")",
"xmax",
"=",
"min",
"(",
"b1xmax",
",",
"b2xmax",
")",
"ymax",
"=",
"min",
"(",
"b1ymax",
",",
"b2ymax",
")",
"return",
"(",
"xmin",
"<",
"xmax",
"and",
"ymin",
"<",
"ymax",
")"
] | Test two boxes to see if they are intersecting. | [
"Test",
"two",
"boxes",
"to",
"see",
"if",
"they",
"are",
"intersecting",
"."
] | [
"\"Test two boxes to see if they are intersecting. \""
] | [
{
"param": "b1xmin",
"type": null
},
{
"param": "b1xmax",
"type": null
},
{
"param": "b1ymin",
"type": null
},
{
"param": "b1ymax",
"type": null
},
{
"param": "b2xmin",
"type": null
},
{
"param": "b2xmax",
"type": null
},
{
"param": "b2ymin",
"type": null
},
{
"param": "b2ymax",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "b1xmin",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "b1xmax",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "b1ymin",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "b1ymax",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "b2xmin",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "b2xmax",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "b2ymin",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "b2ymax",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def intersecting(b1xmin, b1xmax, b1ymin,
b1ymax, b2xmin, b2xmax,
b2ymin, b2ymax):
xmin = max(b1xmin, b2xmin)
ymin = max(b1ymin, b2ymin)
xmax = min(b1xmax, b2xmax)
ymax = min(b1ymax, b2ymax)
return (xmin < xmax and ymin < ymax) | 231 | 744 |
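A usage sketch for intersecting() above, with axis-aligned boxes given as min/max pairs per axis:

print(intersecting(0, 2, 0, 2, 1, 3, 1, 3))  # True: boxes overlap on both axes
print(intersecting(0, 1, 0, 1, 2, 3, 2, 3))  # False: disjoint boxes
print(intersecting(0, 1, 0, 1, 1, 2, 0, 1))  # False: edges only touch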
2f31373c3f5abbf795fea22fbc84a14a9e9e75ba | TheRaizer/Binary-Artificial-Neural-Network | scripts/saveloaddata.py | [
"MIT"
] | Python | check_theta_save | null | def check_theta_save(dims, theta, theta_file, dims_file):
""" Asks the user and may save the learned parameters and dimensions
Preconditions:
dims: list of int length >= 2
theta: dict
theta_file: string
dims_file: string
Parameters:
dims: the dimensions of the neural network model
theta: learned parameters of the model
theta_file: file path to save the parameters
dims_file: file path to save the dimensions
Postconditions:
May or may not save the learned parameters as well as the dimensions used to obtain said parameters.
"""
saveDecision = input("Type 'save' or 'yes' to save the trained parameters, otherwise type anything: ")
if saveDecision == 'yes' or saveDecision == 'save':
# open the files for binary writing
theta_file = open(theta_file, "wb")
dims_file = open(dims_file, "wb")
# use the pickle import to save the dict and list
pickle.dump(theta, theta_file)
pickle.dump(dims, dims_file)
# close the files
dims_file.close()
theta_file.close() | Asks the user and may save the learned parameters and dimensions
Preconditions:
dims: list of int length >= 2
theta: dict
theta_file: string
dims_file: string
Parameters:
dims: the dimensions of the neural network model
theta: learned parameters of the model
theta_file: file path to save the parameters
dims_file: file path to save the dimensions
Postconditions:
May or may not save the learned parameters as well as the dimensions used to obtain said parameters.
|
the dimensions of the neural network model
theta: learned parameters of the model
theta_file: file path to save the parameters
dims_file: file path to save the dimensions
May or may not save the learned parameters as well as the dimensions used to obtain said parameters. | [
"the",
"dimensions",
"of",
"the",
"neural",
"network",
"model",
"theta",
":",
"learned",
"parameters",
"of",
"the",
"model",
"theta_file",
":",
"file",
"path",
"to",
"save",
"the",
"parameters",
"dims_file",
":",
"file",
"path",
"to",
"save",
"the",
"dimensions",
"May",
"or",
"may",
"not",
"save",
"the",
"learned",
"parameters",
"as",
"well",
"as",
"the",
"dimensions",
"used",
"to",
"obtain",
"said",
"parameters",
"."
] | def check_theta_save(dims, theta, theta_file, dims_file):
saveDecision = input("Type 'save' or 'yes' to save the trained parameters, otherwise type anything: ")
if saveDecision == 'yes' or saveDecision == 'save':
theta_file = open(theta_file, "wb")
dims_file = open(dims_file, "wb")
pickle.dump(theta, theta_file)
pickle.dump(dims, dims_file)
dims_file.close()
theta_file.close() | [
"def",
"check_theta_save",
"(",
"dims",
",",
"theta",
",",
"theta_file",
",",
"dims_file",
")",
":",
"saveDecision",
"=",
"input",
"(",
"\"Type 'save' or 'yes' to save the trained parameters, otherwise type anything: \"",
")",
"if",
"saveDecision",
"==",
"'yes'",
"or",
"saveDecision",
"==",
"'save'",
":",
"theta_file",
"=",
"open",
"(",
"theta_file",
",",
"\"wb\"",
")",
"dims_file",
"=",
"open",
"(",
"dims_file",
",",
"\"wb\"",
")",
"pickle",
".",
"dump",
"(",
"theta",
",",
"theta_file",
")",
"pickle",
".",
"dump",
"(",
"dims",
",",
"dims_file",
")",
"dims_file",
".",
"close",
"(",
")",
"theta_file",
".",
"close",
"(",
")"
] | Asks the user and may save the learned parameters and dimensions
Preconditions:
dims: list of int length >= 2
theta: dict
theta_file: string
dims_file: string | [
"Asks",
"the",
"user",
"and",
"may",
"save",
"the",
"learned",
"parameters",
"and",
"dimensions",
"Preconditions",
":",
"dims",
":",
"list",
"of",
"int",
"length",
">",
"=",
"2",
"theta",
":",
"dict",
"theta_file",
":",
"string",
"dims_file",
":",
"string"
] | [
"\"\"\" Asks the user and may save the learned parameters and dimensions\n\n Preconditions:\n dims: list of int length >= 2\n theta: dict\n theta_file: string\n dims_file: string\n\n Parameters:\n dims: the dimensions of the neural network model\n theta: learned parameters of the model\n theta_file: file path to save the parameters\n dims_file: file path to save the dimensions\n\n Postconditions:\n May or may not save the learned parameters as well as the dimensions used to obtain said parameters.\n \"\"\"",
"# open the files for binary writing",
"# use the pickle import to save the dict and list",
"# close the files"
] | [
{
"param": "dims",
"type": null
},
{
"param": "theta",
"type": null
},
{
"param": "theta_file",
"type": null
},
{
"param": "dims_file",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "dims",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "theta",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "theta_file",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "dims_file",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import pickle
def check_theta_save(dims, theta, theta_file, dims_file):
saveDecision = input("Type 'save' or 'yes' to save the trained parameters, otherwise type anything: ")
if saveDecision == 'yes' or saveDecision == 'save':
theta_file = open(theta_file, "wb")
dims_file = open(dims_file, "wb")
pickle.dump(theta, theta_file)
pickle.dump(dims, dims_file)
dims_file.close()
theta_file.close() | 232 | 424 |
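check_theta_save() above prompts on stdin, so this sketch only prepares plausible arguments; the layer shapes and parameter values are made up. Answering 'yes' at the prompt pickles both files.

import os
import tempfile

dims = [4, 3, 1]                 # hypothetical network dimensions
theta = {"W1": [[0.1] * 4] * 3,  # hypothetical learned parameters
         "b1": [0.0] * 3}
tmp = tempfile.mkdtemp()
check_theta_save(dims, theta,
                 os.path.join(tmp, "theta.pkl"),
                 os.path.join(tmp, "dims.pkl"))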
aa920625cca390e96fb0bdf05e07ad6d32e1ced4 | rlhatton/RossROS | rossros_asyncio.py | [
"MIT"
] | Python | gather | null | async def gather(producer_consumer_list):
"""
Function that uses asyncio.gather to concurrently
execute a set of ConsumerProducer functions
"""
# Make a new list of producer_consumers by evaluating the input list
# (this evaluation matches syntax with rossros.py)
producer_consumer_list2 = []
for pc in producer_consumer_list:
producer_consumer_list2.append(pc())
await asyncio.gather(*producer_consumer_list2) |
Function that uses asyncio.gather to concurrently
execute a set of ConsumerProducer functions
| Function that uses asyncio.gather to concurrently
execute a set of ConsumerProducer functions | [
"Function",
"that",
"uses",
"asyncio",
".",
"gather",
"to",
"concurrently",
"execute",
"a",
"set",
"of",
"ConsumerProducer",
"functions"
] | async def gather(producer_consumer_list):
producer_consumer_list2 = []
for pc in producer_consumer_list:
producer_consumer_list2.append(pc())
await asyncio.gather(*producer_consumer_list2) | [
"async",
"def",
"gather",
"(",
"producer_consumer_list",
")",
":",
"producer_consumer_list2",
"=",
"[",
"]",
"for",
"pc",
"in",
"producer_consumer_list",
":",
"producer_consumer_list2",
".",
"append",
"(",
"pc",
"(",
")",
")",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"producer_consumer_list2",
")"
] | Function that uses asyncio.gather to concurrently
execute a set of ConsumerProducer functions | [
"Function",
"that",
"uses",
"asyncio",
".",
"gather",
"to",
"concurrently",
"execute",
"a",
"set",
"of",
"ConsumerProducer",
"functions"
] | [
"\"\"\"\n Function that uses asyncio.gather to concurrently\n execute a set of ConsumerProducer functions\n \"\"\"",
"# Make a new list of producer_consumers by evaluating the input list",
"# (this evaluation matches syntax with rossros.py)"
] | [
{
"param": "producer_consumer_list",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "producer_consumer_list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import asyncio
async def gather(producer_consumer_list):
producer_consumer_list2 = []
for pc in producer_consumer_list:
producer_consumer_list2.append(pc())
await asyncio.gather(*producer_consumer_list2) | 233 | 610 |
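A usage sketch for the gather() wrapper above. Note the list holds callables: gather() calls each one to obtain the coroutine it awaits.

import asyncio

async def tick():
    await asyncio.sleep(0.1)
    print("tick")

async def tock():
    await asyncio.sleep(0.2)
    print("tock")

# Pass the coroutine functions themselves, not tick()/tock() results.
asyncio.run(gather([tick, tock]))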
d38821cc9e19e65ee830a3ee5c2305293a30cde4 | pauliacomi/pyGAPS | src/pygaps/utilities/python_utilities.py | [
"MIT"
] | Python | deep_merge | <not_specific> | def deep_merge(a, b, path=None, update=True):
"""Recursive updates of a dictionary."""
if path is None:
path = []
for key, val in b.items():
if key in a:
if (
isinstance(a[key], abc.Mapping)
and isinstance(val, abc.Mapping)
):
deep_merge(a[key], val, path + [str(key)], update)
elif a[key] == val:
pass # same leaf value
elif update:
a[key] = val
else:
raise Exception(f"Conflict at {'.'.join(path + [str(key)])}")
else:
a[key] = val
return a | Recursive updates of a dictionary. | Recursive updates of a dictionary. | [
"Recursive",
"updates",
"of",
"a",
"dictionary",
"."
] | def deep_merge(a, b, path=None, update=True):
if path is None:
path = []
for key, val in b.items():
if key in a:
if (
isinstance(a[key], abc.Mapping)
and isinstance(val, abc.Mapping)
):
deep_merge(a[key], val, path + [str(key)], update)
elif a[key] == val:
pass
elif update:
a[key] = val
else:
raise Exception(f"Conflict at {'.'.join(path + [str(key)])}")
else:
a[key] = val
return a | [
"def",
"deep_merge",
"(",
"a",
",",
"b",
",",
"path",
"=",
"None",
",",
"update",
"=",
"True",
")",
":",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"[",
"]",
"for",
"key",
",",
"val",
"in",
"b",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"a",
":",
"if",
"(",
"isinstance",
"(",
"a",
"[",
"key",
"]",
",",
"abc",
".",
"Mapping",
")",
"and",
"isinstance",
"(",
"val",
",",
"abc",
".",
"Mapping",
")",
")",
":",
"deep_merge",
"(",
"a",
"[",
"key",
"]",
",",
"val",
",",
"path",
"+",
"[",
"str",
"(",
"key",
")",
"]",
",",
"update",
")",
"elif",
"a",
"[",
"key",
"]",
"==",
"val",
":",
"pass",
"elif",
"update",
":",
"a",
"[",
"key",
"]",
"=",
"val",
"else",
":",
"raise",
"Exception",
"(",
"f\"Conflict at {'.'.join(path + [str(key)])}\"",
")",
"else",
":",
"a",
"[",
"key",
"]",
"=",
"val",
"return",
"a"
] | Recursive updates of a dictionary. | [
"Recursive",
"updates",
"of",
"a",
"dictionary",
"."
] | [
"\"\"\"Recursive updates of a dictionary.\"\"\"",
"# same leaf value"
] | [
{
"param": "a",
"type": null
},
{
"param": "b",
"type": null
},
{
"param": "path",
"type": null
},
{
"param": "update",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "a",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "b",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "update",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | from collections import abc
def deep_merge(a, b, path=None, update=True):
if path is None:
path = []
for key, val in b.items():
if key in a:
if (
isinstance(a[key], abc.Mapping)
and isinstance(val, abc.Mapping)
):
deep_merge(a[key], val, path + [str(key)], update)
elif a[key] == val:
pass
elif update:
a[key] = val
else:
raise Exception(f"Conflict at {'.'.join(path + [str(key)])}")
else:
a[key] = val
return a | 234 | 18 |
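A usage sketch for deep_merge() above. It mutates and returns its first argument; with update=True the second dict wins at conflicting leaves. The keys here are invented.

a = {"iso": {"temp": 303, "gas": "N2"}, "name": "run1"}
b = {"iso": {"temp": 77}, "tag": "cryo"}
print(deep_merge(a, b))
# {'iso': {'temp': 77, 'gas': 'N2'}, 'name': 'run1', 'tag': 'cryo'}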
999ce4fc00465ddbd76d2f7fe6be34ef9df1385a | cisagov/gophish-tools | src/util/validate.py | [
"CC0-1.0"
] | Python | validate_assessment_id | <not_specific> | def validate_assessment_id(assessment_id):
"""Validate that the provided assessment_id is matching the valid assessment_id format. Example: RV1234.
Args:
assessment_id (string): Assessment identifier to validate.
Returns:
match: The result of a regular expression match.
"""
return re.match(r"^RV\d{4,5}$", assessment_id) | Validate that the provided assessment_id is matching the valid assessment_id format. Example: RV1234.
Args:
assessment_id (string): Assessment identifier to validate.
Returns:
match: The result of a regular expression match.
| Validate that the provided assessment_id is matching the valid assessment_id format. | [
"Validate",
"that",
"the",
"provided",
"assessment_id",
"is",
"matching",
"the",
"valid",
"assessment_id",
"format",
"."
] | def validate_assessment_id(assessment_id):
return re.match(r"^RV\d{4,5}$", assessment_id) | [
"def",
"validate_assessment_id",
"(",
"assessment_id",
")",
":",
"return",
"re",
".",
"match",
"(",
"r\"^RV\\d{4,5}$\"",
",",
"assessment_id",
")"
] | Validate that the provided assessment_id is matching the valid assessment_id format. | [
"Validate",
"that",
"the",
"provided",
"assessment_id",
"is",
"matching",
"the",
"valid",
"assessment_id",
"format",
"."
] | [
"\"\"\"Validate that the provided assessment_id is matching the valid assessment_id format. Example: RV1234.\n\n Args:\n assessment_id (string): Assessment identifier to validate.\n\n Returns:\n match: The result of a regular expression match.\n \"\"\""
] | [
{
"param": "assessment_id",
"type": null
}
] | {
"returns": [
{
"docstring": "The result of a regular expression match.",
"docstring_tokens": [
"The",
"result",
"of",
"a",
"regular",
"expression",
"match",
"."
],
"type": "match"
}
],
"raises": [],
"params": [
{
"identifier": "assessment_id",
"type": null,
"docstring": "Assessment identifier to validate.",
"docstring_tokens": [
"Assessment",
"identifier",
"to",
"validate",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | import re
def validate_assessment_id(assessment_id):
return re.match(r"^RV\d{4,5}$", assessment_id) | 235 | 441 |
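Quick checks for validate_assessment_id() above; re.match returns a Match object (truthy) on success, None otherwise.

print(bool(validate_assessment_id("RV1234")))    # True
print(bool(validate_assessment_id("RV123456")))  # False: at most 5 digits
print(bool(validate_assessment_id("rv1234")))    # False: case-sensitive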
f4dc4f096ba44186d134736e30884a6e8186d074 | ngeor/instarepo | instarepo/fixers/discovery.py | [
"MIT"
] | Python | classes_in_module | <not_specific> | def classes_in_module(module):
"""
Gets the classes defined in the given module
"""
module_dict = module.__dict__
return (
module_dict[c]
for c in module_dict
if (
isinstance(module_dict[c], type)
and module_dict[c].__module__ == module.__name__
)
) |
Gets the classes defined in the given module
| Gets the classes defined in the given module | [
"Gets",
"the",
"classes",
"defined",
"in",
"the",
"given",
"module"
] | def classes_in_module(module):
module_dict = module.__dict__
return (
module_dict[c]
for c in module_dict
if (
isinstance(module_dict[c], type)
and module_dict[c].__module__ == module.__name__
)
) | [
"def",
"classes_in_module",
"(",
"module",
")",
":",
"module_dict",
"=",
"module",
".",
"__dict__",
"return",
"(",
"module_dict",
"[",
"c",
"]",
"for",
"c",
"in",
"module_dict",
"if",
"(",
"isinstance",
"(",
"module_dict",
"[",
"c",
"]",
",",
"type",
")",
"and",
"module_dict",
"[",
"c",
"]",
".",
"__module__",
"==",
"module",
".",
"__name__",
")",
")"
] | Gets the classes defined in the given module | [
"Gets",
"the",
"classes",
"defined",
"in",
"the",
"given",
"module"
] | [
"\"\"\"\n Gets the classes defined in the given module\n \"\"\""
] | [
{
"param": "module",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "module",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def classes_in_module(module):
module_dict = module.__dict__
return (
module_dict[c]
for c in module_dict
if (
isinstance(module_dict[c], type)
and module_dict[c].__module__ == module.__name__
)
) | 236 | 793 |
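A usage sketch for classes_in_module() above against a stdlib module. Exact class lists vary across Python versions, so the output shown is indicative only.

import argparse

# Only classes whose __module__ is 'argparse' are yielded; names merely
# imported into the module are filtered out.
names = sorted(c.__name__ for c in classes_in_module(argparse))
print(names[:3])  # e.g. ['Action', 'ArgumentDefaultsHelpFormatter', 'ArgumentError']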
c4c2bedda00e5004bb7b286995e7f6f666fbd78e | bsamseth/vehicle-routing-problem | route.py | [
"MIT"
] | Python | pmx_1 | <not_specific> | def pmx_1(p1, p2):
"""Perform Partially Mapped Crossover on p1 and p2, yielding one child."""
cut1 = random.randint(0, len(p1) - 2)
cut2 = random.randint(cut1, len(p1) - 1)
c1 = [None] * len(p1)
c1[cut1:cut2] = p1[cut1:cut2]
# fill in parts of p2[cut1:cut2] not in child c1
for i in [i for i in p2[cut1:cut2] if i not in c1]:
k = p2.index(p1[p2.index(i)])
while True:
if c1[k] is None:
c1[k] = i
break
else:
k = p2.index(p1[k])
# Fill in remaining from p2
for i in range(len(p2)):
if c1[i] is None:
c1[i] = p2[i]
assert len(set(c1)) == len(p1), "Crossover lost something."
return c1 | Perform Partially Mapped Crossover on p1 and p2, yielding one child. | Perform Partially Mapped Crossover on p1 and p2, yielding one child. | [
"Perform",
"Partially",
"Mapped",
"Crossover",
"on",
"p1",
"and",
"p2",
"yielding",
"one",
"child",
"."
] | def pmx_1(p1, p2):
cut1 = random.randint(0, len(p1) - 2)
cut2 = random.randint(cut1, len(p1) - 1)
c1 = [None] * len(p1)
c1[cut1:cut2] = p1[cut1:cut2]
for i in [i for i in p2[cut1:cut2] if i not in c1]:
k = p2.index(p1[p2.index(i)])
while True:
if c1[k] is None:
c1[k] = i
break
else:
k = p2.index(p1[k])
for i in range(len(p2)):
if c1[i] is None:
c1[i] = p2[i]
assert len(set(c1)) == len(p1), "Crossover lost something."
return c1 | [
"def",
"pmx_1",
"(",
"p1",
",",
"p2",
")",
":",
"cut1",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"p1",
")",
"-",
"2",
")",
"cut2",
"=",
"random",
".",
"randint",
"(",
"cut1",
",",
"len",
"(",
"p1",
")",
"-",
"1",
")",
"c1",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"p1",
")",
"c1",
"[",
"cut1",
":",
"cut2",
"]",
"=",
"p1",
"[",
"cut1",
":",
"cut2",
"]",
"for",
"i",
"in",
"[",
"i",
"for",
"i",
"in",
"p2",
"[",
"cut1",
":",
"cut2",
"]",
"if",
"i",
"not",
"in",
"c1",
"]",
":",
"k",
"=",
"p2",
".",
"index",
"(",
"p1",
"[",
"p2",
".",
"index",
"(",
"i",
")",
"]",
")",
"while",
"True",
":",
"if",
"c1",
"[",
"k",
"]",
"is",
"None",
":",
"c1",
"[",
"k",
"]",
"=",
"i",
"break",
"else",
":",
"k",
"=",
"p2",
".",
"index",
"(",
"p1",
"[",
"k",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"p2",
")",
")",
":",
"if",
"c1",
"[",
"i",
"]",
"is",
"None",
":",
"c1",
"[",
"i",
"]",
"=",
"p2",
"[",
"i",
"]",
"assert",
"len",
"(",
"set",
"(",
"c1",
")",
")",
"==",
"len",
"(",
"p1",
")",
",",
"\"Crossover lost something.\"",
"return",
"c1"
] | Perform Partially Mapped Crossover on p1 and p2, yielding one child. | [
"Perform",
"Partially",
"Mapped",
"Crossover",
"on",
"p1",
"and",
"p2",
"yielding",
"one",
"child",
"."
] | [
"\"\"\"Perform Partially Mapped Crossover on p1 and p2, yielding one child.\"\"\"",
"# fill in parts of p2[cut1:cut2] not in child c1",
"# Fill in remaining from p2"
] | [
{
"param": "p1",
"type": null
},
{
"param": "p2",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "p1",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "p2",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import random
def pmx_1(p1, p2):
cut1 = random.randint(0, len(p1) - 2)
cut2 = random.randint(cut1, len(p1) - 1)
c1 = [None] * len(p1)
c1[cut1:cut2] = p1[cut1:cut2]
for i in [i for i in p2[cut1:cut2] if i not in c1]:
k = p2.index(p1[p2.index(i)])
while True:
if c1[k] is None:
c1[k] = i
break
else:
k = p2.index(p1[k])
for i in range(len(p2)):
if c1[i] is None:
c1[i] = p2[i]
assert len(set(c1)) == len(p1), "Crossover lost something."
return c1 | 237 | 939 |
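A usage sketch for pmx_1() above. The cut points are random, so only the permutation invariant is asserted rather than a specific child; the parent tours are invented.

import random

random.seed(7)  # make the cut points reproducible
p1 = [1, 2, 3, 4, 5, 6]
p2 = [3, 5, 1, 6, 2, 4]
child = pmx_1(p1, p2)
print(child)
assert sorted(child) == sorted(p1)  # child is a permutation of the parents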
5e7295ed6b582d75f094f4261cdb8e8fc86296bb | kvh/ramp | ramp/store.py | [
"MIT"
] | Python | loadpickle | <not_specific> | def loadpickle(fname):
"""
Load pickled object from `fname`
"""
return pickle.load(open(fname, 'rb')) |
Load pickled object from `fname`
| Load pickled object from `fname` | [
"Load",
"pickled",
"object",
"from",
"`",
"fname",
"`"
] | def loadpickle(fname):
return pickle.load(open(fname, 'rb')) | [
"def",
"loadpickle",
"(",
"fname",
")",
":",
"return",
"pickle",
".",
"load",
"(",
"open",
"(",
"fname",
",",
"'rb'",
")",
")"
] | Load pickled object from `fname` | [
"Load",
"pickled",
"object",
"from",
"`",
"fname",
"`"
] | [
"\"\"\"\n Load pickled object from `fname`\n \"\"\""
] | [
{
"param": "fname",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "fname",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import pickle
def loadpickle(fname):
return pickle.load(open(fname, 'rb')) | 238 | 254 |
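A round-trip sketch for loadpickle() above; the object being pickled is arbitrary.

import os
import pickle
import tempfile

obj = {"features": ["x", "y"], "target": "z"}
path = os.path.join(tempfile.mkdtemp(), "obj.pkl")
with open(path, "wb") as f:
    pickle.dump(obj, f)
print(loadpickle(path))  # {'features': ['x', 'y'], 'target': 'z'}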
47573097a8aeb6b65782e6d1d574e7cb1a1759a7 | lordofkhaos/Chargen | chargen.py | [
"MIT"
] | Python | printProgressBar | null | def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '#'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '.' * (length - filledLength)
print('\r%s [%s] %s%% %s' % (prefix, bar, percent, suffix))
# Print New Line on Complete
if iteration == total:
print() |
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
| Call in a loop to create terminal progress bar | [
"Call",
"in",
"a",
"loop",
"to",
"create",
"terminal",
"progress",
"bar"
] | def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '#'):
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '.' * (length - filledLength)
print('\r%s [%s] %s%% %s' % (prefix, bar, percent, suffix))
if iteration == total:
print() | [
"def",
"printProgressBar",
"(",
"iteration",
",",
"total",
",",
"prefix",
"=",
"''",
",",
"suffix",
"=",
"''",
",",
"decimals",
"=",
"1",
",",
"length",
"=",
"100",
",",
"fill",
"=",
"'#'",
")",
":",
"percent",
"=",
"(",
"\"{0:.\"",
"+",
"str",
"(",
"decimals",
")",
"+",
"\"f}\"",
")",
".",
"format",
"(",
"100",
"*",
"(",
"iteration",
"/",
"float",
"(",
"total",
")",
")",
")",
"filledLength",
"=",
"int",
"(",
"length",
"*",
"iteration",
"//",
"total",
")",
"bar",
"=",
"fill",
"*",
"filledLength",
"+",
"'.'",
"*",
"(",
"length",
"-",
"filledLength",
")",
"print",
"(",
"'\\r%s [%s] %s%% %s'",
"%",
"(",
"prefix",
",",
"bar",
",",
"percent",
",",
"suffix",
")",
")",
"if",
"iteration",
"==",
"total",
":",
"print",
"(",
")"
] | Call in a loop to create terminal progress bar | [
"Call",
"in",
"a",
"loop",
"to",
"create",
"terminal",
"progress",
"bar"
] | [
"\"\"\"\r\n\tCall in a loop to create terminal progress bar\r\n\t@params:\r\n\t\titeration - Required : current iteration (Int)\r\n\t\ttotal\t - Required : total iterations (Int)\r\n\t\tprefix\t - Optional : prefix string (Str)\r\n\t\tsuffix\t - Optional : suffix string (Str)\r\n\t\tdecimals\t- Optional : positive number of decimals in percent complete (Int)\r\n\t\tlength\t - Optional : character length of bar (Int)\r\n\t\tfill\t\t- Optional : bar fill character (Str)\r\n\t\"\"\"",
"# Print New Line on Complete\r"
] | [
{
"param": "iteration",
"type": null
},
{
"param": "total",
"type": null
},
{
"param": "prefix",
"type": null
},
{
"param": "suffix",
"type": null
},
{
"param": "decimals",
"type": null
},
{
"param": "length",
"type": null
},
{
"param": "fill",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "iteration",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "total",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "prefix",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "suffix",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "decimals",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "length",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "fill",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": [
{
"identifier": "params",
"docstring": "iteration - Required : current iteration (Int)\ntotal - Required : total iterations (Int)\nprefix - Optional : prefix string (Str)\nsuffix - Optional : suffix string (Str)\ndecimals - Optional : positive number of decimals in percent complete (Int)\nlength - Optional : character length of bar (Int)\nfill - Optional : bar fill character (Str)",
"docstring_tokens": [
"iteration",
"-",
"Required",
":",
"current",
"iteration",
"(",
"Int",
")",
"total",
"-",
"Required",
":",
"total",
"iterations",
"(",
"Int",
")",
"prefix",
"-",
"Optional",
":",
"prefix",
"string",
"(",
"Str",
")",
"suffix",
"-",
"Optional",
":",
"suffix",
"string",
"(",
"Str",
")",
"decimals",
"-",
"Optional",
":",
"positive",
"number",
"of",
"decimals",
"in",
"percent",
"complete",
"(",
"Int",
")",
"length",
"-",
"Optional",
":",
"character",
"length",
"of",
"bar",
"(",
"Int",
")",
"fill",
"-",
"Optional",
":",
"bar",
"fill",
"character",
"(",
"Str",
")"
]
}
]
} | def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '#'):
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '.' * (length - filledLength)
print('\r%s [%s] %s%% %s' % (prefix, bar, percent, suffix))
if iteration == total:
print() | 239 | 634 |
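A usage sketch for printProgressBar() above, following its own parameter docs. Note this variant prints with a default newline rather than end='\r', so each update lands on its own line instead of redrawing in place.

import time

total = 10
for i in range(total + 1):
    printProgressBar(i, total, prefix="Progress:", suffix="Complete", length=30)
    time.sleep(0.05)  # simulate work between iterations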
b5f806e50be08692d8b5d8948224a07379bd6c6c | BadIdeaFactory/truthgoggles-phrases | congressionalrecord/fileIterator.py | [
"Apache-2.0"
] | Python | findSpeaker | <not_specific> | def findSpeaker(contents):
"""
takes in the congressional record and returns the speaker of the next speech
:param contents: String containing the congressional record
:return: String that is the name of the speaker of the next speech in contents (including their prefix)
if more speeches in contents, otherwise returns "zzzzzzzzzzzzzzzzzzz"
"""
speakerSearch = re.search('[M][r,s]{1,2}'r'. ''[A-Z]+[.]', contents) # finds the next occurunce of a speaker
try:
speaker = speakerSearch.group(0) # returns the name of the speaker of the next speech in contents
except:
speaker = "zzzzzzzzzzzzzzzzzzz" # returns "zzzzzzzzzzzzzzzzzzz" if no new speech can be found in contents
return speaker |
takes in the congressional record and returns the speaker of the next speech
:param contents: String containing the congressional record
:return: String that is the name of the speaker of the next speech in contents (including their prefix)
if more speeches in contents, otherwise returns "zzzzzzzzzzzzzzzzzzz"
| takes in the congressional record and returns the speaker of the next speech | [
"takes",
"in",
"the",
"congressional",
"record",
"and",
"returns",
"the",
"speaker",
"of",
"the",
"next",
"speech"
] | def findSpeaker(contents):
speakerSearch = re.search('[M][r,s]{1,2}'r'. ''[A-Z]+[.]', contents)
try:
speaker = speakerSearch.group(0)
except:
speaker = "zzzzzzzzzzzzzzzzzzz"
return speaker | [
"def",
"findSpeaker",
"(",
"contents",
")",
":",
"speakerSearch",
"=",
"re",
".",
"search",
"(",
"'[M][r,s]{1,2}'",
"r'. '",
"'[A-Z]+[.]'",
",",
"contents",
")",
"try",
":",
"speaker",
"=",
"speakerSearch",
".",
"group",
"(",
"0",
")",
"except",
":",
"speaker",
"=",
"\"zzzzzzzzzzzzzzzzzzz\"",
"return",
"speaker"
] | takes in the congressional record and returns the speaker of the next speech | [
"takes",
"in",
"the",
"congressional",
"record",
"and",
"returns",
"the",
"speaker",
"of",
"the",
"next",
"speech"
] | [
"\"\"\"\n takes in the congressional record and returns the speaker of the next speech\n\n :param contents: String containing the congressional record\n :return: String that is the name of the speaker of the next speech in contents (including their prefix)\n if more speeches in contents, otherwise returns \"zzzzzzzzzzzzzzzzzzz\"\n \"\"\"",
"# finds the next occurunce of a speaker",
"# returns the name of the speaker of the next speech in contents",
"# returns \"zzzzzzzzzzzzzzzzzzz\" if no new speech can be found in contents"
] | [
{
"param": "contents",
"type": null
}
] | {
"returns": [
{
"docstring": "String that is the name of the speaker of the next speech in contents (including their prefix)\nif more speeches in contents, otherwise returns \"zzzzzzzzzzzzzzzzzzz\"",
"docstring_tokens": [
"String",
"that",
"is",
"the",
"name",
"of",
"the",
"speaker",
"of",
"the",
"next",
"speech",
"in",
"contents",
"(",
"including",
"their",
"prefix",
")",
"if",
"more",
"speeches",
"in",
"contents",
"otherwise",
"returns",
"\"",
"zzzzzzzzzzzzzzzzzzz",
"\""
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "contents",
"type": null,
"docstring": "String containing the congressional record",
"docstring_tokens": [
"String",
"containing",
"the",
"congressional",
"record"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import re
def findSpeaker(contents):
speakerSearch = re.search('[M][r,s]{1,2}'r'. ''[A-Z]+[.]', contents)
try:
speaker = speakerSearch.group(0)
except:
speaker = "zzzzzzzzzzzzzzzzzzz"
return speaker | 240 | 322 |
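Quick checks for findSpeaker() above. The record text is invented, in the Congressional Record's "Mr. SURNAME." style the regex targets.

record = "Mr. SMITH. Madam Speaker, I rise today..."
print(findSpeaker(record))                # 'Mr. SMITH.'
print(findSpeaker("no speakers remain"))  # 'zzzzzzzzzzzzzzzzzzz' sentinel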
71ff8f4e769ea2dcb7c49b3161566ee6e8b7be0c | AlexMGitHub/TheWholeEnchilada | tests/integration/conftest.py | [
"MIT"
] | Python | credentials | <not_specific> | def credentials():
"""Return MySQL root credentials stored in Docker secrets files."""
with open(os.environ['MYSQL_ROOT_PASSWORD_FILE'], 'r') as secret_file:
password = secret_file.read()
return 'root', password | Return MySQL root credentials stored in Docker secrets files. | Return MySQL root credentials stored in Docker secrets files. | [
"Return",
"MySQL",
"root",
"credentials",
"stored",
"in",
"Docker",
"secrets",
"files",
"."
] | def credentials():
with open(os.environ['MYSQL_ROOT_PASSWORD_FILE'], 'r') as secret_file:
password = secret_file.read()
return 'root', password | [
"def",
"credentials",
"(",
")",
":",
"with",
"open",
"(",
"os",
".",
"environ",
"[",
"'MYSQL_ROOT_PASSWORD_FILE'",
"]",
",",
"'r'",
")",
"as",
"secret_file",
":",
"password",
"=",
"secret_file",
".",
"read",
"(",
")",
"return",
"'root'",
",",
"password"
] | Return MySQL root credentials stored in Docker secrets files. | [
"Return",
"MySQL",
"root",
"credentials",
"stored",
"in",
"Docker",
"secrets",
"files",
"."
] | [
"\"\"\"Return MySQL root credentials stored in Docker secrets files.\"\"\""
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | import os
def credentials():
with open(os.environ['MYSQL_ROOT_PASSWORD_FILE'], 'r') as secret_file:
password = secret_file.read()
return 'root', password | 241 | 710 |
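credentials() above reads a Docker secret file named by an environment variable (in the repo it likely serves as a pytest fixture). This sketch fakes the secret through the same variable; the password is a placeholder.

import os
import tempfile

with tempfile.NamedTemporaryFile("w", delete=False) as f:
    f.write("s3cret")  # stand-in for the real root password
    os.environ["MYSQL_ROOT_PASSWORD_FILE"] = f.name

print(credentials())  # ('root', 's3cret')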
1df323f2b86d8320c70cd2d3287fa3901d405f25 | NREL/open-oa | operational_analysis/toolkits/unit_conversion.py | [
"BSD-3-Clause"
] | Python | compute_gross_energy | <not_specific> | def compute_gross_energy(
net_energy, avail_losses, curt_losses, avail_type="frac", curt_type="frac"
):
"""
This function computes gross energy for a wind plant or turbine by adding reported availability and
curtailment losses to reported net energy. Account is made of whether availabilty or curtailment loss data
is reported in energy ('energy') or fractional units ('frac'). If in energy units, this function assumes that net
energy, availability loss, and curtailment loss are all reported in the same units
Args:
net energy (numpy array of Pandas series): reported net energy for wind plant or turbine
avail (numpy array of Pandas series): reported availability losses for wind plant or turbine
curt (numpy array of Pandas series): reported curtailment losses for wind plant or turbine
Returns:
gross (numpy array of Pandas series): calculated gross energy for wind plant or turbine
"""
if (avail_type == "frac") & (curt_type == "frac"):
gross = net_energy / (1 - avail_losses - curt_losses)
elif (avail_type == "frac") & (curt_type == "energy"):
gross = net_energy / (1 - avail_losses) + curt_losses
elif (avail_type == "energy") & (curt_type == "frac"):
gross = net_energy / (1 - curt_losses) + avail_losses
elif (avail_type == "energy") & (curt_type == "energy"):
gross = net_energy + curt_losses + avail_losses
if len(gross[gross < net_energy]) > 0:
raise Exception("Gross energy cannot be less than net energy. Check your input values")
if (len(avail_losses[avail_losses < 0]) > 0) | (len(curt_losses[curt_losses < 0]) > 0):
raise Exception(
"Cannot have negative availability or curtailment input values. Check your data"
)
return gross |
This function computes gross energy for a wind plant or turbine by adding reported availability and
curtailment losses to reported net energy. Account is made of whether availabilty or curtailment loss data
is reported in energy ('energy') or fractional units ('frac'). If in energy units, this function assumes that net
energy, availability loss, and curtailment loss are all reported in the same units
Args:
net energy (numpy array of Pandas series): reported net energy for wind plant or turbine
avail (numpy array of Pandas series): reported availability losses for wind plant or turbine
curt (numpy array of Pandas series): reported curtailment losses for wind plant or turbine
Returns:
gross (numpy array of Pandas series): calculated gross energy for wind plant or turbine
| This function computes gross energy for a wind plant or turbine by adding reported availability and
curtailment losses to reported net energy. Account is made of whether availabilty or curtailment loss data
is reported in energy ('energy') or fractional units ('frac'). If in energy units, this function assumes that net
energy, availability loss, and curtailment loss are all reported in the same units | [
"This",
"function",
"computes",
"gross",
"energy",
"for",
"a",
"wind",
"plant",
"or",
"turbine",
"by",
"adding",
"reported",
"availability",
"and",
"curtailment",
"losses",
"to",
"reported",
"net",
"energy",
".",
"Account",
"is",
"made",
"of",
"whether",
"availabilty",
"or",
"curtailment",
"loss",
"data",
"is",
"reported",
"in",
"energy",
"(",
"'",
"energy",
"'",
")",
"or",
"fractional",
"units",
"(",
"'",
"frac",
"'",
")",
".",
"If",
"in",
"energy",
"units",
"this",
"function",
"assumes",
"that",
"net",
"energy",
"availability",
"loss",
"and",
"curtailment",
"loss",
"are",
"all",
"reported",
"in",
"the",
"same",
"units"
] | def compute_gross_energy(
net_energy, avail_losses, curt_losses, avail_type="frac", curt_type="frac"
):
if (avail_type == "frac") & (curt_type == "frac"):
gross = net_energy / (1 - avail_losses - curt_losses)
elif (avail_type == "frac") & (curt_type == "energy"):
gross = net_energy / (1 - avail_losses) + curt_losses
elif (avail_type == "energy") & (curt_type == "frac"):
gross = net_energy / (1 - curt_losses) + avail_losses
elif (avail_type == "energy") & (curt_type == "energy"):
gross = net_energy + curt_losses + avail_losses
if len(gross[gross < net_energy]) > 0:
raise Exception("Gross energy cannot be less than net energy. Check your input values")
if (len(avail_losses[avail_losses < 0]) > 0) | (len(curt_losses[curt_losses < 0]) > 0):
raise Exception(
"Cannot have negative availability or curtailment input values. Check your data"
)
return gross | [
"def",
"compute_gross_energy",
"(",
"net_energy",
",",
"avail_losses",
",",
"curt_losses",
",",
"avail_type",
"=",
"\"frac\"",
",",
"curt_type",
"=",
"\"frac\"",
")",
":",
"if",
"(",
"avail_type",
"==",
"\"frac\"",
")",
"&",
"(",
"curt_type",
"==",
"\"frac\"",
")",
":",
"gross",
"=",
"net_energy",
"/",
"(",
"1",
"-",
"avail_losses",
"-",
"curt_losses",
")",
"elif",
"(",
"avail_type",
"==",
"\"frac\"",
")",
"&",
"(",
"curt_type",
"==",
"\"energy\"",
")",
":",
"gross",
"=",
"net_energy",
"/",
"(",
"1",
"-",
"avail_losses",
")",
"+",
"curt_losses",
"elif",
"(",
"avail_type",
"==",
"\"energy\"",
")",
"&",
"(",
"curt_type",
"==",
"\"frac\"",
")",
":",
"gross",
"=",
"net_energy",
"/",
"(",
"1",
"-",
"curt_losses",
")",
"+",
"avail_losses",
"elif",
"(",
"avail_type",
"==",
"\"energy\"",
")",
"&",
"(",
"curt_type",
"==",
"\"energy\"",
")",
":",
"gross",
"=",
"net_energy",
"+",
"curt_losses",
"+",
"avail_losses",
"if",
"len",
"(",
"gross",
"[",
"gross",
"<",
"net_energy",
"]",
")",
">",
"0",
":",
"raise",
"Exception",
"(",
"\"Gross energy cannot be less than net energy. Check your input values\"",
")",
"if",
"(",
"len",
"(",
"avail_losses",
"[",
"avail_losses",
"<",
"0",
"]",
")",
">",
"0",
")",
"|",
"(",
"len",
"(",
"curt_losses",
"[",
"curt_losses",
"<",
"0",
"]",
")",
">",
"0",
")",
":",
"raise",
"Exception",
"(",
"\"Cannot have negative availability or curtailment input values. Check your data\"",
")",
"return",
"gross"
] | This function computes gross energy for a wind plant or turbine by adding reported availability and
curtailment losses to reported net energy. | [
"This",
"function",
"computes",
"gross",
"energy",
"for",
"a",
"wind",
"plant",
"or",
"turbine",
"by",
"adding",
"reported",
"availability",
"and",
"curtailment",
"losses",
"to",
"reported",
"net",
"energy",
"."
] | [
"\"\"\"\n This function computes gross energy for a wind plant or turbine by adding reported availability and\n curtailment losses to reported net energy. Account is made of whether availabilty or curtailment loss data\n is reported in energy ('energy') or fractional units ('frac'). If in energy units, this function assumes that net\n energy, availability loss, and curtailment loss are all reported in the same units\n\n Args:\n net energy (numpy array of Pandas series): reported net energy for wind plant or turbine\n avail (numpy array of Pandas series): reported availability losses for wind plant or turbine\n curt (numpy array of Pandas series): reported curtailment losses for wind plant or turbine\n\n Returns:\n gross (numpy array of Pandas series): calculated gross energy for wind plant or turbine\n \"\"\""
] | [
{
"param": "net_energy",
"type": null
},
{
"param": "avail_losses",
"type": null
},
{
"param": "curt_losses",
"type": null
},
{
"param": "avail_type",
"type": null
},
{
"param": "curt_type",
"type": null
}
] | {
"returns": [
{
"docstring": "gross (numpy array of Pandas series): calculated gross energy for wind plant or turbine",
"docstring_tokens": [
"gross",
"(",
"numpy",
"array",
"of",
"Pandas",
"series",
")",
":",
"calculated",
"gross",
"energy",
"for",
"wind",
"plant",
"or",
"turbine"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "net_energy",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "avail_losses",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "curt_losses",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "avail_type",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "curt_type",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "net energy",
"type": null,
"docstring": "reported net energy for wind plant or turbine",
"docstring_tokens": [
"reported",
"net",
"energy",
"for",
"wind",
"plant",
"or",
"turbine"
],
"default": null,
"is_optional": false
},
{
"identifier": "avail",
"type": null,
"docstring": "reported availability losses for wind plant or turbine",
"docstring_tokens": [
"reported",
"availability",
"losses",
"for",
"wind",
"plant",
"or",
"turbine"
],
"default": null,
"is_optional": false
},
{
"identifier": "curt",
"type": null,
"docstring": "reported curtailment losses for wind plant or turbine",
"docstring_tokens": [
"reported",
"curtailment",
"losses",
"for",
"wind",
"plant",
"or",
"turbine"
],
"default": null,
"is_optional": false
}
],
"others": []
} | def compute_gross_energy(
net_energy, avail_losses, curt_losses, avail_type="frac", curt_type="frac"
):
if (avail_type == "frac") & (curt_type == "frac"):
gross = net_energy / (1 - avail_losses - curt_losses)
elif (avail_type == "frac") & (curt_type == "energy"):
gross = net_energy / (1 - avail_losses) + curt_losses
elif (avail_type == "energy") & (curt_type == "frac"):
gross = net_energy / (1 - curt_losses) + avail_losses
elif (avail_type == "energy") & (curt_type == "energy"):
gross = net_energy + curt_losses + avail_losses
if len(gross[gross < net_energy]) > 0:
raise Exception("Gross energy cannot be less than net energy. Check your input values")
if (len(avail_losses[avail_losses < 0]) > 0) | (len(curt_losses[curt_losses < 0]) > 0):
raise Exception(
"Cannot have negative availability or curtailment input values. Check your data"
)
return gross | 242 | 318 |
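A minimal usage sketch for the compute_gross_energy record above; the monthly values are invented and numpy is assumed to be installed:

import numpy as np

# Hypothetical monthly net energy (MWh) and fractional losses.
net = np.array([100.0, 120.0, 90.0])
avail = np.array([0.05, 0.02, 0.04])
curt = np.array([0.01, 0.00, 0.02])

# With both loss inputs in 'frac' units the record computes
# gross = net / (1 - avail - curt), so gross always exceeds net here.
gross = net / (1 - avail - curt)
print(gross)  # approx. [106.38 122.45  95.74]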
61b687ec33e1f7dbda061990fcdc72e05604f148 | open-data/ckanext-recombinant | ckanext/recombinant/logic.py | [
"MIT"
] | Python | _datastore_match | <not_specific> | def _datastore_match(fs, fields):
"""
return True if existing datastore column fields include fields
defined in fs.
"""
# XXX: does not check types or extra columns at this time
existing = set(c['id'] for c in fields)
return all(f['datastore_id'] in existing for f in fs) |
return True if existing datastore column fields include fields
defined in fs.
| return True if existing datastore column fields include fields
defined in fs. | [
"return",
"True",
"if",
"existing",
"datastore",
"column",
"fields",
"include",
"fields",
"defined",
"in",
"fs",
"."
] | def _datastore_match(fs, fields):
existing = set(c['id'] for c in fields)
return all(f['datastore_id'] in existing for f in fs) | [
"def",
"_datastore_match",
"(",
"fs",
",",
"fields",
")",
":",
"existing",
"=",
"set",
"(",
"c",
"[",
"'id'",
"]",
"for",
"c",
"in",
"fields",
")",
"return",
"all",
"(",
"f",
"[",
"'datastore_id'",
"]",
"in",
"existing",
"for",
"f",
"in",
"fs",
")"
] | return True if existing datastore column fields include fields
defined in fs. | [
"return",
"True",
"if",
"existing",
"datastore",
"column",
"fields",
"include",
"fields",
"defined",
"in",
"fs",
"."
] | [
"\"\"\"\n return True if existing datastore column fields include fields\n defined in fs.\n \"\"\"",
"# XXX: does not check types or extra columns at this time"
] | [
{
"param": "fs",
"type": null
},
{
"param": "fields",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "fs",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "fields",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _datastore_match(fs, fields):
existing = set(c['id'] for c in fields)
return all(f['datastore_id'] in existing for f in fs) | 243 | 966 |
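A quick, self-contained illustration of the set-membership test in _datastore_match; the column names are made up:

fs = [{'datastore_id': 'year'}, {'datastore_id': 'amount'}]
fields = [{'id': 'year'}, {'id': 'amount'}, {'id': 'notes'}]

existing = set(c['id'] for c in fields)
print(all(f['datastore_id'] in existing for f in fs))  # True: every wanted column exists
print(all(f['datastore_id'] in existing
          for f in [{'datastore_id': 'missing'}]))     # False: unknown column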
03f89cbf0a367d3bf5477d8e0667a23640053e15 | fareliner/jasypt4py | jasypt4py/encryptor.py | [
"Apache-2.0"
] | Python | pad | <not_specific> | def pad(block_size, s):
"""
Pad a string to the provided block size when using fixed block ciphers.
:param block_size: int - the cipher block size
:param s: str - the string to pad
:return: a padded string that can be fed to the cipher
"""
return s + (block_size - len(s) % block_size) * chr(block_size - len(s) % block_size) |
Pad a string to the provided block size when using fixed block ciphers.
:param block_size: int - the cipher block size
:param s: str - the string to pad
:return: a padded string that can be fed to the cipher
| Pad a string to the provided block size when using fixed block ciphers. | [
"Pad",
"a",
"string",
"to",
"the",
"provided",
"block",
"size",
"when",
"using",
"fixed",
"block",
"ciphers",
"."
] | def pad(block_size, s):
return s + (block_size - len(s) % block_size) * chr(block_size - len(s) % block_size) | [
"def",
"pad",
"(",
"block_size",
",",
"s",
")",
":",
"return",
"s",
"+",
"(",
"block_size",
"-",
"len",
"(",
"s",
")",
"%",
"block_size",
")",
"*",
"chr",
"(",
"block_size",
"-",
"len",
"(",
"s",
")",
"%",
"block_size",
")"
] | Pad a string to the provided block size when using fixed block ciphers. | [
"Pad",
"a",
"string",
"to",
"the",
"provided",
"block",
"size",
"when",
"using",
"fixed",
"block",
"ciphers",
"."
] | [
"\"\"\"\n Pad a string to the provided block size when using fixed block ciphers.\n\n :param block_size: int - the cipher block size\n :param s: str - the string to pad\n :return: a padded string that can be fed to the cipher\n \"\"\""
] | [
{
"param": "block_size",
"type": null
},
{
"param": "s",
"type": null
}
] | {
"returns": [
{
"docstring": "a padded string that can be fed to the cipher",
"docstring_tokens": [
"a",
"padded",
"string",
"that",
"can",
"be",
"fed",
"to",
"the",
"cipher"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "block_size",
"type": null,
"docstring": "the cipher block size",
"docstring_tokens": [
"the",
"cipher",
"block",
"size"
],
"default": null,
"is_optional": null
},
{
"identifier": "s",
"type": null,
"docstring": "the string to pad",
"docstring_tokens": [
"the",
"string",
"to",
"pad"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def pad(block_size, s):
return s + (block_size - len(s) % block_size) * chr(block_size - len(s) % block_size) | 244 | 848 |
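The pad record above implements PKCS#7-style padding for fixed-block ciphers. A short demonstration; note that a string of exact block length still gains a full padding block, which is what lets the padding be stripped unambiguously:

def pad(block_size, s):
    return s + (block_size - len(s) % block_size) * chr(block_size - len(s) % block_size)

print(repr(pad(8, 'hello')))    # 'hello\x03\x03\x03' - three pad bytes of value 3
print(len(pad(8, '12345678')))  # 16 - a whole extra block of chr(8) is appended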
1019f1ee3c9ea9e5b733acbcb037707a57212894 | dzitkowskik/TwitterSentimentAnalysis | TwitterSentimentAnalysis/downloaders.py | [
"MIT",
"Unlicense"
] | Python | undersample | <not_specific> | def undersample(sentiment, manual_grade, threshold=0.4):
"""
This function defines if a tweet with certain sentiment and manual_grade should be saved or not.
It is used for undersampling data with a sentiment close to 0.0
:param sentiment: calculated word sentiment
:param manual_grade: manually set sentiment (if exists)
:param threshold: all values between -+threshold are considered as redundant
:return: true if record is redundant and false if it should be saved
"""
if abs(sentiment) < threshold and manual_grade is None:
return True
else:
return False |
This function defines if a tweet with certain sentiment and manual_grade should be saved or not.
It is used for undersampling data with a sentiment close to 0.0
:param sentiment: calculated word sentiment
:param manual_grade: manually set sentiment (if exists)
:param threshold: all values between -+threshold are considered as redundant
:return: true if record is redundant and false if it should be saved
| This function defines if a tweet with certain sentiment and manual_grade should be saved or not.
It is used for undersampling data with a sentiment close to 0.0 | [
"This",
"function",
"defines",
"if",
"a",
"tweet",
"with",
"certain",
"sentiment",
"and",
"manual_grade",
"should",
"be",
"saved",
"or",
"not",
".",
"It",
"is",
"used",
"for",
"undersampling",
"data",
"with",
"a",
"sentiment",
"close",
"to",
"0",
".",
"0"
] | def undersample(sentiment, manual_grade, threshold=0.4):
if abs(sentiment) < threshold and manual_grade is None:
return True
else:
return False | [
"def",
"undersample",
"(",
"sentiment",
",",
"manual_grade",
",",
"threshold",
"=",
"0.4",
")",
":",
"if",
"abs",
"(",
"sentiment",
")",
"<",
"threshold",
"and",
"manual_grade",
"is",
"None",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | This function defines if a tweet with certain sentiment and manual_grade should be saved or not. | [
"This",
"function",
"defines",
"if",
"a",
"tweet",
"with",
"certain",
"sentiment",
"and",
"manual_grade",
"should",
"be",
"saved",
"or",
"not",
"."
] | [
"\"\"\"\n This function defines if a tweet with certain sentiment and manual_grade should be saved or not.\n It is used for undersampling data with a sentiment close to 0.0\n :param sentiment: calculated word sentiment\n :param manual_grade: manually set sentiment (if exists)\n :param threshold: all values between -+threshold are considered as redundant\n :return: true if record is redundant and false if it should be saved\n \"\"\""
] | [
{
"param": "sentiment",
"type": null
},
{
"param": "manual_grade",
"type": null
},
{
"param": "threshold",
"type": null
}
] | {
"returns": [
{
"docstring": "true if record is redundant and false if it should be saved",
"docstring_tokens": [
"true",
"if",
"record",
"is",
"redundant",
"and",
"false",
"if",
"it",
"should",
"be",
"saved"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "sentiment",
"type": null,
"docstring": "calculated word sentiment",
"docstring_tokens": [
"calculated",
"word",
"sentiment"
],
"default": null,
"is_optional": null
},
{
"identifier": "manual_grade",
"type": null,
"docstring": "manually set sentiment (if exists)",
"docstring_tokens": [
"manually",
"set",
"sentiment",
"(",
"if",
"exists",
")"
],
"default": null,
"is_optional": null
},
{
"identifier": "threshold",
"type": null,
"docstring": "all values between -+threshold are considered as redundant",
"docstring_tokens": [
"all",
"values",
"between",
"-",
"+",
"threshold",
"are",
"considered",
"as",
"redundant"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def undersample(sentiment, manual_grade, threshold=0.4):
if abs(sentiment) < threshold and manual_grade is None:
return True
else:
return False | 245 | 455 |
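The undersample record reduces to a single boolean expression; a condensed, behaviour-equivalent sketch:

def undersample(sentiment, manual_grade, threshold=0.4):
    # Redundant only when the sentiment is weak AND no manual grade exists.
    return abs(sentiment) < threshold and manual_grade is None

print(undersample(0.1, None))   # True  -> drop (weak, ungraded)
print(undersample(0.1, 0.8))    # False -> keep (manually graded)
print(undersample(-0.7, None))  # False -> keep (strong sentiment)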
c4c9f7c50cd34aaff7f2f0e1b3bbb2fb1ebc75ba | leggerf/CMSSpark | src/python/CMSSpark/data_aggregation.py | [
"MIT"
] | Python | quiet_logs | null | def quiet_logs(sc):
"""
Sets logger's level to ERROR so INFO logs would not show up.
"""
print('Will set log level to ERROR')
logger = sc._jvm.org.apache.log4j
logger.LogManager.getRootLogger().setLevel(logger.Level.ERROR)
print('Did set log level to ERROR') |
Sets logger's level to ERROR so INFO logs would not show up.
| Sets logger's level to ERROR so INFO logs would not show up. | [
"Sets",
"logger",
"'",
"s",
"level",
"to",
"ERROR",
"so",
"INFO",
"logs",
"would",
"not",
"show",
"up",
"."
] | def quiet_logs(sc):
print('Will set log level to ERROR')
logger = sc._jvm.org.apache.log4j
logger.LogManager.getRootLogger().setLevel(logger.Level.ERROR)
print('Did set log level to ERROR') | [
"def",
"quiet_logs",
"(",
"sc",
")",
":",
"print",
"(",
"'Will set log level to ERROR'",
")",
"logger",
"=",
"sc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"log4j",
"logger",
".",
"LogManager",
".",
"getRootLogger",
"(",
")",
".",
"setLevel",
"(",
"logger",
".",
"Level",
".",
"ERROR",
")",
"print",
"(",
"'Did set log level to ERROR'",
")"
] | Sets logger's level to ERROR so INFO logs would not show up. | [
"Sets",
"logger",
"'",
"s",
"level",
"to",
"ERROR",
"so",
"INFO",
"logs",
"would",
"not",
"show",
"up",
"."
] | [
"\"\"\"\n Sets logger's level to ERROR so INFO logs would not show up.\n \"\"\""
] | [
{
"param": "sc",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "sc",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def quiet_logs(sc):
print('Will set log level to ERROR')
logger = sc._jvm.org.apache.log4j
logger.LogManager.getRootLogger().setLevel(logger.Level.ERROR)
print('Did set log level to ERROR') | 246 | 335 |
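A hypothetical call pattern for the quiet_logs record; it is only runnable inside a pyspark environment, so treat it as a sketch:

from pyspark import SparkContext  # assumes pyspark is installed

sc = SparkContext.getOrCreate()
# Same effect as quiet_logs(sc): silence the JVM's INFO chatter.
log4j = sc._jvm.org.apache.log4j
log4j.LogManager.getRootLogger().setLevel(log4j.Level.ERROR)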
3cfbb1723a01664144b0920dac6e0e6324f0da46 | RonnyPfannschmidt/khal | khal/icalendar.py | [
"MIT"
] | Python | invalid_timezone | <not_specific> | def invalid_timezone(prop):
"""check if an icalendar property has a timezone attached we don't understand"""
if hasattr(prop.dt, 'tzinfo') and prop.dt.tzinfo is None and 'TZID' in prop.params:
return True
else:
return False | check if an icalendar property has a timezone attached we don't understand | check if an icalendar property has a timezone attached we don't understand | [
"check",
"if",
"an",
"icalendar",
"property",
"has",
"a",
"timezone",
"attached",
"we",
"don",
"'",
"t",
"understand"
] | def invalid_timezone(prop):
if hasattr(prop.dt, 'tzinfo') and prop.dt.tzinfo is None and 'TZID' in prop.params:
return True
else:
return False | [
"def",
"invalid_timezone",
"(",
"prop",
")",
":",
"if",
"hasattr",
"(",
"prop",
".",
"dt",
",",
"'tzinfo'",
")",
"and",
"prop",
".",
"dt",
".",
"tzinfo",
"is",
"None",
"and",
"'TZID'",
"in",
"prop",
".",
"params",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | check if an icalendar property has a timezone attached we don't understand | [
"check",
"if",
"an",
"icalendar",
"property",
"has",
"a",
"timezone",
"attached",
"we",
"don",
"'",
"t",
"understand"
] | [
"\"\"\"check if an icalendar property has a timezone attached we don't understand\"\"\""
] | [
{
"param": "prop",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "prop",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def invalid_timezone(prop):
if hasattr(prop.dt, 'tzinfo') and prop.dt.tzinfo is None and 'TZID' in prop.params:
return True
else:
return False | 247 | 702 |
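Because building real icalendar objects is verbose, a stand-in object is enough to exercise the check in the invalid_timezone record (the if/else is condensed to a boolean return):

import datetime
from types import SimpleNamespace

def invalid_timezone(prop):
    return hasattr(prop.dt, 'tzinfo') and prop.dt.tzinfo is None and 'TZID' in prop.params

# Naive datetime but a TZID parameter was present -> the zone was not understood.
prop = SimpleNamespace(dt=datetime.datetime(2024, 1, 1, 12, 0),
                       params={'TZID': 'Nowhere/Invalid'})
print(invalid_timezone(prop))  # True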
8539e65608c4313ba209669bf572ce9ab261e429 | stdgy/foosball_backend | api.py | [
"MIT"
] | Python | can_team_score | <not_specific> | def can_team_score(team):
"""Check whether a team is allowed to score again"""
# Check that team doesn't already have 10 points
teams = team.game.teams
curr_score = len(filter(lambda x: x.own_goal is False, team.scores))
if len(teams) == 2:
other_team = None
if teams[0].id == team.id:
other_team = teams[1]
else:
other_team = teams[0]
curr_score += len(filter(lambda x: x.own_goal is True, other_team.scores))
if curr_score >= 10:
return False
return True | Check whether a team is allowed to score again | Check whether a team is allowed to score again | [
"Check",
"whether",
"a",
"team",
"is",
"allowed",
"to",
"score",
"again"
] | def can_team_score(team):
teams = team.game.teams
curr_score = len(filter(lambda x: x.own_goal is False, team.scores))
if len(teams) == 2:
other_team = None
if teams[0].id == team.id:
other_team = teams[1]
else:
other_team = teams[0]
curr_score += len(filter(lambda x: x.own_goal is True, other_team.scores))
if curr_score >= 10:
return False
return True | [
"def",
"can_team_score",
"(",
"team",
")",
":",
"teams",
"=",
"team",
".",
"game",
".",
"teams",
"curr_score",
"=",
"len",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"own_goal",
"is",
"False",
",",
"team",
".",
"scores",
")",
")",
"if",
"len",
"(",
"teams",
")",
"==",
"2",
":",
"other_team",
"=",
"None",
"if",
"teams",
"[",
"0",
"]",
".",
"id",
"==",
"team",
".",
"id",
":",
"other_team",
"=",
"teams",
"[",
"1",
"]",
"else",
":",
"other_team",
"=",
"teams",
"[",
"0",
"]",
"curr_score",
"+=",
"len",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"own_goal",
"is",
"True",
",",
"other_team",
".",
"scores",
")",
")",
"if",
"curr_score",
">=",
"10",
":",
"return",
"False",
"return",
"True"
] | Check whether a team is allowed to score again | [
"Check",
"whether",
"a",
"team",
"is",
"allowed",
"to",
"score",
"again"
] | [
"\"\"\"Check whether a team is allowed to score again\"\"\"",
"# Check that team doesn't already have 10 points"
] | [
{
"param": "team",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "team",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def can_team_score(team):
teams = team.game.teams
curr_score = len(filter(lambda x: x.own_goal is False, team.scores))
if len(teams) == 2:
other_team = None
if teams[0].id == team.id:
other_team = teams[1]
else:
other_team = teams[0]
curr_score += len(filter(lambda x: x.own_goal is True, other_team.scores))
if curr_score >= 10:
return False
return True | 248 | 601 |
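One portability note: len(filter(...)) in the record relies on Python 2, where filter returns a list; on Python 3 it raises TypeError because filter objects have no len. A Python 3 rendering of the same scoring rule, with fabricated own-goal flags:

own_goal_flags = [False, False, True]  # this team's recorded scores
opponent_own_goals = [True]            # opponent own goals count toward this team
curr_score = sum(1 for f in own_goal_flags if not f) + len(opponent_own_goals)
print(curr_score < 10)  # True: the team may still score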
a50b6f436ea37d382001e0855c57f7faaaf47241 | darshitsharma/jax | jax/experimental/maps.py | [
"Apache-2.0"
] | Python | _unzip_axis_resources | <not_specific> | def _unzip_axis_resources(axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]],
resource_env: ResourceEnv):
"""Splits axis_resources into separate dicts for physical and loop resources."""
physical_axis_resources = {}
loop_axis_resources = {}
loop_resource_axes = resource_env.loop_resource_axes
for axis, raxes in axis_resources.items():
first_loop = 0
for raxis in raxes:
if raxis in loop_resource_axes:
break
else:
first_loop += 1
physical_axis_resources[axis] = raxes[:first_loop]
loop_resources = loop_axis_resources[axis] = raxes[first_loop:]
if not all(name in loop_resource_axes for name in loop_resources):
raise NotImplementedError("Loop resources cannot appear before mesh axes "
"in the resource_axis argument")
return physical_axis_resources, loop_axis_resources | Splits axis_resources into separate dicts for physical and loop resources. | Splits axis_resources into separate dicts for physical and loop resources. | [
"Splits",
"axis_resources",
"into",
"separate",
"dicts",
"for",
"physical",
"and",
"loop",
"resources",
"."
] | def _unzip_axis_resources(axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]],
resource_env: ResourceEnv):
physical_axis_resources = {}
loop_axis_resources = {}
loop_resource_axes = resource_env.loop_resource_axes
for axis, raxes in axis_resources.items():
first_loop = 0
for raxis in raxes:
if raxis in loop_resource_axes:
break
else:
first_loop += 1
physical_axis_resources[axis] = raxes[:first_loop]
loop_resources = loop_axis_resources[axis] = raxes[first_loop:]
if not all(name in loop_resource_axes for name in loop_resources):
raise NotImplementedError("Loop resources cannot appear before mesh axes "
"in the resource_axis argument")
return physical_axis_resources, loop_axis_resources | [
"def",
"_unzip_axis_resources",
"(",
"axis_resources",
":",
"Dict",
"[",
"AxisName",
",",
"Tuple",
"[",
"ResourceAxisName",
",",
"...",
"]",
"]",
",",
"resource_env",
":",
"ResourceEnv",
")",
":",
"physical_axis_resources",
"=",
"{",
"}",
"loop_axis_resources",
"=",
"{",
"}",
"loop_resource_axes",
"=",
"resource_env",
".",
"loop_resource_axes",
"for",
"axis",
",",
"raxes",
"in",
"axis_resources",
".",
"items",
"(",
")",
":",
"first_loop",
"=",
"0",
"for",
"raxis",
"in",
"raxes",
":",
"if",
"raxis",
"in",
"loop_resource_axes",
":",
"break",
"else",
":",
"first_loop",
"+=",
"1",
"physical_axis_resources",
"[",
"axis",
"]",
"=",
"raxes",
"[",
":",
"first_loop",
"]",
"loop_resources",
"=",
"loop_axis_resources",
"[",
"axis",
"]",
"=",
"raxes",
"[",
"first_loop",
":",
"]",
"if",
"not",
"all",
"(",
"name",
"in",
"loop_resource_axes",
"for",
"name",
"in",
"loop_resources",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"Loop resources cannot appear before mesh axes \"",
"\"in the resource_axis argument\"",
")",
"return",
"physical_axis_resources",
",",
"loop_axis_resources"
] | Splits axis_resources into separate dicts for physical and loop resources. | [
"Splits",
"axis_resources",
"into",
"separate",
"dicts",
"for",
"physical",
"and",
"loop",
"resources",
"."
] | [
"\"\"\"Splits axis_resources into separate dicts for physical and loop resources.\"\"\""
] | [
{
"param": "axis_resources",
"type": "Dict[AxisName, Tuple[ResourceAxisName, ...]]"
},
{
"param": "resource_env",
"type": "ResourceEnv"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "axis_resources",
"type": "Dict[AxisName, Tuple[ResourceAxisName, ...]]",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "resource_env",
"type": "ResourceEnv",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _unzip_axis_resources(axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]],
resource_env: ResourceEnv):
physical_axis_resources = {}
loop_axis_resources = {}
loop_resource_axes = resource_env.loop_resource_axes
for axis, raxes in axis_resources.items():
first_loop = 0
for raxis in raxes:
if raxis in loop_resource_axes:
break
else:
first_loop += 1
physical_axis_resources[axis] = raxes[:first_loop]
loop_resources = loop_axis_resources[axis] = raxes[first_loop:]
if not all(name in loop_resource_axes for name in loop_resources):
raise NotImplementedError("Loop resources cannot appear before mesh axes "
"in the resource_axis argument")
return physical_axis_resources, loop_axis_resources | 250 | 753 |
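A simplified, dependency-free sketch of the splitting logic in _unzip_axis_resources; plain sets and dicts stand in for JAX's ResourceEnv, and the axis names are hypothetical:

loop_resource_axes = {'l'}                               # hypothetical loop axes
axis_resources = {'batch': ('x', 'l'), 'model': ('y',)}
physical, loop = {}, {}
for axis, raxes in axis_resources.items():
    # Index of the first loop resource, or len(raxes) if there is none.
    first_loop = next((i for i, r in enumerate(raxes) if r in loop_resource_axes),
                      len(raxes))
    physical[axis], loop[axis] = raxes[:first_loop], raxes[first_loop:]
print(physical)  # {'batch': ('x',), 'model': ('y',)}
print(loop)      # {'batch': ('l',), 'model': ()}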
9013314cd57369990bc9388ec0930da688c80b83 | brandyn-gilbert/DAT-281-CAPSTONE | Old Code/Base.py | [
"MIT"
] | Python | _set_dir | null | def _set_dir():
"""
Sets the working directory to the github folder.
Also checks to see if you are in a common working dir before prompting to
change.
Returns
-------
None.
"""
os.chdir(os.path.dirname(sys.argv[0])) |
Sets the working directory to the github folder.
Also checks to see if you are in a common working dir before prompting to
change.
Returns
-------
None.
| Sets the working directory to the github folder.
Also checks to see if you are in a common working dir before prompting to
change.
Returns
None. | [
"Sets",
"the",
"working",
"directory",
"to",
"the",
"github",
"folder",
".",
"Also",
"checks",
"to",
"see",
"if",
"you",
"are",
"in",
"a",
"common",
"working",
"dr",
"before",
"prompting",
"to",
"change",
".",
"Returns",
"None",
"."
] | def _set_dir():
os.chdir(os.path.dirname(sys.argv[0])) | [
"def",
"_set_dir",
"(",
")",
":",
"os",
".",
"chdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
")"
] | Sets the working directory to the github folder. | [
"Sets",
"the",
"working",
"directory",
"to",
"the",
"github",
"folder",
"."
] | [
"\"\"\"\n Sets the working directory to the github folder.\n Also checks to see if you are in a common working dr before prompting to \n change.\n Returns\n -------\n None.\n\n \"\"\""
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | import sys
import os
def _set_dir():
os.chdir(os.path.dirname(sys.argv[0])) | 251 | 941 |
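A caveat worth a sketch: os.path.dirname(sys.argv[0]) is an empty string when the script is launched from its own directory, and os.chdir('') raises FileNotFoundError. A more defensive variant of the record's one-liner, assuming the same intent:

import os
import sys

# abspath() guarantees a non-empty directory even for a bare "python script.py".
script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
os.chdir(script_dir)
print(os.getcwd())  # now the script's own folder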
3da2b5ae93be0a0d557e50b9c7c6ad6977615736 | kube-HPC/python-wrapper.hkube | hkube_python_wrapper/util/percentile.py | [
"MIT"
] | Python | percentile | <not_specific> | def percentile(N, percent, key=lambda x: x):
"""
Find the percentile of a list of values.
@parameter N - is a list of values. Note N MUST BE already sorted.
@parameter percent - a float value from 0 to 100
@parameter key - optional key function to compute value from each element of N.
@return - the percentile of the values
"""
if not N:
return None
percent = percent/100.0
k = (len(N)-1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(N[int(k)])
d0 = key(N[int(f)]) * (c-k)
d1 = key(N[int(c)]) * (k-f)
return d0+d1 |
Find the percentile of a list of values.
@parameter N - is a list of values. Note N MUST BE already sorted.
@parameter percent - a float value from 0 to 100
@parameter key - optional key function to compute value from each element of N.
@return - the percentile of the values
| Find the percentile of a list of values.
@parameter N - is a list of values. Note N MUST BE already sorted.
@parameter percent - a float value from 0 to 100
@parameter key - optional key function to compute value from each element of N.
@return - the percentile of the values | [
"Find",
"the",
"percentile",
"of",
"a",
"list",
"of",
"values",
".",
"@parameter",
"N",
"-",
"is",
"a",
"list",
"of",
"values",
".",
"Note",
"N",
"MUST",
"BE",
"already",
"sorted",
".",
"@parameter",
"percent",
"-",
"a",
"float",
"value",
"from",
"0",
"to",
"100",
"@parameter",
"key",
"-",
"optional",
"key",
"function",
"to",
"compute",
"value",
"from",
"each",
"element",
"of",
"N",
".",
"@return",
"-",
"the",
"percentile",
"of",
"the",
"values"
] | def percentile(N, percent, key=lambda x: x):
if not N:
return None
percent = percent/100.0
k = (len(N)-1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(N[int(k)])
d0 = key(N[int(f)]) * (c-k)
d1 = key(N[int(c)]) * (k-f)
return d0+d1 | [
"def",
"percentile",
"(",
"N",
",",
"percent",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
")",
":",
"if",
"not",
"N",
":",
"return",
"None",
"percent",
"=",
"percent",
"/",
"100.0",
"k",
"=",
"(",
"len",
"(",
"N",
")",
"-",
"1",
")",
"*",
"percent",
"f",
"=",
"math",
".",
"floor",
"(",
"k",
")",
"c",
"=",
"math",
".",
"ceil",
"(",
"k",
")",
"if",
"f",
"==",
"c",
":",
"return",
"key",
"(",
"N",
"[",
"int",
"(",
"k",
")",
"]",
")",
"d0",
"=",
"key",
"(",
"N",
"[",
"int",
"(",
"f",
")",
"]",
")",
"*",
"(",
"c",
"-",
"k",
")",
"d1",
"=",
"key",
"(",
"N",
"[",
"int",
"(",
"c",
")",
"]",
")",
"*",
"(",
"k",
"-",
"f",
")",
"return",
"d0",
"+",
"d1"
] | Find the percentile of a list of values. | [
"Find",
"the",
"percentile",
"of",
"a",
"list",
"of",
"values",
"."
] | [
"\"\"\"\n Find the percentile of a list of values.\n\n @parameter N - is a list of values. Note N MUST BE already sorted.\n @parameter percent - a float value from 0 to 100\n @parameter key - optional key function to compute value from each element of N.\n\n @return - the percentile of the values\n \"\"\""
] | [
{
"param": "N",
"type": null
},
{
"param": "percent",
"type": null
},
{
"param": "key",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "N",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "percent",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "key",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import math
def percentile(N, percent, key=lambda x: x):
if not N:
return None
percent = percent/100.0
k = (len(N)-1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(N[int(k)])
d0 = key(N[int(f)]) * (c-k)
d1 = key(N[int(c)]) * (k-f)
return d0+d1 | 252 | 185 |
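The percentile record uses linear interpolation between the two closest ranks. A self-contained check (the input list must already be sorted):

import math

def percentile(N, percent, key=lambda x: x):
    if not N:
        return None
    k = (len(N) - 1) * (percent / 100.0)
    f, c = math.floor(k), math.ceil(k)
    if f == c:
        return key(N[int(k)])
    # Weight the two neighbouring values by how close k is to each index.
    return key(N[int(f)]) * (c - k) + key(N[int(c)]) * (k - f)

data = [15, 20, 35, 40, 50]
print(percentile(data, 50))  # 35: k lands exactly on index 2
print(percentile(data, 40))  # 29.0: interpolates between 20 and 35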
6c2fe0b8e67553bb0453721d8063985b771b9d3f | shahzeb-jadoon/Portfolio | quickhull.py | [
"MIT"
] | Python | convex_elim | <not_specific> | def convex_elim(ordered_points):
""" Helping function for convex_hull() which eliminates repeating points
pre: ordered_points is a list of points sorted in nondecreasing order.
ordered_points comprises of tuples of the form (x, y) where x represents
the x-axis location and y represents the y-axis location of the point
on the cartesian plane
post: returns a list with distinct points of ordered_points
"""
new_lst = []
for i in range(len(ordered_points) - 1):
if ordered_points[i] != ordered_points[i + 1]:
new_lst.append(ordered_points[i])
new_lst.append(ordered_points[-1])
return new_lst | Helping function for convex_hull() which eliminates repeating points
pre: ordered_points is a list of points sorted in nondecreasing order.
ordered_points comprises of tuples of the form (x, y) where x represents
the x-axis location and y represents the y-axis location of the point
on the cartesian plane
post: returns a list with distinct points of ordered_points | [
| Helping function for convex_hull() which eliminates repeating points
pre: ordered_points is a list of points sorted in nondecreasing order.
ordered_points comprises of tuples of the form (x, y) where x represents
the x-axis location and y represents the y-axis location of the point
on the cartesian plane
post: retruns a list with distinct points of ordered_points | [
"Helping",
"function",
"for",
"convex_hull",
"()",
"which",
"eliminates",
"repeating",
"points",
"pre",
":",
"ordered_points",
"is",
"a",
"list",
"of",
"points",
"sorted",
"in",
"nondecreasing",
"order",
".",
"ordered_points",
"comprises",
"of",
"tuples",
"of",
"the",
"form",
"(",
"x",
"y",
")",
"where",
"x",
"represents",
"the",
"x",
"-",
"axis",
"location",
"and",
"y",
"represents",
"the",
"y",
"-",
"axis",
"location",
"of",
"the",
"point",
"on",
"the",
"cartesian",
"plane",
"post",
":",
"retruns",
"a",
"list",
"with",
"distinct",
"points",
"of",
"ordered_points"
] | def convex_elim(ordered_points):
new_lst = []
for i in range(len(ordered_points) - 1):
if ordered_points[i] != ordered_points[i + 1]:
new_lst.append(ordered_points[i])
new_lst.append(ordered_points[-1])
return new_lst | [
"def",
"convex_elim",
"(",
"ordered_points",
")",
":",
"new_lst",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"ordered_points",
")",
"-",
"1",
")",
":",
"if",
"ordered_points",
"[",
"i",
"]",
"!=",
"ordered_points",
"[",
"i",
"+",
"1",
"]",
":",
"new_lst",
".",
"append",
"(",
"ordered_points",
"[",
"i",
"]",
")",
"new_lst",
".",
"append",
"(",
"ordered_points",
"[",
"-",
"1",
"]",
")",
"return",
"new_lst"
] | Helping function for convex_hull() which eliminates repeating points
pre: ordered_points is a list of points sorted in nondecreasing order. | [
"Helping",
"function",
"for",
"convex_hull",
"()",
"which",
"eliminates",
"repeating",
"points",
"pre",
":",
"ordered_points",
"is",
"a",
"list",
"of",
"points",
"sorted",
"in",
"nondecreasing",
"order",
"."
] | [
"\"\"\" Helping function for convex_hull() which eliminates repeating points\n\n pre: ordered_points is a list of points sorted in nondecreasing order.\n ordered_points comprises of tuples of the form (x, y) where x represents\n the x-axis location and y represents the y-axis location of the point\n on the cartesian plane\n post: retruns a list with distinct points of ordered_points\n \"\"\""
] | [
{
"param": "ordered_points",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "ordered_points",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def convex_elim(ordered_points):
new_lst = []
for i in range(len(ordered_points) - 1):
if ordered_points[i] != ordered_points[i + 1]:
new_lst.append(ordered_points[i])
new_lst.append(ordered_points[-1])
return new_lst | 253 | 12 |
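Because the input is sorted, duplicates are always adjacent, so the single forward pass in convex_elim suffices:

def convex_elim(ordered_points):
    new_lst = []
    for i in range(len(ordered_points) - 1):
        if ordered_points[i] != ordered_points[i + 1]:
            new_lst.append(ordered_points[i])
    new_lst.append(ordered_points[-1])
    return new_lst

pts = [(0, 0), (0, 0), (1, 2), (1, 2), (3, 1)]
print(convex_elim(pts))  # [(0, 0), (1, 2), (3, 1)]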
5051442090703a7b6d71c2d56d2e6136cd20c437 | sgalpha01/biopython | Bio/SeqUtils/CheckSum.py | [
"BSD-3-Clause"
] | Python | crc32 | <not_specific> | def crc32(seq):
"""Return the crc32 checksum for a sequence (string or Seq object).
Note that the case is important:
>>> crc32("ACGTACGTACGT")
20049947
>>> crc32("acgtACGTacgt")
1688586483
"""
try:
# Assume it's a Seq object
s = bytes(seq)
except TypeError:
# Assume it's a string
s = seq.encode()
return binascii.crc32(s) | Return the crc32 checksum for a sequence (string or Seq object).
Note that the case is important:
>>> crc32("ACGTACGTACGT")
20049947
>>> crc32("acgtACGTacgt")
1688586483
| Return the crc32 checksum for a sequence (string or Seq object).
Note that the case is important.
| [
"Return",
"the",
"crc32",
"checksum",
"for",
"a",
"sequence",
"(",
"string",
"or",
"Seq",
"object",
")",
".",
"Note",
"that",
"the",
"case",
"is",
"important",
"."
] | def crc32(seq):
try:
s = bytes(seq)
except TypeError:
s = seq.encode()
return binascii.crc32(s) | [
"def",
"crc32",
"(",
"seq",
")",
":",
"try",
":",
"s",
"=",
"bytes",
"(",
"seq",
")",
"except",
"TypeError",
":",
"s",
"=",
"seq",
".",
"encode",
"(",
")",
"return",
"binascii",
".",
"crc32",
"(",
"s",
")"
] | Return the crc32 checksum for a sequence (string or Seq object). | [
"Return",
"the",
"crc32",
"checksum",
"for",
"a",
"sequence",
"(",
"string",
"or",
"Seq",
"object",
")",
"."
] | [
"\"\"\"Return the crc32 checksum for a sequence (string or Seq object).\n\n Note that the case is important:\n\n >>> crc32(\"ACGTACGTACGT\")\n 20049947\n >>> crc32(\"acgtACGTacgt\")\n 1688586483\n\n \"\"\"",
"# Assume it's a Seq object",
"# Assume it's a string"
] | [
{
"param": "seq",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "seq",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import binascii
def crc32(seq):
try:
s = bytes(seq)
except TypeError:
s = seq.encode()
return binascii.crc32(s) | 254 | 817 |
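The doctest in the crc32 record can be reproduced directly with the standard library:

import binascii

print(binascii.crc32(b"ACGTACGTACGT"))  # 20049947, as in the doctest above
print(binascii.crc32(b"acgtACGTacgt"))  # 1688586483 - CRC32 is case-sensitive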
a19024d9cb1c4cf7d490591534f95472aebc1796 | kelmore5/python-string-utilities | kelmore_strings/strings.py | [
"MIT"
] | Python | string_ends_with_array | bool | def string_ends_with_array(check: str, ends_with_checks: Iterable[str]) -> bool:
""" Checks if a given strings ends with any of the strings contained within an array
Usage::
>>> x = 'some string'
>>> y: Iterable[str] = ['?', '.', 'end', 'ng']
>>>
>>> StringTools.check.string_ends_with_array(x, y)
True
:param check: Any string
:param ends_with_checks: An array of strings to check for in the passed string
:return: True if the string ends with any of the strings in ends_with_checks else False
:type check: str
:type ends_with_checks: Iterable[str]
:rtype: bool
"""
for string in ends_with_checks:
if check.lower().strip().endswith(string.lower()):
return True
return False | Checks if a given string ends with any of the strings contained within an array
Usage::
>>> x = 'some string'
>>> y: Iterable[str] = ['?', '.', 'end', 'ng']
>>>
>>> StringTools.check.string_ends_with_array(x, y)
True
:param check: Any string
:param ends_with_checks: An array of strings to check for in the passed string
:return: True if the string ends with any of the strings in ends_with_checks else False
:type check: str
:type ends_with_checks: Iterable[str]
:rtype: bool
| Checks if a given string ends with any of the strings contained within an array
Usage:.
| [
"Checks",
"if",
"a",
"given",
"strings",
"ends",
"with",
"any",
"of",
"the",
"strings",
"contained",
"within",
"an",
"array",
"Usage",
":",
"."
] | def string_ends_with_array(check: str, ends_with_checks: Iterable[str]) -> bool:
for string in ends_with_checks:
if check.lower().strip().endswith(string.lower()):
return True
return False | [
"def",
"string_ends_with_array",
"(",
"check",
":",
"str",
",",
"ends_with_checks",
":",
"Iterable",
"[",
"str",
"]",
")",
"->",
"bool",
":",
"for",
"string",
"in",
"ends_with_checks",
":",
"if",
"check",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
".",
"endswith",
"(",
"string",
".",
"lower",
"(",
")",
")",
":",
"return",
"True",
"return",
"False"
] | Checks if a given string ends with any of the strings contained within an array
Usage:: | [
"Checks",
"if",
"a",
"given",
"strings",
"ends",
"with",
"any",
"of",
"the",
"strings",
"contained",
"within",
"an",
"array",
"Usage",
"::"
] | [
"\"\"\" Checks if a given strings ends with any of the strings contained within an array\n\n Usage::\n\n >>> x = 'some string'\n >>> y: Iterable[str] = ['?', '.', 'end', 'ng']\n >>>\n >>> StringTools.check.string_ends_with_array(x, y)\n True\n\n :param check: Any string\n :param ends_with_checks: An array of strings to check for in the passed string\n :return: True if the string ends with any of the strings in ends_with_checks else False\n\n :type check: str\n :type ends_with_checks: Iterable[str]\n :rtype: bool\n \"\"\""
] | [
{
"param": "check",
"type": "str"
},
{
"param": "ends_with_checks",
"type": "Iterable[str]"
}
] | {
"returns": [
{
"docstring": "True if the string ends with any of the strings in ends_with_checks else False",
"docstring_tokens": [
"True",
"if",
"the",
"string",
"ends",
"with",
"any",
"of",
"the",
"strings",
"in",
"ends_with_checks",
"else",
"False"
],
"type": "bool"
}
],
"raises": [],
"params": [
{
"identifier": "check",
"type": "str",
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "ends_with_checks",
"type": "Iterable[str]",
"docstring": "An array of strings to check for in the passed string",
"docstring_tokens": [
"An",
"array",
"of",
"strings",
"to",
"check",
"for",
"in",
"the",
"passed",
"string"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def string_ends_with_array(check: str, ends_with_checks: Iterable[str]) -> bool:
for string in ends_with_checks:
if check.lower().strip().endswith(string.lower()):
return True
return False | 256 | 637 |
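The loop in string_ends_with_array is equivalent to an any() over the candidates; a condensed sketch:

def string_ends_with_array(check, ends_with_checks):
    return any(check.lower().strip().endswith(s.lower()) for s in ends_with_checks)

print(string_ends_with_array('some string', ['?', '.', 'end', 'ng']))  # True ('ng' matches)
print(string_ends_with_array('abc', ['?', '.']))                       # False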
386dc94fce2cc45140d1f31dbfc7366f6afa81e0 | z8674558/dash | contrib/seeds/makeseeds.py | [
"MIT"
] | Python | filtermultipayoutaddress | <not_specific> | def filtermultipayoutaddress(mns):
'''Filter out MNs sharing the same payout address'''
hist = collections.defaultdict(list)
for mn in mns:
hist[mn['state']['payoutAddress']].append(mn)
return [mn for mn in mns if len(hist[mn['state']['payoutAddress']]) == 1] | Filter out MNs sharing the same payout address | Filter out MNs sharing the same payout address | [
"Filter",
"out",
"MNs",
"sharing",
"the",
"same",
"payout",
"address"
] | def filtermultipayoutaddress(mns):
hist = collections.defaultdict(list)
for mn in mns:
hist[mn['state']['payoutAddress']].append(mn)
return [mn for mn in mns if len(hist[mn['state']['payoutAddress']]) == 1] | [
"def",
"filtermultipayoutaddress",
"(",
"mns",
")",
":",
"hist",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"mn",
"in",
"mns",
":",
"hist",
"[",
"mn",
"[",
"'state'",
"]",
"[",
"'payoutAddress'",
"]",
"]",
".",
"append",
"(",
"mn",
")",
"return",
"[",
"mn",
"for",
"mn",
"in",
"mns",
"if",
"len",
"(",
"hist",
"[",
"mn",
"[",
"'state'",
"]",
"[",
"'payoutAddress'",
"]",
"]",
")",
"==",
"1",
"]"
] | Filter out MNs sharing the same payout address | [
"Filter",
"out",
"MNs",
"sharing",
"the",
"same",
"payout",
"address"
] | [
"'''Filter out MNs sharing the same payout address'''"
] | [
{
"param": "mns",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "mns",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import collections
def filtermultipayoutaddress(mns):
hist = collections.defaultdict(list)
for mn in mns:
hist[mn['state']['payoutAddress']].append(mn)
return [mn for mn in mns if len(hist[mn['state']['payoutAddress']]) == 1] | 257 | 504 |
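A small demonstration of the histogram-based filter in filtermultipayoutaddress; the payout addresses are fabricated:

import collections

mns = [{'state': {'payoutAddress': 'A'}},
       {'state': {'payoutAddress': 'A'}},
       {'state': {'payoutAddress': 'B'}}]
hist = collections.defaultdict(list)
for mn in mns:
    hist[mn['state']['payoutAddress']].append(mn)
unique = [mn for mn in mns if len(hist[mn['state']['payoutAddress']]) == 1]
print(unique)  # only the masternode paying to 'B' survives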
23ef448e9bc1f1c918a8d8e7455b41434945852f | Glacier-program/pytorch | chiplet/muse-v3/input/quant_layer.py | [
"Intel"
] | Python | _singleton | <not_specific> | def _singleton(cls):
"""Make a class a Singleton class (only one instance)"""
@functools.wraps(cls)
def wrapper_singleton(*args, **kwargs):
if not wrapper_singleton.instance:
wrapper_singleton.instance = cls(*args, **kwargs)
return wrapper_singleton.instance
wrapper_singleton.instance = None
return wrapper_singleton | Make a class a Singleton class (only one instance) | Make a class a Singleton class (only one instance) | [
"Make",
"a",
"class",
"a",
"Singleton",
"class",
"(",
"only",
"one",
"instance",
")"
] | def _singleton(cls):
@functools.wraps(cls)
def wrapper_singleton(*args, **kwargs):
if not wrapper_singleton.instance:
wrapper_singleton.instance = cls(*args, **kwargs)
return wrapper_singleton.instance
wrapper_singleton.instance = None
return wrapper_singleton | [
"def",
"_singleton",
"(",
"cls",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"cls",
")",
"def",
"wrapper_singleton",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"not",
"wrapper_singleton",
".",
"instance",
":",
"wrapper_singleton",
".",
"instance",
"=",
"cls",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"wrapper_singleton",
".",
"instance",
"wrapper_singleton",
".",
"instance",
"=",
"None",
"return",
"wrapper_singleton"
] | Make a class a Singleton class (only one instance) | [
"Make",
"a",
"class",
"a",
"Singleton",
"class",
"(",
"only",
"one",
"instance",
")"
] | [
"\"\"\"Make a class a Singleton class (only one instance)\"\"\""
] | [
{
"param": "cls",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import functools
def _singleton(cls):
@functools.wraps(cls)
def wrapper_singleton(*args, **kwargs):
if not wrapper_singleton.instance:
wrapper_singleton.instance = cls(*args, **kwargs)
return wrapper_singleton.instance
wrapper_singleton.instance = None
return wrapper_singleton | 258 | 467 |
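The decorator in the _singleton record stores the instance on the wrapper itself; applying it to a throwaway class shows the effect:

import functools

def _singleton(cls):
    @functools.wraps(cls)
    def wrapper_singleton(*args, **kwargs):
        if not wrapper_singleton.instance:
            wrapper_singleton.instance = cls(*args, **kwargs)
        return wrapper_singleton.instance
    wrapper_singleton.instance = None
    return wrapper_singleton

@_singleton
class Config:
    pass

print(Config() is Config())  # True: every call returns the same object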
15fc0e6f6d4f9de2b611c8aba8828620f5f17b66 | a17hq/py_trees | py_trees/console.py | [
"BSD-3-Clause"
] | Python | console_has_colours | <not_specific> | def console_has_colours():
"""
Detects if the console (stdout) has colourising capability.
"""
if os.environ.get("PY_TREES_DISABLE_COLORS"):
return False
# From django.core.management.color.supports_color
# https://github.com/django/django/blob/master/django/core/management/color.py
plat = sys.platform
supported_platform = plat != 'Pocket PC' and (plat != 'win32' or
'ANSICON' in os.environ)
# isatty is not always implemented, #6223.
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
if not supported_platform or not is_a_tty:
return False
return True |
Detects if the console (stdout) has colourising capability.
| Detects if the console (stdout) has colourising capability. | [
"Detects",
"if",
"the",
"console",
"(",
"stdout",
")",
"has",
"colourising",
"capability",
"."
] | def console_has_colours():
if os.environ.get("PY_TREES_DISABLE_COLORS"):
return False
plat = sys.platform
supported_platform = plat != 'Pocket PC' and (plat != 'win32' or
'ANSICON' in os.environ)
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
if not supported_platform or not is_a_tty:
return False
return True | [
"def",
"console_has_colours",
"(",
")",
":",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"\"PY_TREES_DISABLE_COLORS\"",
")",
":",
"return",
"False",
"plat",
"=",
"sys",
".",
"platform",
"supported_platform",
"=",
"plat",
"!=",
"'Pocket PC'",
"and",
"(",
"plat",
"!=",
"'win32'",
"or",
"'ANSICON'",
"in",
"os",
".",
"environ",
")",
"is_a_tty",
"=",
"hasattr",
"(",
"sys",
".",
"stdout",
",",
"'isatty'",
")",
"and",
"sys",
".",
"stdout",
".",
"isatty",
"(",
")",
"if",
"not",
"supported_platform",
"or",
"not",
"is_a_tty",
":",
"return",
"False",
"return",
"True"
] | Detects if the console (stdout) has colourising capability. | [
"Detects",
"if",
"the",
"console",
"(",
"stdout",
")",
"has",
"colourising",
"capability",
"."
] | [
"\"\"\"\n Detects if the console (stdout) has colourising capability.\n \"\"\"",
"# From django.core.management.color.supports_color",
"# https://github.com/django/django/blob/master/django/core/management/color.py",
"# isatty is not always implemented, #6223."
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | import sys
import os
def console_has_colours():
if os.environ.get("PY_TREES_DISABLE_COLORS"):
return False
plat = sys.platform
supported_platform = plat != 'Pocket PC' and (plat != 'win32' or
'ANSICON' in os.environ)
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
if not supported_platform or not is_a_tty:
return False
return True | 259 | 22 |
6c95185ed0944807e107c5bacc67024ea5cceafe | chinchay/graphene-oxide | mtp_2D_7l/vacancies/myread.py | [
"MIT"
] | Python | grep_valence | <not_specific> | def grep_valence(pseudopotential):
"""
Given a UPF pseudopotential file, find the number of valence atoms.
Parameters
----------
pseudopotential: str
Filename of the pseudopotential.
Returns
-------
valence: float
Valence as reported in the pseudopotential.
Raises
------
ValueError
If valence cannot be found in the pseudopotential.
"""
# Example lines
# Sr.pbe-spn-rrkjus_psl.1.0.0.UPF: z_valence="1.000000000000000E+001"
# C.pbe-n-kjpaw_psl.1.0.0.UPF (new ld1.x):
# ...PBC" z_valence="4.000000000000e0" total_p...
# C_ONCV_PBE-1.0.upf: z_valence=" 4.00"
# Ta_pbe_v1.uspp.F.UPF: 13.00000000000 Z valence
with open(pseudopotential) as psfile:
for line in psfile:
if 'z valence' in line.lower():
return float(line.split()[0])
elif 'z_valence' in line.lower():
if line.split()[0] == '<PP_HEADER':
line = list(filter(lambda x: 'z_valence' in x,
line.split(' ')))[0]
return float(line.split('=')[-1].strip().strip('"'))
else:
raise ValueError('Valence missing in {}'.format(pseudopotential)) |
Given a UPF pseudopotential file, find the number of valence atoms.
Parameters
----------
pseudopotential: str
Filename of the pseudopotential.
Returns
-------
valence: float
Valence as reported in the pseudopotential.
Raises
------
ValueError
If valence cannot be found in the pseudopotential.
| Given a UPF pseudopotential file, find the number of valence atoms.
Parameters
str
Filename of the pseudopotential.
Returns
float
Valence as reported in the pseudopotential.
Raises
ValueError
If valence cannot be found in the pseudopotential. | [
"Given",
"a",
"UPF",
"pseudopotential",
"file",
"find",
"the",
"number",
"of",
"valence",
"atoms",
".",
"Parameters",
"str",
"Filename",
"of",
"the",
"pseudopotential",
".",
"Returns",
"float",
"Valence",
"as",
"reported",
"in",
"the",
"pseudopotential",
".",
"Raises",
"ValueError",
"If",
"valence",
"cannot",
"be",
"found",
"in",
"the",
"pseudopotential",
"."
] | def grep_valence(pseudopotential):
with open(pseudopotential) as psfile:
for line in psfile:
if 'z valence' in line.lower():
return float(line.split()[0])
elif 'z_valence' in line.lower():
if line.split()[0] == '<PP_HEADER':
line = list(filter(lambda x: 'z_valence' in x,
line.split(' ')))[0]
return float(line.split('=')[-1].strip().strip('"'))
else:
raise ValueError('Valence missing in {}'.format(pseudopotential)) | [
"def",
"grep_valence",
"(",
"pseudopotential",
")",
":",
"with",
"open",
"(",
"pseudopotential",
")",
"as",
"psfile",
":",
"for",
"line",
"in",
"psfile",
":",
"if",
"'z valence'",
"in",
"line",
".",
"lower",
"(",
")",
":",
"return",
"float",
"(",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"elif",
"'z_valence'",
"in",
"line",
".",
"lower",
"(",
")",
":",
"if",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
"==",
"'<PP_HEADER'",
":",
"line",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"x",
":",
"'z_valence'",
"in",
"x",
",",
"line",
".",
"split",
"(",
"' '",
")",
")",
")",
"[",
"0",
"]",
"return",
"float",
"(",
"line",
".",
"split",
"(",
"'='",
")",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
".",
"strip",
"(",
"'\"'",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Valence missing in {}'",
".",
"format",
"(",
"pseudopotential",
")",
")"
] | Given a UPF pseudopotential file, find the number of valence atoms. | [
"Given",
"a",
"UPF",
"pseudopotential",
"file",
"find",
"the",
"number",
"of",
"valence",
"atoms",
"."
] | [
"\"\"\"\n Given a UPF pseudopotential file, find the number of valence atoms.\n\n Parameters\n ----------\n pseudopotential: str\n Filename of the pseudopotential.\n\n Returns\n -------\n valence: float\n Valence as reported in the pseudopotential.\n\n Raises\n ------\n ValueError\n If valence cannot be found in the pseudopotential.\n \"\"\"",
"# Example lines",
"# Sr.pbe-spn-rrkjus_psl.1.0.0.UPF: z_valence=\"1.000000000000000E+001\"",
"# C.pbe-n-kjpaw_psl.1.0.0.UPF (new ld1.x):",
"# ...PBC\" z_valence=\"4.000000000000e0\" total_p...",
"# C_ONCV_PBE-1.0.upf: z_valence=\" 4.00\"",
"# Ta_pbe_v1.uspp.F.UPF: 13.00000000000 Z valence"
] | [
{
"param": "pseudopotential",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "pseudopotential",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def grep_valence(pseudopotential):
with open(pseudopotential) as psfile:
for line in psfile:
if 'z valence' in line.lower():
return float(line.split()[0])
elif 'z_valence' in line.lower():
if line.split()[0] == '<PP_HEADER':
line = list(filter(lambda x: 'z_valence' in x,
line.split(' ')))[0]
return float(line.split('=')[-1].strip().strip('"'))
else:
raise ValueError('Valence missing in {}'.format(pseudopotential)) | 260 | 1003
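The grep_valence record can be exercised against a throwaway file; the header below is a fabricated UPF fragment, and the filter(lambda ...) call is condensed to a list comprehension (the for/else raise is hoisted after the loop, which behaves identically here):

import os
import tempfile

def grep_valence(path):
    # Same parsing logic as the record above, lightly condensed.
    with open(path) as psfile:
        for line in psfile:
            if 'z valence' in line.lower():
                return float(line.split()[0])
            elif 'z_valence' in line.lower():
                if line.split()[0] == '<PP_HEADER':
                    line = [x for x in line.split(' ') if 'z_valence' in x][0]
                return float(line.split('=')[-1].strip().strip('"'))
    raise ValueError('Valence missing in {}'.format(path))

with tempfile.NamedTemporaryFile('w', suffix='.UPF', delete=False) as f:
    f.write('<PP_HEADER info z_valence="4.000000000000e0" end>\n')
print(grep_valence(f.name))  # 4.0
os.unlink(f.name)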
1030ec3eabe95cd3b665ba533cfd51c3c7cea399 | ICTU/document-as-code | python/domain/audit/tools/make_audit_evidence.py | [
"Apache-2.0"
] | Python | preamble | null | def preamble():
"""
stuff required to make audit evidence work
"""
print("import domain.audit.model.audit_evidence as audit_evidence") |
stuff required to make audit evidence work
| stuff required to make audit evidence work | [
"stuff",
"required",
"to",
"make",
"audit",
"evidence",
"work"
] | def preamble():
print("import domain.audit.model.audit_evidence as audit_evidence") | [
"def",
"preamble",
"(",
")",
":",
"print",
"(",
"\"import domain.audit.model.audit_evidence as audit_evidence\"",
")"
] | stuff required to make audit evidence work | [
"stuff",
"required",
"to",
"make",
"audit",
"evidence",
"work"
] | [
"\"\"\"\n stuff required to make audit evidence work\n \"\"\""
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | def preamble():
print("import domain.audit.model.audit_evidence as audit_evidence") | 263 | 801 |
40463e98efbae968b87bd8bb3737e5f00784f19b | gabrielegilardi/Sorting | Code_Python/Sorting.py | [
"MIT"
] | Python | bubble_sort | null | def bubble_sort(a):
"""
Sorts a list using the bubble sort method.
"""
for n_pass in range(len(a)-1, 0, -1):
for i in range(n_pass):
# Swap elements
if (a[i] > a[i+1]):
a[i+1], a[i] = a[i], a[i+1] |
Sorts a list using the bubble sort method.
| Sorts a list using the bubble sort method. | [
"Sorts",
"a",
"list",
"using",
"the",
"bubble",
"sort",
"method",
"."
] | def bubble_sort(a):
for n_pass in range(len(a)-1, 0, -1):
for i in range(n_pass):
if (a[i] > a[i+1]):
a[i+1], a[i] = a[i], a[i+1] | [
"def",
"bubble_sort",
"(",
"a",
")",
":",
"for",
"n_pass",
"in",
"range",
"(",
"len",
"(",
"a",
")",
"-",
"1",
",",
"0",
",",
"-",
"1",
")",
":",
"for",
"i",
"in",
"range",
"(",
"n_pass",
")",
":",
"if",
"(",
"a",
"[",
"i",
"]",
">",
"a",
"[",
"i",
"+",
"1",
"]",
")",
":",
"a",
"[",
"i",
"+",
"1",
"]",
",",
"a",
"[",
"i",
"]",
"=",
"a",
"[",
"i",
"]",
",",
"a",
"[",
"i",
"+",
"1",
"]"
] | Sorts a list using the bubble sort method. | [
"Sorts",
"a",
"list",
"using",
"the",
"bubble",
"sort",
"method",
"."
] | [
"\"\"\"\n Sorts a list using the bubble sort method.\n \"\"\"",
"# Swap elements"
] | [
{
"param": "a",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "a",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def bubble_sort(a):
for n_pass in range(len(a)-1, 0, -1):
for i in range(n_pass):
if (a[i] > a[i+1]):
a[i+1], a[i] = a[i], a[i+1] | 265 | 365 |
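bubble_sort mutates its argument and returns nothing, so the caller inspects the list afterwards:

def bubble_sort(a):
    for n_pass in range(len(a) - 1, 0, -1):
        for i in range(n_pass):
            if a[i] > a[i + 1]:
                a[i + 1], a[i] = a[i], a[i + 1]

nums = [5, 1, 4, 2, 3]
bubble_sort(nums)   # sorts in place
print(nums)         # [1, 2, 3, 4, 5]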
37143e425cb914560fd0642087f03fbace1f73d6 | detly/flaskery | flaskery/switches/switches.py | [
"MIT"
] | Python | format_bytes | <not_specific> | def format_bytes(data):
"""
Converts a sequence of bytes to a formatted string, where each byte is
represented as hexadecimal.
"""
return ' '.join('{:02X}'.format(byte) for byte in data) |
Converts a sequence of bytes to a formatted string, where each byte is
represented as hexadecimal.
| Converts a sequence of bytes to a formatted string, where each byte is
represented as hexadecimal. | [
"Converts",
"a",
"sequence",
"of",
"bytes",
"to",
"a",
"formatted",
"string",
"where",
"each",
"byte",
"is",
"represented",
"as",
"hexadecimal",
"."
] | def format_bytes(data):
return ' '.join('{:02X}'.format(byte) for byte in data) | [
"def",
"format_bytes",
"(",
"data",
")",
":",
"return",
"' '",
".",
"join",
"(",
"'{:02X}'",
".",
"format",
"(",
"byte",
")",
"for",
"byte",
"in",
"data",
")"
] | Converts a sequence of bytes to a formatted string, where each byte is
represented as hexadecimal. | [
"Converts",
"a",
"sequence",
"of",
"bytes",
"to",
"a",
"formatted",
"string",
"where",
"each",
"byte",
"is",
"represented",
"as",
"hexadecimal",
"."
] | [
"\"\"\"\n Converts a sequence of bytes to a formatted string, where each byte is\n represented as hexadecimal.\n \"\"\""
] | [
{
"param": "data",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def format_bytes(data):
return ' '.join('{:02X}'.format(byte) for byte in data) | 266 | 565 |
afeedede2ef494da95276aeda9381a5bd80b036b | spyysalo/bionlp_st_2011_supporting | extension/tools/subset.py | [
"ISC"
] | Python | equiv_referenced_ids | <not_specific> | def equiv_referenced_ids(ann):
"""
Given a line with an Equiv annotation, returns a collection
containing the IDs referred to.
"""
fields = ann.split("\t")
if len(fields) < 2:
return []
args = fields[1].split(" ")
return args[1:] |
Given a line with an Equiv annotation, returns a collection
containing the IDs referred to.
| Given a line with an Equiv annotation, returns a collection
containing the IDs referred to. | [
"Given",
"a",
"line",
"with",
"an",
"Equiv",
"annotation",
"returns",
"a",
"collection",
"containing",
"the",
"IDs",
"referred",
"to",
"."
] | def equiv_referenced_ids(ann):
fields = ann.split("\t")
if len(fields) < 2:
return []
args = fields[1].split(" ")
return args[1:] | [
"def",
"equiv_referenced_ids",
"(",
"ann",
")",
":",
"fields",
"=",
"ann",
".",
"split",
"(",
"\"\\t\"",
")",
"if",
"len",
"(",
"fields",
")",
"<",
"2",
":",
"return",
"[",
"]",
"args",
"=",
"fields",
"[",
"1",
"]",
".",
"split",
"(",
"\" \"",
")",
"return",
"args",
"[",
"1",
":",
"]"
] | Given a line with an Equiv annotation, returns a collection
containing the IDs referred to. | [
"Given",
"a",
"line",
"with",
"an",
"Equiv",
"annotation",
"returns",
"a",
"collection",
"containing",
"the",
"IDs",
"referred",
"to",
"."
] | [
"\"\"\"\n Given a line with an Equiv annotation, returns a collection\n containing the IDs referred to.\n \"\"\""
] | [
{
"param": "ann",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "ann",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def equiv_referenced_ids(ann):
fields = ann.split("\t")
if len(fields) < 2:
return []
args = fields[1].split(" ")
return args[1:] | 267 | 132 |
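
Usage sketch for the equiv_referenced_ids record above, assuming a brat-style standoff line; the first space-separated token after the tab ("Equiv") is dropped.

    ann = "*\tEquiv T1 T2 T3"
    print(equiv_referenced_ids(ann))            # ['T1', 'T2', 'T3']
    print(equiv_referenced_ids("no-tab-line"))  # []
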
db30533ea5eebb3a32278dda45151c22d3942b6e | saurabhacellere/subversion | subversion/tests/cmdline/mod_dav_svn_tests.py | [
"Apache-2.0"
] | Python | compare | <not_specific> | def compare(lhs, rhs):
"""Implements cmp() for Python 2 and 3 alike"""
if lhs == None:
if rhs == None:
return 0
else:
return -1
else:
if rhs == None:
return 1
else:
return (lhs > rhs) - (lhs < rhs) | Implements cmp() for Python 2 and 3 alike | Implements cmp() for Python 2 and 3 alike | [
"Implements",
"cmp",
"()",
"for",
"Python",
"2",
"and",
"3",
"alike"
] | def compare(lhs, rhs):
if lhs == None:
if rhs == None:
return 0
else:
return -1
else:
if rhs == None:
return 1
else:
return (lhs > rhs) - (lhs < rhs) | [
"def",
"compare",
"(",
"lhs",
",",
"rhs",
")",
":",
"if",
"lhs",
"==",
"None",
":",
"if",
"rhs",
"==",
"None",
":",
"return",
"0",
"else",
":",
"return",
"-",
"1",
"else",
":",
"if",
"rhs",
"==",
"None",
":",
"return",
"1",
"else",
":",
"return",
"(",
"lhs",
">",
"rhs",
")",
"-",
"(",
"lhs",
"<",
"rhs",
")"
] | Implements cmp() for Python 2 and 3 alike | [
"Implements",
"cmp",
"()",
"for",
"Python",
"2",
"and",
"3",
"alike"
] | [
"\"\"\"Implements cmp() for Python 2 and 3 alike\"\"\""
] | [
{
"param": "lhs",
"type": null
},
{
"param": "rhs",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "lhs",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "rhs",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def compare(lhs, rhs):
if lhs == None:
if rhs == None:
return 0
else:
return -1
else:
if rhs == None:
return 1
else:
return (lhs > rhs) - (lhs < rhs) | 268 | 62 |
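
Usage sketch for the compare record above; None sorts before any value, and the shim plugs into sorting via functools.cmp_to_key.

    import functools

    print(compare(1, 2), compare(2, 2), compare(None, 0))          # -1 0 -1
    print(sorted([3, None, 1], key=functools.cmp_to_key(compare)))  # [None, 1, 3]
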
4e40acacb5c3e4453b6df498e2c8f8cffa75e8f2 | ricklentz/open_model_zoo | tools/accuracy_checker/openvino/tools/accuracy_checker/annotation_converters/_nlp_common.py | [
"Apache-2.0"
] | Python | _is_whitespace | <not_specific> | def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char in [" ", "\t", "\n", "\r"]:
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False | Checks whether `chars` is a whitespace character. | Checks whether `chars` is a whitespace character. | [
"Checks",
"whether",
"`",
"chars",
"`",
"is",
"a",
"whitespace",
"character",
"."
] | def _is_whitespace(char):
if char in [" ", "\t", "\n", "\r"]:
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False | [
"def",
"_is_whitespace",
"(",
"char",
")",
":",
"if",
"char",
"in",
"[",
"\" \"",
",",
"\"\\t\"",
",",
"\"\\n\"",
",",
"\"\\r\"",
"]",
":",
"return",
"True",
"cat",
"=",
"unicodedata",
".",
"category",
"(",
"char",
")",
"if",
"cat",
"==",
"\"Zs\"",
":",
"return",
"True",
"return",
"False"
] | Checks whether `chars` is a whitespace character. | [
"Checks",
"whether",
"`",
"chars",
"`",
"is",
"a",
"whitespace",
"character",
"."
] | [
"\"\"\"Checks whether `chars` is a whitespace character.\"\"\"",
"# \\t, \\n, and \\r are technically control characters but we treat them",
"# as whitespace since they are generally considered as such."
] | [
{
"param": "char",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "char",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import unicodedata
def _is_whitespace(char):
if char in [" ", "\t", "\n", "\r"]:
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False | 269 | 524 |
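
Quick checks for the _is_whitespace record above; a non-breaking space (Unicode category Zs) counts, while control characters do not.

    print(_is_whitespace(" "), _is_whitespace("\u00a0"))  # True True
    print(_is_whitespace("\x00"), _is_whitespace("a"))    # False False
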
42284c90ba4d2f741e641db9dae9884bfa233b0d | deephdc/audio-classification-tf | audioclas/misc.py | [
"MIT"
] | Python | open_compressed | <not_specific> | def open_compressed(byte_stream, file_format, output_folder):
"""
Extract and save a stream of bytes of a compressed file from memory.
Parameters
----------
byte_stream : BinaryIO
file_format : str
Compatible file formats: tarballs, zip files
output_folder : str
Folder to extract the stream
Returns
-------
Folder name of the extracted files.
"""
print('Decompressing the file ...')
tar_extensions = ['tar', 'bz2', 'tb2', 'tbz', 'tbz2', 'gz', 'tgz', 'lz', 'lzma', 'tlz', 'xz', 'txz', 'Z', 'tZ']
if file_format in tar_extensions:
tar = tarfile.open(mode="r:{}".format(file_format), fileobj=byte_stream)
tar.extractall(output_folder)
folder_name = tar.getnames()[0]
return os.path.join(output_folder, folder_name)
elif file_format == 'zip':
zf = zipfile.ZipFile(byte_stream)
zf.extractall(output_folder)
# folder_name = zf.namelist()[0].split('/')[0]
# return os.path.join(output_folder, folder_name)
else:
raise ValueError('Invalid file format for the compressed byte_stream') |
Extract and save a stream of bytes of a compressed file from memory.
Parameters
----------
byte_stream : BinaryIO
file_format : str
Compatible file formats: tarballs, zip files
output_folder : str
Folder to extract the stream
Returns
-------
Folder name of the extracted files.
| Extract and save a stream of bytes of a compressed file from memory.
Parameters
byte_stream : BinaryIO
file_format : str
Compatible file formats: tarballs, zip files
output_folder : str
Folder to extract the stream
Returns
Folder name of the extracted files. | [
"Extract",
"and",
"save",
"a",
"stream",
"of",
"bytes",
"of",
"a",
"compressed",
"file",
"from",
"memory",
".",
"Parameters",
"byte_stream",
":",
"BinaryIO",
"file_format",
":",
"str",
"Compatible",
"file",
"formats",
":",
"tarballs",
"zip",
"files",
"output_folder",
":",
"str",
"Folder",
"to",
"extract",
"the",
"stream",
"Returns",
"Folder",
"name",
"of",
"the",
"extracted",
"files",
"."
] | def open_compressed(byte_stream, file_format, output_folder):
print('Decompressing the file ...')
tar_extensions = ['tar', 'bz2', 'tb2', 'tbz', 'tbz2', 'gz', 'tgz', 'lz', 'lzma', 'tlz', 'xz', 'txz', 'Z', 'tZ']
if file_format in tar_extensions:
tar = tarfile.open(mode="r:{}".format(file_format), fileobj=byte_stream)
tar.extractall(output_folder)
folder_name = tar.getnames()[0]
return os.path.join(output_folder, folder_name)
elif file_format == 'zip':
zf = zipfile.ZipFile(byte_stream)
zf.extractall(output_folder)
else:
raise ValueError('Invalid file format for the compressed byte_stream') | [
"def",
"open_compressed",
"(",
"byte_stream",
",",
"file_format",
",",
"output_folder",
")",
":",
"print",
"(",
"'Decompressing the file ...'",
")",
"tar_extensions",
"=",
"[",
"'tar'",
",",
"'bz2'",
",",
"'tb2'",
",",
"'tbz'",
",",
"'tbz2'",
",",
"'gz'",
",",
"'tgz'",
",",
"'lz'",
",",
"'lzma'",
",",
"'tlz'",
",",
"'xz'",
",",
"'txz'",
",",
"'Z'",
",",
"'tZ'",
"]",
"if",
"file_format",
"in",
"tar_extensions",
":",
"tar",
"=",
"tarfile",
".",
"open",
"(",
"mode",
"=",
"\"r:{}\"",
".",
"format",
"(",
"file_format",
")",
",",
"fileobj",
"=",
"byte_stream",
")",
"tar",
".",
"extractall",
"(",
"output_folder",
")",
"folder_name",
"=",
"tar",
".",
"getnames",
"(",
")",
"[",
"0",
"]",
"return",
"os",
".",
"path",
".",
"join",
"(",
"output_folder",
",",
"folder_name",
")",
"elif",
"file_format",
"==",
"'zip'",
":",
"zf",
"=",
"zipfile",
".",
"ZipFile",
"(",
"byte_stream",
")",
"zf",
".",
"extractall",
"(",
"output_folder",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid file format for the compressed byte_stream'",
")"
] | Extract and save a stream of bytes of a compressed file from memory. | [
"Extract",
"and",
"save",
"a",
"stream",
"of",
"bytes",
"of",
"a",
"compressed",
"file",
"from",
"memory",
"."
] | [
"\"\"\"\n Extract and save a stream of bytes of a compressed file from memory.\n\n Parameters\n ----------\n byte_stream : BinaryIO\n file_format : str\n Compatible file formats: tarballs, zip files\n output_folder : str\n Folder to extract the stream\n\n Returns\n -------\n Folder name of the extracted files.\n \"\"\"",
"# folder_name = zf.namelist()[0].split('/')[0]",
"# return os.path.join(output_folder, folder_name)"
] | [
{
"param": "byte_stream",
"type": null
},
{
"param": "file_format",
"type": null
},
{
"param": "output_folder",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "byte_stream",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "file_format",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "output_folder",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import zipfile
import tarfile
import os
def open_compressed(byte_stream, file_format, output_folder):
print('Decompressing the file ...')
tar_extensions = ['tar', 'bz2', 'tb2', 'tbz', 'tbz2', 'gz', 'tgz', 'lz', 'lzma', 'tlz', 'xz', 'txz', 'Z', 'tZ']
if file_format in tar_extensions:
tar = tarfile.open(mode="r:{}".format(file_format), fileobj=byte_stream)
tar.extractall(output_folder)
folder_name = tar.getnames()[0]
return os.path.join(output_folder, folder_name)
elif file_format == 'zip':
zf = zipfile.ZipFile(byte_stream)
zf.extractall(output_folder)
else:
raise ValueError('Invalid file format for the compressed byte_stream') | 270 | 548 |
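
Usage sketch for the open_compressed record above with an in-memory tarball as a stand-in; note that in the recorded code only the tarball branch returns a path (the zip branch extracts and falls through to None).

    import io, tarfile, tempfile

    buf = io.BytesIO()
    with tarfile.open(mode="w:gz", fileobj=buf) as tar:
        tar.add(".", arcname="pkg", recursive=False)  # single top-level entry
    buf.seek(0)

    out_dir = tempfile.mkdtemp()
    print(open_compressed(buf, "gz", out_dir))  # <out_dir>/pkg
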
f878a9062a0116a0e84170ddd70f23523feeb5e8 | machawk1/cocrawler | tests/warc/warc-diff.py | [
"Apache-2.0"
] | Python | munge | <not_specific> | def munge(s):
'''
Remove things known to differ in WARC files:
uuids
WARC-Date: headers
'''
out = ''
for line in s.split('\n'):
if ':uuid:' in line:
line, _, _ = line.partition(':uuid:')
elif line.startswith('WARC-Date:'):
line = 'WARC-Date:'
out += line
return out |
Remove things known to differ in WARC files:
uuids
WARC-Date: headers
| Remove things known to differ in WARC files:
uuids
WARC-Date: headers | [
"Remove",
"things",
"known",
"to",
"differ",
"in",
"WARC",
"files",
":",
"uuids",
"WARC",
"-",
"Date",
":",
"headers"
] | def munge(s):
out = ''
for line in s.split('\n'):
if ':uuid:' in line:
line, _, _ = line.partition(':uuid:')
elif line.startswith('WARC-Date:'):
line = 'WARC-Date:'
out += line
return out | [
"def",
"munge",
"(",
"s",
")",
":",
"out",
"=",
"''",
"for",
"line",
"in",
"s",
".",
"split",
"(",
"'\\n'",
")",
":",
"if",
"':uuid:'",
"in",
"line",
":",
"line",
",",
"_",
",",
"_",
"=",
"line",
".",
"partition",
"(",
"':uuid:'",
")",
"elif",
"line",
".",
"startswith",
"(",
"'WARC-Date:'",
")",
":",
"line",
"=",
"'WARC-Date:'",
"out",
"+=",
"line",
"return",
"out"
] | Remove things known to differ in WARC files:
uuids
WARC-Date: headers | [
"Remove",
"things",
"known",
"to",
"differ",
"in",
"WARC",
"files",
":",
"uuids",
"WARC",
"-",
"Date",
":",
"headers"
] | [
"'''\n Remove things known to differ in WARC files:\n uuids\n WARC-Date: headers\n '''"
] | [
{
"param": "s",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "s",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def munge(s):
out = ''
for line in s.split('\n'):
if ':uuid:' in line:
line, _, _ = line.partition(':uuid:')
elif line.startswith('WARC-Date:'):
line = 'WARC-Date:'
out += line
return out | 271 | 545 |
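
Usage sketch for the munge record above; two WARC records differing only in uuid and WARC-Date fields compare equal after munging.

    a = "WARC-Record-ID: <urn:uuid:1111>\nWARC-Date: 2020-01-01\nFoo: bar"
    b = "WARC-Record-ID: <urn:uuid:2222>\nWARC-Date: 2021-06-15\nFoo: bar"
    print(munge(a) == munge(b))  # True
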
52419393e616733a9793920892f60210ef302979 | THU-DA-6D-Pose-Group/self6dpp | lib/dr_utils/rep/helpers.py | [
"Apache-2.0"
] | Python | _assert_dim_eq | null | def _assert_dim_eq(inp, tgt):
"""Asserts that the number of dims in inp is exactly equal to the value
    specified in tgt.
Args:
inp (torch.Tensor): Input tensor, whose number of dimensions is
to be compared.
tgt (int): Value which the number of dims of inp should equal.
"""
if inp.dim() != tgt:
raise ValueError("Expected input to contain exactly {0} dims. " "Got {1} instead.".format(tgt, inp.dim())) | Asserts that the number of dims in inp is exactly equal to the value
specified in tgt.
Args:
inp (torch.Tensor): Input tensor, whose number of dimensions is
to be compared.
tgt (int): Value which the number of dims of inp should equal.
| Asserts that the number of dims in inp is exactly equal to the value
specified in tgt. | [
"Asserts",
"that",
"the",
"number",
"of",
"dims",
"in",
"inp",
"is",
"exactly",
"equal",
"to",
"the",
"value",
"sepecified",
"in",
"tgt",
"."
] | def _assert_dim_eq(inp, tgt):
if inp.dim() != tgt:
raise ValueError("Expected input to contain exactly {0} dims. " "Got {1} instead.".format(tgt, inp.dim())) | [
"def",
"_assert_dim_eq",
"(",
"inp",
",",
"tgt",
")",
":",
"if",
"inp",
".",
"dim",
"(",
")",
"!=",
"tgt",
":",
"raise",
"ValueError",
"(",
"\"Expected input to contain exactly {0} dims. \"",
"\"Got {1} instead.\"",
".",
"format",
"(",
"tgt",
",",
"inp",
".",
"dim",
"(",
")",
")",
")"
] | Asserts that the number of dims in inp is exactly equal to the value
sepecified in tgt. | [
"Asserts",
"that",
"the",
"number",
"of",
"dims",
"in",
"inp",
"is",
"exactly",
"equal",
"to",
"the",
"value",
"sepecified",
"in",
"tgt",
"."
] | [
"\"\"\"Asserts that the number of dims in inp is exactly equal to the value\n sepecified in tgt.\n\n Args:\n inp (torch.Tensor): Input tensor, whose number of dimensions is\n to be compared.\n tgt (int): Value which the number of dims of inp should equal.\n \"\"\""
] | [
{
"param": "inp",
"type": null
},
{
"param": "tgt",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "inp",
"type": null,
"docstring": "Input tensor, whose number of dimensions is\nto be compared.",
"docstring_tokens": [
"Input",
"tensor",
"whose",
"number",
"of",
"dimensions",
"is",
"to",
"be",
"compared",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "tgt",
"type": null,
"docstring": "Value which the number of dims of inp should equal.",
"docstring_tokens": [
"Value",
"which",
"the",
"number",
"of",
"dims",
"of",
"inp",
"should",
"equal",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
} | def _assert_dim_eq(inp, tgt):
if inp.dim() != tgt:
raise ValueError("Expected input to contain exactly {0} dims. " "Got {1} instead.".format(tgt, inp.dim())) | 273 | 813 |
2353cefd7c64e1d34edadc5874cce0bee064b11b | YoungYoung619/tf_utils | nps/ops.py | [
"Apache-2.0"
] | Python | assign | <not_specific> | def assign(input, position, value):
"""This function is used to assign the value to the input at the designated position.
Args:
input: An input array with any shape
position: the position where you wanna assign the value, must be a list or ndarray and have
the corresponding dimension with input.
value: the value which you wanna assign, a list or ndarray which must has the same length
with position or length 1.
"""
def modifyValue(inputData, pos, val, k=0):
"""A recursion function which is used to change the input value at designated position
"""
if len(inputData.shape) == 1: ## stop condition
inputData[pos[k]] = val
else:
newInputData = inputData[pos[k]]
k = k + 1
modifyValue(newInputData, pos, val, k)
data = copy.deepcopy(input) ## copy a new from input
if len(value)==1:
for pos in position:
modifyValue(data,pos,value[0])
else:
for pos,val in zip(position,value):
modifyValue(data,pos,val)
return data | This function is used to assign the value to the input at the designated position.
Args:
input: An input array with any shape
position: the position where you wanna assign the value, must be a list or ndarray and have
the corresponding dimension with input.
value: the value which you wanna assign, a list or ndarray which must have the same length
with position or length 1.
| This function is used to assign the value to the input at the designated position. | [
"This",
"function",
"is",
"used",
"to",
"assign",
"the",
"value",
"to",
"the",
"input",
"at",
"the",
"designated",
"position",
"."
] | def assign(input, position, value):
def modifyValue(inputData, pos, val, k=0):
if len(inputData.shape) == 1:
inputData[pos[k]] = val
else:
newInputData = inputData[pos[k]]
k = k + 1
modifyValue(newInputData, pos, val, k)
data = copy.deepcopy(input)
if len(value)==1:
for pos in position:
modifyValue(data,pos,value[0])
else:
for pos,val in zip(position,value):
modifyValue(data,pos,val)
return data | [
"def",
"assign",
"(",
"input",
",",
"position",
",",
"value",
")",
":",
"def",
"modifyValue",
"(",
"inputData",
",",
"pos",
",",
"val",
",",
"k",
"=",
"0",
")",
":",
"\"\"\"A recursion function which is used to change the input value at designated position\n \"\"\"",
"if",
"len",
"(",
"inputData",
".",
"shape",
")",
"==",
"1",
":",
"inputData",
"[",
"pos",
"[",
"k",
"]",
"]",
"=",
"val",
"else",
":",
"newInputData",
"=",
"inputData",
"[",
"pos",
"[",
"k",
"]",
"]",
"k",
"=",
"k",
"+",
"1",
"modifyValue",
"(",
"newInputData",
",",
"pos",
",",
"val",
",",
"k",
")",
"data",
"=",
"copy",
".",
"deepcopy",
"(",
"input",
")",
"if",
"len",
"(",
"value",
")",
"==",
"1",
":",
"for",
"pos",
"in",
"position",
":",
"modifyValue",
"(",
"data",
",",
"pos",
",",
"value",
"[",
"0",
"]",
")",
"else",
":",
"for",
"pos",
",",
"val",
"in",
"zip",
"(",
"position",
",",
"value",
")",
":",
"modifyValue",
"(",
"data",
",",
"pos",
",",
"val",
")",
"return",
"data"
] | This function is used to assign the value to the input at the designated position. | [
"This",
"function",
"is",
"used",
"to",
"assign",
"the",
"value",
"to",
"the",
"input",
"at",
"the",
"designated",
"position",
"."
] | [
"\"\"\"This function is used to assign the value to the input at the designated position.\n\n Args:\n input: An input array with any shape\n position: the position where you wanna assign the value, must be a list or ndarray and have\n the corresponding dimension with input.\n value: the value which you wanna assign, a list or ndarray which must has the same length\n with position or length 1.\n \"\"\"",
"\"\"\"A recursion function which is used to change the input value at designated position\n \"\"\"",
"## stop condition",
"## copy a new from input"
] | [
{
"param": "input",
"type": null
},
{
"param": "position",
"type": null
},
{
"param": "value",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "input",
"type": null,
"docstring": "An input array with any shape",
"docstring_tokens": [
"An",
"input",
"array",
"with",
"any",
"shape"
],
"default": null,
"is_optional": null
},
{
"identifier": "position",
"type": null,
"docstring": "the position where you wanna assign the value, must be a list or ndarray and have\nthe corresponding dimension with input.",
"docstring_tokens": [
"the",
"position",
"where",
"you",
"wanna",
"assign",
"the",
"value",
"must",
"be",
"a",
"list",
"or",
"ndarray",
"and",
"have",
"the",
"corresponding",
"dimension",
"with",
"input",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "value",
"type": null,
"docstring": "the value which you wanna assign, a list or ndarray which must has the same length\nwith position or length 1.",
"docstring_tokens": [
"the",
"value",
"which",
"you",
"wanna",
"assign",
"a",
"list",
"or",
"ndarray",
"which",
"must",
"has",
"the",
"same",
"length",
"with",
"position",
"or",
"length",
"1",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import copy
def assign(input, position, value):
def modifyValue(inputData, pos, val, k=0):
if len(inputData.shape) == 1:
inputData[pos[k]] = val
else:
newInputData = inputData[pos[k]]
k = k + 1
modifyValue(newInputData, pos, val, k)
data = copy.deepcopy(input)
if len(value)==1:
for pos in position:
modifyValue(data,pos,value[0])
else:
for pos,val in zip(position,value):
modifyValue(data,pos,val)
return data | 274 | 92 |
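
Usage sketch for the assign record above on a NumPy array; each position addresses one element, and the input is deep-copied rather than mutated.

    import numpy as np

    a = np.zeros((2, 2))
    b = assign(a, position=[[0, 1], [1, 0]], value=[7])
    print(b)        # [[0. 7.] [7. 0.]]
    print(a.sum())  # 0.0 -- the original array is untouched
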
2a01d76e0bdd9aac6668e62b2d97df7cdbe19fb5 | benchislett/Skunkworks | Python3/Conv-Autoencoder/train.py | [
"MIT"
] | Python | train_batch | <not_specific> | def train_batch(model, loss_fn, optimizer, batch):
"""Train the model on a single batch of data,
and return the total loss
"""
optimizer.zero_grad()
x = batch[0].cuda()
pred = model(x)
loss_batch = loss_fn(pred, x)
loss_batch.backward()
optimizer.step()
return loss_batch.item() | Train the model on a single batch of data,
and return the total loss
| Train the model on a single batch of data,
and return the total loss | [
"Train",
"the",
"model",
"on",
"a",
"single",
"batch",
"of",
"data",
"and",
"return",
"the",
"total",
"loss"
] | def train_batch(model, loss_fn, optimizer, batch):
optimizer.zero_grad()
x = batch[0].cuda()
pred = model(x)
loss_batch = loss_fn(pred, x)
loss_batch.backward()
optimizer.step()
return loss_batch.item() | [
"def",
"train_batch",
"(",
"model",
",",
"loss_fn",
",",
"optimizer",
",",
"batch",
")",
":",
"optimizer",
".",
"zero_grad",
"(",
")",
"x",
"=",
"batch",
"[",
"0",
"]",
".",
"cuda",
"(",
")",
"pred",
"=",
"model",
"(",
"x",
")",
"loss_batch",
"=",
"loss_fn",
"(",
"pred",
",",
"x",
")",
"loss_batch",
".",
"backward",
"(",
")",
"optimizer",
".",
"step",
"(",
")",
"return",
"loss_batch",
".",
"item",
"(",
")"
] | Train the model on a single batch of data,
and return the total loss | [
"Train",
"the",
"model",
"on",
"a",
"single",
"batch",
"of",
"data",
"and",
"return",
"the",
"total",
"loss"
] | [
"\"\"\"Train the model on a single batch of data,\n and return the total loss\n\n \"\"\""
] | [
{
"param": "model",
"type": null
},
{
"param": "loss_fn",
"type": null
},
{
"param": "optimizer",
"type": null
},
{
"param": "batch",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "model",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "loss_fn",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "optimizer",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "batch",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def train_batch(model, loss_fn, optimizer, batch):
optimizer.zero_grad()
x = batch[0].cuda()
pred = model(x)
loss_batch = loss_fn(pred, x)
loss_batch.backward()
optimizer.step()
return loss_batch.item() | 276 | 390 |
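
One optimization step with the train_batch record above, assuming a CUDA device is available and that batch is a one-element tuple as a DataLoader would yield for an autoencoder.

    import torch

    model = torch.nn.Linear(4, 4).cuda()
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    batch = (torch.randn(8, 4),)  # DataLoader-style tuple
    print(train_batch(model, torch.nn.MSELoss(), opt, batch))
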
c4f2a3752d6968e37136bc840962a066d41a8eb7 | markkorput/pycfgr | tests/test_json.py | [
"MIT"
] | Python | cfgr | null | def cfgr(builder):
"""
by default, the Runtime.add_type(<Type>) method will look for a static cfgr method.
"""
builder.addInput('name').string(lambda v,obj: obj.setName(v))
#builder.addInput('reset').apply(lambda inp,obj: inp.event.subscribe(obj.reset))
builder.addInput('reset').signal_to_method(lambda obj: obj.reset)
builder.addOutput('afterReset').from_event(lambda obj: obj.afterResetEvent)
builder.addInput('parent').object(lambda v,obj: obj.setParent(v)) |
by default, the Runtime.add_type(<Type>) method will look for a static cfgr method.
| by default, the Runtime.add_type() method will look for a static cfgr method. | [
"by",
"default",
"the",
"Runtime",
".",
"add_type",
"()",
"method",
"will",
"look",
"for",
"a",
"static",
"cfgr",
"method",
"."
] | def cfgr(builder):
builder.addInput('name').string(lambda v,obj: obj.setName(v))
builder.addInput('reset').signal_to_method(lambda obj: obj.reset)
builder.addOutput('afterReset').from_event(lambda obj: obj.afterResetEvent)
builder.addInput('parent').object(lambda v,obj: obj.setParent(v)) | [
"def",
"cfgr",
"(",
"builder",
")",
":",
"builder",
".",
"addInput",
"(",
"'name'",
")",
".",
"string",
"(",
"lambda",
"v",
",",
"obj",
":",
"obj",
".",
"setName",
"(",
"v",
")",
")",
"builder",
".",
"addInput",
"(",
"'reset'",
")",
".",
"signal_to_method",
"(",
"lambda",
"obj",
":",
"obj",
".",
"reset",
")",
"builder",
".",
"addOutput",
"(",
"'afterReset'",
")",
".",
"from_event",
"(",
"lambda",
"obj",
":",
"obj",
".",
"afterResetEvent",
")",
"builder",
".",
"addInput",
"(",
"'parent'",
")",
".",
"object",
"(",
"lambda",
"v",
",",
"obj",
":",
"obj",
".",
"setParent",
"(",
"v",
")",
")"
] | by default, the Runtime.add_type(<Type>) method will look for a static cfgr method. | [
"by",
"default",
"the",
"Runtime",
".",
"add_type",
"(",
"<Type",
">",
")",
"method",
"will",
"look",
"for",
"a",
"static",
"cfgr",
"method",
"."
] | [
"\"\"\"\n by default, the Runtime.add_type(<Type>) method will look for a static cfgr method.\n \"\"\"",
"#builder.addInput('reset').apply(lambda inp,obj: inp.event.subscribe(obj.reset))"
] | [
{
"param": "builder",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "builder",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def cfgr(builder):
builder.addInput('name').string(lambda v,obj: obj.setName(v))
builder.addInput('reset').signal_to_method(lambda obj: obj.reset)
builder.addOutput('afterReset').from_event(lambda obj: obj.afterResetEvent)
builder.addInput('parent').object(lambda v,obj: obj.setParent(v)) | 277 | 394 |
15fa73cd9ab890b31fd31a3a4e02a4af263c1adf | atlas-forward-calorimeter/noise | read.py | [
"MIT"
] | Python | _to_byte | <not_specific> | def _to_byte(byte):
"""Make sure an object really represents an integer from 0 to 255,
and return the integer.
"""
byte = float(byte)
assert byte.is_integer(), f"Got a non-integer byte: {byte}!"
byte = int(byte)
assert byte >= 0, f"Got a negative value for a byte: {byte}!"
assert byte <= 255, f"Got a byte value bigger than 255: {byte}!"
return byte | Make sure an object really represents an integer from 0 to 255,
and return the integer.
| Make sure an object really represents an integer from 0 to 255,
and return the integer. | [
"Make",
"sure",
"an",
"object",
"really",
"represents",
"an",
"integer",
"from",
"0",
"to",
"255",
"and",
"return",
"the",
"integer",
"."
] | def _to_byte(byte):
byte = float(byte)
assert byte.is_integer(), f"Got a non-integer byte: {byte}!"
byte = int(byte)
assert byte >= 0, f"Got a negative value for a byte: {byte}!"
assert byte <= 255, f"Got a byte value bigger than 255: {byte}!"
return byte | [
"def",
"_to_byte",
"(",
"byte",
")",
":",
"byte",
"=",
"float",
"(",
"byte",
")",
"assert",
"byte",
".",
"is_integer",
"(",
")",
",",
"f\"Got a non-integer byte: {byte}!\"",
"byte",
"=",
"int",
"(",
"byte",
")",
"assert",
"byte",
">=",
"0",
",",
"f\"Got a negative value for a byte: {byte}!\"",
"assert",
"byte",
"<=",
"255",
",",
"f\"Got a byte value bigger than 255: {byte}!\"",
"return",
"byte"
] | Make sure an object really represents an integer from 0 to 255,
and return the integer. | [
"Make",
"sure",
"an",
"object",
"really",
"represents",
"an",
"integer",
"from",
"0",
"to",
"255",
"and",
"return",
"the",
"integer",
"."
] | [
"\"\"\"Make sure an object really represents an integer from 0 to 255,\n and return the integer.\n \"\"\""
] | [
{
"param": "byte",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "byte",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _to_byte(byte):
byte = float(byte)
assert byte.is_integer(), f"Got a non-integer byte: {byte}!"
byte = int(byte)
assert byte >= 0, f"Got a negative value for a byte: {byte}!"
assert byte <= 255, f"Got a byte value bigger than 255: {byte}!"
return byte | 278 | 813 |
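
Quick checks for the _to_byte record above; numeric strings and floats are normalized through float() before the range asserts.

    print(_to_byte("7"), _to_byte(7.0))  # 7 7
    try:
        _to_byte(3.5)
    except AssertionError as e:
        print(e)  # Got a non-integer byte: 3.5!
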
4a9a9842ac96e8f422cbd64b38278227294b6297 | dunno99/intellij-community | python/helpers/pydev/pydevd_attach_to_process/winappdbg/util.py | [
"Apache-2.0"
] | Python | find_slot | <not_specific> | def find_slot(cls, ctx):
"""
Finds an empty slot to set a hardware breakpoint.
@see: clear_bp, set_bp
@type ctx: dict( str S{->} int )
@param ctx: Thread context dictionary.
@rtype: int
@return: Slot (debug register) for hardware breakpoint.
"""
Dr7 = ctx['Dr7']
slot = 0
for m in cls.enableMask:
if (Dr7 & m) == 0:
return slot
slot += 1
return None |
Finds an empty slot to set a hardware breakpoint.
@see: clear_bp, set_bp
@type ctx: dict( str S{->} int )
@param ctx: Thread context dictionary.
@rtype: int
@return: Slot (debug register) for hardware breakpoint.
| Finds an empty slot to set a hardware breakpoint. | [
"Finds",
"an",
"empty",
"slot",
"to",
"set",
"a",
"hardware",
"breakpoint",
"."
] | def find_slot(cls, ctx):
Dr7 = ctx['Dr7']
slot = 0
for m in cls.enableMask:
if (Dr7 & m) == 0:
return slot
slot += 1
return None | [
"def",
"find_slot",
"(",
"cls",
",",
"ctx",
")",
":",
"Dr7",
"=",
"ctx",
"[",
"'Dr7'",
"]",
"slot",
"=",
"0",
"for",
"m",
"in",
"cls",
".",
"enableMask",
":",
"if",
"(",
"Dr7",
"&",
"m",
")",
"==",
"0",
":",
"return",
"slot",
"slot",
"+=",
"1",
"return",
"None"
] | Finds an empty slot to set a hardware breakpoint. | [
"Finds",
"an",
"empty",
"slot",
"to",
"set",
"a",
"hardware",
"breakpoint",
"."
] | [
"\"\"\"\n Finds an empty slot to set a hardware breakpoint.\n\n @see: clear_bp, set_bp\n\n @type ctx: dict( str S{->} int )\n @param ctx: Thread context dictionary.\n\n @rtype: int\n @return: Slot (debug register) for hardware breakpoint.\n \"\"\""
] | [
{
"param": "cls",
"type": null
},
{
"param": "ctx",
"type": null
}
] | {
"returns": [
{
"docstring": "Slot (debug register) for hardware breakpoint.",
"docstring_tokens": [
"Slot",
"(",
"debug",
"register",
")",
"for",
"hardware",
"breakpoint",
"."
],
"type": "int"
}
],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "ctx",
"type": null,
"docstring": "Thread context dictionary.",
"docstring_tokens": [
"Thread",
"context",
"dictionary",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": [
{
"identifier": "see",
"docstring": null,
"docstring_tokens": [
"None"
]
}
]
} | def find_slot(cls, ctx):
Dr7 = ctx['Dr7']
slot = 0
for m in cls.enableMask:
if (Dr7 & m) == 0:
return slot
slot += 1
return None | 280 | 356 |
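
A sketch of the find_slot record above against a fake thread context, assuming enableMask carries the four local-enable bits of Dr7 (1, 4, 16, 64) as in WinAppDbg.

    class HwBp:
        enableMask = (1, 4, 16, 64)
        find_slot = classmethod(find_slot)

    print(HwBp.find_slot({'Dr7': 0b101}))  # 2 -- slots 0 and 1 are taken
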
a09f7ab285d85664fe77807ed13c62af7ef6b1dc | karimbahgat/PythonGis | pythongis/vector/data.py | [
"MIT"
] | Python | Name_generator | null | def Name_generator():
"""Used internally for ensuring default data names are unique for each Python session.
TODO: Maybe make private.
"""
i = 1
while True:
yield "Untitled%s" % i
i += 1 | Used internally for ensuring default data names are unique for each Python session.
TODO: Maybe make private.
| Used internally for ensuring default data names are unique for each Python session.
TODO: Maybe make private. | [
"Used",
"internally",
"for",
"ensuring",
"default",
"data",
"names",
"are",
"unique",
"for",
"each",
"Python",
"session",
".",
"TODO",
":",
"Maybe",
"make",
"private",
"."
] | def Name_generator():
i = 1
while True:
yield "Untitled%s" % i
i += 1 | [
"def",
"Name_generator",
"(",
")",
":",
"i",
"=",
"1",
"while",
"True",
":",
"yield",
"\"Untitled%s\"",
"%",
"i",
"i",
"+=",
"1"
] | Used internally for ensuring default data names are unique for each Python session. | [
"Used",
"internally",
"for",
"ensuring",
"default",
"data",
"names",
"are",
"unique",
"for",
"each",
"Python",
"session",
"."
] | [
"\"\"\"Used internally for ensuring default data names are unique for each Python session.\n TODO: Maybe make private. \n \"\"\""
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | def Name_generator():
i = 1
while True:
yield "Untitled%s" % i
i += 1 | 282 | 580 |
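
Usage sketch for the Name_generator record above; one generator per session hands out unique default names.

    names = Name_generator()
    print(next(names), next(names), next(names))  # Untitled1 Untitled2 Untitled3
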
5fde0a9dbb55ba0fbbdf9a7c7f90eec18b7b5b27 | eho-tacc/episimlab | episimlab/utils/variable.py | [
"BSD-3-Clause"
] | Python | group_dict_by_var | dict | def group_dict_by_var(d: dict) -> dict:
"""Given a dictionary keyed by 2-length tuples, return a dictionary keyed
only by the second element of the tuple.
"""
return {k: d[(proc, k)] for (proc, k) in d} | Given a dictionary keyed by 2-length tuples, return a dictionary keyed
only by the second element of the tuple.
| Given a dictionary keyed by 2-length tuples, return a dictionary keyed
only by the second element of the tuple. | [
"Given",
"a",
"dictionary",
"keyed",
"by",
"2",
"-",
"length",
"tuples",
"return",
"a",
"dictionary",
"keyed",
"only",
"by",
"the",
"second",
"element",
"of",
"the",
"tuple",
"."
] | def group_dict_by_var(d: dict) -> dict:
return {k: d[(proc, k)] for (proc, k) in d} | [
"def",
"group_dict_by_var",
"(",
"d",
":",
"dict",
")",
"->",
"dict",
":",
"return",
"{",
"k",
":",
"d",
"[",
"(",
"proc",
",",
"k",
")",
"]",
"for",
"(",
"proc",
",",
"k",
")",
"in",
"d",
"}"
] | Given a dictionary keyed by 2-length tuples, return a dictionary keyed
only by the second element of the tuple. | [
"Given",
"a",
"dictionary",
"keyed",
"by",
"2",
"-",
"length",
"tuples",
"return",
"a",
"dictionary",
"keyed",
"only",
"by",
"the",
"second",
"element",
"of",
"the",
"tuple",
"."
] | [
"\"\"\"Given a dictionary keyed by 2-length tuples, return a dictionary keyed\n only by the second element of the tuple.\n \"\"\""
] | [
{
"param": "d",
"type": "dict"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "d",
"type": "dict",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def group_dict_by_var(d: dict) -> dict:
return {k: d[(proc, k)] for (proc, k) in d} | 283 | 863 |
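
Usage sketch for the group_dict_by_var record above; on key collisions the later process wins, since the dict comprehension simply overwrites.

    d = {('proc_a', 'beta'): 1, ('proc_b', 'beta'): 2, ('proc_b', 'gamma'): 3}
    print(group_dict_by_var(d))  # {'beta': 2, 'gamma': 3}
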
55434f4a306721a3af34caa22b67b1942d88c204 | quochuy/hivemind | hive/utils/normalize.py | [
"MIT"
] | Python | rep_log10 | <not_specific> | def rep_log10(rep):
"""Convert raw steemd rep into a UI-ready value centered at 25."""
def _log10(string):
leading_digits = int(string[0:4])
log = math.log10(leading_digits) + 0.00000001
num = len(string) - 1
return num + (log - int(log))
rep = str(rep)
if rep == "0":
return 25
sign = -1 if rep[0] == '-' else 1
if sign < 0:
rep = rep[1:]
out = _log10(rep)
out = max(out - 9, 0) * sign # @ -9, $1 earned is approx magnitude 1
out = (out * 9) + 25 # 9 points per magnitude. center at 25
return round(out, 2) | Convert raw steemd rep into a UI-ready value centered at 25. | Convert raw steemd rep into a UI-ready value centered at 25. | [
"Convert",
"raw",
"steemd",
"rep",
"into",
"a",
"UI",
"-",
"ready",
"value",
"centered",
"at",
"25",
"."
] | def rep_log10(rep):
def _log10(string):
leading_digits = int(string[0:4])
log = math.log10(leading_digits) + 0.00000001
num = len(string) - 1
return num + (log - int(log))
rep = str(rep)
if rep == "0":
return 25
sign = -1 if rep[0] == '-' else 1
if sign < 0:
rep = rep[1:]
out = _log10(rep)
out = max(out - 9, 0) * sign
out = (out * 9) + 25
return round(out, 2) | [
"def",
"rep_log10",
"(",
"rep",
")",
":",
"def",
"_log10",
"(",
"string",
")",
":",
"leading_digits",
"=",
"int",
"(",
"string",
"[",
"0",
":",
"4",
"]",
")",
"log",
"=",
"math",
".",
"log10",
"(",
"leading_digits",
")",
"+",
"0.00000001",
"num",
"=",
"len",
"(",
"string",
")",
"-",
"1",
"return",
"num",
"+",
"(",
"log",
"-",
"int",
"(",
"log",
")",
")",
"rep",
"=",
"str",
"(",
"rep",
")",
"if",
"rep",
"==",
"\"0\"",
":",
"return",
"25",
"sign",
"=",
"-",
"1",
"if",
"rep",
"[",
"0",
"]",
"==",
"'-'",
"else",
"1",
"if",
"sign",
"<",
"0",
":",
"rep",
"=",
"rep",
"[",
"1",
":",
"]",
"out",
"=",
"_log10",
"(",
"rep",
")",
"out",
"=",
"max",
"(",
"out",
"-",
"9",
",",
"0",
")",
"*",
"sign",
"out",
"=",
"(",
"out",
"*",
"9",
")",
"+",
"25",
"return",
"round",
"(",
"out",
",",
"2",
")"
] | Convert raw steemd rep into a UI-ready value centered at 25. | [
"Convert",
"raw",
"steemd",
"rep",
"into",
"a",
"UI",
"-",
"ready",
"value",
"centered",
"at",
"25",
"."
] | [
"\"\"\"Convert raw steemd rep into a UI-ready value centered at 25.\"\"\"",
"# @ -9, $1 earned is approx magnitude 1",
"# 9 points per magnitude. center at 25"
] | [
{
"param": "rep",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "rep",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import math
def rep_log10(rep):
def _log10(string):
leading_digits = int(string[0:4])
log = math.log10(leading_digits) + 0.00000001
num = len(string) - 1
return num + (log - int(log))
rep = str(rep)
if rep == "0":
return 25
sign = -1 if rep[0] == '-' else 1
if sign < 0:
rep = rep[1:]
out = _log10(rep)
out = max(out - 9, 0) * sign
out = (out * 9) + 25
return round(out, 2) | 284 | 750 |
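
Worked values for the rep_log10 record above; raw reputation 0 maps to the 25-point center, large positive raw values land above it, and a sign flips the offset.

    print(rep_log10(0))                   # 25
    print(rep_log10(10**12))              # 52.0
    print(rep_log10("-100000000000000"))  # -20.0
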
0296b7730d221105d6a32ef9ca5eb3296151dc1c | google/ghost-userspace | experiments/scripts/options.py | [
"Apache-2.0"
] | Python | GetNiceArgs | <not_specific> | def GetNiceArgs(level: int):
"""Returns the command/arguments to set the `nice` level of a new process.
Args:
level: The nice level to set (-20 <= `level` <= 19).
"""
if level < -20 or level > 19:
raise ValueError(
f"The level must be >= -20 and <= 19. The level specified is {level}.")
return ["nice", "-n", str(level)] | Returns the command/arguments to set the `nice` level of a new process.
Args:
level: The nice level to set (-20 <= `level` <= 19).
| Returns the command/arguments to set the `nice` level of a new process. | [
"Returns",
"the",
"command",
"/",
"arguments",
"to",
"set",
"the",
"`",
"nice",
"`",
"level",
"of",
"a",
"new",
"process",
"."
] | def GetNiceArgs(level: int):
if level < -20 or level > 19:
raise ValueError(
f"The level must be >= -20 and <= 19. The level specified is {level}.")
return ["nice", "-n", str(level)] | [
"def",
"GetNiceArgs",
"(",
"level",
":",
"int",
")",
":",
"if",
"level",
"<",
"-",
"20",
"or",
"level",
">",
"19",
":",
"raise",
"ValueError",
"(",
"f\"The level must be >= -20 and <= 19. The level specified is {level}.\"",
")",
"return",
"[",
"\"nice\"",
",",
"\"-n\"",
",",
"str",
"(",
"level",
")",
"]"
] | Returns the command/arguments to set the `nice` level of a new process. | [
"Returns",
"the",
"command",
"/",
"arguments",
"to",
"set",
"the",
"`",
"nice",
"`",
"level",
"of",
"a",
"new",
"process",
"."
] | [
"\"\"\"Returns the command/arguments to set the `nice` level of a new process.\n\n Args:\n level: The nice level to set (-20 <= `level` <= 19).\n \"\"\""
] | [
{
"param": "level",
"type": "int"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "level",
"type": "int",
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def GetNiceArgs(level: int):
if level < -20 or level > 19:
raise ValueError(
f"The level must be >= -20 and <= 19. The level specified is {level}.")
return ["nice", "-n", str(level)] | 285 | 341 |
a1968aba4cc4bcacd944cc1ea4e3cecfe8679083 | ebadkamil/nicos | nicos_ess/utilities/csv_utils.py | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | Python | import_table_from_csv | <not_specific> | def import_table_from_csv(filename):
"""Import tabular data from a csv file.
:param filename: path to csv file
:return: tuple of headers (empty if no headers) and rows
"""
with open(filename, "r") as file:
sniffer = csv.Sniffer()
has_header = sniffer.has_header(file.read(2048))
file.seek(0)
rows = list(csv.reader(file))
if has_header:
return rows[0], rows[1:]
return [], rows | Import tabular data from a csv file.
:param filename: path to csv file
:return: tuple of headers (empty if no headers) and rows
| Import tabular data from a csv file. | [
"Import",
"tabular",
"data",
"from",
"a",
"csv",
"file",
"."
] | def import_table_from_csv(filename):
with open(filename, "r") as file:
sniffer = csv.Sniffer()
has_header = sniffer.has_header(file.read(2048))
file.seek(0)
rows = list(csv.reader(file))
if has_header:
return rows[0], rows[1:]
return [], rows | [
"def",
"import_table_from_csv",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"\"r\"",
")",
"as",
"file",
":",
"sniffer",
"=",
"csv",
".",
"Sniffer",
"(",
")",
"has_header",
"=",
"sniffer",
".",
"has_header",
"(",
"file",
".",
"read",
"(",
"2048",
")",
")",
"file",
".",
"seek",
"(",
"0",
")",
"rows",
"=",
"list",
"(",
"csv",
".",
"reader",
"(",
"file",
")",
")",
"if",
"has_header",
":",
"return",
"rows",
"[",
"0",
"]",
",",
"rows",
"[",
"1",
":",
"]",
"return",
"[",
"]",
",",
"rows"
] | Import tabular data from a csv file. | [
"Import",
"tabular",
"data",
"from",
"a",
"csv",
"file",
"."
] | [
"\"\"\"Import tabular data from a csv file.\n\n :param filename: path to csv file\n :return: tuple of headers (empty if no headers) and rows\n \"\"\""
] | [
{
"param": "filename",
"type": null
}
] | {
"returns": [
{
"docstring": "tuple of headers (empty if no headers) and rows",
"docstring_tokens": [
"tuple",
"of",
"headers",
"(",
"empty",
"if",
"no",
"headers",
")",
"and",
"rows"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "filename",
"type": null,
"docstring": "path to csv file",
"docstring_tokens": [
"path",
"to",
"csv",
"file"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import csv
def import_table_from_csv(filename):
with open(filename, "r") as file:
sniffer = csv.Sniffer()
has_header = sniffer.has_header(file.read(2048))
file.seek(0)
rows = list(csv.reader(file))
if has_header:
return rows[0], rows[1:]
return [], rows | 286 | 224 |
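
Round-trip sketch for the import_table_from_csv record above; csv.Sniffer decides heuristically whether the first row is a header, and it detects one here.

    with open("demo.csv", "w") as f:
        f.write("name,age\nada,36\nalan,41\n")
    headers, rows = import_table_from_csv("demo.csv")
    print(headers, rows)  # ['name', 'age'] [['ada', '36'], ['alan', '41']]
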
d4440ecf2b493f12a43664f1534a17fc96c4adf5 | wikimedia/user_metrics | user_metrics/etl/aggregator.py | [
"BSD-3-Clause"
] | Python | weighted_rate | <not_specific> | def weighted_rate(iter, **kwargs):
"""
Computes a weighted rate over the elements of the iterator.
"""
def weight_method_default(x):
return 1
weight_idx = kwargs['weight_idx'] if 'weight_idx' in kwargs else 1
val_idx = kwargs['val_idx'] if 'val_idx' in kwargs else 1
weight_method = kwargs['weight_method'] if 'cmp_method' in kwargs else \
weight_method_default
count = 0
total_weight = 0.0
weighted_sum = 0.0
for r in iter.__iter__():
try:
count += 1
weight = weight_method(r[weight_idx])
total_weight += r[weight_idx]
weighted_sum += weight * r[val_idx]
except IndexError:
continue
except TypeError:
continue
if count:
return [count, total_weight, weighted_sum / count]
else:
return [count, total_weight, 0.0] |
Computes a weighted rate over the elements of the iterator.
| Computes a weighted rate over the elements of the iterator. | [
"Computes",
"a",
"weighted",
"rate",
"over",
"the",
"elements",
"of",
"the",
"iterator",
"."
] | def weighted_rate(iter, **kwargs):
def weight_method_default(x):
return 1
weight_idx = kwargs['weight_idx'] if 'weight_idx' in kwargs else 1
val_idx = kwargs['val_idx'] if 'val_idx' in kwargs else 1
weight_method = kwargs['weight_method'] if 'cmp_method' in kwargs else \
weight_method_default
count = 0
total_weight = 0.0
weighted_sum = 0.0
for r in iter.__iter__():
try:
count += 1
weight = weight_method(r[weight_idx])
total_weight += r[weight_idx]
weighted_sum += weight * r[val_idx]
except IndexError:
continue
except TypeError:
continue
if count:
return [count, total_weight, weighted_sum / count]
else:
return [count, total_weight, 0.0] | [
"def",
"weighted_rate",
"(",
"iter",
",",
"**",
"kwargs",
")",
":",
"def",
"weight_method_default",
"(",
"x",
")",
":",
"return",
"1",
"weight_idx",
"=",
"kwargs",
"[",
"'weight_idx'",
"]",
"if",
"'weight_idx'",
"in",
"kwargs",
"else",
"1",
"val_idx",
"=",
"kwargs",
"[",
"'val_idx'",
"]",
"if",
"'val_idx'",
"in",
"kwargs",
"else",
"1",
"weight_method",
"=",
"kwargs",
"[",
"'weight_method'",
"]",
"if",
"'cmp_method'",
"in",
"kwargs",
"else",
"weight_method_default",
"count",
"=",
"0",
"total_weight",
"=",
"0.0",
"weighted_sum",
"=",
"0.0",
"for",
"r",
"in",
"iter",
".",
"__iter__",
"(",
")",
":",
"try",
":",
"count",
"+=",
"1",
"weight",
"=",
"weight_method",
"(",
"r",
"[",
"weight_idx",
"]",
")",
"total_weight",
"+=",
"r",
"[",
"weight_idx",
"]",
"weighted_sum",
"+=",
"weight",
"*",
"r",
"[",
"val_idx",
"]",
"except",
"IndexError",
":",
"continue",
"except",
"TypeError",
":",
"continue",
"if",
"count",
":",
"return",
"[",
"count",
",",
"total_weight",
",",
"weighted_sum",
"/",
"count",
"]",
"else",
":",
"return",
"[",
"count",
",",
"total_weight",
",",
"0.0",
"]"
] | Computes a weighted rate over the elements of the iterator. | [
"Computes",
"a",
"weighted",
"rate",
"over",
"the",
"elements",
"of",
"the",
"iterator",
"."
] | [
"\"\"\"\n Computes a weighted rate over the elements of the iterator.\n \"\"\""
] | [
{
"param": "iter",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "iter",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def weighted_rate(iter, **kwargs):
def weight_method_default(x):
return 1
weight_idx = kwargs['weight_idx'] if 'weight_idx' in kwargs else 1
val_idx = kwargs['val_idx'] if 'val_idx' in kwargs else 1
weight_method = kwargs['weight_method'] if 'cmp_method' in kwargs else \
weight_method_default
count = 0
total_weight = 0.0
weighted_sum = 0.0
for r in iter.__iter__():
try:
count += 1
weight = weight_method(r[weight_idx])
total_weight += r[weight_idx]
weighted_sum += weight * r[val_idx]
except IndexError:
continue
except TypeError:
continue
if count:
return [count, total_weight, weighted_sum / count]
else:
return [count, total_weight, 0.0] | 288 | 272 |
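
Usage sketch for the weighted_rate record above over (id, value) rows. Note a quirk in the recorded code: kwargs['weight_method'] is only read when a 'cmp_method' key is present, so the default weight of 1 applies here.

    rows = [(0, 2.0), (1, 4.0), (2, 6.0)]
    print(weighted_rate(rows))  # [3, 12.0, 4.0]
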
6cc0f76d88f8266436d8e69d67ef3f0bbbfc8643 | rigdenlab/PISACov | pisacov/pisacovmods/contacts.py | [
"BSD-3-Clause"
] | Python | match_maps | <not_specific> | def match_maps(conpredlist,pdbmap):
"""
Obtain several scores for an interface matched contact prediction map
Parameters
----------
conpredlist : Conkit Contact Map
Contact prediction list
pdbmap : str
File path to contact list file containing interface pdb contacts
Returns
-------
matchedmap : Conkit Contact Map
The matched map (pdb interface + contact prediction list).
cnt : int
The number of True Positives.
jaccardindex : float
The Jaccard Index
AvScore : float
The average score of all True Positives.
AccScore : float
The accumulated score of all True Positives.
probability : float
The interface score.
"""
matchedmap = conpredlist.match(pdbmap, add_false_negatives=False,inplace=False)
cnt=0
AccScore=0.0
probability = 1.0
for contact in matchedmap:
if contact.true_positive:
cnt += 1
AccScore += contact.raw_score
probability *= (1.0 - contact.raw_score)
jaccardindex=conpredlist.get_jaccard_index(pdbmap)
probability = 1.0 - probability
if cnt > 0:
AvScore = AccScore / cnt
else:
AvScore=0
return matchedmap, cnt, jaccardindex, AvScore, AccScore, probability |
Obtain several scores for an interface matched contact prediction map
Parameters
----------
conpredlist : Conkit Contact Map
Contact prediction list
pdbmap : str
File path to contact list file containing interface pdb contacts
Returns
-------
matchedmap : Conkit Contact Map
The matched map (pdb interface + contact prediction list).
cnt : int
The number of True Positives.
jaccardindex : float
The Jaccard Index
AvScore : float
The average score of all True Positives.
AccScore : float
The accumulated score of all True Positives.
probability : float
The interface score.
| Obtain several scores for an interface matched contact prediction map
Parameters
conpredlist : Conkit Contact Map
Contact prediction list
pdbmap : str
File path to contact list file containing interface pdb contacts
Returns
matchedmap : Conkit Contact Map
The matched map (pdb interface + contact prediction list).
cnt : int
The number of True Positives.
jaccardindex : float
The Jaccard Index
AvScore : float
The average score of all True Positives.
AccScore : float
The accumulated score of all True Positives.
probability : float
The interface score. | [
"Obtain",
"several",
"scores",
"for",
"an",
"interface",
"matched",
"contact",
"prediction",
"map",
"Parameters",
"conpredlist",
":",
"Conkit",
"Contact",
"Map",
"Contact",
"prediction",
"list",
"pdbmap",
":",
"str",
"File",
"path",
"to",
"contact",
"list",
"file",
"containing",
"interface",
"pdb",
"contacts",
"Returns",
"matchedmap",
":",
"Conkit",
"Contact",
"Map",
"The",
"matched",
"map",
"(",
"pdb",
"interface",
"+",
"contact",
"prediction",
"list",
")",
".",
"cnt",
":",
"int",
"The",
"number",
"of",
"True",
"Positives",
".",
"jaccardindex",
":",
"float",
"The",
"Jaccard",
"Index",
"AvScore",
":",
"float",
"The",
"average",
"score",
"of",
"all",
"True",
"Positives",
".",
"AccScore",
":",
"float",
"The",
"accumulated",
"score",
"of",
"all",
"True",
"Positives",
".",
"probability",
":",
"float",
"The",
"interface",
"score",
"."
] | def match_maps(conpredlist,pdbmap):
matchedmap = conpredlist.match(pdbmap, add_false_negatives=False,inplace=False)
cnt=0
AccScore=0.0
probability = 1.0
for contact in matchedmap:
if contact.true_positive:
cnt += 1
AccScore += contact.raw_score
probability *= (1.0 - contact.raw_score)
jaccardindex=conpredlist.get_jaccard_index(pdbmap)
probability = 1.0 - probability
if cnt > 0:
AvScore = AccScore / cnt
else:
AvScore=0
return matchedmap, cnt, jaccardindex, AvScore, AccScore, probability | [
"def",
"match_maps",
"(",
"conpredlist",
",",
"pdbmap",
")",
":",
"matchedmap",
"=",
"conpredlist",
".",
"match",
"(",
"pdbmap",
",",
"add_false_negatives",
"=",
"False",
",",
"inplace",
"=",
"False",
")",
"cnt",
"=",
"0",
"AccScore",
"=",
"0.0",
"probability",
"=",
"1.0",
"for",
"contact",
"in",
"matchedmap",
":",
"if",
"contact",
".",
"true_positive",
":",
"cnt",
"+=",
"1",
"AccScore",
"+=",
"contact",
".",
"raw_score",
"probability",
"*=",
"(",
"1.0",
"-",
"contact",
".",
"raw_score",
")",
"jaccardindex",
"=",
"conpredlist",
".",
"get_jaccard_index",
"(",
"pdbmap",
")",
"probability",
"=",
"1.0",
"-",
"probability",
"if",
"cnt",
">",
"0",
":",
"AvScore",
"=",
"AccScore",
"/",
"cnt",
"else",
":",
"AvScore",
"=",
"0",
"return",
"matchedmap",
",",
"cnt",
",",
"jaccardindex",
",",
"AvScore",
",",
"AccScore",
",",
"probability"
] | Obtain several scores for an interface matched contact prediction map
Parameters | [
"Obtain",
"several",
"scores",
"for",
"an",
"interface",
"matched",
"contact",
"prediction",
"map",
"Parameters"
] | [
"\"\"\"\n Obtain several scores for an interface matched contact prediction map\n\n Parameters\n ----------\n conpredlist : Conkit Contact Map\n Contact prediction list\n pdbmap : str\n File path to contact list file containing interface pdb contacts\n\n Returns\n -------\n matchedmap : Conkit Contact Map\n The matched map (pdb interface + contact prediction list).\n cnt : int\n The number of True Positives.\n jaccardindex : float\n The Jaccard Index\n AvScore : float\n The average score of all True Positives.\n AccScore : float\n The accumulated score of all True Positives.\n probability : float\n The interface score.\n \"\"\""
] | [
{
"param": "conpredlist",
"type": null
},
{
"param": "pdbmap",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "conpredlist",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "pdbmap",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def match_maps(conpredlist,pdbmap):
matchedmap = conpredlist.match(pdbmap, add_false_negatives=False,inplace=False)
cnt=0
AccScore=0.0
probability = 1.0
for contact in matchedmap:
if contact.true_positive:
cnt += 1
AccScore += contact.raw_score
probability *= (1.0 - contact.raw_score)
jaccardindex=conpredlist.get_jaccard_index(pdbmap)
probability = 1.0 - probability
if cnt > 0:
AvScore = AccScore / cnt
else:
AvScore=0
return matchedmap, cnt, jaccardindex, AvScore, AccScore, probability | 289 | 539 |
76765eb3523e9c7b0a1c75c43d8e26353b67698d | kylewuolle/google-cloud-sdk | lib/googlecloudsdk/command_lib/iot/util.py | [
"Apache-2.0"
] | Python | AddBlockedToRequest | <not_specific> | def AddBlockedToRequest(ref, args, req):
"""Python hook for yaml commands to process the blocked flag."""
del ref
req.device.blocked = args.blocked
return req | Python hook for yaml commands to process the blocked flag. | Python hook for yaml commands to process the blocked flag. | [
"Python",
"hook",
"for",
"yaml",
"commands",
"to",
"process",
"the",
"blocked",
"flag",
"."
] | def AddBlockedToRequest(ref, args, req):
del ref
req.device.blocked = args.blocked
return req | [
"def",
"AddBlockedToRequest",
"(",
"ref",
",",
"args",
",",
"req",
")",
":",
"del",
"ref",
"req",
".",
"device",
".",
"blocked",
"=",
"args",
".",
"blocked",
"return",
"req"
] | Python hook for yaml commands to process the blocked flag. | [
"Python",
"hook",
"for",
"yaml",
"commands",
"to",
"process",
"the",
"blocked",
"flag",
"."
] | [
"\"\"\"Python hook for yaml commands to process the blocked flag.\"\"\""
] | [
{
"param": "ref",
"type": null
},
{
"param": "args",
"type": null
},
{
"param": "req",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "ref",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "args",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "req",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def AddBlockedToRequest(ref, args, req):
del ref
req.device.blocked = args.blocked
return req | 290 | 656 |
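
A self-contained check of the AddBlockedToRequest record above, using stand-in objects for the yaml-command ref/args/req triple.

    from types import SimpleNamespace

    req = SimpleNamespace(device=SimpleNamespace(blocked=None))
    args = SimpleNamespace(blocked=True)
    print(AddBlockedToRequest(None, args, req).device.blocked)  # True
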
5b390248f03640a46eae88716347ee27762d9227 | kjaymiller/sd-police-call-observability | src/load_geo.py | [
"MIT"
] | Python | convert_data | null | def convert_data(json_data):
"""Transforms the geopandas dataframe into a json file to be easily loaded
into elasticsearch. Eland would be better suited for this but currently
does not support geopandas `Point` data.
"""
for row in json.loads(json_data)["features"]:
point = row["properties"]
point["coordinates"] = row['geometry']['coordinates']
yield point | Transforms the geopandas dataframe into a json file to be easily loaded
into elasticsearch. Eland would be better suited for this but currently
does not support geopandas `Point` data.
| Transforms the geopandas dataframe into a json file to be easily loaded
into elasticsearch. Eland would be better suited for this but currently
does not support geopandas `Point` data. | [
"Transforms",
"the",
"geopandas",
"dataframe",
"into",
"a",
"json",
"file",
"to",
"be",
"easily",
"loaded",
"into",
"elasticsearch",
".",
"Eland",
"would",
"be",
"better",
"suited",
"for",
"this",
"but",
"currently",
"does",
"not",
"support",
"geopandas",
"`",
"Point",
"`",
"data",
"."
] | def convert_data(json_data):
for row in json.loads(json_data)["features"]:
point = row["properties"]
point["coordinates"] = row['geometry']['coordinates']
yield point | [
"def",
"convert_data",
"(",
"json_data",
")",
":",
"for",
"row",
"in",
"json",
".",
"loads",
"(",
"json_data",
")",
"[",
"\"features\"",
"]",
":",
"point",
"=",
"row",
"[",
"\"properties\"",
"]",
"point",
"[",
"\"coordinates\"",
"]",
"=",
"row",
"[",
"'geometry'",
"]",
"[",
"'coordinates'",
"]",
"yield",
"point"
] | Transforms the geopandas dataframe into a json file to be easily loaded
into elasticsearch. | [
"Transforms",
"the",
"geopandas",
"dataframe",
"into",
"a",
"json",
"file",
"to",
"be",
"easily",
"loaded",
"into",
"elasticsearch",
"."
] | [
"\"\"\"Transforms the geopandas dataframe into a json file to be easily loaded\n into elasticsearch. Eland would be better suited for this but currently\n does not support geopandas `Point` data.\n \"\"\""
] | [
{
"param": "json_data",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "json_data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import json
def convert_data(json_data):
for row in json.loads(json_data)["features"]:
point = row["properties"]
point["coordinates"] = row['geometry']['coordinates']
yield point | 291 | 359 |
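
Usage sketch for the convert_data record above with a minimal GeoJSON FeatureCollection string.

    import json

    gj = json.dumps({"features": [
        {"properties": {"call_type": "415"},
         "geometry": {"type": "Point", "coordinates": [-117.16, 32.72]}},
    ]})
    print(list(convert_data(gj)))
    # [{'call_type': '415', 'coordinates': [-117.16, 32.72]}]
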
1b5d1ae02bfc9e5f2069483afc1d51d6074eb3c4 | cygniv404/BIS-software | Address_Validator.py | [
"MIT"
] | Python | in_suit3 | <not_specific> | def in_suit3(list, list0):
"""
test 2 suits of street numbers if they have crossed numbers
For example: "22-27" in "21-24"retruns True
:param a: Int of number
:param b: String List of number
:return: boolean
"""
text = list.replace("-", "")
text0 = list0.replace("-", "")
if ("-" in list) and ("-" in list0) and (text.isdigit() is True) and (text0.isdigit() is True):
list1 = list.split("-")
x = int(list1[0])
suit = set()
suit.add(x)
while x < int(list1[len(list1) - 1]):
x += 1
suit.add(x)
suit.add(int(list1[len(list1) - 1]))
list2 = list0.split("-")
y = int(list2[0])
suit0 = set()
suit0.add(y)
while y < int(list2[len(list2) - 1]):
y += 1
suit0.add(y)
suit0.add(int(list2[len(list2) - 1]))
temp = [item for item in suit if item in suit0]
if len(temp) > 0: return True
return False |
test 2 suits of street numbers if they have crossed numbers
For example: "22-27" in "21-24"retruns True
:param a: Int of number
:param b: String List of number
:return: boolean
| test 2 suits of street numbers if they have crossed numbers
For example: "22-27" in "21-24"retruns True | [
"test",
"2",
"suits",
"of",
"street",
"numbers",
"if",
"they",
"have",
"crossed",
"numbers",
"For",
"example",
":",
"\"",
"22",
"-",
"27",
"\"",
"in",
"\"",
"21",
"-",
"24",
"\"",
"retruns",
"True"
] | def in_suit3(list, list0):
text = list.replace("-", "")
text0 = list0.replace("-", "")
if ("-" in list) and ("-" in list0) and (text.isdigit() is True) and (text0.isdigit() is True):
list1 = list.split("-")
x = int(list1[0])
suit = set()
suit.add(x)
while x < int(list1[len(list1) - 1]):
x += 1
suit.add(x)
suit.add(int(list1[len(list1) - 1]))
list2 = list0.split("-")
y = int(list2[0])
suit0 = set()
suit0.add(y)
while y < int(list2[len(list2) - 1]):
y += 1
suit0.add(y)
suit0.add(int(list2[len(list2) - 1]))
temp = [item for item in suit if item in suit0]
if len(temp) > 0: return True
return False | [
"def",
"in_suit3",
"(",
"list",
",",
"list0",
")",
":",
"text",
"=",
"list",
".",
"replace",
"(",
"\"-\"",
",",
"\"\"",
")",
"text0",
"=",
"list0",
".",
"replace",
"(",
"\"-\"",
",",
"\"\"",
")",
"if",
"(",
"\"-\"",
"in",
"list",
")",
"and",
"(",
"\"-\"",
"in",
"list0",
")",
"and",
"(",
"text",
".",
"isdigit",
"(",
")",
"is",
"True",
")",
"and",
"(",
"text0",
".",
"isdigit",
"(",
")",
"is",
"True",
")",
":",
"list1",
"=",
"list",
".",
"split",
"(",
"\"-\"",
")",
"x",
"=",
"int",
"(",
"list1",
"[",
"0",
"]",
")",
"suit",
"=",
"set",
"(",
")",
"suit",
".",
"add",
"(",
"x",
")",
"while",
"x",
"<",
"int",
"(",
"list1",
"[",
"len",
"(",
"list1",
")",
"-",
"1",
"]",
")",
":",
"x",
"+=",
"1",
"suit",
".",
"add",
"(",
"x",
")",
"suit",
".",
"add",
"(",
"int",
"(",
"list1",
"[",
"len",
"(",
"list1",
")",
"-",
"1",
"]",
")",
")",
"list2",
"=",
"list0",
".",
"split",
"(",
"\"-\"",
")",
"y",
"=",
"int",
"(",
"list2",
"[",
"0",
"]",
")",
"suit0",
"=",
"set",
"(",
")",
"suit0",
".",
"add",
"(",
"y",
")",
"while",
"y",
"<",
"int",
"(",
"list2",
"[",
"len",
"(",
"list2",
")",
"-",
"1",
"]",
")",
":",
"y",
"+=",
"1",
"suit0",
".",
"add",
"(",
"y",
")",
"suit0",
".",
"add",
"(",
"int",
"(",
"list2",
"[",
"len",
"(",
"list2",
")",
"-",
"1",
"]",
")",
")",
"temp",
"=",
"[",
"item",
"for",
"item",
"in",
"suit",
"if",
"item",
"in",
"suit0",
"]",
"if",
"len",
"(",
"temp",
")",
">",
"0",
":",
"return",
"True",
"return",
"False"
] | test 2 suits of street numbers if they have crossed numbers
For example: "22-27" in "21-24"retruns True | [
"test",
"2",
"suits",
"of",
"street",
"numbers",
"if",
"they",
"have",
"crossed",
"numbers",
"For",
"example",
":",
"\"",
"22",
"-",
"27",
"\"",
"in",
"\"",
"21",
"-",
"24",
"\"",
"retruns",
"True"
] | [
"\"\"\"\n\n test 2 suits of street numbers if they have crossed numbers\n For example: \"22-27\" in \"21-24\"retruns True\n\n :param a: Int of number\n :param b: String List of number\n :return: boolean\n\n\n \"\"\""
] | [
{
"param": "list",
"type": null
},
{
"param": "list0",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "list0",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "a",
"type": null,
"docstring": "Int of number",
"docstring_tokens": [
"Int",
"of",
"number"
],
"default": null,
"is_optional": null
},
{
"identifier": "b",
"type": null,
"docstring": "String List of number",
"docstring_tokens": [
"String",
"List",
"of",
"number"
],
"default": null,
"is_optional": null
}
],
"others": []
} | def in_suit3(list, list0):
text = list.replace("-", "")
text0 = list0.replace("-", "")
if ("-" in list) and ("-" in list0) and (text.isdigit() is True) and (text0.isdigit() is True):
list1 = list.split("-")
x = int(list1[0])
suit = set()
suit.add(x)
while x < int(list1[len(list1) - 1]):
x += 1
suit.add(x)
suit.add(int(list1[len(list1) - 1]))
list2 = list0.split("-")
y = int(list2[0])
suit0 = set()
suit0.add(y)
while y < int(list2[len(list2) - 1]):
y += 1
suit0.add(y)
suit0.add(int(list2[len(list2) - 1]))
temp = [item for item in suit if item in suit0]
if len(temp) > 0: return True
return False | 292 | 489 |
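Illustrative calls for the in_suit3 record above (hypothetical inputs, assuming the record's function is in scope); the ranges "22-27" and "21-24" share 22-24, so the overlap check passes:

print(in_suit3("22-27", "21-24"))  # True: the expanded sets share 22, 23, 24
print(in_suit3("1-3", "5-9"))      # False: no common street number
print(in_suit3("22", "21-24"))     # False: first argument contains no "-"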
662408d672bc4eb444279bbb2dcb90e2167e3ecf | jsharkey13/facebook_message_parser | fb_analysis.py | [
"MIT"
] | Python | _update_thread_dict | null | def _update_thread_dict(thread_dict, thread_name, num):
"""Add new entries to count dictionary, dealing with duplicates carefully."""
if thread_name not in thread_dict:
thread_dict.update({thread_name: num})
else: # Deal with duplicates, otherwise old entries get overwritten:
thread_dict[thread_name] += num | Add new entries to count dictionary, dealing with duplicates carefully. | Add new entries to count dictionary, dealing with duplicates carefully. | [
"Add",
"new",
"entries",
"to",
"count",
"dictionary",
"dealing",
"with",
"duplicates",
"carefully",
"."
] | def _update_thread_dict(thread_dict, thread_name, num):
if thread_name not in thread_dict:
thread_dict.update({thread_name: num})
else:
thread_dict[thread_name] += num | [
"def",
"_update_thread_dict",
"(",
"thread_dict",
",",
"thread_name",
",",
"num",
")",
":",
"if",
"thread_name",
"not",
"in",
"thread_dict",
":",
"thread_dict",
".",
"update",
"(",
"{",
"thread_name",
":",
"num",
"}",
")",
"else",
":",
"thread_dict",
"[",
"thread_name",
"]",
"+=",
"num"
] | Add new entries to count dictionary, dealing with duplicates carefully. | [
"Add",
"new",
"entries",
"to",
"count",
"dictionary",
"dealing",
"with",
"duplicates",
"carefully",
"."
] | [
"\"\"\"Add new entries to count dictionary, dealing with duplicates carefully.\"\"\"",
"# Deal with duplicates, otherwise old entries get overwritten:"
] | [
{
"param": "thread_dict",
"type": null
},
{
"param": "thread_name",
"type": null
},
{
"param": "num",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "thread_dict",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "thread_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "num",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _update_thread_dict(thread_dict, thread_name, num):
if thread_name not in thread_dict:
thread_dict.update({thread_name: num})
else:
thread_dict[thread_name] += num | 293 | 833 |
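Minimal usage sketch for the _update_thread_dict record above (made-up thread names, assuming the record's function is in scope):

counts = {"Alice": 2}
_update_thread_dict(counts, "Alice", 3)  # existing key: count is incremented
_update_thread_dict(counts, "Bob", 1)    # new key: entry is created
print(counts)  # {'Alice': 5, 'Bob': 1}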
47fde3b12615fee380570b30cff0e48fc7ee1955 | anandg95/CLRS-solutions | Ch12/bst_succ_pred.py | [
"MIT"
] | Python | successor_postorder | <not_specific> | def successor_postorder(bst, node):
"""
Cases:
1. If node is the right child of its parent, then successor is the parent
2. If node is the left child of its parent, successor would be in right subtree of the parent. It is reached
by traversing left children successively and if there is no left taking right and repeating.
If there is no right child, parent is the successor.
"""
if node.parent is None:
return None
y = node.parent
if y.right == None or y.right == node:
return y
y = y.right
while y is not None:
node = y
y = y.left if y.left is not None else y.right
return node |
Cases:
1. If node is the right child of its parent, then successor is the parent
2. If node is the left child of its parent, successor would be in right subtree of the parent. It is reached
by traversing left children successively and if there is no left taking right and repeating.
If there is no right child, parent is the successor.
| 1. If node is the right child of its parent, then successor is the parent
2. If node is the left child of its parent, successor would be in right subtree of the parent. It is reached
by traversing left children successively and if there is no left taking right and repeating.
If there is no right child, parent is the successor. | [
"1",
".",
"If",
"node",
"is",
"the",
"right",
"child",
"of",
"its",
"parent",
"then",
"successor",
"is",
"the",
"parent",
"2",
".",
"If",
"node",
"is",
"the",
"left",
"child",
"of",
"its",
"parent",
"successor",
"would",
"be",
"in",
"right",
"subtree",
"of",
"the",
"parent",
".",
"It",
"is",
"reahced",
"by",
"traversing",
"left",
"childs",
"successively",
"and",
"if",
"there",
"is",
"no",
"left",
"taking",
"right",
"and",
"repeating",
".",
"If",
"there",
"is",
"no",
"right",
"child",
"parent",
"is",
"the",
"successor",
"."
] | def successor_postorder(bst, node):
if node.parent is None:
return None
y = node.parent
if y.right == None or y.right == node:
return y
y = y.right
while y is not None:
node = y
y = y.left if y.left is not None else y.right
return node | [
"def",
"successor_postorder",
"(",
"bst",
",",
"node",
")",
":",
"if",
"node",
".",
"parent",
"is",
"None",
":",
"return",
"None",
"y",
"=",
"node",
".",
"parent",
"if",
"y",
".",
"right",
"==",
"None",
"or",
"y",
".",
"right",
"==",
"node",
":",
"return",
"y",
"y",
"=",
"y",
".",
"right",
"while",
"y",
"is",
"not",
"None",
":",
"node",
"=",
"y",
"y",
"=",
"y",
".",
"left",
"if",
"y",
".",
"left",
"is",
"not",
"None",
"else",
"y",
".",
"right",
"return",
"node"
] | Cases:
1. | [
"Cases",
":",
"1",
"."
] | [
"\"\"\"\n Cases:\n 1. If node is the right child of its parent, then successor is the parent\n 2. If node is the left child of its parent, successor would be in right subtree of the parent. It is reahced\n by traversing left childs successively and if there is no left taking right and repeating. \n If there is no right child, parent is the successor.\n \"\"\""
] | [
{
"param": "bst",
"type": null
},
{
"param": "node",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "bst",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "node",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def successor_postorder(bst, node):
if node.parent is None:
return None
y = node.parent
if y.right == None or y.right == node:
return y
y = y.right
while y is not None:
node = y
y = y.left if y.left is not None else y.right
return node | 295 | 458 |
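Self-contained sketch exercising the successor_postorder record above; the Node class is a hypothetical stand-in (the record never shows the tree type, and the bst argument is unused by the function body):

class Node:
    def __init__(self, key, parent=None):
        self.key, self.parent = key, parent
        self.left = self.right = None

root = Node(2)                    # post-order of this tree is 1, 3, 2
root.left = Node(1, parent=root)
root.right = Node(3, parent=root)
print(successor_postorder(None, root.left).key)   # 3: sibling subtree comes next
print(successor_postorder(None, root.right).key)  # 2: parent follows its right child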
c302dcda1cdabdd30bccc5df1dd00c5325cf2c7a | earthobservatory/ariamh-pub | interferogram/sentinel/standard_product_packaging.py | [
"Apache-2.0"
] | Python | expand_attrdict | <not_specific> | def expand_attrdict(attr_dict, attr_name, attr_value):
'''
expand an attribute dict with more keys and values
Update the attribute dictionary if original key is used again with a new value
'''
#pdb.set_trace()
if attr_dict is None:
attr_dict = {}
for count in range(len(attr_name)):
attr_temp = {}
attr_temp["name"]=attr_name[count]
attr_temp["value"]=attr_value[count]
# adding it to the original dictionary
if len(attr_dict)==0:
attr_dict = [attr_temp]
else:
# looping over all the attributes to see if the name already is in use
count_dict = 0
name_match = None
for attr_dict_item in attr_dict:
if attr_dict_item["name"] == attr_temp["name"]:
name_match = count_dict
count_dict = count_dict +1
# if a match was found needs to update the attribute information
if name_match is not None:
attr_dict[name_match]=attr_temp
else:
attr_dict.append(attr_temp)
return attr_dict |
expand an attribute dict with more keys and values
Update the attribute dictionary if original key is used again with a new value
| expand an attribute dict with more keys and values
Update the attribute dictionary if original key is used again with a new value | [
"expand",
"an",
"attribute",
"dict",
"with",
"more",
"keys",
"and",
"values",
"Update",
"the",
"attribute",
"dictionary",
"if",
"original",
"key",
"is",
"used",
"again",
"with",
"a",
"new",
"value"
] | def expand_attrdict(attr_dict, attr_name, attr_value):
if attr_dict is None:
attr_dict = {}
for count in range(len(attr_name)):
attr_temp = {}
attr_temp["name"]=attr_name[count]
attr_temp["value"]=attr_value[count]
if len(attr_dict)==0:
attr_dict = [attr_temp]
else:
count_dict = 0
name_match = None
for attr_dict_item in attr_dict:
if attr_dict_item["name"] == attr_temp["name"]:
name_match = count_dict
count_dict = count_dict +1
if name_match is not None:
attr_dict[name_match]=attr_temp
else:
attr_dict.append(attr_temp)
return attr_dict | [
"def",
"expand_attrdict",
"(",
"attr_dict",
",",
"attr_name",
",",
"attr_value",
")",
":",
"if",
"attr_dict",
"is",
"None",
":",
"attr_dict",
"=",
"{",
"}",
"for",
"count",
"in",
"range",
"(",
"len",
"(",
"attr_name",
")",
")",
":",
"attr_temp",
"=",
"{",
"}",
"attr_temp",
"[",
"\"name\"",
"]",
"=",
"attr_name",
"[",
"count",
"]",
"attr_temp",
"[",
"\"value\"",
"]",
"=",
"attr_value",
"[",
"count",
"]",
"if",
"len",
"(",
"attr_dict",
")",
"==",
"0",
":",
"attr_dict",
"=",
"[",
"attr_temp",
"]",
"else",
":",
"count_dict",
"=",
"0",
"name_match",
"=",
"None",
"for",
"attr_dict_item",
"in",
"attr_dict",
":",
"if",
"attr_dict_item",
"[",
"\"name\"",
"]",
"==",
"attr_temp",
"[",
"\"name\"",
"]",
":",
"name_match",
"=",
"count_dict",
"count_dict",
"=",
"count_dict",
"+",
"1",
"if",
"name_match",
"is",
"not",
"None",
":",
"attr_dict",
"[",
"name_match",
"]",
"=",
"attr_temp",
"else",
":",
"attr_dict",
".",
"append",
"(",
"attr_temp",
")",
"return",
"attr_dict"
] | expand an attribute dict with more keys and values
Update the attribute dictionary if original key is used again with a new value | [
"expand",
"an",
"attribute",
"dict",
"with",
"more",
"keys",
"and",
"values",
"Update",
"the",
"attribute",
"dictionary",
"if",
"original",
"key",
"is",
"used",
"again",
"with",
"a",
"new",
"value"
] | [
"'''\n expand an attribute dict with more keys and values\n Update the attribute dictionary if original key is used again with a new value\n '''",
"#pdb.set_trace()",
"# adding it to the original dictionary",
"# looping over all the attributes to see if the name already is in use",
"# if a match was found needs to update the attribute information"
] | [
{
"param": "attr_dict",
"type": null
},
{
"param": "attr_name",
"type": null
},
{
"param": "attr_value",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "attr_dict",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "attr_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "attr_value",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def expand_attrdict(attr_dict, attr_name, attr_value):
if attr_dict is None:
attr_dict = {}
for count in range(len(attr_name)):
attr_temp = {}
attr_temp["name"]=attr_name[count]
attr_temp["value"]=attr_value[count]
if len(attr_dict)==0:
attr_dict = [attr_temp]
else:
count_dict = 0
name_match = None
for attr_dict_item in attr_dict:
if attr_dict_item["name"] == attr_temp["name"]:
name_match = count_dict
count_dict = count_dict +1
if name_match is not None:
attr_dict[name_match]=attr_temp
else:
attr_dict.append(attr_temp)
return attr_dict | 296 | 826 |
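Usage sketch for the expand_attrdict record above (attribute names are made up, assuming the record's function is in scope):

attrs = expand_attrdict(None, ["sensor", "units"], ["SAR", "radians"])
attrs = expand_attrdict(attrs, ["units"], ["meters"])  # reused name: value replaced
print(attrs)
# [{'name': 'sensor', 'value': 'SAR'}, {'name': 'units', 'value': 'meters'}]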
077c176bd2d67513a5a3097f158121ce51ab6fb9 | pretchu/datadog-agent | tasks/system_probe.py | [
"Apache-2.0"
] | Python | tempdir | null | def tempdir():
"""
Helper to create a temp directory and clean it
"""
dirpath = tempfile.mkdtemp()
try:
yield dirpath
finally:
shutil.rmtree(dirpath) |
Helper to create a temp directory and clean it
| Helper to create a temp directory and clean it | [
"Helper",
"to",
"create",
"a",
"temp",
"directory",
"and",
"clean",
"it"
] | def tempdir():
dirpath = tempfile.mkdtemp()
try:
yield dirpath
finally:
shutil.rmtree(dirpath) | [
"def",
"tempdir",
"(",
")",
":",
"dirpath",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"try",
":",
"yield",
"dirpath",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"dirpath",
")"
] | Helper to create a temp directory and clean it | [
"Helper",
"to",
"create",
"a",
"temp",
"directory",
"and",
"clean",
"it"
] | [
"\"\"\"\n Helper to create a temp directory and clean it\n \"\"\""
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | import tempfile
import shutil
def tempdir():
dirpath = tempfile.mkdtemp()
try:
yield dirpath
finally:
shutil.rmtree(dirpath) | 297 | 911 |
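Usage sketch for the tempdir record above; because the stored body is a generator, the source presumably decorates it with contextlib.contextmanager, which this sketch applies explicitly (an assumption):

import os
from contextlib import contextmanager

with contextmanager(tempdir)() as dirpath:
    print(os.path.isdir(dirpath))  # True while inside the with-block
print(os.path.isdir(dirpath))      # False: directory removed on exit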
320b18d052c052ec2e7b11b4a4e30465d40fd408 | tkisss/anndata | anndata/core/anndata.py | [
"BSD-3-Clause"
] | Python | _check_2d_shape | null | def _check_2d_shape(X):
"""Check shape of array or sparse matrix.
Assure that X is always 2D: Unlike numpy we always deal with 2D arrays.
"""
if X.dtype.names is None and len(X.shape) != 2:
raise ValueError('X needs to be 2-dimensional, not '
'{}-dimensional.'.format(len(X.shape))) | Check shape of array or sparse matrix.
Assure that X is always 2D: Unlike numpy we always deal with 2D arrays.
| Check shape of array or sparse matrix.
Assure that X is always 2D: Unlike numpy we always deal with 2D arrays. | [
"Check",
"shape",
"of",
"array",
"or",
"sparse",
"matrix",
".",
"Assure",
"that",
"X",
"is",
"always",
"2D",
":",
"Unlike",
"numpy",
"we",
"always",
"deal",
"with",
"2D",
"arrays",
"."
] | def _check_2d_shape(X):
if X.dtype.names is None and len(X.shape) != 2:
raise ValueError('X needs to be 2-dimensional, not '
'{}-dimensional.'.format(len(X.shape))) | [
"def",
"_check_2d_shape",
"(",
"X",
")",
":",
"if",
"X",
".",
"dtype",
".",
"names",
"is",
"None",
"and",
"len",
"(",
"X",
".",
"shape",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'X needs to be 2-dimensional, not '",
"'{}-dimensional.'",
".",
"format",
"(",
"len",
"(",
"X",
".",
"shape",
")",
")",
")"
] | Check shape of array or sparse matrix. | [
"Check",
"shape",
"of",
"array",
"or",
"sparse",
"matrix",
"."
] | [
"\"\"\"Check shape of array or sparse matrix.\n\n Assure that X is always 2D: Unlike numpy we always deal with 2D arrays.\n \"\"\""
] | [
{
"param": "X",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "X",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _check_2d_shape(X):
if X.dtype.names is None and len(X.shape) != 2:
raise ValueError('X needs to be 2-dimensional, not '
'{}-dimensional.'.format(len(X.shape))) | 298 | 342 |
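Quick check sketch for the _check_2d_shape record above (assumes numpy and the record's function are available):

import numpy as np

_check_2d_shape(np.zeros((3, 4)))  # 2-D: passes silently
try:
    _check_2d_shape(np.zeros(3))   # 1-D: rejected
except ValueError as err:
    print(err)  # X needs to be 2-dimensional, not 1-dimensional.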
0c37757c13d197d3855aa8c26f0ad82ed174cf46 | herbertludowieg/exatomic | exatomic/algorithms/orbital_util.py | [
"Apache-2.0"
] | Python | _check_column | <not_specific> | def _check_column(uni, df, key):
"""Sanity checking of columns in a given dataframe in the universe.
Args:
uni (:class:`~exatomic.core.universe.Universe`): a universe
df (str): name of dataframe attribute in the universe
key (str): column name in df
Returns:
key (str) if key in uni.df
"""
if key is None:
if 'momatrix' in df: key = 'coef'
elif 'orbital' in df: key = 'occupation'
else: raise Exception("{} not supported".format(df))
err = '"{}" not in uni.{}.columns'.format
if key not in getattr(uni, df).columns:
raise Exception(err(key, df))
return key | Sanity checking of columns in a given dataframe in the universe.
Args:
uni (:class:`~exatomic.core.universe.Universe`): a universe
df (str): name of dataframe attribute in the universe
key (str): column name in df
Returns:
key (str) if key in uni.df
| Sanity checking of columns in a given dataframe in the universe. | [
"Sanity",
"checking",
"of",
"columns",
"in",
"a",
"given",
"dataframe",
"in",
"the",
"universe",
"."
] | def _check_column(uni, df, key):
if key is None:
if 'momatrix' in df: key = 'coef'
elif 'orbital' in df: key = 'occupation'
else: raise Exception("{} not supported".format(df))
err = '"{}" not in uni.{}.columns'.format
if key not in getattr(uni, df).columns:
raise Exception(err(key, df))
return key | [
"def",
"_check_column",
"(",
"uni",
",",
"df",
",",
"key",
")",
":",
"if",
"key",
"is",
"None",
":",
"if",
"'momatrix'",
"in",
"df",
":",
"key",
"=",
"'coef'",
"elif",
"'orbital'",
"in",
"df",
":",
"key",
"=",
"'occupation'",
"else",
":",
"raise",
"Exception",
"(",
"\"{} not supported\"",
".",
"format",
"(",
"df",
")",
")",
"err",
"=",
"'\"{}\" not in uni.{}.columns'",
".",
"format",
"if",
"key",
"not",
"in",
"getattr",
"(",
"uni",
",",
"df",
")",
".",
"columns",
":",
"raise",
"Exception",
"(",
"err",
"(",
"key",
",",
"df",
")",
")",
"return",
"key"
] | Sanity checking of columns in a given dataframe in the universe. | [
"Sanity",
"checking",
"of",
"columns",
"in",
"a",
"given",
"dataframe",
"in",
"the",
"universe",
"."
] | [
"\"\"\"Sanity checking of columns in a given dataframe in the universe.\n\n Args:\n uni (:class:`~exatomic.core.universe.Universe`): a universe\n df (str): name of dataframe attribute in the universe\n key (str): column name in df\n\n Returns:\n key (str) if key in uni.df\n \"\"\""
] | [
{
"param": "uni",
"type": null
},
{
"param": "df",
"type": null
},
{
"param": "key",
"type": null
}
] | {
"returns": [
{
"docstring": "key (str) if key in uni.df",
"docstring_tokens": [
"key",
"(",
"str",
")",
"if",
"key",
"in",
"uni",
".",
"df"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "uni",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "df",
"type": null,
"docstring": "name of dataframe attribute in the universe",
"docstring_tokens": [
"name",
"of",
"dataframe",
"attribute",
"in",
"the",
"universe"
],
"default": null,
"is_optional": false
},
{
"identifier": "key",
"type": null,
"docstring": "column name in df",
"docstring_tokens": [
"column",
"name",
"in",
"df"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [
{
"identifier": "uni (",
"type": null,
"docstring": "`~exatomic.core.universe.Universe`): a universe",
"docstring_tokens": [
"`",
"~exatomic",
".",
"core",
".",
"universe",
".",
"Universe",
"`",
")",
":",
"a",
"universe"
],
"default": null,
"is_optional": null
}
],
"others": []
} | def _check_column(uni, df, key):
if key is None:
if 'momatrix' in df: key = 'coef'
elif 'orbital' in df: key = 'occupation'
else: raise Exception("{} not supported".format(df))
err = '"{}" not in uni.{}.columns'.format
if key not in getattr(uni, df).columns:
raise Exception(err(key, df))
return key | 299 | 228 |
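Illustrative call for the _check_column record above; the SimpleNamespace universe is a hypothetical stand-in carrying only the one dataframe attribute the function inspects:

import pandas as pd
from types import SimpleNamespace

uni = SimpleNamespace(momatrix=pd.DataFrame({"coef": [0.5, 0.3]}))
print(_check_column(uni, "momatrix", None))    # 'coef': default key for momatrix
print(_check_column(uni, "momatrix", "coef"))  # 'coef': explicit key accepted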
932779c0e4387ae783bb57df591e65bcb01ebecb | CDCgov/DCPC | lapps-grid/lib/galaxy/tools/imp_exp/unpack_tar_gz_archive.py | [
"Apache-2.0"
] | Python | check_archive | <not_specific> | def check_archive( archive_file, dest_dir ):
"""
Ensure that a tar archive has no absolute paths or relative paths outside
the archive.
"""
with tarfile.open( archive_file, mode='r:gz' ) as archive_fp:
for arc_path in archive_fp.getnames():
assert os.path.normpath(
os.path.join(
dest_dir,
arc_path
) ).startswith( dest_dir.rstrip(os.sep) + os.sep ), \
"Archive member would extract outside target directory: %s" % arc_path
return True |
Ensure that a tar archive has no absolute paths or relative paths outside
the archive.
| Ensure that a tar archive has no absolute paths or relative paths outside
the archive. | [
"Ensure",
"that",
"a",
"tar",
"archive",
"has",
"no",
"absolute",
"paths",
"or",
"relative",
"paths",
"outside",
"the",
"archive",
"."
] | def check_archive( archive_file, dest_dir ):
with tarfile.open( archive_file, mode='r:gz' ) as archive_fp:
for arc_path in archive_fp.getnames():
assert os.path.normpath(
os.path.join(
dest_dir,
arc_path
) ).startswith( dest_dir.rstrip(os.sep) + os.sep ), \
"Archive member would extract outside target directory: %s" % arc_path
return True | [
"def",
"check_archive",
"(",
"archive_file",
",",
"dest_dir",
")",
":",
"with",
"tarfile",
".",
"open",
"(",
"archive_file",
",",
"mode",
"=",
"'r:gz'",
")",
"as",
"archive_fp",
":",
"for",
"arc_path",
"in",
"archive_fp",
".",
"getnames",
"(",
")",
":",
"assert",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dest_dir",
",",
"arc_path",
")",
")",
".",
"startswith",
"(",
"dest_dir",
".",
"rstrip",
"(",
"os",
".",
"sep",
")",
"+",
"os",
".",
"sep",
")",
",",
"\"Archive member would extract outside target directory: %s\"",
"%",
"arc_path",
"return",
"True"
] | Ensure that a tar archive has no absolute paths or relative paths outside
the archive. | [
"Ensure",
"that",
"a",
"tar",
"archive",
"has",
"no",
"absolute",
"paths",
"or",
"relative",
"paths",
"outside",
"the",
"archive",
"."
] | [
"\"\"\"\n Ensure that a tar archive has no absolute paths or relative paths outside\n the archive.\n \"\"\""
] | [
{
"param": "archive_file",
"type": null
},
{
"param": "dest_dir",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "archive_file",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "dest_dir",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import tarfile
import os
def check_archive( archive_file, dest_dir ):
with tarfile.open( archive_file, mode='r:gz' ) as archive_fp:
for arc_path in archive_fp.getnames():
assert os.path.normpath(
os.path.join(
dest_dir,
arc_path
) ).startswith( dest_dir.rstrip(os.sep) + os.sep ), \
"Archive member would extract outside target directory: %s" % arc_path
return True | 300 | 975 |
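End-to-end sketch for the check_archive record above (builds a small safe archive in a temp directory; assumes the record's function is in scope):

import os
import tarfile
import tempfile

workdir = tempfile.mkdtemp()
with open(os.path.join(workdir, "data.txt"), "w") as f:
    f.write("hello")
archive = os.path.join(workdir, "safe.tar.gz")
with tarfile.open(archive, "w:gz") as tar:
    tar.add(os.path.join(workdir, "data.txt"), arcname="data.txt")
print(check_archive(archive, workdir))  # True: "data.txt" stays inside workdir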
fb68875215fce848af62b8298fd455c49e666e18 | megvii-research/megfile | megfile/fs.py | [
"Apache-2.0",
"MIT"
] | Python | fs_isdir | bool | def fs_isdir(path: PathLike, followlinks: bool = False) -> bool:
'''
Test if a path is directory
.. note::
The difference between this function and ``os.path.isdir`` is that this function regard symlink as file
:param path: Given file path
:param followlinks: False if regard symlink as file, else True
:returns: True if the path is a directory, else False
'''
if os.path.islink(path) and not followlinks:
return False
return os.path.isdir(path) |
Test if a path is directory
.. note::
The difference between this function and ``os.path.isdir`` is that this function regard symlink as file
:param path: Given file path
:param followlinks: False if regard symlink as file, else True
:returns: True if the path is a directory, else False
| Test if a path is directory
note:.
The difference between this function and ``os.path.isdir`` is that this function regard symlink as file | [
"Test",
"if",
"a",
"path",
"is",
"directory",
"note",
":",
".",
"The",
"difference",
"between",
"this",
"function",
"and",
"`",
"`",
"os",
".",
"path",
".",
"isdir",
"`",
"`",
"is",
"that",
"this",
"function",
"regard",
"symlink",
"as",
"file"
] | def fs_isdir(path: PathLike, followlinks: bool = False) -> bool:
if os.path.islink(path) and not followlinks:
return False
return os.path.isdir(path) | [
"def",
"fs_isdir",
"(",
"path",
":",
"PathLike",
",",
"followlinks",
":",
"bool",
"=",
"False",
")",
"->",
"bool",
":",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"path",
")",
"and",
"not",
"followlinks",
":",
"return",
"False",
"return",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")"
] | Test if a path is directory
.. note:: | [
"Test",
"if",
"a",
"path",
"is",
"directory",
"..",
"note",
"::"
] | [
"'''\n Test if a path is directory\n\n .. note::\n\n The difference between this function and ``os.path.isdir`` is that this function regard symlink as file\n\n :param path: Given file path\n :param followlinks: False if regard symlink as file, else True\n :returns: True if the path is a directory, else False\n\n '''"
] | [
{
"param": "path",
"type": "PathLike"
},
{
"param": "followlinks",
"type": "bool"
}
] | {
"returns": [
{
"docstring": "True if the path is a directory, else False",
"docstring_tokens": [
"True",
"if",
"the",
"path",
"is",
"a",
"directory",
"else",
"False"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "path",
"type": "PathLike",
"docstring": "Given file path",
"docstring_tokens": [
"Given",
"file",
"path"
],
"default": null,
"is_optional": null
},
{
"identifier": "followlinks",
"type": "bool",
"docstring": "False if regard symlink as file, else True",
"docstring_tokens": [
"False",
"if",
"regard",
"symlink",
"as",
"file",
"else",
"True"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import os
def fs_isdir(path: PathLike, followlinks: bool = False) -> bool:
if os.path.islink(path) and not followlinks:
return False
return os.path.isdir(path) | 301 | 57 |
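Usage sketch for the fs_isdir record above showing the symlink behaviour (assumes a POSIX-like system where os.symlink is permitted):

import os
import tempfile

real_dir = tempfile.mkdtemp()
link = real_dir + "_link"
os.symlink(real_dir, link)
print(fs_isdir(real_dir))                # True
print(fs_isdir(link))                    # False: symlink treated as a file
print(fs_isdir(link, followlinks=True))  # True: the link is followed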
357664668399863ce323e34df0428cd6a7040c4a | cguardia/questions | questions/validators.py | [
"MIT"
] | Python | numeric_validator | <not_specific> | def numeric_validator(validator: Validator, value: Any, form_data: Dict[str, Any]):
"""Validate if number value is within set limits.
:param validator:
The validator instance for the current question.
:param value:
The value to be validated.
:param form_data:
The dictionary containing form data entered for current form.
:Returns:
If validation passes, :data:`True`, else :data:`False`.
"""
max_value = float(validator.max_value)
min_value = float(validator.min_value)
result = True
value = float(value)
if value < min_value or max_value > 0 and value > max_value:
result = False
return result | Validate if number value is within set limits.
:param validator:
The validator instance for the current question.
:param value:
The value to be validated.
:param form_data:
The dictionary containing form data entered for current form.
:Returns:
If validation passes, :data:`True`, else :data:`False`.
| Validate if number value is within set limits. | [
"Validate",
"if",
"number",
"value",
"is",
"within",
"set",
"limits",
"."
] | def numeric_validator(validator: Validator, value: Any, form_data: Dict[str, Any]):
max_value = float(validator.max_value)
min_value = float(validator.min_value)
result = True
value = float(value)
if value < min_value or max_value > 0 and value > max_value:
result = False
return result | [
"def",
"numeric_validator",
"(",
"validator",
":",
"Validator",
",",
"value",
":",
"Any",
",",
"form_data",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
":",
"max_value",
"=",
"float",
"(",
"validator",
".",
"max_value",
")",
"min_value",
"=",
"float",
"(",
"validator",
".",
"min_value",
")",
"result",
"=",
"True",
"value",
"=",
"float",
"(",
"value",
")",
"if",
"value",
"<",
"min_value",
"or",
"max_value",
">",
"0",
"and",
"value",
">",
"max_value",
":",
"result",
"=",
"False",
"return",
"result"
] | Validate if number value is within set limits. | [
"Validate",
"if",
"number",
"value",
"is",
"within",
"set",
"limits",
"."
] | [
"\"\"\"Validate if number value is within set limits.\n\n :param validator:\n The validator instance for the current question.\n :param value:\n The value to be validated.\n :param form_data:\n The dictionary containing from data entered for current form.\n\n :Returns:\n If validation passes, :data:`True`, else :data:`False`.\n \"\"\""
] | [
{
"param": "validator",
"type": "Validator"
},
{
"param": "value",
"type": "Any"
},
{
"param": "form_data",
"type": "Dict[str, Any]"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "validator",
"type": "Validator",
"docstring": "The validator instance for the current question.",
"docstring_tokens": [
"The",
"validator",
"instance",
"for",
"the",
"current",
"question",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "value",
"type": "Any",
"docstring": "The value to be validated.",
"docstring_tokens": [
"The",
"value",
"to",
"be",
"validated",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "form_data",
"type": "Dict[str, Any]",
"docstring": "The dictionary containing from data entered for current form.",
"docstring_tokens": [
"The",
"dictionary",
"containing",
"from",
"data",
"entered",
"for",
"current",
"form",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": [
{
"identifier": "Returns",
"docstring": "If validation passes, :data:`True`, else :data:`False`.",
"docstring_tokens": [
"If",
"validation",
"passes",
":",
"data",
":",
"`",
"True",
"`",
"else",
":",
"data",
":",
"`",
"False",
"`",
"."
]
}
]
} | def numeric_validator(validator: Validator, value: Any, form_data: Dict[str, Any]):
max_value = float(validator.max_value)
min_value = float(validator.min_value)
result = True
value = float(value)
if value < min_value or max_value > 0 and value > max_value:
result = False
return result | 302 | 907 |
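Call sketch for the numeric_validator record above; SimpleNamespace is a hypothetical stand-in for the library's Validator, carrying only the two attributes the function reads:

from types import SimpleNamespace

limits = SimpleNamespace(min_value="1", max_value="10")
print(numeric_validator(limits, "5", {}))   # True: 5.0 lies within [1, 10]
print(numeric_validator(limits, "42", {}))  # False: 42.0 exceeds max_value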
b35da1fb66211b3512488ba47f5073de4750fc55 | lbragstad/fernet-inspector | fernet_inspector/core.py | [
"Apache-2.0"
] | Python | _restore_padding | <not_specific> | def _restore_padding(token):
"""Restore padding based on token size.
:param token: token to restore padding on
:returns: token with correct padding
"""
# Re-inflate the padding
mod_returned = len(token) % 4
if mod_returned:
missing_padding = 4 - mod_returned
token += b'=' * missing_padding
return token | Restore padding based on token size.
:param token: token to restore padding on
:returns: token with correct padding
| Restore padding based on token size. | [
"Restore",
"padding",
"based",
"on",
"token",
"size",
"."
] | def _restore_padding(token):
mod_returned = len(token) % 4
if mod_returned:
missing_padding = 4 - mod_returned
token += b'=' * missing_padding
return token | [
"def",
"_restore_padding",
"(",
"token",
")",
":",
"mod_returned",
"=",
"len",
"(",
"token",
")",
"%",
"4",
"if",
"mod_returned",
":",
"missing_padding",
"=",
"4",
"-",
"mod_returned",
"token",
"+=",
"b'='",
"*",
"missing_padding",
"return",
"token"
] | Restore padding based on token size. | [
"Restore",
"padding",
"based",
"on",
"token",
"size",
"."
] | [
"\"\"\"Restore padding based on token size.\n\n :param token: token to restore padding on\n :returns: token with correct padding\n\n \"\"\"",
"# Re-inflate the padding"
] | [
{
"param": "token",
"type": null
}
] | {
"returns": [
{
"docstring": "token with correct padding",
"docstring_tokens": [
"token",
"with",
"correct",
"padding"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "token",
"type": null,
"docstring": "token to restore padding on",
"docstring_tokens": [
"token",
"to",
"restore",
"padding",
"on"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _restore_padding(token):
mod_returned = len(token) % 4
if mod_returned:
missing_padding = 4 - mod_returned
token += b'=' * missing_padding
return token | 303 | 209 |
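Round-trip sketch for the _restore_padding record above (assumes the record's function is in scope):

import base64

stripped = base64.urlsafe_b64encode(b"token").rstrip(b"=")  # b'dG9rZW4'
restored = _restore_padding(stripped)                       # b'dG9rZW4='
print(base64.urlsafe_b64decode(restored))                   # b'token'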
25ca9380d29c3fffaa572be0a8ea8686dd1b17f3 | MoustafaaAshraf/Disaster-Response-Message-Classification | models/train_classifier.py | [
"MIT"
] | Python | save_model | null | def save_model(model, model_filepath):
'''
Saving a file of the trained model
:param model: trained model
:param model_filepath: location of the trained model file
'''
# Creating a pathfile to save the model
model_pkl = open(model_filepath, 'wb')
# Saving the model into the created location
pickle.dump(model, model_pkl)
# Closing
model_pkl.close() |
Saving a file of the trained model
:param model: trained model
:param model_filepath: location of the trained model file
| Saving a file of the trained model | [
"Saving",
"a",
"file",
"of",
"the",
"trained",
"model"
] | def save_model(model, model_filepath):
model_pkl = open(model_filepath, 'wb')
pickle.dump(model, model_pkl)
model_pkl.close() | [
"def",
"save_model",
"(",
"model",
",",
"model_filepath",
")",
":",
"model_pkl",
"=",
"open",
"(",
"model_filepath",
",",
"'wb'",
")",
"pickle",
".",
"dump",
"(",
"model",
",",
"model_pkl",
")",
"model_pkl",
".",
"close",
"(",
")"
] | Saving a file of the trained model | [
"Saving",
"a",
"file",
"of",
"the",
"trained",
"model"
] | [
"'''\n Saving a file of the trained model\n :param model: trained model\n :param model_filepath: location of the trained model file\n '''",
"# Creating a pathfile to save the model",
"# Saving the model into the created location",
"# Closing"
] | [
{
"param": "model",
"type": null
},
{
"param": "model_filepath",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "model",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "model_filepath",
"type": null,
"docstring": "location of the trained model file",
"docstring_tokens": [
"location",
"of",
"the",
"trained",
"model",
"file"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import pickle
def save_model(model, model_filepath):
model_pkl = open(model_filepath, 'wb')
pickle.dump(model, model_pkl)
model_pkl.close() | 304 | 157 |
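Usage sketch for the save_model record above (the dict stands in for a trained classifier; the file name is made up):

import pickle

model = {"weights": [0.1, 0.2]}
save_model(model, "model.pkl")
with open("model.pkl", "rb") as f:
    print(pickle.load(f))  # {'weights': [0.1, 0.2]}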
a55d54a627f7136b12396650c9afe9f182e0d56f | ginogustavo/h1bvisa | cleaning.py | [
"MIT"
] | Python | clean_wages | <not_specific> | def clean_wages(w):
"""
Function to remove '$' symbol and other delimiters from wages column which consists of str and float type values
if the column entry is string type then remove the symbols else return the column value as it is
"""
if isinstance(w, str):
return(w.replace('$', '').replace(',', ''))
return(w) |
Function to remove '$' symbol and other delimiters from wages column which consists of str and float type values
if the column entry is string type then remove the symbols else return the column value as it is
| Function to remove '$' symbol and other delimiters from wages column which consists of str and float type values
if the column entry is string type then remove the symbols else return the column value as it is | [
"Function",
"to",
"remove",
"'",
"$",
"'",
"symbol",
"and",
"other",
"delimiters",
"from",
"wages",
"column",
"which",
"consistes",
"of",
"str",
"and",
"float",
"type",
"values",
"if",
"the",
"column",
"entry",
"is",
"string",
"type",
"then",
"remove",
"the",
"symbols",
"else",
"return",
"the",
"column",
"value",
"as",
"it",
"is"
] | def clean_wages(w):
if isinstance(w, str):
return(w.replace('$', '').replace(',', ''))
return(w) | [
"def",
"clean_wages",
"(",
"w",
")",
":",
"if",
"isinstance",
"(",
"w",
",",
"str",
")",
":",
"return",
"(",
"w",
".",
"replace",
"(",
"'$'",
",",
"''",
")",
".",
"replace",
"(",
"','",
",",
"''",
")",
")",
"return",
"(",
"w",
")"
] | Function to remove '$' symbol and other delimiters from wages column which consists of str and float type values
if the column entry is string type then remove the symbols else return the column value as it is | [
"Function",
"to",
"remove",
"'",
"$",
"'",
"symbol",
"and",
"other",
"delimiters",
"from",
"wages",
"column",
"which",
"consistes",
"of",
"str",
"and",
"float",
"type",
"values",
"if",
"the",
"column",
"entry",
"is",
"string",
"type",
"then",
"remove",
"the",
"symbols",
"else",
"return",
"the",
"column",
"value",
"as",
"it",
"is"
] | [
"\"\"\"\r\n Function to remove '$' symbol and other delimiters from wages column which consistes of str and float type values\r\n if the column entry is string type then remove the symbols else return the column value as it is \r\n \"\"\""
] | [
{
"param": "w",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "w",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def clean_wages(w):
if isinstance(w, str):
return(w.replace('$', '').replace(',', ''))
return(w) | 306 | 503 |
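Illustrative calls for the clean_wages record above (assuming the record's function is in scope):

print(clean_wages("$60,000.00"))  # '60000.00': symbols stripped, still a str
print(clean_wages(72500.0))       # 72500.0: non-string values pass through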
534146f8def9815e3d8190130fb453ff96ac98f9 | cancervariants/metakb | tests/conftest.py | [
"MIT"
] | Python | pmid_11423618 | <not_specific> | def pmid_11423618():
"""Create a test fixture for PMID 11423618."""
return {
"id": "pmid:11423618",
"label": "Gorre, Mercedes E., et al. \"Clinical resistance to STI-571 cancer therapy caused by BCR-ABL gene mutation or amplification.\" Science 293.5531 (2001): 876-880.", # noqa: E501
"xrefs": [
"doi:10.1126/science.1062538"
],
"type": "Document"
} | Create a test fixture for PMID 11423618. | Create a test fixture for PMID 11423618. | [
"Create",
"a",
"test",
"fixture",
"for",
"PMID",
"11423618",
"."
] | def pmid_11423618():
return {
"id": "pmid:11423618",
"label": "Gorre, Mercedes E., et al. \"Clinical resistance to STI-571 cancer therapy caused by BCR-ABL gene mutation or amplification.\" Science 293.5531 (2001): 876-880.",
"xrefs": [
"doi:10.1126/science.1062538"
],
"type": "Document"
} | [
"def",
"pmid_11423618",
"(",
")",
":",
"return",
"{",
"\"id\"",
":",
"\"pmid:11423618\"",
",",
"\"label\"",
":",
"\"Gorre, Mercedes E., et al. \\\"Clinical resistance to STI-571 cancer therapy caused by BCR-ABL gene mutation or amplification.\\\" Science 293.5531 (2001): 876-880.\"",
",",
"\"xrefs\"",
":",
"[",
"\"doi:10.1126/science.1062538\"",
"]",
",",
"\"type\"",
":",
"\"Document\"",
"}"
] | Create a test fixture for PMID 11423618. | [
"Create",
"a",
"test",
"fixture",
"for",
"PMID",
"11423618",
"."
] | [
"\"\"\"Create a test fixture for PMID 11423618.\"\"\"",
"# noqa: E501"
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | def pmid_11423618():
return {
"id": "pmid:11423618",
"label": "Gorre, Mercedes E., et al. \"Clinical resistance to STI-571 cancer therapy caused by BCR-ABL gene mutation or amplification.\" Science 293.5531 (2001): 876-880.",
"xrefs": [
"doi:10.1126/science.1062538"
],
"type": "Document"
} | 307 | 713 |
164033dd319c0ba73f0d9d7a5d621770d594f85f | quic-pdangi/aimet | TrainingExtensions/torch/src/python/aimet_torch/torchscript_utils.py | [
"BSD-3-Clause"
] | Python | _coalesce_add_and_mm_nodes | null | def _coalesce_add_and_mm_nodes(ir_nodes_list: List[IrNode]):
"""
helper method to combine add and mm operation into addmm to map back to fc layer
:param ir_nodes_list: List of ir_nodes to update connections for
"""
del_node_indices = []
for i, ir_node in enumerate(ir_nodes_list):
if ir_node.node_type == 'add' and len(ir_node.inputs) == 1:
producer_ir_node = ir_nodes_list[i-1]
if producer_ir_node.node_type == 'mm' and len(producer_ir_node.outputs) == 1 and \
producer_ir_node.outputs[0] == ir_node.inputs[0]:
producer_ir_node.outputs = ir_node.outputs
producer_ir_node.node_type = 'addmm'
del_node_indices.insert(0, i)
for index in del_node_indices:
del ir_nodes_list[index] |
helper method to combine add and mm operation into addmm to map back to fc layer
:param ir_nodes_list: List of ir_nodes to update connections for
| helper method to combine add and mm operation into addmm to map back to fc layer | [
"helper",
"method",
"to",
"combine",
"add",
"and",
"mm",
"operation",
"into",
"addmm",
"to",
"map",
"back",
"to",
"fc",
"layer"
] | def _coalesce_add_and_mm_nodes(ir_nodes_list: List[IrNode]):
del_node_indices = []
for i, ir_node in enumerate(ir_nodes_list):
if ir_node.node_type == 'add' and len(ir_node.inputs) == 1:
producer_ir_node = ir_nodes_list[i-1]
if producer_ir_node.node_type == 'mm' and len(producer_ir_node.outputs) == 1 and \
producer_ir_node.outputs[0] == ir_node.inputs[0]:
producer_ir_node.outputs = ir_node.outputs
producer_ir_node.node_type = 'addmm'
del_node_indices.insert(0, i)
for index in del_node_indices:
del ir_nodes_list[index] | [
"def",
"_coalesce_add_and_mm_nodes",
"(",
"ir_nodes_list",
":",
"List",
"[",
"IrNode",
"]",
")",
":",
"del_node_indices",
"=",
"[",
"]",
"for",
"i",
",",
"ir_node",
"in",
"enumerate",
"(",
"ir_nodes_list",
")",
":",
"if",
"ir_node",
".",
"node_type",
"==",
"'add'",
"and",
"len",
"(",
"ir_node",
".",
"inputs",
")",
"==",
"1",
":",
"producer_ir_node",
"=",
"ir_nodes_list",
"[",
"i",
"-",
"1",
"]",
"if",
"producer_ir_node",
".",
"node_type",
"==",
"'mm'",
"and",
"len",
"(",
"producer_ir_node",
".",
"outputs",
")",
"==",
"1",
"and",
"producer_ir_node",
".",
"outputs",
"[",
"0",
"]",
"==",
"ir_node",
".",
"inputs",
"[",
"0",
"]",
":",
"producer_ir_node",
".",
"outputs",
"=",
"ir_node",
".",
"outputs",
"producer_ir_node",
".",
"node_type",
"=",
"'addmm'",
"del_node_indices",
".",
"insert",
"(",
"0",
",",
"i",
")",
"for",
"index",
"in",
"del_node_indices",
":",
"del",
"ir_nodes_list",
"[",
"index",
"]"
] | helper method to combine add and mm operation into addmm to map back to fc layer | [
"helper",
"method",
"to",
"combine",
"add",
"and",
"mm",
"operation",
"into",
"addmm",
"to",
"map",
"back",
"to",
"fc",
"layer"
] | [
"\"\"\"\n helper method to combine add and mm operation into addmm to map back to fc layer\n :param ir_nodes_list: List of ir_nodes to update connections for\n \"\"\""
] | [
{
"param": "ir_nodes_list",
"type": "List[IrNode]"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "ir_nodes_list",
"type": "List[IrNode]",
"docstring": "List of ir_nodes to update connections for",
"docstring_tokens": [
"List",
"of",
"ir_nodes",
"to",
"update",
"connections",
"for"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _coalesce_add_and_mm_nodes(ir_nodes_list: List[IrNode]):
del_node_indices = []
for i, ir_node in enumerate(ir_nodes_list):
if ir_node.node_type == 'add' and len(ir_node.inputs) == 1:
producer_ir_node = ir_nodes_list[i-1]
if producer_ir_node.node_type == 'mm' and len(producer_ir_node.outputs) == 1 and \
producer_ir_node.outputs[0] == ir_node.inputs[0]:
producer_ir_node.outputs = ir_node.outputs
producer_ir_node.node_type = 'addmm'
del_node_indices.insert(0, i)
for index in del_node_indices:
del ir_nodes_list[index] | 308 | 490 |
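Sketch for the _coalesce_add_and_mm_nodes record above; this IrNode dataclass is a hypothetical stand-in mirroring only the attributes the function touches:

from dataclasses import dataclass, field
from typing import List

@dataclass
class IrNode:
    node_type: str
    inputs: List[str] = field(default_factory=list)
    outputs: List[str] = field(default_factory=list)

nodes = [IrNode("mm", ["x", "w"], ["t0"]), IrNode("add", ["t0"], ["y"])]
_coalesce_add_and_mm_nodes(nodes)
print([(n.node_type, n.outputs) for n in nodes])  # [('addmm', ['y'])]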
4aa13db698ad6d7140a4d7d5cd8d6f26dfc49427 | mollypi/FinanceOps | returns.py | [
"MIT"
] | Python | max_pullup | <not_specific> | def max_pullup(df, window=None):
"""
Calculate the Maximum Pullup for all the individual columns in `df`.
This gives the Max Pullup for each time-step, calculated either
from the beginning or from the preceding window of the given length.
If you want the highest pullup that has occurred through all time-steps,
then you should call the `max()` function on the resulting DataFrame,
for example: `max_pullup(df=df_prices).max()`
Unlike the function `max_drawdown` which is typically calculated for the
entire DataFrame `df` by setting `window=None`, it is often more useful
to set the `window` parameter when calculating the Max Pullup using this
function. This is because many stocks tend to have increasing prices over
time, so the "pullup" or recovery from their historical lows is potentially
infinite over time. So it may be more useful to only consider the pullup
over e.g. 1-year periods by setting the `window` parameter accordingly.
:param df:
Pandas DataFrame typically with share-prices but could have any data.
If the DataFrame contains data for more stocks, then the columns are
the stock-tickers and the rows are the time-steps.
:param window:
If `None` then calculate the Max Pullup from the beginning.
If an integer then calculate the Max Pullup for a rolling window
of that length.
:return:
Pandas DataFrame with the Max Pullup time-series.
"""
if window is None:
# Calculate Max Pullup from the beginning.
max_pu = df / df.cummin() - 1.0
else:
# Calculate Max Pullup for a rolling window.
max_pu = df / df.rolling(window=window).min() - 1.0
return max_pu |
Calculate the Maximum Pullup for all the individual columns in `df`.
This gives the Max Pullup for each time-step, calculated either
from the beginning or from the preceding window of the given length.
If you want the highest pullup that has occurred through all time-steps,
then you should call the `max()` function on the resulting DataFrame,
for example: `max_pullup(df=df_prices).max()`
Unlike the function `max_drawdown` which is typically calculated for the
entire DataFrame `df` by setting `window=None`, it is often more useful
to set the `window` parameter when calculating the Max Pullup using this
function. This is because many stocks tend to have increasing prices over
time, so the "pullup" or recovery from their historical lows is potentially
infinite over time. So it may be more useful to only consider the pullup
over e.g. 1-year periods by setting the `window` parameter accordingly.
:param df:
Pandas DataFrame typically with share-prices but could have any data.
If the DataFrame contains data for more stocks, then the columns are
the stock-tickers and the rows are the time-steps.
:param window:
If `None` then calculate the Max Pullup from the beginning.
If an integer then calculate the Max Pullup for a rolling window
of that length.
:return:
Pandas DataFrame with the Max Pullup time-series.
| Calculate the Maximum Pullup for all the individual columns in `df`.
This gives the Max Pullup for each time-step, calculated either
from the beginning or from the preceding window of the given length.
If you want the highest pullup that has occurred through all time-steps,
then you should call the `max()` function on the resulting DataFrame,
for example: `max_pullup(df=df_prices).max()`
| [
"Calculate",
"the",
"Maximum",
"Pullup",
"for",
"all",
"the",
"individual",
"columns",
"in",
"`",
"df",
"`",
".",
"This",
"gives",
"the",
"Max",
"Pullup",
"for",
"each",
"time",
"-",
"step",
"calculated",
"either",
"from",
"the",
"beginning",
"or",
"from",
"the",
"preceding",
"window",
"of",
"the",
"given",
"length",
".",
"If",
"you",
"want",
"the",
"highest",
"pullup",
"that",
"has",
"occurred",
"through",
"all",
"time",
"-",
"steps",
"then",
"you",
"should",
"call",
"the",
"`",
"max",
"()",
"`",
"function",
"on",
"the",
"resulting",
"DataFrame",
"for",
"example",
":",
"`",
"max_pullup",
"(",
"df",
"=",
"df_prices",
")",
".",
"max",
"()",
"`"
] | def max_pullup(df, window=None):
if window is None:
max_pu = df / df.cummin() - 1.0
else:
max_pu = df / df.rolling(window=window).min() - 1.0
return max_pu | [
"def",
"max_pullup",
"(",
"df",
",",
"window",
"=",
"None",
")",
":",
"if",
"window",
"is",
"None",
":",
"max_pu",
"=",
"df",
"/",
"df",
".",
"cummin",
"(",
")",
"-",
"1.0",
"else",
":",
"max_pu",
"=",
"df",
"/",
"df",
".",
"rolling",
"(",
"window",
"=",
"window",
")",
".",
"min",
"(",
")",
"-",
"1.0",
"return",
"max_pu"
] | Calculate the Maximum Pullup for all the individual columns in `df`. | [
"Calculate",
"the",
"Maximum",
"Pullup",
"for",
"all",
"the",
"individual",
"columns",
"in",
"`",
"df",
"`",
"."
] | [
"\"\"\"\n Calculate the Maximum Pullup for all the individual columns in `df`.\n\n This gives the Max Pullup for each time-step, calculated either\n from the beginning or from the preceding window of the given length.\n\n If you want the highest pullup that has occurred through all time-steps,\n then you should call the `max()` function on the resulting DataFrame,\n for example: `max_pullup(df=df_prices).max()`\n\n Unlike the function `max_drawdown` which is typically calculated for the\n entire DataFrame `df` by setting `window=None`, it is often more useful\n to set the `window` parameter when calculating the Max Pullup using this\n function. This is because many stocks tend to have increasing prices over\n time, so the \"pullup\" or recovery from their historical lows is potentially\n infinite over time. So it may be more useful to only consider the pullup\n over e.g. 1-year periods by setting the `window` parameter accordingly.\n\n :param df:\n Pandas DataFrame typically with share-prices but could have any data.\n If the DataFrame contains data for more stocks, then the columns are\n the stock-tickers and the rows are the time-steps.\n\n :param window:\n If `None` then calculate the Max Pullup from the beginning.\n If an integer then calculate the Max Pullup for a rolling window\n of that length.\n\n :return:\n Pandas DataFrame with the Max Pullup time-series.\n \"\"\"",
"# Calculate Max Pullup from the beginning.",
"# Calculate Max Pullup for a rolling window."
] | [
{
"param": "df",
"type": null
},
{
"param": "window",
"type": null
}
] | {
"returns": [
{
"docstring": "Pandas DataFrame with the Max Pullup time-series.",
"docstring_tokens": [
"Pandas",
"DataFrame",
"with",
"the",
"Max",
"Pullup",
"time",
"-",
"series",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "df",
"type": null,
"docstring": "Pandas DataFrame typically with share-prices but could have any data.\nIf the DataFrame contains data for more stocks, then the columns are\nthe stock-tickers and the rows are the time-steps.",
"docstring_tokens": [
"Pandas",
"DataFrame",
"typically",
"with",
"share",
"-",
"prices",
"but",
"could",
"have",
"any",
"data",
".",
"If",
"the",
"DataFrame",
"contains",
"data",
"for",
"more",
"stocks",
"then",
"the",
"columns",
"are",
"the",
"stock",
"-",
"tickers",
"and",
"the",
"rows",
"are",
"the",
"time",
"-",
"steps",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "window",
"type": null,
"docstring": "If `None` then calculate the Max Pullup from the beginning.\nIf an integer then calculate the Max Pullup for a rolling window\nof that length.",
"docstring_tokens": [
"If",
"`",
"None",
"`",
"then",
"calculate",
"the",
"Max",
"Pullup",
"from",
"the",
"beginning",
".",
"If",
"an",
"integer",
"then",
"calculate",
"the",
"Max",
"Pullup",
"for",
"a",
"rolling",
"window",
"of",
"that",
"length",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def max_pullup(df, window=None):
if window is None:
max_pu = df / df.cummin() - 1.0
else:
max_pu = df / df.rolling(window=window).min() - 1.0
return max_pu | 310 | 868 |
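Usage sketch for the max_pullup record above (made-up prices, assuming pandas and the record's function are available):

import pandas as pd

prices = pd.DataFrame({"ACME": [100.0, 80.0, 90.0, 120.0]})
print(max_pullup(prices).max())            # ACME 0.5: 120 is 50% above the 80 low
print(max_pullup(prices, window=2).max())  # ACME ~0.333: best 2-step recovery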
244aa1902b798e1cee62de97c4f43ebee221a68f | DataONEorg/d1_python | test_utilities/src/d1_test/pycharm.py | [
"Apache-2.0"
] | Python | _tag_empty_file | null | def _tag_empty_file(path):
"""If path is to empty file, write the text "<empty>" to the file.
- This works around the issue that PyCharm Diff & Merge errors out if one
of the input files are empty. - Is probably less confusing when debugging
"""
if not os.path.getsize(path):
with open(path, "w") as f:
f.write("<empty>") | If path is to empty file, write the text "<empty>" to the file.
- This works around the issue that PyCharm Diff & Merge errors out if one
of the input files are empty. - Is probably less confusing when debugging
| If path is to empty file, write the text "" to the file.
This works around the issue that PyCharm Diff & Merge errors out if one
of the input files are empty. - Is probably less confusing when debugging | [
"If",
"path",
"is",
"to",
"empty",
"file",
"write",
"the",
"text",
"\"",
"\"",
"to",
"the",
"file",
".",
"This",
"works",
"around",
"the",
"issue",
"that",
"PyCharm",
"PyCharm",
"Diff",
"&",
"Merge",
"errors",
"out",
"if",
"one",
"of",
"the",
"input",
"files",
"are",
"empty",
".",
"-",
"Is",
"probably",
"less",
"confusing",
"when",
"debugging"
] | def _tag_empty_file(path):
if not os.path.getsize(path):
with open(path, "w") as f:
f.write("<empty>") | [
"def",
"_tag_empty_file",
"(",
"path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"getsize",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"<empty>\"",
")"
] | If path is to empty file, write the text "<empty>" to the file. | [
"If",
"path",
"is",
"to",
"empty",
"file",
"write",
"the",
"text",
"\"",
"<empty",
">",
"\"",
"to",
"the",
"file",
"."
] | [
"\"\"\"If path is to empty file, write the text \"<empty>\" to the file.\n\n - This works around the issue that PyCharm PyCharm Diff & Merge errors out if one\n of the input files are empty. - Is probably less confusing when debugging\n\n \"\"\""
] | [
{
"param": "path",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import os
def _tag_empty_file(path):
if not os.path.getsize(path):
with open(path, "w") as f:
f.write("<empty>") | 312 | 218 |
9d4271d5c29558973c5847595bfe215b0fe8d90f | Zaerog/python-docs-samples | functions/billing/main.py | [
"Apache-2.0"
] | Python | __disable_billing_for_project | null | def __disable_billing_for_project(project_name, projects):
"""
Disable billing for a project by removing its billing account
@param {string} project_name Name of project to disable billing on
"""
body = {'billingAccountName': ''} # Disable billing
try:
res = projects.updateBillingInfo(name=project_name, body=body).execute()
print(f'Billing disabled: {json.dumps(res)}')
except Exception:
print('Failed to disable billing, possibly check permissions') |
Disable billing for a project by removing its billing account
@param {string} project_name Name of project to disable billing on
| Disable billing for a project by removing its billing account
@param {string} project_name Name of project to disable billing on | [
"Disable",
"billing",
"for",
"a",
"project",
"by",
"removing",
"its",
"billing",
"account",
"@param",
"{",
"string",
"}",
"project_name",
"Name",
"of",
"project",
"disable",
"billing",
"on"
] | def __disable_billing_for_project(project_name, projects):
body = {'billingAccountName': ''}
try:
res = projects.updateBillingInfo(name=project_name, body=body).execute()
print(f'Billing disabled: {json.dumps(res)}')
except Exception:
print('Failed to disable billing, possibly check permissions') | [
"def",
"__disable_billing_for_project",
"(",
"project_name",
",",
"projects",
")",
":",
"body",
"=",
"{",
"'billingAccountName'",
":",
"''",
"}",
"try",
":",
"res",
"=",
"projects",
".",
"updateBillingInfo",
"(",
"name",
"=",
"project_name",
",",
"body",
"=",
"body",
")",
".",
"execute",
"(",
")",
"print",
"(",
"f'Billing disabled: {json.dumps(res)}'",
")",
"except",
"Exception",
":",
"print",
"(",
"'Failed to disable billing, possibly check permissions'",
")"
] | Disable billing for a project by removing its billing account
@param {string} project_name Name of project to disable billing on | [
"Disable",
"billing",
"for",
"a",
"project",
"by",
"removing",
"its",
"billing",
"account",
"@param",
"{",
"string",
"}",
"project_name",
"Name",
"of",
"project",
"disable",
"billing",
"on"
] | [
"\"\"\"\n Disable billing for a project by removing its billing account\n @param {string} project_name Name of project disable billing on\n \"\"\"",
"# Disable billing"
] | [
{
"param": "project_name",
"type": null
},
{
"param": "projects",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "project_name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "projects",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import json
def __disable_billing_for_project(project_name, projects):
body = {'billingAccountName': ''}
try:
res = projects.updateBillingInfo(name=project_name, body=body).execute()
print(f'Billing disabled: {json.dumps(res)}')
except Exception:
print('Failed to disable billing, possibly check permissions') | 313 | 987 |
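A hedged usage sketch for this record; it assumes the projects argument is the Cloud Billing API projects resource built with google-api-python-client, and the project name is hypothetical:

from googleapiclient import discovery

billing = discovery.build('cloudbilling', 'v1', cache_discovery=False)
__disable_billing_for_project('projects/my-project-id', billing.projects())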
2a5b9649732caaa6ad1121e0d59c3c505d806b18 | lrwb-aou/curation | tests/integration_tests/data_steward/cdr_cleaner/cleaning_rules/ppi_branching_test.py | [
"MIT"
] | Python | _fq_table_name | str | def _fq_table_name(table: Table) -> str:
"""
Get fully qualified name of a table
:param table: the table to get the name of
:return: table name in the form `project.dataset.table_id`
"""
return f'{table.project}.{table.dataset_id}.{table.table_id}' |
Get fully qualified name of a table
:param table: the table to get the name of
:return: table name in the form `project.dataset.table_id`
| Get fully qualified name of a table | [
"Get",
"fully",
"qualified",
"name",
"of",
"a",
"table"
] | def _fq_table_name(table: Table) -> str:
return f'{table.project}.{table.dataset_id}.{table.table_id}' | [
"def",
"_fq_table_name",
"(",
"table",
":",
"Table",
")",
"->",
"str",
":",
"return",
"f'{table.project}.{table.dataset_id}.{table.table_id}'"
] | Get fully qualified name of a table | [
"Get",
"fully",
"qualified",
"name",
"of",
"a",
"table"
] | [
"\"\"\"\n Get fully qualified name of a table\n\n :param table: the table to get the name of\n :return: table name in the form `project.dataset.table_id`\n \"\"\""
] | [
{
"param": "table",
"type": "Table"
}
] | {
"returns": [
{
"docstring": "table name in the form `project.dataset.table_id`",
"docstring_tokens": [
"table",
"name",
"in",
"the",
"form",
"`",
"project",
".",
"dataset",
".",
"table_id",
"`"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "table",
"type": "Table",
"docstring": "the table to get the name of",
"docstring_tokens": [
"the",
"table",
"to",
"get",
"the",
"name",
"of"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _fq_table_name(table: Table) -> str:
return f'{table.project}.{table.dataset_id}.{table.table_id}' | 314 | 866 |
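A minimal usage sketch, assuming the google-cloud-bigquery Table type and an illustrative table ID:

from google.cloud.bigquery import Table

table = Table('my-project.my_dataset.my_table')
assert _fq_table_name(table) == 'my-project.my_dataset.my_table'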
8d258afb889602daff524d7be0469af380da8f87 | Enzo-ltz/BachelorDIM-Lectures-Algorithms-2020 | S1_algotools.py | [
"MIT"
] | Python | average | <not_specific> | def average(tab):
'''
This function calculates the average of a list
Args :
tab: The list of number
Returns the mean of the list
Raises:
Value error if no positive value is found
'''
som = 0
n=0
for i in range(len(tab)):
if tab[i] > 0:
som = som + tab[i]
n = n+1
if n>0:
moy = som/n
else:
raise ValueError('no positive value found')
return(moy)
'''
What happens if som initialization is forgotten ?
The error NameError: name 'som' is not defined
It therefore has to be declared
What can you expect if all the values are below zero ?
The calculation will not take them into account
''' |
This function calculates the average of a list
Args :
tab: The list of number
Returns the mean of the list
Raises:
Value error if no positive value is found
| This function calculates the average of a list
Args :
tab: The list of number
Returns the mean of the list
Value error if no positive value is found | [
"This",
"funtion",
"calculates",
"the",
"average",
"of",
"a",
"list",
"Args",
":",
"tab",
":",
"The",
"list",
"of",
"number",
"Returns",
"the",
"mean",
"of",
"the",
"list",
"Value",
"error",
"if",
"no",
"positive",
"value",
"is",
"find"
] | def average(tab):
som = 0
n=0
for i in range(len(tab)):
if tab[i] > 0:
som = som + tab[i]
n = n+1
if n>0:
moy = som/n
else:
raise ValueError('no positive value found')
return(moy) | [
"def",
"average",
"(",
"tab",
")",
":",
"som",
"=",
"0",
"n",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"tab",
")",
")",
":",
"if",
"tab",
"[",
"i",
"]",
">",
"0",
":",
"som",
"=",
"som",
"+",
"tab",
"[",
"i",
"]",
"n",
"=",
"n",
"+",
"1",
"if",
"n",
">",
"0",
":",
"moy",
"=",
"som",
"/",
"n",
"else",
":",
"raise",
"ValueError",
"(",
"'no positive value found'",
")",
"return",
"(",
"moy",
")",
"'''\n What happens if som initilization is forgotten ?\n L'erreur NameError: name 'som' is not defined\n Il faut donc la déclarer\n \n What can you expect if all the values are below zero ?\n Le calcul ne les prendra pas en compte \n '''"
] | This function calculates the average of a list
Args :
tab: The list of number | [
"This",
"funtion",
"calculates",
"the",
"average",
"of",
"a",
"list",
"Args",
":",
"tab",
":",
"The",
"list",
"of",
"number"
] | [
"'''\n \n This funtion calculates the average of a list\n \n Args : \n tab: The list of number\n \n Returns the mean of the list\n \n Raises:\n Value error if no positive value is find\n '''",
"'''\n What happens if som initilization is forgotten ?\n L'erreur NameError: name 'som' is not defined\n Il faut donc la déclarer\n \n What can you expect if all the values are below zero ?\n Le calcul ne les prendra pas en compte \n '''"
] | [
{
"param": "tab",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "tab",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def average(tab):
som = 0
n=0
for i in range(len(tab)):
if tab[i] > 0:
som = som + tab[i]
n = n+1
if n>0:
moy = som/n
else:
raise ValueError('no positive value found')
return(moy) | 315 | 458 |
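A short usage sketch, assuming average is in scope; non-positive values are skipped by design:

assert average([2, -5, 4]) == 3.0
average([-1, -2])   # raises ValueError: no positive value found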
e89be0e46a4e4f06c678ed62e880b446af83ee7d | yungone/Cilia | src/Utilities.py | [
"MIT"
] | Python | flen | <not_specific> | def flen(filename):
"""
File LENgth computes and returns the number of lines in a file. @filename <string> is path to a file. This is an expensive method to call for the whole file is read to determine the number of lines.
returns: <integer> line count
"""
# Read and count lines.
with open(filename, 'r') as infile:
return sum((1 for line in infile)) |
File LENgth computes and returns the number of lines in a file. @filename <string> is path to a file. This is an expensive method to call for the whole file is read to determine the number of lines.
returns: <integer> line count
| File LENgth computes and returns the number of lines in a file. @filename is path to a file. This is an expensive method to call for the whole file is read to determine the number of lines.
returns: line count | [
"File",
"LENgth",
"computes",
"and",
"returns",
"the",
"number",
"of",
"lines",
"in",
"a",
"file",
".",
"@filename",
"is",
"path",
"to",
"a",
"file",
".",
"This",
"is",
"an",
"epensive",
"method",
"to",
"call",
"for",
"the",
"whole",
"file",
"is",
"read",
"to",
"determine",
"the",
"number",
"of",
"lines",
".",
"returns",
":",
"line",
"count"
] | def flen(filename):
with open(filename, 'r') as infile:
return sum((1 for line in infile)) | [
"def",
"flen",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"infile",
":",
"return",
"sum",
"(",
"(",
"1",
"for",
"line",
"in",
"infile",
")",
")"
] | File LENgth computes and returns the number of lines in a file. | [
"File",
"LENgth",
"computes",
"and",
"returns",
"the",
"number",
"of",
"lines",
"in",
"a",
"file",
"."
] | [
"\"\"\"\n\t\tFile LENgth computes and returns the number of lines in a file. @filename <string> is path to a file. This is an epensive method to call for the whole file is read to determine the number of lines.\n\t\t\n\t\treturns: <integer> line count\n\t\"\"\"",
"# Read and count lines."
] | [
{
"param": "filename",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "filename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def flen(filename):
with open(filename, 'r') as infile:
return sum((1 for line in infile)) | 316 | 522 |
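A quick usage sketch with an illustrative file name, assuming flen is in scope:

with open('sample.txt', 'w') as f:
    f.write('a\nb\nc\n')
assert flen('sample.txt') == 3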
3b2206a3c64fbcf5d5a7d28a82c6e758a7a76d84 | DBeath/feedsearch-gateway | gateway/utils.py | [
"MIT"
] | Python | truncate_integer | int | def truncate_integer(value: int, length: int = 10) -> int:
"""
Truncate an integer value to the desired length.
:param value: integer to truncate
:param length: desired length of integer
:return: truncated integer
"""
val_length = len(str(value))
if val_length > length:
diff = val_length - length
return value // (10 ** diff)
return value |
Truncate an integer value to the desired length.
:param value: integer to truncate
:param length: desired length of integer
:return: truncated integer
| Truncate an integer value to the desired length. | [
"Truncate",
"an",
"integer",
"value",
"to",
"the",
"desired",
"length",
"."
] | def truncate_integer(value: int, length: int = 10) -> int:
val_length = len(str(value))
if val_length > length:
diff = val_length - length
return value // (10 ** diff)
return value | [
"def",
"truncate_integer",
"(",
"value",
":",
"int",
",",
"length",
":",
"int",
"=",
"10",
")",
"->",
"int",
":",
"val_length",
"=",
"len",
"(",
"str",
"(",
"value",
")",
")",
"if",
"val_length",
">",
"length",
":",
"diff",
"=",
"val_length",
"-",
"length",
"return",
"value",
"//",
"(",
"10",
"**",
"diff",
")",
"return",
"value"
] | Truncate an integer value to the desired length. | [
"Truncate",
"an",
"integer",
"value",
"to",
"the",
"desired",
"length",
"."
] | [
"\"\"\"\n Truncate an integer value to the desired length.\n\n :param value: integer to truncate\n :param length: desired length of integer\n :return: truncated integer\n \"\"\""
] | [
{
"param": "value",
"type": "int"
},
{
"param": "length",
"type": "int"
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "value",
"type": "int",
"docstring": "integer to truncate",
"docstring_tokens": [
"integer",
"to",
"truncate"
],
"default": null,
"is_optional": null
},
{
"identifier": "length",
"type": "int",
"docstring": "desired length of integer",
"docstring_tokens": [
"desired",
"length",
"of",
"integer"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def truncate_integer(value: int, length: int = 10) -> int:
val_length = len(str(value))
if val_length > length:
diff = val_length - length
return value // (10 ** diff)
return value | 317 | 868 |
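Worked examples for this record, assuming truncate_integer is in scope:

assert truncate_integer(1234567890123) == 1234567890
assert truncate_integer(987654, length=3) == 987
assert truncate_integer(42) == 42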
454a77e9443f874cb912811365baf034e9360b6b | amhirsch/gla-covid-19 | daily_pr/parse.py | [
"MIT"
] | Python | _str_to_int | int | def _str_to_int(number: str) -> int:
"""Parses a string to an integer with safegaurds for commas in numerical
representation.
"""
try:
return int(number.replace(',', ''))
except ValueError:
pass | Parses a string to an integer with safeguards for commas in numerical
representation.
| Parses a string to an integer with safeguards for commas in numerical
representation. | [
"Parses",
"a",
"string",
"to",
"an",
"integer",
"with",
"safegaurds",
"for",
"commas",
"in",
"numerical",
"representation",
"."
] | def _str_to_int(number: str) -> int:
try:
return int(number.replace(',', ''))
except ValueError:
pass | [
"def",
"_str_to_int",
"(",
"number",
":",
"str",
")",
"->",
"int",
":",
"try",
":",
"return",
"int",
"(",
"number",
".",
"replace",
"(",
"','",
",",
"''",
")",
")",
"except",
"ValueError",
":",
"pass"
] | Parses a string to an integer with safeguards for commas in numerical
representation. | [
"Parses",
"a",
"string",
"to",
"an",
"integer",
"with",
"safegaurds",
"for",
"commas",
"in",
"numerical",
"representation",
"."
] | [
"\"\"\"Parses a string to an integer with safegaurds for commas in numerical\n representation.\n \"\"\""
] | [
{
"param": "number",
"type": "str"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "number",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _str_to_int(number: str) -> int:
try:
return int(number.replace(',', ''))
except ValueError:
pass | 318 | 815 |
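A usage sketch; note the function falls through and implicitly returns None when parsing fails:

assert _str_to_int('1,234,567') == 1234567
assert _str_to_int('n/a') is None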
9930d6778ff12b786721c238c8745bd6ae64369e | turnaround0/udacity-ai-p2-isolation | game_agent.py | [
"MIT"
] | Python | custom_score_2 | <not_specific> | def custom_score_2(game, player):
"""Calculate the heuristic value of a game state from the point of view
of the given player.
Note: this function should be called from within a Player instance as
`self.score()` -- you should not need to call this function directly.
Parameters
----------
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game (e.g., player locations and blocked cells).
player : object
A player instance in the current game (i.e., an object corresponding to
one of the player objects `game.__player_1__` or `game.__player_2__`.)
Returns
-------
float
The heuristic value of the current game state to the specified player.
"""
# Check game is end
if game.is_loser(player):
return float("-inf")
if game.is_winner(player):
return float("inf")
# If player is close to center, it can go to many directions.
# But, on corner, it have a few choices to move.
w, h = game.width / 2., game.height / 2.
own_y, own_x = game.get_player_location(player)
opp_y, opp_x = game.get_player_location(game.get_opponent(player))
own_diff = float((h - own_y)**2 + (w - own_x)**2)
opp_diff = float((h - opp_y)**2 + (w - opp_x)**2)
# This tactic is to increase number of player moves,
# and to decrease number of opponent moves.
own_moves = len(game.get_legal_moves(player))
opp_moves = len(game.get_legal_moves(game.get_opponent(player)))
# Check if player and opponent is on same quadrant
# If then, distance from center should be multiplied.
# Because player can attack opponents from center to corner.
own_quadrant = opp_quadrant = 0
if own_x >= w:
own_quadrant += 1
if own_y >= h:
own_quadrant += 2
if opp_x >= w:
opp_quadrant += 1
if opp_y >= h:
opp_quadrant += 2
if own_quadrant == opp_quadrant:
factor = 1.
else:
factor = 0.1
return float((opp_diff - own_diff) * factor + own_moves - opp_moves) | Calculate the heuristic value of a game state from the point of view
of the given player.
Note: this function should be called from within a Player instance as
`self.score()` -- you should not need to call this function directly.
Parameters
----------
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game (e.g., player locations and blocked cells).
player : object
A player instance in the current game (i.e., an object corresponding to
one of the player objects `game.__player_1__` or `game.__player_2__`.)
Returns
-------
float
The heuristic value of the current game state to the specified player.
| Calculate the heuristic value of a game state from the point of view
of the given player.
this function should be called from within a Player instance as
`self.score()` -- you should not need to call this function directly.
Parameters
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game .
player : object
A player instance in the current game
Returns
float
The heuristic value of the current game state to the specified player. | [
"Calculate",
"the",
"heuristic",
"value",
"of",
"a",
"game",
"state",
"from",
"the",
"point",
"of",
"view",
"of",
"the",
"given",
"player",
".",
"this",
"function",
"should",
"be",
"called",
"from",
"within",
"a",
"Player",
"instance",
"as",
"`",
"self",
".",
"score",
"()",
"`",
"--",
"you",
"should",
"not",
"need",
"to",
"call",
"this",
"function",
"directly",
".",
"Parameters",
"game",
":",
"`",
"isolation",
".",
"Board",
"`",
"An",
"instance",
"of",
"`",
"isolation",
".",
"Board",
"`",
"encoding",
"the",
"current",
"state",
"of",
"the",
"game",
".",
"player",
":",
"object",
"A",
"player",
"instance",
"in",
"the",
"current",
"game",
"Returns",
"float",
"The",
"heuristic",
"value",
"of",
"the",
"current",
"game",
"state",
"to",
"the",
"specified",
"player",
"."
] | def custom_score_2(game, player):
if game.is_loser(player):
return float("-inf")
if game.is_winner(player):
return float("inf")
w, h = game.width / 2., game.height / 2.
own_y, own_x = game.get_player_location(player)
opp_y, opp_x = game.get_player_location(game.get_opponent(player))
own_diff = float((h - own_y)**2 + (w - own_x)**2)
opp_diff = float((h - opp_y)**2 + (w - opp_x)**2)
own_moves = len(game.get_legal_moves(player))
opp_moves = len(game.get_legal_moves(game.get_opponent(player)))
own_quadrant = opp_quadrant = 0
if own_x >= w:
own_quadrant += 1
if own_y >= h:
own_quadrant += 2
if opp_x >= w:
opp_quadrant += 1
if opp_y >= h:
opp_quadrant += 2
if own_quadrant == opp_quadrant:
factor = 1.
else:
factor = 0.1
return float((opp_diff - own_diff) * factor + own_moves - opp_moves) | [
"def",
"custom_score_2",
"(",
"game",
",",
"player",
")",
":",
"if",
"game",
".",
"is_loser",
"(",
"player",
")",
":",
"return",
"float",
"(",
"\"-inf\"",
")",
"if",
"game",
".",
"is_winner",
"(",
"player",
")",
":",
"return",
"float",
"(",
"\"inf\"",
")",
"w",
",",
"h",
"=",
"game",
".",
"width",
"/",
"2.",
",",
"game",
".",
"height",
"/",
"2.",
"own_y",
",",
"own_x",
"=",
"game",
".",
"get_player_location",
"(",
"player",
")",
"opp_y",
",",
"opp_x",
"=",
"game",
".",
"get_player_location",
"(",
"game",
".",
"get_opponent",
"(",
"player",
")",
")",
"own_diff",
"=",
"float",
"(",
"(",
"h",
"-",
"own_y",
")",
"**",
"2",
"+",
"(",
"w",
"-",
"own_x",
")",
"**",
"2",
")",
"opp_diff",
"=",
"float",
"(",
"(",
"h",
"-",
"opp_y",
")",
"**",
"2",
"+",
"(",
"w",
"-",
"opp_x",
")",
"**",
"2",
")",
"own_moves",
"=",
"len",
"(",
"game",
".",
"get_legal_moves",
"(",
"player",
")",
")",
"opp_moves",
"=",
"len",
"(",
"game",
".",
"get_legal_moves",
"(",
"game",
".",
"get_opponent",
"(",
"player",
")",
")",
")",
"own_quadrant",
"=",
"opp_quadrant",
"=",
"0",
"if",
"own_x",
">=",
"w",
":",
"own_quadrant",
"+=",
"1",
"if",
"own_y",
">=",
"h",
":",
"own_quadrant",
"+=",
"2",
"if",
"opp_x",
">=",
"w",
":",
"opp_quadrant",
"+=",
"1",
"if",
"opp_y",
">=",
"h",
":",
"opp_quadrant",
"+=",
"2",
"if",
"own_quadrant",
"==",
"opp_quadrant",
":",
"factor",
"=",
"1.",
"else",
":",
"factor",
"=",
"0.1",
"return",
"float",
"(",
"(",
"opp_diff",
"-",
"own_diff",
")",
"*",
"factor",
"+",
"own_moves",
"-",
"opp_moves",
")"
] | Calculate the heuristic value of a game state from the point of view
of the given player. | [
"Calculate",
"the",
"heuristic",
"value",
"of",
"a",
"game",
"state",
"from",
"the",
"point",
"of",
"view",
"of",
"the",
"given",
"player",
"."
] | [
"\"\"\"Calculate the heuristic value of a game state from the point of view\n of the given player.\n\n Note: this function should be called from within a Player instance as\n `self.score()` -- you should not need to call this function directly.\n\n Parameters\n ----------\n game : `isolation.Board`\n An instance of `isolation.Board` encoding the current state of the\n game (e.g., player locations and blocked cells).\n\n player : object\n A player instance in the current game (i.e., an object corresponding to\n one of the player objects `game.__player_1__` or `game.__player_2__`.)\n\n Returns\n -------\n float\n The heuristic value of the current game state to the specified player.\n \"\"\"",
"# Check game is end",
"# If player is close to center, it can go to many directions.",
"# But, on corner, it have a few choices to move.",
"# This tactic is to increase number of player moves,",
"# and to decrease number of opponent moves.",
"# Check if player and opponent is on same quadrant",
"# If then, distance from center should be multiplied.",
"# Because player can attack opponents from center to corner."
] | [
{
"param": "game",
"type": null
},
{
"param": "player",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "game",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "player",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def custom_score_2(game, player):
if game.is_loser(player):
return float("-inf")
if game.is_winner(player):
return float("inf")
w, h = game.width / 2., game.height / 2.
own_y, own_x = game.get_player_location(player)
opp_y, opp_x = game.get_player_location(game.get_opponent(player))
own_diff = float((h - own_y)**2 + (w - own_x)**2)
opp_diff = float((h - opp_y)**2 + (w - opp_x)**2)
own_moves = len(game.get_legal_moves(player))
opp_moves = len(game.get_legal_moves(game.get_opponent(player)))
own_quadrant = opp_quadrant = 0
if own_x >= w:
own_quadrant += 1
if own_y >= h:
own_quadrant += 2
if opp_x >= w:
opp_quadrant += 1
if opp_y >= h:
opp_quadrant += 2
if own_quadrant == opp_quadrant:
factor = 1.
else:
factor = 0.1
return float((opp_diff - own_diff) * factor + own_moves - opp_moves) | 319 | 447 |
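A worked example of the returned expression with hypothetical values, both players in the same quadrant so factor is 1.0:

own_diff, opp_diff = 2.0, 10.0
own_moves, opp_moves = 6, 4
assert (opp_diff - own_diff) * 1.0 + own_moves - opp_moves == 10.0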
f8c9ffa0a8397735e9373a6fb1e7140e428e4bc1 | dawedawe/traipor | app/training/plan_util.py | [
"ISC"
] | Python | sort_loads | <not_specific> | def sort_loads(plan):
"""sort loads in plan while keeping the positions of the loaded days"""
loads = []
loaded_days = []
for index, load in enumerate(plan):
if load > 0.0:
loaded_days.append(index)
loads.append(load)
loads = sorted(loads)
for i in loaded_days:
plan[i] = loads.pop(0)
return plan | sort loads in plan while keeping the positions of the loaded days | sort loads in plan while keeping the positions of the loaded days | [
"sort",
"loads",
"in",
"plan",
"while",
"keeping",
"the",
"positions",
"of",
"the",
"loaded",
"days"
] | def sort_loads(plan):
loads = []
loaded_days = []
for index, load in enumerate(plan):
if load > 0.0:
loaded_days.append(index)
loads.append(load)
loads = sorted(loads)
for i in loaded_days:
plan[i] = loads.pop(0)
return plan | [
"def",
"sort_loads",
"(",
"plan",
")",
":",
"loads",
"=",
"[",
"]",
"loaded_days",
"=",
"[",
"]",
"for",
"index",
",",
"load",
"in",
"enumerate",
"(",
"plan",
")",
":",
"if",
"load",
">",
"0.0",
":",
"loaded_days",
".",
"append",
"(",
"index",
")",
"loads",
".",
"append",
"(",
"load",
")",
"loads",
"=",
"sorted",
"(",
"loads",
")",
"for",
"i",
"in",
"loaded_days",
":",
"plan",
"[",
"i",
"]",
"=",
"loads",
".",
"pop",
"(",
"0",
")",
"return",
"plan"
] | sort loads in plan while keeping the positions of the loaded days | [
"sort",
"loads",
"in",
"plan",
"while",
"keeping",
"the",
"positions",
"of",
"the",
"loaded",
"days"
] | [
"\"\"\"sort loads in plan while keeping the positions of the loaded days\"\"\""
] | [
{
"param": "plan",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "plan",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def sort_loads(plan):
loads = []
loaded_days = []
for index, load in enumerate(plan):
if load > 0.0:
loaded_days.append(index)
loads.append(load)
loads = sorted(loads)
for i in loaded_days:
plan[i] = loads.pop(0)
return plan | 320 | 230 |
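A usage sketch; the zero entries (rest days) keep their positions while the positive loads are sorted:

assert sort_loads([3.0, 0.0, 1.0, 2.0]) == [1.0, 0.0, 2.0, 3.0]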
90bcf24dc2c601750c736ab336ed52b87f7bd4e4 | MartinThoma/hwrt | hwrt/utils.py | [
"MIT"
] | Python | create_run_logfile | None | def create_run_logfile(folder: str) -> None:
"""Create a 'run.log' within folder. This file contains the time of the
latest successful run.
"""
with open(os.path.join(folder, "run.log"), "w") as fp:
datestring = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
fp.write("timestamp: '%s'" % datestring) | Create a 'run.log' within folder. This file contains the time of the
latest successful run.
| Create a 'run.log' within folder. This file contains the time of the
latest successful run. | [
"Create",
"a",
"'",
"run",
".",
"log",
"'",
"within",
"folder",
".",
"This",
"file",
"contains",
"the",
"time",
"of",
"the",
"latest",
"successful",
"run",
"."
] | def create_run_logfile(folder: str) -> None:
with open(os.path.join(folder, "run.log"), "w") as fp:
datestring = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
fp.write("timestamp: '%s'" % datestring) | [
"def",
"create_run_logfile",
"(",
"folder",
":",
"str",
")",
"->",
"None",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"\"run.log\"",
")",
",",
"\"w\"",
")",
"as",
"fp",
":",
"datestring",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"fp",
".",
"write",
"(",
"\"timestamp: '%s'\"",
"%",
"datestring",
")"
] | Create a 'run.log' within folder. | [
"Create",
"a",
"'",
"run",
".",
"log",
"'",
"within",
"folder",
"."
] | [
"\"\"\"Create a 'run.log' within folder. This file contains the time of the\n latest successful run.\n \"\"\""
] | [
{
"param": "folder",
"type": "str"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "folder",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import os
import datetime
def create_run_logfile(folder: str) -> None:
with open(os.path.join(folder, "run.log"), "w") as fp:
datestring = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
fp.write("timestamp: '%s'" % datestring) | 321 | 594 |
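A minimal usage sketch writing the log into a temporary directory, assuming create_run_logfile is in scope:

import tempfile

folder = tempfile.mkdtemp()
create_run_logfile(folder)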
abb46c36a8bef3d973b36ae91eaf31c3a266595f | ghgindex/gh | ghgi/parser.py | [
"CC0-1.0"
] | Python | unstop | list | def unstop(cls, ingredient: list) -> list:
""" Remove stopwords and prep mods from parsed ingredient"""
out = []
prev_token = None
mods = []
for token in ingredient[0]:
if type(token[0]) is str:
if token[0].lower() in cls.STOPWORDS:
continue
elif token[0].lower() in cls.PREP_MODS:
mods += [token[0]]
continue
if token[0] == ',' and prev_token in [',', None]:
prev_token = token[0]
continue
out += [token]
if type(token[0]) is str:
prev_token = token[0]
# clean up dangling cruft left by removal of stopwords
while out and out[-1][1] in ['CC', ',', 'TO']:
del out[-1]
return [out, ingredient[1], ingredient[2] + mods] | Remove stopwords and prep mods from parsed ingredient | Remove stopwords and prep mods from parsed ingredient | [
"Remove",
"stopwords",
"and",
"prep",
"mods",
"from",
"parsed",
"ingredient"
] | def unstop(cls, ingredient: list) -> list:
out = []
prev_token = None
mods = []
for token in ingredient[0]:
if type(token[0]) is str:
if token[0].lower() in cls.STOPWORDS:
continue
elif token[0].lower() in cls.PREP_MODS:
mods += [token[0]]
continue
if token[0] == ',' and prev_token in [',', None]:
prev_token = token[0]
continue
out += [token]
if type(token[0]) is str:
prev_token = token[0]
while out and out[-1][1] in ['CC', ',', 'TO']:
del out[-1]
return [out, ingredient[1], ingredient[2] + mods] | [
"def",
"unstop",
"(",
"cls",
",",
"ingredient",
":",
"list",
")",
"->",
"list",
":",
"out",
"=",
"[",
"]",
"prev_token",
"=",
"None",
"mods",
"=",
"[",
"]",
"for",
"token",
"in",
"ingredient",
"[",
"0",
"]",
":",
"if",
"type",
"(",
"token",
"[",
"0",
"]",
")",
"is",
"str",
":",
"if",
"token",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"in",
"cls",
".",
"STOPWORDS",
":",
"continue",
"elif",
"token",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"in",
"cls",
".",
"PREP_MODS",
":",
"mods",
"+=",
"[",
"token",
"[",
"0",
"]",
"]",
"continue",
"if",
"token",
"[",
"0",
"]",
"==",
"','",
"and",
"prev_token",
"in",
"[",
"','",
",",
"None",
"]",
":",
"prev_token",
"=",
"token",
"[",
"0",
"]",
"continue",
"out",
"+=",
"[",
"token",
"]",
"if",
"type",
"(",
"token",
"[",
"0",
"]",
")",
"is",
"str",
":",
"prev_token",
"=",
"token",
"[",
"0",
"]",
"while",
"out",
"and",
"out",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"in",
"[",
"'CC'",
",",
"','",
",",
"'TO'",
"]",
":",
"del",
"out",
"[",
"-",
"1",
"]",
"return",
"[",
"out",
",",
"ingredient",
"[",
"1",
"]",
",",
"ingredient",
"[",
"2",
"]",
"+",
"mods",
"]"
] | Remove stopwords and prep mods from parsed ingredient | [
"Remove",
"stopwords",
"and",
"prep",
"mods",
"from",
"parsed",
"ingredient"
] | [
"\"\"\" Remove stopwords and prep mods from parsed ingredient\"\"\"",
"# clean up dangling cruft left by removal of stopwords"
] | [
{
"param": "cls",
"type": null
},
{
"param": "ingredient",
"type": "list"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "ingredient",
"type": "list",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def unstop(cls, ingredient: list) -> list:
out = []
prev_token = None
mods = []
for token in ingredient[0]:
if type(token[0]) is str:
if token[0].lower() in cls.STOPWORDS:
continue
elif token[0].lower() in cls.PREP_MODS:
mods += [token[0]]
continue
if token[0] == ',' and prev_token in [',', None]:
prev_token = token[0]
continue
out += [token]
if type(token[0]) is str:
prev_token = token[0]
while out and out[-1][1] in ['CC', ',', 'TO']:
del out[-1]
return [out, ingredient[1], ingredient[2] + mods] | 322 | 600 |
fd5b659e7876006c2c69b3cab6023c78b74cb60d | ejfitzgerald/agents-aea | scripts/update_package_versions.py | [
"Apache-2.0"
] | Python | inplace_change | None | def inplace_change(fp: Path, old_string: str, new_string: str) -> None:
"""Replace the occurence of a string with a new one in the provided file."""
with open(fp) as f:
s = f.read()
if old_string not in s:
return
with open(fp, "w") as f:
s = s.replace(old_string, new_string)
f.write(s) | Replace the occurrence of a string with a new one in the provided file. | Replace the occurrence of a string with a new one in the provided file. | [
"Replace",
"the",
"occurence",
"of",
"a",
"string",
"with",
"a",
"new",
"one",
"in",
"the",
"provided",
"file",
"."
] | def inplace_change(fp: Path, old_string: str, new_string: str) -> None:
with open(fp) as f:
s = f.read()
if old_string not in s:
return
with open(fp, "w") as f:
s = s.replace(old_string, new_string)
f.write(s) | [
"def",
"inplace_change",
"(",
"fp",
":",
"Path",
",",
"old_string",
":",
"str",
",",
"new_string",
":",
"str",
")",
"->",
"None",
":",
"with",
"open",
"(",
"fp",
")",
"as",
"f",
":",
"s",
"=",
"f",
".",
"read",
"(",
")",
"if",
"old_string",
"not",
"in",
"s",
":",
"return",
"with",
"open",
"(",
"fp",
",",
"\"w\"",
")",
"as",
"f",
":",
"s",
"=",
"s",
".",
"replace",
"(",
"old_string",
",",
"new_string",
")",
"f",
".",
"write",
"(",
"s",
")"
] | Replace the occurrence of a string with a new one in the provided file. | [
"Replace",
"the",
"occurence",
"of",
"a",
"string",
"with",
"a",
"new",
"one",
"in",
"the",
"provided",
"file",
"."
] | [
"\"\"\"Replace the occurence of a string with a new one in the provided file.\"\"\""
] | [
{
"param": "fp",
"type": "Path"
},
{
"param": "old_string",
"type": "str"
},
{
"param": "new_string",
"type": "str"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "fp",
"type": "Path",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "old_string",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "new_string",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def inplace_change(fp: Path, old_string: str, new_string: str) -> None:
with open(fp) as f:
s = f.read()
if old_string not in s:
return
with open(fp, "w") as f:
s = s.replace(old_string, new_string)
f.write(s) | 323 | 978 |
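A usage sketch with an illustrative path; note that str.replace substitutes every occurrence, not only the first:

from pathlib import Path

p = Path('config.txt')
p.write_text('version = OLD')
inplace_change(p, 'OLD', 'NEW')
assert p.read_text() == 'version = NEW'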
80c038f49648d36c714702aed5bfb3645aec9824 | robfalck/AoC2017 | day21/day21.py | [
"Apache-2.0"
] | Python | encode_pattern | <not_specific> | def encode_pattern(mat):
"""
Encode the matrix into a hashable value.
"""
return tuple(mat.flatten().tolist()) |
Encode the matrix into a hashable value.
| Encode the matrix into a hashable value. | [
"Encode",
"the",
"matrix",
"into",
"a",
"hashable",
"value",
"."
] | def encode_pattern(mat):
return tuple(mat.flatten().tolist()) | [
"def",
"encode_pattern",
"(",
"mat",
")",
":",
"return",
"tuple",
"(",
"mat",
".",
"flatten",
"(",
")",
".",
"tolist",
"(",
")",
")"
] | Encode the matrix into a hashable value. | [
"Encode",
"the",
"matrix",
"into",
"a",
"hashable",
"value",
"."
] | [
"\"\"\"\n Encode the matrix into a hashable value.\n \"\"\""
] | [
{
"param": "mat",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "mat",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def encode_pattern(mat):
return tuple(mat.flatten().tolist()) | 324 | 869 |
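A usage sketch, assuming a NumPy array input as the flatten() and tolist() calls suggest:

import numpy as np

assert encode_pattern(np.array([[1, 0], [0, 1]])) == (1, 0, 0, 1)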
78a9f0a5983d1c08cfc8edd068974b198976ad6f | Ouranosinc/cowbird | setup.py | [
"MIT"
] | Python | _split_requirement | <not_specific> | def _split_requirement(requirement, version=False, python=False, merge=False):
# type: (str, bool, bool, bool) -> Union[str, Tuple[str, str]]
"""
Splits a requirement package definition into its name and version specification.
Returns the appropriate part(s) according to :paramref:`version`. If ``True``, returns the operator and version
string. The returned version in this case would be either the package's or the environment python's version string
according to the value of :paramref:`python`. Otherwise, only returns the 'other part' of the requirement, which
will be the plain package name without version or the complete ``package+version`` without ``python_version`` part.
Package requirement format::
package [<|<=|==|>|>=|!= x.y.z][; python_version <|<=|==|>|>=|!= "x.y.z"]
Returned values with provided arguments::
default: "<package>"
python=True n/a
version=True: ([pkg-op], [pkg-ver])
version=True,python=True: ([py-op], [py-ver])
version=True,merge=True: "<package> [pkg-op] [pkg-ver]"
version=True,python=True,merge=True: "[python_version] [py-op] [py-ver]"
:param requirement: full package string requirement.
:param version:
Retrieves the version operator and version number instead of only the package's name (without specifications).
:param python:
Retrieves the python operator and python version instead of the package's version.
Must be combined with :paramref:`version`, otherwise doesn't do anything.
:param merge:
Nothing done if ``False`` (other arguments behave normally).
If only :paramref:`version` is ``True``, merges the package name back with the version operator and number into
a single string (if any version part), but without the python version part (if any).
If both :paramref:`version` and :paramref:`python` are ``True`` combines back the part after ``;`` to form
the python version specifier.
:return: Extracted requirement part(s). Empty strings if parts cannot be found.
"""
idx_pyv = -1 if python else 0
if python and "python_version" not in requirement:
return ("", "") if version and not merge else ""
requirement = requirement.split("python_version")[idx_pyv].replace(";", "").replace("\"", "")
op_str = ""
pkg_name = requirement
for operator in [">=", ">", "<=", "<", "!=", "==", "="]:
if operator in requirement:
op_str = operator
pkg_name, requirement = requirement.split(operator, 1)
break
if not version:
return pkg_name.strip()
if op_str == "":
pkg_name = requirement
requirement = ""
parts = (op_str, requirement.strip())
if merge and python:
return "python_version {} \"{}\"".format(parts[0], parts[1])
if merge and version:
return "{}{}{}".format(pkg_name, parts[0], parts[1])
return parts |
Splits a requirement package definition into its name and version specification.
Returns the appropriate part(s) according to :paramref:`version`. If ``True``, returns the operator and version
string. The returned version in this case would be either the package's or the environment python's version string
according to the value of :paramref:`python`. Otherwise, only returns the 'other part' of the requirement, which
will be the plain package name without version or the complete ``package+version`` without ``python_version`` part.
Package requirement format::
package [<|<=|==|>|>=|!= x.y.z][; python_version <|<=|==|>|>=|!= "x.y.z"]
Returned values with provided arguments::
default: "<package>"
python=True n/a
version=True: ([pkg-op], [pkg-ver])
version=True,python=True: ([py-op], [py-ver])
version=True,merge=True: "<package> [pkg-op] [pkg-ver]"
version=True,python=True,merge=True: "[python_version] [py-op] [py-ver]"
:param requirement: full package string requirement.
:param version:
Retrieves the version operator and version number instead of only the package's name (without specifications).
:param python:
Retrieves the python operator and python version instead of the package's version.
Must be combined with :paramref:`version`, otherwise doesn't do anything.
:param merge:
Nothing done if ``False`` (other arguments behave normally).
If only :paramref:`version` is ``True``, merges the package name back with the version operator and number into
a single string (if any version part), but without the python version part (if any).
If both :paramref:`version` and :paramref:`python` are ``True`` combines back the part after ``;`` to form
the python version specifier.
:return: Extracted requirement part(s). Empty strings if parts cannot be found.
| Splits a requirement package definition into its name and version specification.
Returns the appropriate part(s) according to :paramref:`version`. If ``True``, returns the operator and version
string. The returned version in this case would be either the package's or the environment python's version string
according to the value of :paramref:`python`.
Package requirement format:.
Returned values with provided arguments:.
| [
"Splits",
"a",
"requirement",
"package",
"definition",
"into",
"it",
"'",
"s",
"name",
"and",
"version",
"specification",
".",
"Returns",
"the",
"appropriate",
"part",
"(",
"s",
")",
"according",
"to",
":",
"paramref",
":",
"`",
"version",
"`",
".",
"If",
"`",
"`",
"True",
"`",
"`",
"returns",
"the",
"operator",
"and",
"version",
"string",
".",
"The",
"returned",
"version",
"in",
"this",
"case",
"would",
"be",
"either",
"the",
"package",
"'",
"s",
"or",
"the",
"environment",
"python",
"'",
"s",
"version",
"string",
"according",
"to",
"the",
"value",
"of",
":",
"paramref",
":",
"`",
"python",
"`",
".",
"Package",
"requirement",
"format",
":",
".",
"Returned",
"values",
"with",
"provided",
"arguments",
":",
"."
] | def _split_requirement(requirement, version=False, python=False, merge=False):
idx_pyv = -1 if python else 0
if python and "python_version" not in requirement:
return ("", "") if version and not merge else ""
requirement = requirement.split("python_version")[idx_pyv].replace(";", "").replace("\"", "")
op_str = ""
pkg_name = requirement
for operator in [">=", ">", "<=", "<", "!=", "==", "="]:
if operator in requirement:
op_str = operator
pkg_name, requirement = requirement.split(operator, 1)
break
if not version:
return pkg_name.strip()
if op_str == "":
pkg_name = requirement
requirement = ""
parts = (op_str, requirement.strip())
if merge and python:
return "python_version {} \"{}\"".format(parts[0], parts[1])
if merge and version:
return "{}{}{}".format(pkg_name, parts[0], parts[1])
return parts | [
"def",
"_split_requirement",
"(",
"requirement",
",",
"version",
"=",
"False",
",",
"python",
"=",
"False",
",",
"merge",
"=",
"False",
")",
":",
"idx_pyv",
"=",
"-",
"1",
"if",
"python",
"else",
"0",
"if",
"python",
"and",
"\"python_version\"",
"not",
"in",
"requirement",
":",
"return",
"(",
"\"\"",
",",
"\"\"",
")",
"if",
"version",
"and",
"not",
"merge",
"else",
"\"\"",
"requirement",
"=",
"requirement",
".",
"split",
"(",
"\"python_version\"",
")",
"[",
"idx_pyv",
"]",
".",
"replace",
"(",
"\";\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"\\\"\"",
",",
"\"\"",
")",
"op_str",
"=",
"\"\"",
"pkg_name",
"=",
"requirement",
"for",
"operator",
"in",
"[",
"\">=\"",
",",
"\">\"",
",",
"\"<=\"",
",",
"\"<\"",
",",
"\"!=\"",
",",
"\"==\"",
",",
"\"=\"",
"]",
":",
"if",
"operator",
"in",
"requirement",
":",
"op_str",
"=",
"operator",
"pkg_name",
",",
"requirement",
"=",
"requirement",
".",
"split",
"(",
"operator",
",",
"1",
")",
"break",
"if",
"not",
"version",
":",
"return",
"pkg_name",
".",
"strip",
"(",
")",
"if",
"op_str",
"==",
"\"\"",
":",
"pkg_name",
"=",
"requirement",
"requirement",
"=",
"\"\"",
"parts",
"=",
"(",
"op_str",
",",
"requirement",
".",
"strip",
"(",
")",
")",
"if",
"merge",
"and",
"python",
":",
"return",
"\"python_version {} \\\"{}\\\"\"",
".",
"format",
"(",
"parts",
"[",
"0",
"]",
",",
"parts",
"[",
"1",
"]",
")",
"if",
"merge",
"and",
"version",
":",
"return",
"\"{}{}{}\"",
".",
"format",
"(",
"pkg_name",
",",
"parts",
"[",
"0",
"]",
",",
"parts",
"[",
"1",
"]",
")",
"return",
"parts"
] | Splits a requirement package definition into its name and version specification. | [
"Splits",
"a",
"requirement",
"package",
"definition",
"into",
"it",
"'",
"s",
"name",
"and",
"version",
"specification",
"."
] | [
"# type: (str, bool, bool, bool) -> Union[str, Tuple[str, str]]",
"\"\"\"\n Splits a requirement package definition into it's name and version specification.\n\n Returns the appropriate part(s) according to :paramref:`version`. If ``True``, returns the operator and version\n string. The returned version in this case would be either the package's or the environment python's version string\n according to the value of :paramref:`python`. Otherwise, only returns the 'other part' of the requirement, which\n will be the plain package name without version or the complete ``package+version`` without ``python_version`` part.\n\n Package requirement format::\n\n package [<|<=|==|>|>=|!= x.y.z][; python_version <|<=|==|>|>=|!= \"x.y.z\"]\n\n Returned values with provided arguments::\n\n default: \"<package>\"\n python=True n/a\n version=True: ([pkg-op], [pkg-ver])\n version=True,python=True: ([py-op], [py-ver])\n version=True,merge=True: \"<package> [pkg-op] [pkg-ver]\"\n version=True,python=True,merge=True: \"[python_version] [py-op] [py-ver]\"\n\n :param requirement: full package string requirement.\n :param version:\n Retrieves the version operator and version number instead of only the package's name (without specifications).\n :param python:\n Retrieves the python operator and python version instead of the package's version.\n Must be combined with :paramref:`version`, otherwise doesn't do anything.\n :param merge:\n Nothing done if ``False`` (other arguments behave normally).\n If only :paramref:`version` is ``True``, merges the package name back with the version operator and number into\n a single string (if any version part), but without the python version part (if any).\n If both :paramref:`version` and :paramref:`python` are ``True`` combines back the part after ``;`` to form\n the python version specifier.\n :return: Extracted requirement part(s). Emtpy strings if parts cannot be found.\n \"\"\""
] | [
{
"param": "requirement",
"type": null
},
{
"param": "version",
"type": null
},
{
"param": "python",
"type": null
},
{
"param": "merge",
"type": null
}
] | {
"returns": [
{
"docstring": "Extracted requirement part(s). Emtpy strings if parts cannot be found.",
"docstring_tokens": [
"Extracted",
"requirement",
"part",
"(",
"s",
")",
".",
"Emtpy",
"strings",
"if",
"parts",
"cannot",
"be",
"found",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "requirement",
"type": null,
"docstring": "full package string requirement.",
"docstring_tokens": [
"full",
"package",
"string",
"requirement",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "version",
"type": null,
"docstring": "Retrieves the version operator and version number instead of only the package's name (without specifications).",
"docstring_tokens": [
"Retrieves",
"the",
"version",
"operator",
"and",
"version",
"number",
"instead",
"of",
"only",
"the",
"package",
"'",
"s",
"name",
"(",
"without",
"specifications",
")",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "python",
"type": null,
"docstring": "Retrieves the python operator and python version instead of the package's version.\nMust be combined with :paramref:`version`, otherwise doesn't do anything.",
"docstring_tokens": [
"Retrieves",
"the",
"python",
"operator",
"and",
"python",
"version",
"instead",
"of",
"the",
"package",
"'",
"s",
"version",
".",
"Must",
"be",
"combined",
"with",
":",
"paramref",
":",
"`",
"version",
"`",
"otherwise",
"doesn",
"'",
"t",
"do",
"anything",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "merge",
"type": null,
"docstring": "Nothing done if ``False`` (other arguments behave normally).\nIf only :paramref:`version` is ``True``, merges the package name back with the version operator and number into\na single string (if any version part), but without the python version part (if any).\nIf both :paramref:`version` and :paramref:`python` are ``True`` combines back the part after ``;`` to form\nthe python version specifier.",
"docstring_tokens": [
"Nothing",
"done",
"if",
"`",
"`",
"False",
"`",
"`",
"(",
"other",
"arguments",
"behave",
"normally",
")",
".",
"If",
"only",
":",
"paramref",
":",
"`",
"version",
"`",
"is",
"`",
"`",
"True",
"`",
"`",
"merges",
"the",
"package",
"name",
"back",
"with",
"the",
"version",
"operator",
"and",
"number",
"into",
"a",
"single",
"string",
"(",
"if",
"any",
"version",
"part",
")",
"but",
"without",
"the",
"python",
"version",
"part",
"(",
"if",
"any",
")",
".",
"If",
"both",
":",
"paramref",
":",
"`",
"version",
"`",
"and",
":",
"paramref",
":",
"`",
"python",
"`",
"are",
"`",
"`",
"True",
"`",
"`",
"combines",
"back",
"the",
"part",
"after",
"`",
"`",
";",
"`",
"`",
"to",
"form",
"the",
"python",
"version",
"specifier",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _split_requirement(requirement, version=False, python=False, merge=False):
idx_pyv = -1 if python else 0
if python and "python_version" not in requirement:
return ("", "") if version and not merge else ""
requirement = requirement.split("python_version")[idx_pyv].replace(";", "").replace("\"", "")
op_str = ""
pkg_name = requirement
for operator in [">=", ">", "<=", "<", "!=", "==", "="]:
if operator in requirement:
op_str = operator
pkg_name, requirement = requirement.split(operator, 1)
break
if not version:
return pkg_name.strip()
if op_str == "":
pkg_name = requirement
requirement = ""
parts = (op_str, requirement.strip())
if merge and python:
return "python_version {} \"{}\"".format(parts[0], parts[1])
if merge and version:
return "{}{}{}".format(pkg_name, parts[0], parts[1])
return parts | 325 | 31 |
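Worked examples matching the docstring table, assuming _split_requirement is in scope and the requirement string is hypothetical:

req = 'pkg>=1.2; python_version<"3.0"'
assert _split_requirement(req) == 'pkg'
assert _split_requirement(req, version=True) == ('>=', '1.2')
assert _split_requirement(req, version=True, python=True) == ('<', '3.0')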
462982d8e43ae9a5a9f7901259d7fa159bdc91ab | panchiwalashivani/neuroconv | BonsaiRecordingExtractor.py | [
"Apache-2.0"
] | Python | find_soup | <not_specific> | def find_soup(soup, **kwargs):
"""
Raise error when no matching text is found in soup
Parameters
----------
soup : BeautifulSoup object
kwargs : parameters to pass into find
"""
if soup.find(**kwargs) is None:
raise ValueError(
f"Cannot find text matching {kwargs} in file. Check if specified file path is correct"
)
return soup.find(**kwargs) |
Raise error when no matching text is found in soup
Parameters
----------
soup : BeautifulSoup object
kwargs : parameters to pass into find
| Raise error when no matching text is found in soup
Parameters
soup : BeautifulSoup object
kwargs : parameters to pass into find | [
"Raise",
"error",
"when",
"no",
"matching",
"text",
"is",
"found",
"in",
"soup",
"Parameters",
"soup",
":",
"BeautifulSoup",
"object",
"kwargs",
":",
"parameters",
"to",
"pass",
"into",
"find"
] | def find_soup(soup, **kwargs):
if soup.find(**kwargs) is None:
raise ValueError(
f"Cannot find text matching {kwargs} in file. Check if specified file path is correct"
)
return soup.find(**kwargs) | [
"def",
"find_soup",
"(",
"soup",
",",
"**",
"kwargs",
")",
":",
"if",
"soup",
".",
"find",
"(",
"**",
"kwargs",
")",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"f\"Cannot find text matching {kwargs} in file. Check if specified file path is correct\"",
")",
"return",
"soup",
".",
"find",
"(",
"**",
"kwargs",
")"
] | Raise error when no matching text is found in soup
Parameters | [
"Raise",
"error",
"when",
"no",
"matching",
"text",
"is",
"found",
"in",
"soup",
"Parameters"
] | [
"\"\"\"\n Raise error when no matching text is found in soup\n\n Parameters\n ----------\n soup : BeautifulSoup object \n kwargs : parameters to pass into find\n \"\"\""
] | [
{
"param": "soup",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "soup",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def find_soup(soup, **kwargs):
if soup.find(**kwargs) is None:
raise ValueError(
f"Cannot find text matching {kwargs} in file. Check if specified file path is correct"
)
return soup.find(**kwargs) | 326 | 596 |
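A usage sketch over a tiny hypothetical document, assuming BeautifulSoup is available:

from bs4 import BeautifulSoup

soup = BeautifulSoup('<a id="x">hi</a>', 'html.parser')
tag = find_soup(soup, id='x')
find_soup(soup, id='missing')   # raises ValueError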
2112933751835c522c87b9c5261f35ac4ccf9d54 | copumpkin/pants | src/python/pants/engine/scheduler.py | [
"Apache-2.0"
] | Python | failure | <not_specific> | def failure(cls, error):
"""Create a failure result.
A failure result represents a run with a fatal error. It presents the error but no
products.
:param error: The execution error encountered.
:type error: :class:`pants.base.exceptions.TaskError`
:rtype: `ExecutionResult`
"""
return cls(error=error, root_products=None) | Create a failure result.
A failure result represents a run with a fatal error. It presents the error but no
products.
:param error: The execution error encountered.
:type error: :class:`pants.base.exceptions.TaskError`
:rtype: `ExecutionResult`
| Create a failure result.
A failure result represents a run with a fatal error. It presents the error but no
products. | [
"Create",
"a",
"failure",
"result",
".",
"A",
"failure",
"result",
"represent",
"a",
"run",
"with",
"a",
"fatal",
"error",
".",
"It",
"presents",
"the",
"error",
"but",
"no",
"products",
"."
] | def failure(cls, error):
return cls(error=error, root_products=None) | [
"def",
"failure",
"(",
"cls",
",",
"error",
")",
":",
"return",
"cls",
"(",
"error",
"=",
"error",
",",
"root_products",
"=",
"None",
")"
] | Create a failure result. | [
"Create",
"a",
"failure",
"result",
"."
] | [
"\"\"\"Create a failure result.\n\n A failure result represent a run with a fatal error. It presents the error but no\n products.\n\n :param error: The execution error encountered.\n :type error: :class:`pants.base.exceptions.TaskError`\n :rtype: `ExecutionResult`\n \"\"\""
] | [
{
"param": "cls",
"type": null
},
{
"param": "error",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "`ExecutionResult`"
}
],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "error",
"type": null,
"docstring": "The execution error encountered.",
"docstring_tokens": [
"The",
"execution",
"error",
"encountered",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def failure(cls, error):
return cls(error=error, root_products=None) | 327 | 957 |
7a4eadc60c7f5d7e9df279c8ed4e0f27684700d0 | softdevteam/warmup_stats | warmup/krun_results.py | [
"Apache-2.0",
"MIT-0",
"MIT"
] | Python | read_krun_results_file | <not_specific> | def read_krun_results_file(results_file):
"""Return the JSON data stored in a Krun results file.
"""
results = None
with bz2.BZ2File(results_file, 'rb') as file_:
results = json.loads(file_.read())
return results
return None | Return the JSON data stored in a Krun results file.
| Return the JSON data stored in a Krun results file. | [
"Return",
"the",
"JSON",
"data",
"stored",
"in",
"a",
"Krun",
"results",
"file",
"."
] | def read_krun_results_file(results_file):
results = None
with bz2.BZ2File(results_file, 'rb') as file_:
results = json.loads(file_.read())
return results
return None | [
"def",
"read_krun_results_file",
"(",
"results_file",
")",
":",
"results",
"=",
"None",
"with",
"bz2",
".",
"BZ2File",
"(",
"results_file",
",",
"'rb'",
")",
"as",
"file_",
":",
"results",
"=",
"json",
".",
"loads",
"(",
"file_",
".",
"read",
"(",
")",
")",
"return",
"results",
"return",
"None"
] | Return the JSON data stored in a Krun results file. | [
"Return",
"the",
"JSON",
"data",
"stored",
"in",
"a",
"Krun",
"results",
"file",
"."
] | [
"\"\"\"Return the JSON data stored in a Krun results file.\n \"\"\""
] | [
{
"param": "results_file",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "results_file",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import bz2
import json
def read_krun_results_file(results_file):
results = None
with bz2.BZ2File(results_file, 'rb') as file_:
results = json.loads(file_.read())
return results
return None | 328 | 908 |
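A usage sketch with an illustrative path; the trailing return None in this record is unreachable, since the with block always returns:

results = read_krun_results_file('experiment_results.json.bz2')
print(sorted(results))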
914085ef72b1ddbfd2e924b1cbf3154431a99276 | QISKit/qiskit-sdk-py | qiskit/transpiler/passes/routing/swap_strategies/pauli_2q_evolution_commutation.py | [
"Apache-2.0"
] | Python | summands_commute | bool | def summands_commute(operator: SparsePauliOp) -> bool:
"""Check if all summands in the evolved operator commute.
Args:
operator: The operator to check if all its summands commute.
Returns:
True if all summands commute, False otherwise.
"""
# get a list of summands that commute
commuting_subparts = operator.paulis.group_qubit_wise_commuting()
# if all commute we only have one summand!
return len(commuting_subparts) == 1 | Check if all summands in the evolved operator commute.
Args:
operator: The operator to check if all its summands commute.
Returns:
True if all summands commute, False otherwise.
| Check if all summands in the evolved operator commute. | [
"Check",
"if",
"all",
"summands",
"in",
"the",
"evolved",
"operator",
"commute",
"."
] | def summands_commute(operator: SparsePauliOp) -> bool:
commuting_subparts = operator.paulis.group_qubit_wise_commuting()
return len(commuting_subparts) == 1 | [
"def",
"summands_commute",
"(",
"operator",
":",
"SparsePauliOp",
")",
"->",
"bool",
":",
"commuting_subparts",
"=",
"operator",
".",
"paulis",
".",
"group_qubit_wise_commuting",
"(",
")",
"return",
"len",
"(",
"commuting_subparts",
")",
"==",
"1"
] | Check if all summands in the evolved operator commute. | [
"Check",
"if",
"all",
"summands",
"in",
"the",
"evolved",
"operator",
"commute",
"."
] | [
"\"\"\"Check if all summands in the evolved operator commute.\n\n Args:\n operator: The operator to check if all its summands commute.\n\n Returns:\n True if all summands commute, False otherwise.\n \"\"\"",
"# get a list of summands that commute",
"# if all commute we only have one summand!"
] | [
{
"param": "operator",
"type": "SparsePauliOp"
}
] | {
"returns": [
{
"docstring": "True if all summands commute, False otherwise.",
"docstring_tokens": [
"True",
"if",
"all",
"summands",
"commute",
"False",
"otherwise",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "operator",
"type": "SparsePauliOp",
"docstring": "The operator to check if all its summands commute.",
"docstring_tokens": [
"The",
"operator",
"to",
"check",
"if",
"all",
"its",
"summands",
"commute",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def summands_commute(operator: SparsePauliOp) -> bool:
commuting_subparts = operator.paulis.group_qubit_wise_commuting()
return len(commuting_subparts) == 1 | 329 | 348 |
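A usage sketch, assuming a recent Qiskit in which PauliList provides group_qubit_wise_commuting:

from qiskit.quantum_info import SparsePauliOp

assert summands_commute(SparsePauliOp(['ZZ', 'IZ']))
assert not summands_commute(SparsePauliOp(['ZZ', 'XX']))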
639575c5891df1c628bda56d85dd31b9d6ca3543 | jjhuang/django-anonymizer | anonymizer/replacers.py | [
"MIT"
] | Python | email | <not_specific> | def email(anon, obj, field, val):
"""
Generates a random email address.
"""
return anon.faker.email(field=field) |
Generates a random email address.
| Generates a random email address. | [
"Generates",
"a",
"random",
"email",
"address",
"."
] | def email(anon, obj, field, val):
return anon.faker.email(field=field) | [
"def",
"email",
"(",
"anon",
",",
"obj",
",",
"field",
",",
"val",
")",
":",
"return",
"anon",
".",
"faker",
".",
"email",
"(",
"field",
"=",
"field",
")"
] | Generates a random email address. | [
"Generates",
"a",
"random",
"email",
"address",
"."
] | [
"\"\"\"\n Generates a random email address.\n \"\"\""
] | [
{
"param": "anon",
"type": null
},
{
"param": "obj",
"type": null
},
{
"param": "field",
"type": null
},
{
"param": "val",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "anon",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "obj",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "field",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "val",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def email(anon, obj, field, val):
return anon.faker.email(field=field) | 330 | 1,022 |
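A self-contained sketch of the email replacer; the real `anon` object comes from django-anonymizer and wraps a faker, so hypothetical stand-in classes are used here to make the call runnable in isolation:

class _StubFaker:
    def email(self, field=None):
        return 'user@example.com'  # placeholder address

class _StubAnon:
    faker = _StubFaker()

# obj, field and val mirror the replacer signature; only anon.faker is used
print(email(_StubAnon(), obj=None, field=None, val='real@secret.org'))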
c3a7f5d559117a04284aab27f6f1a3cd6d5d3236 | kokyriakidis/bcbio-nextgen | bcbio/distributed/ipythontasks.py | [
"MIT"
] | Python | apply | <not_specific> | def apply(object, args=None, kwargs=None):
"""Python3 apply replacement for double unpacking of inputs during apply.
Thanks to: https://github.com/stefanholek/apply
"""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
return object(*args, **kwargs) | Python3 apply replacement for double unpacking of inputs during apply.
Thanks to: https://github.com/stefanholek/apply
| Python3 apply replacement for double unpacking of inputs during apply. | [
"Python3",
"apply",
"replacement",
"for",
"double",
"unpacking",
"of",
"inputs",
"during",
"apply",
"."
] | def apply(object, args=None, kwargs=None):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
return object(*args, **kwargs) | [
"def",
"apply",
"(",
"object",
",",
"args",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"if",
"args",
"is",
"None",
":",
"args",
"=",
"(",
")",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"return",
"object",
"(",
"*",
"args",
",",
"**",
"kwargs",
")"
] | Python3 apply replacement for double unpacking of inputs during apply. | [
"Python3",
"apply",
"replacement",
"for",
"double",
"unpacking",
"of",
"inputs",
"during",
"apply",
"."
] | [
"\"\"\"Python3 apply replacement for double unpacking of inputs during apply.\n\n Thanks to: https://github.com/stefanholek/apply\n \"\"\""
] | [
{
"param": "object",
"type": null
},
{
"param": "args",
"type": null
},
{
"param": "kwargs",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "object",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "args",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "kwargs",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def apply(object, args=None, kwargs=None):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
return object(*args, **kwargs) | 331 | 169 |
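A short sketch showing the helper unpacking positional and keyword arguments, mirroring Python 2's built-in apply; the add function is made up for illustration:

def add(a, b, c=0):
    return a + b + c

print(apply(add, (1, 2), {'c': 3}))  # -> 6
print(apply(add, (1, 2)))            # -> 3
print(apply(lambda: 'no args'))      # -> 'no args'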
b11ce15ff742ec149db808f567017fc64131d6ca | azadoks/aiida-core | aiida/tools/visualization/graph.py | [
"MIT",
"BSD-3-Clause"
] | Python | _add_graphviz_edge | <not_specific> | def _add_graphviz_edge(graph, in_node, out_node, style=None):
"""add graphviz edge between two nodes
:param graph: the graphviz.DiGraph to add the edge to
:param in_node: the head node
:param out_node: the tail node
:param style: the graphviz style (Default value = None)
:type style: dict or None
"""
if style is None:
style = {}
    # coerce edge style values to strings
style = {k: str(v) for k, v in style.items()}
return graph.edge(f'N{in_node.pk}', f'N{out_node.pk}', **style) | add graphviz edge between two nodes
:param graph: the graphviz.DiGraph to add the edge to
:param in_node: the head node
:param out_node: the tail node
:param style: the graphviz style (Default value = None)
:type style: dict or None
| add graphviz edge between two nodes | [
"add",
"graphviz",
"edge",
"between",
"two",
"nodes"
] | def _add_graphviz_edge(graph, in_node, out_node, style=None):
if style is None:
style = {}
style = {k: str(v) for k, v in style.items()}
return graph.edge(f'N{in_node.pk}', f'N{out_node.pk}', **style) | [
"def",
"_add_graphviz_edge",
"(",
"graph",
",",
"in_node",
",",
"out_node",
",",
"style",
"=",
"None",
")",
":",
"if",
"style",
"is",
"None",
":",
"style",
"=",
"{",
"}",
"style",
"=",
"{",
"k",
":",
"str",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"style",
".",
"items",
"(",
")",
"}",
"return",
"graph",
".",
"edge",
"(",
"f'N{in_node.pk}'",
",",
"f'N{out_node.pk}'",
",",
"**",
"style",
")"
] | add graphviz edge between two nodes | [
"add",
"graphviz",
"edge",
"between",
"two",
"nodes"
] | [
"\"\"\"add graphviz edge between two nodes\n\n :param graph: the graphviz.DiGraph to add the edge to\n :param in_node: the head node\n :param out_node: the tail node\n :param style: the graphviz style (Default value = None)\n :type style: dict or None\n \"\"\"",
"# coerce node style values to strings"
] | [
{
"param": "graph",
"type": null
},
{
"param": "in_node",
"type": null
},
{
"param": "out_node",
"type": null
},
{
"param": "style",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "graph",
"type": null,
"docstring": "the graphviz.DiGraph to add the edge to",
"docstring_tokens": [
"the",
"graphviz",
".",
"DiGraph",
"to",
"add",
"the",
"edge",
"to"
],
"default": null,
"is_optional": null
},
{
"identifier": "in_node",
"type": null,
"docstring": "the head node",
"docstring_tokens": [
"the",
"head",
"node"
],
"default": null,
"is_optional": null
},
{
"identifier": "out_node",
"type": null,
"docstring": "the tail node",
"docstring_tokens": [
"the",
"tail",
"node"
],
"default": null,
"is_optional": null
},
{
"identifier": "style",
"type": null,
"docstring": "the graphviz style (Default value = None)",
"docstring_tokens": [
"the",
"graphviz",
"style",
"(",
"Default",
"value",
"=",
"None",
")"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _add_graphviz_edge(graph, in_node, out_node, style=None):
if style is None:
style = {}
style = {k: str(v) for k, v in style.items()}
return graph.edge(f'N{in_node.pk}', f'N{out_node.pk}', **style) | 333 | 504 |
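A hedged sketch of _add_graphviz_edge; the stub node class only provides the .pk attribute the helper reads, and the style keys are ordinary graphviz edge attributes chosen for illustration:

import graphviz

class _StubNode:
    def __init__(self, pk):
        self.pk = pk

graph = graphviz.Digraph()
_add_graphviz_edge(graph, _StubNode(1), _StubNode(2), style={'color': 'red', 'penwidth': 2})
print(graph.source)  # the DOT output contains an edge N1 -> N2 with the coerced attributes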
4efb8100915f2e5fdc23d323a15fe8496bfd82ef | abattery/falken | service/learner/brains/observation_preprocessor.py | [
"Apache-2.0"
] | Python | _get_from_nest | <not_specific> | def _get_from_nest(nest, path):
"""Return element from a dictionary-only nest using path specified as list.
Args:
nest: A nested dictionary.
path: A list of strings specifying a nested element.
Returns:
A leaf or subnest of the input nest.
"""
if not path or not nest:
return nest
return _get_from_nest(nest.get(path[0], None), path[1:]) | Return element from a dictionary-only nest using path specified as list.
Args:
nest: A nested dictionary.
path: A list of strings specifying a nested element.
Returns:
A leaf or subnest of the input nest.
| Return element from a dictionary-only nest using path specified as list. | [
"Return",
"element",
"from",
"a",
"dictionary",
"-",
"only",
"nest",
"using",
"path",
"specified",
"as",
"list",
"."
] | def _get_from_nest(nest, path):
if not path or not nest:
return nest
return _get_from_nest(nest.get(path[0], None), path[1:]) | [
"def",
"_get_from_nest",
"(",
"nest",
",",
"path",
")",
":",
"if",
"not",
"path",
"or",
"not",
"nest",
":",
"return",
"nest",
"return",
"_get_from_nest",
"(",
"nest",
".",
"get",
"(",
"path",
"[",
"0",
"]",
",",
"None",
")",
",",
"path",
"[",
"1",
":",
"]",
")"
] | Return element from a dictionary-only nest using path specified as list. | [
"Return",
"element",
"from",
"a",
"dictionary",
"-",
"only",
"nest",
"using",
"path",
"specified",
"as",
"list",
"."
] | [
"\"\"\"Return element from a dictionary-only nest using path specified as list.\n\n Args:\n nest: A nested dictionary.\n path: A list of strings specifying a nested element.\n Returns:\n An leaf or subnest of the input nest.\n \"\"\""
] | [
{
"param": "nest",
"type": null
},
{
"param": "path",
"type": null
}
] | {
"returns": [
{
"docstring": "An leaf or subnest of the input nest.",
"docstring_tokens": [
"An",
"leaf",
"or",
"subnest",
"of",
"the",
"input",
"nest",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "nest",
"type": null,
"docstring": "A nested dictionary.",
"docstring_tokens": [
"A",
"nested",
"dictionary",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "path",
"type": null,
"docstring": "A list of strings specifying a nested element.",
"docstring_tokens": [
"A",
"list",
"of",
"strings",
"specifying",
"a",
"nested",
"element",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _get_from_nest(nest, path):
if not path or not nest:
return nest
return _get_from_nest(nest.get(path[0], None), path[1:]) | 334 | 464 |
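A small sketch of _get_from_nest on a hypothetical nested config dictionary:

config = {'db': {'host': 'localhost', 'port': 5432}}

print(_get_from_nest(config, ['db', 'host']))     # -> 'localhost'
print(_get_from_nest(config, ['db', 'missing']))  # -> None (absent keys fall back to None)
print(_get_from_nest(config, []))                 # -> the whole nest unchanged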
91809908f1eccf694a7928861c277a83294a1ec9 | AntiCompositeNumber/AntiCompositeBot | src/BadGPS.py | [
"Apache-2.0"
] | Python | save_page | null | def save_page(page, summary):
"""Save the page to the wiki, retrying once if it doesn't work."""
try:
page.save(summary=summary, botflag=True)
except Exception:
print("Save failed, trying again soon")
time.sleep(15)
page.save(summary=summary, botflag=True) | Save the page to the wiki, retrying once if it doesn't work. | Save the page to the wiki, retrying once if it doesn't work. | [
"Save",
"the",
"page",
"to",
"the",
"wiki",
"retrying",
"once",
"if",
"it",
"doesn",
"'",
"t",
"work",
"."
] | def save_page(page, summary):
try:
page.save(summary=summary, botflag=True)
except Exception:
print("Save failed, trying again soon")
time.sleep(15)
page.save(summary=summary, botflag=True) | [
"def",
"save_page",
"(",
"page",
",",
"summary",
")",
":",
"try",
":",
"page",
".",
"save",
"(",
"summary",
"=",
"summary",
",",
"botflag",
"=",
"True",
")",
"except",
"Exception",
":",
"print",
"(",
"\"Save failed, trying again soon\"",
")",
"time",
".",
"sleep",
"(",
"15",
")",
"page",
".",
"save",
"(",
"summary",
"=",
"summary",
",",
"botflag",
"=",
"True",
")"
] | Save the page to the wiki, retrying once if it doesn't work. | [
"Save",
"the",
"page",
"to",
"the",
"wiki",
"retrying",
"once",
"if",
"it",
"doesn",
"'",
"t",
"work",
"."
] | [
"\"\"\"Save the page to the wiki, retrying once if it doesn't work.\"\"\""
] | [
{
"param": "page",
"type": null
},
{
"param": "summary",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "page",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "summary",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import time
def save_page(page, summary):
try:
page.save(summary=summary, botflag=True)
except Exception:
print("Save failed, trying again soon")
time.sleep(15)
page.save(summary=summary, botflag=True) | 335 | 781 |
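A hedged sketch of save_page with pywikibot; the site, page title, and summary are hypothetical, and actually saving requires a configured pywikibot login:

import pywikibot

site = pywikibot.Site('en', 'wikipedia')
page = pywikibot.Page(site, 'User:ExampleBot/sandbox')  # hypothetical target page
page.text += '\nAutomated test edit.'
save_page(page, summary='Demonstrating the retry-once save helper')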
5300ceb789f587dbdfdcfba9721616d01a33b870 | consag/build-and-deploy-informatica | cicd/createProject.py | [
"MIT"
] | Python | parse_the_arguments | <not_specific> | def parse_the_arguments(argv):
"""Parses the provided arguments and exits on an error.
Use the option -h on the command line to get an overview of the required and optional arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--project", required=True, action="store", dest="project_name",
help="Name of the project that needs to be created.")
args = parser.parse_args()
return args | Parses the provided arguments and exits on an error.
Use the option -h on the command line to get an overview of the required and optional arguments.
| Parses the provided arguments and exits on an error.
Use the option -h on the command line to get an overview of the required and optional arguments. | [
"Parses",
"the",
"provided",
"arguments",
"and",
"exits",
"on",
"an",
"error",
".",
"Use",
"the",
"option",
"-",
"h",
"on",
"the",
"command",
"line",
"to",
"get",
"an",
"overview",
"of",
"the",
"required",
"and",
"optional",
"arguments",
"."
] | def parse_the_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--project", required=True, action="store", dest="project_name",
help="Name of the project that needs to be created.")
args = parser.parse_args()
return args | [
"def",
"parse_the_arguments",
"(",
"argv",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"\"-p\"",
",",
"\"--project\"",
",",
"required",
"=",
"True",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"project_name\"",
",",
"help",
"=",
"\"Name of the project that needs to be created.\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"return",
"args"
] | Parses the provided arguments and exits on an error. | [
"Parses",
"the",
"provided",
"arguments",
"and",
"exits",
"on",
"an",
"error",
"."
] | [
"\"\"\"Parses the provided arguments and exits on an error.\n Use the option -h on the command line to get an overview of the required and optional arguments.\n \"\"\""
] | [
{
"param": "argv",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "argv",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import argparse
def parse_the_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--project", required=True, action="store", dest="project_name",
help="Name of the project that needs to be created.")
args = parser.parse_args()
return args | 336 | 412 |
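A usage sketch for parse_the_arguments; note that the argv parameter is accepted but unused, since parse_args() reads sys.argv itself:

import sys

# invoked as e.g.: python createProject.py -p MyProject
args = parse_the_arguments(sys.argv)
print(args.project_name)  # -> 'MyProject'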
d60987083cf4a23f3a0287d6ff297e5c71656554 | CyberFlameGO/gpt-code-clippy | evaluation/evaluation/metrics/extrinsic_eval.py | [
"Apache-2.0"
] | Python | compute_exact_match | float | def compute_exact_match(references, generated) -> float:
"""
Computes Exact Match Accuracy.
args:
reference: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation: list of translations to score. Each translation
should be tokenized into a list of tokens.
returns:
exact_match_accuracy : Float
"""
    assert len(references[0]) == len(generated), "Number of Samples should be equal in References and Synthesized Outputs.."
exact_match_count = 0.0
for gen, ref in zip(generated, references[0]):
if gen == ref:
exact_match_count += 1
exact_match_acc = exact_match_count / len(generated)
return exact_match_acc |
Computes Exact Match Accuracy.
args:
reference: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation: list of translations to score. Each translation
should be tokenized into a list of tokens.
returns:
exact_match_accuracy : Float
| Computes Exact Match Accuracy.
args:
reference: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation: list of translations to score. Each translation
should be tokenized into a list of tokens.
returns:
exact_match_accuracy : Float | [
"Computes",
"Exact",
"Match",
"Accuracy",
".",
"args",
":",
"reference",
":",
"list",
"of",
"lists",
"of",
"references",
"for",
"each",
"translation",
".",
"Each",
"reference",
"should",
"be",
"tokenized",
"into",
"a",
"list",
"of",
"tokens",
".",
"translation",
":",
"list",
"of",
"translations",
"to",
"score",
".",
"Each",
"translation",
"should",
"be",
"tokenized",
"into",
"a",
"list",
"of",
"tokens",
".",
"returns",
":",
"exact_match_accuracy",
":",
"Float"
] | def compute_exact_match(references, generated) -> float:
    assert len(references[0]) == len(generated), "Number of Samples should be equal in References and Synthesized Outputs.."
exact_match_count = 0.0
for gen, ref in zip(generated, references[0]):
if gen == ref:
exact_match_count += 1
exact_match_acc = exact_match_count / len(generated)
return exact_match_acc | [
"def",
"compute_exact_match",
"(",
"references",
",",
"generated",
")",
"->",
"float",
":",
"assert",
"(",
"len",
"(",
"references",
"[",
"0",
"]",
")",
"==",
"len",
"(",
"generated",
")",
",",
"\"Number of Samples should be equal in References and Synthesized Outputs..\"",
",",
")",
"exact_match_count",
"=",
"0.0",
"for",
"gen",
",",
"ref",
"in",
"zip",
"(",
"generated",
",",
"references",
"[",
"0",
"]",
")",
":",
"if",
"gen",
"==",
"ref",
":",
"exact_match_count",
"+=",
"1",
"exact_match_acc",
"=",
"exact_match_count",
"/",
"len",
"(",
"generated",
")",
"return",
"exact_match_acc"
] | Computes Exact Match Accuracy. | [
"Computes",
"Exact",
"Match",
"Accuracy",
"."
] | [
"\"\"\"\n Computes Exact Match Accuracy.\n args:\n reference: list of lists of references for each translation. Each\n reference should be tokenized into a list of tokens.\n translation: list of translations to score. Each translation\n should be tokenized into a list of tokens.\n returns:\n exact_match_accuracy : Float\n \"\"\""
] | [
{
"param": "references",
"type": null
},
{
"param": "generated",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "references",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "generated",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def compute_exact_match(references, generated) -> float:
    assert len(references[0]) == len(generated), "Number of Samples should be equal in References and Synthesized Outputs.."
exact_match_count = 0.0
for gen, ref in zip(generated, references[0]):
if gen == ref:
exact_match_count += 1
exact_match_acc = exact_match_count / len(generated)
return exact_match_acc | 337 | 12 |
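A worked sketch of compute_exact_match; note that references is a list whose first element holds the per-sample references, so only references[0] is compared against generated:

references = [[['return', '1'], ['return', '2']]]  # one reference per sample, wrapped once more
generated = [['return', '1'], ['return', '3']]

print(compute_exact_match(references, generated))  # -> 0.5 (only the first sample matches)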