Dataset fields (type, value range):
hexsha: stringlengths (40 to 40)
repo: stringlengths (5 to 121)
path: stringlengths (4 to 227)
license: sequence
language: stringclasses (1 value)
identifier: stringlengths (1 to 107)
return_type: stringlengths (2 to 237)
original_string: stringlengths (75 to 13.4k)
original_docstring: stringlengths (13 to 12.9k)
docstring: stringlengths (13 to 2.57k)
docstring_tokens: sequence
code: stringlengths (23 to 1.88k)
code_tokens: sequence
short_docstring: stringlengths (1 to 1.32k)
short_docstring_tokens: sequence
comment: sequence
parameters: list
docstring_params: dict
code_with_imports: stringlengths (23 to 1.88k)
idxs: int64 (0 to 611k)
cluster: int64 (0 to 1.02k)
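Each row that follows pairs a Python function with its docstring, token lists, and parameter metadata under the fields listed above. As a minimal sketch of how such rows might be consumed (this assumes the rows are exported as JSON Lines with one object per line; the file name below is a placeholder, not something this dump specifies), the records can be iterated with nothing but the standard library:

import json

# Placeholder path: the storage location and format of these rows are not
# stated in the dump, so adjust this to wherever the export actually lives.
with open("rows.jsonl", encoding="utf-8") as handle:
    for line in handle:
        row = json.loads(line)
        # Field names follow the schema listed above.
        print(row["repo"], row["path"], row["identifier"])
        print(row["short_docstring"])
        print(row["code_with_imports"])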
5fc183cf6ad47509c1f93a1f030a4b72800f91ad
jcolekaplan/computer_vision
ransac/ransac.py
[ "MIT" ]
Python
outputAvgDist
null
def outputAvgDist(avgInlierDist, avgOutlierDist): """ Take in the avergae inlier and outleir distances Output stats """ print("avg inlier dist {:.3f}".format(avgInlierDist)) print("avg outlier dist {:.3f}".format(avgOutlierDist))
Take in the avergae inlier and outleir distances Output stats
Take in the avergae inlier and outleir distances Output stats
[ "Take", "in", "the", "avergae", "inlier", "and", "outleir", "distances", "Output", "stats" ]
def outputAvgDist(avgInlierDist, avgOutlierDist): print("avg inlier dist {:.3f}".format(avgInlierDist)) print("avg outlier dist {:.3f}".format(avgOutlierDist))
[ "def", "outputAvgDist", "(", "avgInlierDist", ",", "avgOutlierDist", ")", ":", "print", "(", "\"avg inlier dist {:.3f}\"", ".", "format", "(", "avgInlierDist", ")", ")", "print", "(", "\"avg outlier dist {:.3f}\"", ".", "format", "(", "avgOutlierDist", ")", ")" ]
Take in the avergae inlier and outleir distances Output stats
[ "Take", "in", "the", "avergae", "inlier", "and", "outleir", "distances", "Output", "stats" ]
[ "\"\"\"\n Take in the avergae inlier and outleir distances\n Output stats\n \"\"\"" ]
[ { "param": "avgInlierDist", "type": null }, { "param": "avgOutlierDist", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "avgInlierDist", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "avgOutlierDist", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def outputAvgDist(avgInlierDist, avgOutlierDist):
    print("avg inlier dist {:.3f}".format(avgInlierDist))
    print("avg outlier dist {:.3f}".format(avgOutlierDist))
610,515
870
ab812ff63cb3b1f76c153ec449bb54c9c5263627
Baptistech/MicroServiceGeneratorPythonFlask
ServiceGenerator.py
[ "MIT" ]
Python
main_file_writer
null
def main_file_writer(file_name, file_path, file_contents): """ This function will write in the main file of our service :param file_name: str :param file_path: str :param file_contents: dic :return: """ destination = file_path + file_name current_file = open(destination, "a") # add necessary import for the project current_file.write("from flask import Flask\n") current_file.write("from flask_restful import api\n") current_file.write("from settings import *\n") current_file.write("from config import session\n") # add classic line for flask restful api project current_file.write("\napp = Flask(__name__)\n" + "app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI\n" + "api = Api(app)\n") # create managers in current app for manager in file_contents["managers"]: new_manager = (manager.replace("Manager", "")).lower() current_file.write("app." + new_manager + "_manager = " + manager + "(session)\n") # Create routes for route in file_contents["routes"]: current_file.write("api.add_resource(" + route["controller"] + ", '" + route["url"]+"')\n") # Write the classic at the end of the main file current_file.write("\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)")
This function will write in the main file of our service :param file_name: str :param file_path: str :param file_contents: dic :return:
This function will write in the main file of our service
[ "This", "function", "will", "write", "in", "the", "main", "file", "of", "our", "service" ]
def main_file_writer(file_name, file_path, file_contents): destination = file_path + file_name current_file = open(destination, "a") current_file.write("from flask import Flask\n") current_file.write("from flask_restful import api\n") current_file.write("from settings import *\n") current_file.write("from config import session\n") current_file.write("\napp = Flask(__name__)\n" + "app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI\n" + "api = Api(app)\n") for manager in file_contents["managers"]: new_manager = (manager.replace("Manager", "")).lower() current_file.write("app." + new_manager + "_manager = " + manager + "(session)\n") for route in file_contents["routes"]: current_file.write("api.add_resource(" + route["controller"] + ", '" + route["url"]+"')\n") current_file.write("\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)")
[ "def", "main_file_writer", "(", "file_name", ",", "file_path", ",", "file_contents", ")", ":", "destination", "=", "file_path", "+", "file_name", "current_file", "=", "open", "(", "destination", ",", "\"a\"", ")", "current_file", ".", "write", "(", "\"from flask import Flask\\n\"", ")", "current_file", ".", "write", "(", "\"from flask_restful import api\\n\"", ")", "current_file", ".", "write", "(", "\"from settings import *\\n\"", ")", "current_file", ".", "write", "(", "\"from config import session\\n\"", ")", "current_file", ".", "write", "(", "\"\\napp = Flask(__name__)\\n\"", "+", "\"app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI\\n\"", "+", "\"api = Api(app)\\n\"", ")", "for", "manager", "in", "file_contents", "[", "\"managers\"", "]", ":", "new_manager", "=", "(", "manager", ".", "replace", "(", "\"Manager\"", ",", "\"\"", ")", ")", ".", "lower", "(", ")", "current_file", ".", "write", "(", "\"app.\"", "+", "new_manager", "+", "\"_manager = \"", "+", "manager", "+", "\"(session)\\n\"", ")", "for", "route", "in", "file_contents", "[", "\"routes\"", "]", ":", "current_file", ".", "write", "(", "\"api.add_resource(\"", "+", "route", "[", "\"controller\"", "]", "+", "\", '\"", "+", "route", "[", "\"url\"", "]", "+", "\"')\\n\"", ")", "current_file", ".", "write", "(", "\"\\nif __name__ == '__main__':\\n app.run(host='0.0.0.0', port=5000, debug=True)\"", ")" ]
This function will write in the main file of our service
[ "This", "function", "will", "write", "in", "the", "main", "file", "of", "our", "service" ]
[ "\"\"\"\n This function will write in the main file of our service\n :param file_name: str\n :param file_path: str\n :param file_contents: dic\n :return:\n \"\"\"", "# add necessary import for the project", "# add classic line for flask restful api project", "# create managers in current app", "# Create routes", "# Write the classic at the end of the main file" ]
[ { "param": "file_name", "type": null }, { "param": "file_path", "type": null }, { "param": "file_contents", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "file_name", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "file_path", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "file_contents", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def main_file_writer(file_name, file_path, file_contents):
    destination = file_path + file_name
    current_file = open(destination, "a")
    current_file.write("from flask import Flask\n")
    current_file.write("from flask_restful import api\n")
    current_file.write("from settings import *\n")
    current_file.write("from config import session\n")
    current_file.write("\napp = Flask(__name__)\n" +
                       "app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI\n" +
                       "api = Api(app)\n")
    for manager in file_contents["managers"]:
        new_manager = (manager.replace("Manager", "")).lower()
        current_file.write("app." + new_manager + "_manager = " + manager + "(session)\n")
    for route in file_contents["routes"]:
        current_file.write("api.add_resource(" + route["controller"] + ", '" + route["url"] + "')\n")
    current_file.write("\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)")
610,516
404
b613fa27b00c059cb32b5593abafa54ce2d40eca
anarchivist/fido
fido/fido.py
[ "Apache-2.0" ]
Python
list_files
null
def list_files(roots, recurse=False): "Return the files one at a time. Roots could be a fileobj or a list." for root in roots: root = (root if root[-1] != '\n' else root[:-1]) root = os.path.normpath(root) if os.path.isfile(root): yield root else: for path, unused, files in os.walk(root): for f in files: yield os.path.join(path, f) if recurse == False: break
Return the files one at a time. Roots could be a fileobj or a list.
Return the files one at a time. Roots could be a fileobj or a list.
[ "Return", "the", "files", "one", "at", "a", "time", ".", "Roots", "could", "be", "a", "fileobj", "or", "a", "list", "." ]
def list_files(roots, recurse=False): for root in roots: root = (root if root[-1] != '\n' else root[:-1]) root = os.path.normpath(root) if os.path.isfile(root): yield root else: for path, unused, files in os.walk(root): for f in files: yield os.path.join(path, f) if recurse == False: break
[ "def", "list_files", "(", "roots", ",", "recurse", "=", "False", ")", ":", "for", "root", "in", "roots", ":", "root", "=", "(", "root", "if", "root", "[", "-", "1", "]", "!=", "'\\n'", "else", "root", "[", ":", "-", "1", "]", ")", "root", "=", "os", ".", "path", ".", "normpath", "(", "root", ")", "if", "os", ".", "path", ".", "isfile", "(", "root", ")", ":", "yield", "root", "else", ":", "for", "path", ",", "unused", ",", "files", "in", "os", ".", "walk", "(", "root", ")", ":", "for", "f", "in", "files", ":", "yield", "os", ".", "path", ".", "join", "(", "path", ",", "f", ")", "if", "recurse", "==", "False", ":", "break" ]
Return the files one at a time.
[ "Return", "the", "files", "one", "at", "a", "time", "." ]
[ "\"Return the files one at a time. Roots could be a fileobj or a list.\"" ]
[ { "param": "roots", "type": null }, { "param": "recurse", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "roots", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "recurse", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os

def list_files(roots, recurse=False):
    for root in roots:
        root = (root if root[-1] != '\n' else root[:-1])
        root = os.path.normpath(root)
        if os.path.isfile(root):
            yield root
        else:
            for path, unused, files in os.walk(root):
                for f in files:
                    yield os.path.join(path, f)
                if recurse == False:
                    break
610,518
152
b0426f44b752ec79e7b3b5237754c6c857d84b79
solox2/wardroom-nc
swizzle/provision.py
[ "Apache-2.0" ]
Python
vagrant_ssh_config
null
def vagrant_ssh_config(tempfile): """ Get the current ssh config via `vagrant ssh-config` """ output = subprocess.check_output(['vagrant', 'ssh-config']) with open(tempfile, 'w') as fh: fh.write(output)
Get the current ssh config via `vagrant ssh-config`
Get the current ssh config via `vagrant ssh-config`
[ "Get", "the", "current", "ssh", "config", "via", "`", "vagrant", "ssh", "-", "config", "`" ]
def vagrant_ssh_config(tempfile): output = subprocess.check_output(['vagrant', 'ssh-config']) with open(tempfile, 'w') as fh: fh.write(output)
[ "def", "vagrant_ssh_config", "(", "tempfile", ")", ":", "output", "=", "subprocess", ".", "check_output", "(", "[", "'vagrant'", ",", "'ssh-config'", "]", ")", "with", "open", "(", "tempfile", ",", "'w'", ")", "as", "fh", ":", "fh", ".", "write", "(", "output", ")" ]
Get the current ssh config via `vagrant ssh-config`
[ "Get", "the", "current", "ssh", "config", "via", "`", "vagrant", "ssh", "-", "config", "`" ]
[ "\"\"\" Get the current ssh config via `vagrant ssh-config` \"\"\"" ]
[ { "param": "tempfile", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "tempfile", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import subprocess

def vagrant_ssh_config(tempfile):
    output = subprocess.check_output(['vagrant', 'ssh-config'])
    with open(tempfile, 'w') as fh:
        fh.write(output)
610,519
681
18b5729d3bd18cf6a7829dba6d7f0ef31309b75f
WarmongeringBeaver/hera_sim
hera_sim/simulate.py
[ "MIT" ]
Python
_get_model_parameters
<not_specific>
def _get_model_parameters(model): """Retrieve the full model signature (init + call) parameters.""" init_params = inspect.signature(model.__class__).parameters call_params = inspect.signature(model).parameters # this doesn't work correctly if done on one line model_params = {} for params in (call_params, init_params): for parameter, value in params.items(): model_params[parameter] = value.default model_params.pop("kwargs", None) return model_params
Retrieve the full model signature (init + call) parameters.
Retrieve the full model signature (init + call) parameters.
[ "Retrieve", "the", "full", "model", "signature", "(", "init", "+", "call", ")", "parameters", "." ]
def _get_model_parameters(model): init_params = inspect.signature(model.__class__).parameters call_params = inspect.signature(model).parameters model_params = {} for params in (call_params, init_params): for parameter, value in params.items(): model_params[parameter] = value.default model_params.pop("kwargs", None) return model_params
[ "def", "_get_model_parameters", "(", "model", ")", ":", "init_params", "=", "inspect", ".", "signature", "(", "model", ".", "__class__", ")", ".", "parameters", "call_params", "=", "inspect", ".", "signature", "(", "model", ")", ".", "parameters", "model_params", "=", "{", "}", "for", "params", "in", "(", "call_params", ",", "init_params", ")", ":", "for", "parameter", ",", "value", "in", "params", ".", "items", "(", ")", ":", "model_params", "[", "parameter", "]", "=", "value", ".", "default", "model_params", ".", "pop", "(", "\"kwargs\"", ",", "None", ")", "return", "model_params" ]
Retrieve the full model signature (init + call) parameters.
[ "Retrieve", "the", "full", "model", "signature", "(", "init", "+", "call", ")", "parameters", "." ]
[ "\"\"\"Retrieve the full model signature (init + call) parameters.\"\"\"", "# this doesn't work correctly if done on one line" ]
[ { "param": "model", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "model", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import inspect

def _get_model_parameters(model):
    init_params = inspect.signature(model.__class__).parameters
    call_params = inspect.signature(model).parameters
    model_params = {}
    for params in (call_params, init_params):
        for parameter, value in params.items():
            model_params[parameter] = value.default
    model_params.pop("kwargs", None)
    return model_params
610,520
94
2e62831aa7894dd392132ab9ad395e94bbfebec4
benjaminfjones/mk-movies
mkmovies.py
[ "MIT" ]
Python
compute_gap
<not_specific>
def compute_gap(time0, time1): """ Compute the time difference between the given datetime objects in seconds (rounded to the nearest second. """ tdelta = abs(time1 - time0) return round(tdelta.total_seconds())
Compute the time difference between the given datetime objects in seconds (rounded to the nearest second.
Compute the time difference between the given datetime objects in seconds (rounded to the nearest second.
[ "Compute", "the", "time", "difference", "between", "the", "given", "datetime", "objects", "in", "seconds", "(", "rounded", "to", "the", "nearest", "second", "." ]
def compute_gap(time0, time1): tdelta = abs(time1 - time0) return round(tdelta.total_seconds())
[ "def", "compute_gap", "(", "time0", ",", "time1", ")", ":", "tdelta", "=", "abs", "(", "time1", "-", "time0", ")", "return", "round", "(", "tdelta", ".", "total_seconds", "(", ")", ")" ]
Compute the time difference between the given datetime objects in seconds (rounded to the nearest second.
[ "Compute", "the", "time", "difference", "between", "the", "given", "datetime", "objects", "in", "seconds", "(", "rounded", "to", "the", "nearest", "second", "." ]
[ "\"\"\"\n Compute the time difference between the given datetime objects in\n seconds (rounded to the nearest second.\n \"\"\"" ]
[ { "param": "time0", "type": null }, { "param": "time1", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "time0", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "time1", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def compute_gap(time0, time1):
    tdelta = abs(time1 - time0)
    return round(tdelta.total_seconds())
610,521
348
5b41601b5c9e7c764aa8f097003053482645f02e
apolotakeshi/data-engineer-udacity-project-01
table_creation/create_tables.py
[ "BSD-3-Clause" ]
Python
drop_and_create_database
null
def drop_and_create_database(cur, database_name="sparkifydb", utf8=None): """ - Creates and connects to the sparkifydb - Returns the connection and cursor to sparkifydb """ # create sparkify database with UTF8 encoding cur.execute("DROP DATABASE IF EXISTS {}".format(database_name)) if utf8: cur.execute("CREATE DATABASE {} WITH ENCODING 'utf8' TEMPLATE template0".format(database_name))
- Creates and connects to the sparkifydb - Returns the connection and cursor to sparkifydb
Creates and connects to the sparkifydb Returns the connection and cursor to sparkifydb
[ "Creates", "and", "connects", "to", "the", "sparkifydb", "Returns", "the", "connection", "and", "cursor", "to", "sparkifydb" ]
def drop_and_create_database(cur, database_name="sparkifydb", utf8=None): cur.execute("DROP DATABASE IF EXISTS {}".format(database_name)) if utf8: cur.execute("CREATE DATABASE {} WITH ENCODING 'utf8' TEMPLATE template0".format(database_name))
[ "def", "drop_and_create_database", "(", "cur", ",", "database_name", "=", "\"sparkifydb\"", ",", "utf8", "=", "None", ")", ":", "cur", ".", "execute", "(", "\"DROP DATABASE IF EXISTS {}\"", ".", "format", "(", "database_name", ")", ")", "if", "utf8", ":", "cur", ".", "execute", "(", "\"CREATE DATABASE {} WITH ENCODING 'utf8' TEMPLATE template0\"", ".", "format", "(", "database_name", ")", ")" ]
Creates and connects to the sparkifydb Returns the connection and cursor to sparkifydb
[ "Creates", "and", "connects", "to", "the", "sparkifydb", "Returns", "the", "connection", "and", "cursor", "to", "sparkifydb" ]
[ "\"\"\"\n - Creates and connects to the sparkifydb\n - Returns the connection and cursor to sparkifydb\n \"\"\"", "# create sparkify database with UTF8 encoding" ]
[ { "param": "cur", "type": null }, { "param": "database_name", "type": null }, { "param": "utf8", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cur", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "database_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "utf8", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def drop_and_create_database(cur, database_name="sparkifydb", utf8=None):
    cur.execute("DROP DATABASE IF EXISTS {}".format(database_name))
    if utf8:
        cur.execute("CREATE DATABASE {} WITH ENCODING 'utf8' TEMPLATE template0".format(database_name))
610,522
289
50e9f2fe95dfe4d78d3d7771a8c29f32083b831b
mdlama/pydstool
PyDSTool/Toolbox/fracdim.py
[ "Python-2.0", "OLDAP-2.7" ]
Python
timeseq
<not_specific>
def timeseq(time, covering, refpt): """Extract time sequence from a covering""" ts = time.take(covering[refpt][3], 0) print("time sequence min = %f, max = %f"%(min(ts),max(ts))) return ts
Extract time sequence from a covering
Extract time sequence from a covering
[ "Extract", "time", "sequence", "from", "a", "covering" ]
def timeseq(time, covering, refpt): ts = time.take(covering[refpt][3], 0) print("time sequence min = %f, max = %f"%(min(ts),max(ts))) return ts
[ "def", "timeseq", "(", "time", ",", "covering", ",", "refpt", ")", ":", "ts", "=", "time", ".", "take", "(", "covering", "[", "refpt", "]", "[", "3", "]", ",", "0", ")", "print", "(", "\"time sequence min = %f, max = %f\"", "%", "(", "min", "(", "ts", ")", ",", "max", "(", "ts", ")", ")", ")", "return", "ts" ]
Extract time sequence from a covering
[ "Extract", "time", "sequence", "from", "a", "covering" ]
[ "\"\"\"Extract time sequence from a covering\"\"\"" ]
[ { "param": "time", "type": null }, { "param": "covering", "type": null }, { "param": "refpt", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "time", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "covering", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "refpt", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def timeseq(time, covering, refpt):
    ts = time.take(covering[refpt][3], 0)
    print("time sequence min = %f, max = %f" % (min(ts), max(ts)))
    return ts
610,523
135
9dbed93de5f1fb63fe7bfcc2c3f227b76888eda3
ogrisel/codemaker
src/codemaker/features/image.py
[ "MIT" ]
Python
urldecode
<not_specific>
def urldecode(url): """Helper to convert query parameters as a dict""" if "?" in url: path, query = url.split("?", 1) return path, dict(cgi.parse_qsl(query)) else: return url, {}
Helper to convert query parameters as a dict
Helper to convert query parameters as a dict
[ "Helper", "to", "convert", "query", "parameters", "as", "a", "dict" ]
def urldecode(url): if "?" in url: path, query = url.split("?", 1) return path, dict(cgi.parse_qsl(query)) else: return url, {}
[ "def", "urldecode", "(", "url", ")", ":", "if", "\"?\"", "in", "url", ":", "path", ",", "query", "=", "url", ".", "split", "(", "\"?\"", ",", "1", ")", "return", "path", ",", "dict", "(", "cgi", ".", "parse_qsl", "(", "query", ")", ")", "else", ":", "return", "url", ",", "{", "}" ]
Helper to convert query parameters as a dict
[ "Helper", "to", "convert", "query", "parameters", "as", "a", "dict" ]
[ "\"\"\"Helper to convert query parameters as a dict\"\"\"" ]
[ { "param": "url", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "url", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import cgi

def urldecode(url):
    if "?" in url:
        path, query = url.split("?", 1)
        return path, dict(cgi.parse_qsl(query))
    else:
        return url, {}
610,524
286
3efd313d7ac51f98df1e07ef9a6054914b7da05d
Pikime/Adafruit_Learning_System_Guides
Trinket_Ultrasonic_Rangefinder/code.py
[ "MIT" ]
Python
find_mode
<not_specific>
def find_mode(x): """ find the mode (most common value reported) will return median (center of sorted list) should mode not be found """ n = len(x) max_count = 0 mode = 0 bimodal = 0 counter = 0 index = 0 while index < (n - 1): prev_count = counter counter = 0 while (x[index]) == (x[index + 1]): counter += 1 index += 1 if (counter > prev_count) and (counter > max_count): mode = x[index] max_count = counter bimodal = 0 if counter == 0: index += 1 # If the dataset has 2 or more modes. if counter == max_count: bimodal = 1 # Return the median if there is no mode. if (mode == 0) or (bimodal == 1): mode = x[int(n / 2)] return mode
find the mode (most common value reported) will return median (center of sorted list) should mode not be found
find the mode (most common value reported) will return median (center of sorted list) should mode not be found
[ "find", "the", "mode", "(", "most", "common", "value", "reported", ")", "will", "return", "median", "(", "center", "of", "sorted", "list", ")", "should", "mode", "not", "be", "found" ]
def find_mode(x): n = len(x) max_count = 0 mode = 0 bimodal = 0 counter = 0 index = 0 while index < (n - 1): prev_count = counter counter = 0 while (x[index]) == (x[index + 1]): counter += 1 index += 1 if (counter > prev_count) and (counter > max_count): mode = x[index] max_count = counter bimodal = 0 if counter == 0: index += 1 if counter == max_count: bimodal = 1 if (mode == 0) or (bimodal == 1): mode = x[int(n / 2)] return mode
[ "def", "find_mode", "(", "x", ")", ":", "n", "=", "len", "(", "x", ")", "max_count", "=", "0", "mode", "=", "0", "bimodal", "=", "0", "counter", "=", "0", "index", "=", "0", "while", "index", "<", "(", "n", "-", "1", ")", ":", "prev_count", "=", "counter", "counter", "=", "0", "while", "(", "x", "[", "index", "]", ")", "==", "(", "x", "[", "index", "+", "1", "]", ")", ":", "counter", "+=", "1", "index", "+=", "1", "if", "(", "counter", ">", "prev_count", ")", "and", "(", "counter", ">", "max_count", ")", ":", "mode", "=", "x", "[", "index", "]", "max_count", "=", "counter", "bimodal", "=", "0", "if", "counter", "==", "0", ":", "index", "+=", "1", "if", "counter", "==", "max_count", ":", "bimodal", "=", "1", "if", "(", "mode", "==", "0", ")", "or", "(", "bimodal", "==", "1", ")", ":", "mode", "=", "x", "[", "int", "(", "n", "/", "2", ")", "]", "return", "mode" ]
find the mode (most common value reported) will return median (center of sorted list) should mode not be found
[ "find", "the", "mode", "(", "most", "common", "value", "reported", ")", "will", "return", "median", "(", "center", "of", "sorted", "list", ")", "should", "mode", "not", "be", "found" ]
[ "\"\"\"\n find the mode (most common value reported)\n will return median (center of sorted list)\n should mode not be found\n \"\"\"", "# If the dataset has 2 or more modes.", "# Return the median if there is no mode." ]
[ { "param": "x", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "x", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def find_mode(x):
    n = len(x)
    max_count = 0
    mode = 0
    bimodal = 0
    counter = 0
    index = 0
    while index < (n - 1):
        prev_count = counter
        counter = 0
        while (x[index]) == (x[index + 1]):
            counter += 1
            index += 1
        if (counter > prev_count) and (counter > max_count):
            mode = x[index]
            max_count = counter
            bimodal = 0
        if counter == 0:
            index += 1
        if counter == max_count:
            bimodal = 1
    if (mode == 0) or (bimodal == 1):
        mode = x[int(n / 2)]
    return mode
610,525
184
447042ea04a4c477be198ad69ef72c7a2313fcf7
PapenfussLab/sv_tools
sv_tools/kc_tests.py
[ "MIT" ]
Python
acc_alternating_runs
<not_specific>
def acc_alternating_runs(segments, walk): # Names could be improved here. """Recursive function that takes an empty list and a H/T string and returns a list of alternating segments.""" if len(walk) == 0: return segments else: next_letter = walk[0] def conjoin(segments, next_letter): if segments == []: return [next_letter] else: last_segment = segments[-1] last_letter = last_segment[-1] if last_letter == next_letter: return segments + [next_letter] elif last_letter != next_letter: return segments[:-1] + [last_segment + next_letter] if len(walk) == 1: return conjoin(segments, next_letter) else: tail = walk[1:] return acc_alternating_runs( conjoin(segments, next_letter), tail)
Recursive function that takes an empty list and a H/T string and returns a list of alternating segments.
Recursive function that takes an empty list and a H/T string and returns a list of alternating segments.
[ "Recursive", "function", "that", "takes", "an", "empty", "list", "and", "a", "H", "/", "T", "string", "and", "returns", "a", "list", "of", "alternating", "segments", "." ]
def acc_alternating_runs(segments, walk): if len(walk) == 0: return segments else: next_letter = walk[0] def conjoin(segments, next_letter): if segments == []: return [next_letter] else: last_segment = segments[-1] last_letter = last_segment[-1] if last_letter == next_letter: return segments + [next_letter] elif last_letter != next_letter: return segments[:-1] + [last_segment + next_letter] if len(walk) == 1: return conjoin(segments, next_letter) else: tail = walk[1:] return acc_alternating_runs( conjoin(segments, next_letter), tail)
[ "def", "acc_alternating_runs", "(", "segments", ",", "walk", ")", ":", "if", "len", "(", "walk", ")", "==", "0", ":", "return", "segments", "else", ":", "next_letter", "=", "walk", "[", "0", "]", "def", "conjoin", "(", "segments", ",", "next_letter", ")", ":", "if", "segments", "==", "[", "]", ":", "return", "[", "next_letter", "]", "else", ":", "last_segment", "=", "segments", "[", "-", "1", "]", "last_letter", "=", "last_segment", "[", "-", "1", "]", "if", "last_letter", "==", "next_letter", ":", "return", "segments", "+", "[", "next_letter", "]", "elif", "last_letter", "!=", "next_letter", ":", "return", "segments", "[", ":", "-", "1", "]", "+", "[", "last_segment", "+", "next_letter", "]", "if", "len", "(", "walk", ")", "==", "1", ":", "return", "conjoin", "(", "segments", ",", "next_letter", ")", "else", ":", "tail", "=", "walk", "[", "1", ":", "]", "return", "acc_alternating_runs", "(", "conjoin", "(", "segments", ",", "next_letter", ")", ",", "tail", ")" ]
Recursive function that takes an empty list and a H/T string and returns a list of alternating segments.
[ "Recursive", "function", "that", "takes", "an", "empty", "list", "and", "a", "H", "/", "T", "string", "and", "returns", "a", "list", "of", "alternating", "segments", "." ]
[ "# Names could be improved here.", "\"\"\"Recursive function that takes an empty list and a H/T\n string and returns a list of alternating segments.\"\"\"" ]
[ { "param": "segments", "type": null }, { "param": "walk", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "segments", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "walk", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def acc_alternating_runs(segments, walk):
    if len(walk) == 0:
        return segments
    else:
        next_letter = walk[0]
        def conjoin(segments, next_letter):
            if segments == []:
                return [next_letter]
            else:
                last_segment = segments[-1]
                last_letter = last_segment[-1]
                if last_letter == next_letter:
                    return segments + [next_letter]
                elif last_letter != next_letter:
                    return segments[:-1] + [last_segment + next_letter]
        if len(walk) == 1:
            return conjoin(segments, next_letter)
        else:
            tail = walk[1:]
            return acc_alternating_runs(
                conjoin(segments, next_letter), tail)
610,526
18
2dbf9448ec21fe388cd2ae3ba53b5424d735dd03
wouterl/petlib
examples/amacs.py
[ "BSD-2-Clause" ]
Python
keyGen_ggm
<not_specific>
def keyGen_ggm(params, n): """Secret key setup and parameter setup for issuer""" (_, _, h, o) = params sk = [o.random() for _ in range(n+1)] iparams = [s * h for s in sk[1:]] return sk, iparams
Secret key setup and parameter setup for issuer
Secret key setup and parameter setup for issuer
[ "Secret", "key", "setup", "and", "parameter", "setup", "for", "issuer" ]
def keyGen_ggm(params, n): (_, _, h, o) = params sk = [o.random() for _ in range(n+1)] iparams = [s * h for s in sk[1:]] return sk, iparams
[ "def", "keyGen_ggm", "(", "params", ",", "n", ")", ":", "(", "_", ",", "_", ",", "h", ",", "o", ")", "=", "params", "sk", "=", "[", "o", ".", "random", "(", ")", "for", "_", "in", "range", "(", "n", "+", "1", ")", "]", "iparams", "=", "[", "s", "*", "h", "for", "s", "in", "sk", "[", "1", ":", "]", "]", "return", "sk", ",", "iparams" ]
Secret key setup and parameter setup for issuer
[ "Secret", "key", "setup", "and", "parameter", "setup", "for", "issuer" ]
[ "\"\"\"Secret key setup and parameter setup for issuer\"\"\"" ]
[ { "param": "params", "type": null }, { "param": "n", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "params", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "n", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def keyGen_ggm(params, n):
    (_, _, h, o) = params
    sk = [o.random() for _ in range(n+1)]
    iparams = [s * h for s in sk[1:]]
    return sk, iparams
610,527
341
3e3a20f8279295f57c98b00f3828b76be3141917
RobertCraigie/prisma-client-py
tests/conftest.py
[ "Apache-2.0" ]
Python
request_has_client
bool
def request_has_client(request: 'FixtureRequest') -> bool: """Return whether or not the current request uses the prisma client""" return ( request.node.get_closest_marker('prisma') is not None or 'client' in request.fixturenames )
Return whether or not the current request uses the prisma client
Return whether or not the current request uses the prisma client
[ "Return", "whether", "or", "not", "the", "current", "request", "uses", "the", "prisma", "client" ]
def request_has_client(request: 'FixtureRequest') -> bool: return ( request.node.get_closest_marker('prisma') is not None or 'client' in request.fixturenames )
[ "def", "request_has_client", "(", "request", ":", "'FixtureRequest'", ")", "->", "bool", ":", "return", "(", "request", ".", "node", ".", "get_closest_marker", "(", "'prisma'", ")", "is", "not", "None", "or", "'client'", "in", "request", ".", "fixturenames", ")" ]
Return whether or not the current request uses the prisma client
[ "Return", "whether", "or", "not", "the", "current", "request", "uses", "the", "prisma", "client" ]
[ "\"\"\"Return whether or not the current request uses the prisma client\"\"\"" ]
[ { "param": "request", "type": "'FixtureRequest'" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "request", "type": "'FixtureRequest'", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def request_has_client(request: 'FixtureRequest') -> bool: return ( request.node.get_closest_marker('prisma') is not None or 'client' in request.fixturenames )
610,528
501
5df6d01f68217298d730d963eace2ccd3bf387f1
poldracklab/nondefaced-detector
nondefaced_detector/preprocess.py
[ "Apache-2.0" ]
Python
cleanup_files
null
def cleanup_files(*args): """ Function to remove temp files created during preprocessing.""" for p in args: if os.path.exists(p): os.remove(p)
Function to remove temp files created during preprocessing.
Function to remove temp files created during preprocessing.
[ "Function", "to", "remove", "temp", "files", "created", "during", "preprocessing", "." ]
def cleanup_files(*args): for p in args: if os.path.exists(p): os.remove(p)
[ "def", "cleanup_files", "(", "*", "args", ")", ":", "for", "p", "in", "args", ":", "if", "os", ".", "path", ".", "exists", "(", "p", ")", ":", "os", ".", "remove", "(", "p", ")" ]
Function to remove temp files created during preprocessing.
[ "Function", "to", "remove", "temp", "files", "created", "during", "preprocessing", "." ]
[ "\"\"\" Function to remove temp files created during preprocessing.\"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import os

def cleanup_files(*args):
    for p in args:
        if os.path.exists(p):
            os.remove(p)
610,529
232
93e539b4fae584bbe90e0c0a9e0cc4a10915f6c2
beevesuw/snipsmanagercore
snipsmanagercore/intent_parser.py
[ "MIT" ]
Python
parse
<not_specific>
def parse(payload, candidate_classes): """ Parse a json response into an intent. :param payload: a JSON object representing an intent. :param candidate_classes: a list of classes representing various intents, each having their own `parse` method to attempt parsing the JSON object into the given intent class. :return: An object version of the intent if one of the candidate classes managed to parse it, or None. """ for cls in candidate_classes: intent = cls.parse(payload) if intent: return intent return None
Parse a json response into an intent. :param payload: a JSON object representing an intent. :param candidate_classes: a list of classes representing various intents, each having their own `parse` method to attempt parsing the JSON object into the given intent class. :return: An object version of the intent if one of the candidate classes managed to parse it, or None.
Parse a json response into an intent.
[ "Parse", "a", "json", "response", "into", "an", "intent", "." ]
def parse(payload, candidate_classes): for cls in candidate_classes: intent = cls.parse(payload) if intent: return intent return None
[ "def", "parse", "(", "payload", ",", "candidate_classes", ")", ":", "for", "cls", "in", "candidate_classes", ":", "intent", "=", "cls", ".", "parse", "(", "payload", ")", "if", "intent", ":", "return", "intent", "return", "None" ]
Parse a json response into an intent.
[ "Parse", "a", "json", "response", "into", "an", "intent", "." ]
[ "\"\"\" Parse a json response into an intent.\n\n :param payload: a JSON object representing an intent.\n :param candidate_classes: a list of classes representing various\n intents, each having their own `parse`\n method to attempt parsing the JSON object\n into the given intent class.\n :return: An object version of the intent if one of the candidate\n classes managed to parse it, or None.\n \"\"\"" ]
[ { "param": "payload", "type": null }, { "param": "candidate_classes", "type": null } ]
{ "returns": [ { "docstring": "An object version of the intent if one of the candidate\nclasses managed to parse it, or None.", "docstring_tokens": [ "An", "object", "version", "of", "the", "intent", "if", "one", "of", "the", "candidate", "classes", "managed", "to", "parse", "it", "or", "None", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "payload", "type": null, "docstring": "a JSON object representing an intent.", "docstring_tokens": [ "a", "JSON", "object", "representing", "an", "intent", "." ], "default": null, "is_optional": null }, { "identifier": "candidate_classes", "type": null, "docstring": "a list of classes representing various\nintents, each having their own `parse`\nmethod to attempt parsing the JSON object\ninto the given intent class.", "docstring_tokens": [ "a", "list", "of", "classes", "representing", "various", "intents", "each", "having", "their", "own", "`", "parse", "`", "method", "to", "attempt", "parsing", "the", "JSON", "object", "into", "the", "given", "intent", "class", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def parse(payload, candidate_classes):
    for cls in candidate_classes:
        intent = cls.parse(payload)
        if intent:
            return intent
    return None
610,530
510
0444cc1ff688c4f35aa95440ec69c759b5cc13b1
ninikolov/lha
preprocessing/clean_text.py
[ "MIT" ]
Python
light_clean
<not_specific>
def light_clean(txt): """Remove all new lines and extra spaces""" # return multi_regex_clean(txt, {WHITESPACE_REGEX: r" "}) txt = txt.replace('\n', ' ').replace('\r', ' ').replace("\t", " ") txt = txt.strip(' \t\n\r') # txt = multi_regex_clean(txt, {"[ \t\r\f]{1,}": r" "}) return " ".join(txt.split())
Remove all new lines and extra spaces
Remove all new lines and extra spaces
[ "Remove", "all", "new", "lines", "and", "extra", "spaces" ]
def light_clean(txt): txt = txt.replace('\n', ' ').replace('\r', ' ').replace("\t", " ") txt = txt.strip(' \t\n\r') return " ".join(txt.split())
[ "def", "light_clean", "(", "txt", ")", ":", "txt", "=", "txt", ".", "replace", "(", "'\\n'", ",", "' '", ")", ".", "replace", "(", "'\\r'", ",", "' '", ")", ".", "replace", "(", "\"\\t\"", ",", "\" \"", ")", "txt", "=", "txt", ".", "strip", "(", "' \\t\\n\\r'", ")", "return", "\" \"", ".", "join", "(", "txt", ".", "split", "(", ")", ")" ]
Remove all new lines and extra spaces
[ "Remove", "all", "new", "lines", "and", "extra", "spaces" ]
[ "\"\"\"Remove all new lines and extra spaces\"\"\"", "# return multi_regex_clean(txt, {WHITESPACE_REGEX: r\" \"})", "# txt = multi_regex_clean(txt, {\"[ \\t\\r\\f]{1,}\": r\" \"})" ]
[ { "param": "txt", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "txt", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def light_clean(txt):
    txt = txt.replace('\n', ' ').replace('\r', ' ').replace("\t", " ")
    txt = txt.strip(' \t\n\r')
    return " ".join(txt.split())
610,531
589
08771311cfaa2d9c2f0c42ac081e7c6dc7f690e7
murtazahassaniq21/zoom
zoom/validators.py
[ "MIT" ]
Python
image_mime_type_valid
<not_specific>
def image_mime_type_valid(data): """check data against the more commonly browser supported mime types """ accept = ['gif', 'jpeg', 'png', 'xbm', 'bmp'] if ( isinstance(data, cgi.FieldStorage) and data.file and imghdr.what('a', data.file.read()) in accept ): return True if ( not data or isinstance(data, (str, bytes)) and imghdr.what('a', data) in accept ): return True return False
check data against the more commonly browser supported mime types
check data against the more commonly browser supported mime types
[ "check", "data", "against", "the", "more", "commonly", "browser", "supported", "mime", "types" ]
def image_mime_type_valid(data): accept = ['gif', 'jpeg', 'png', 'xbm', 'bmp'] if ( isinstance(data, cgi.FieldStorage) and data.file and imghdr.what('a', data.file.read()) in accept ): return True if ( not data or isinstance(data, (str, bytes)) and imghdr.what('a', data) in accept ): return True return False
[ "def", "image_mime_type_valid", "(", "data", ")", ":", "accept", "=", "[", "'gif'", ",", "'jpeg'", ",", "'png'", ",", "'xbm'", ",", "'bmp'", "]", "if", "(", "isinstance", "(", "data", ",", "cgi", ".", "FieldStorage", ")", "and", "data", ".", "file", "and", "imghdr", ".", "what", "(", "'a'", ",", "data", ".", "file", ".", "read", "(", ")", ")", "in", "accept", ")", ":", "return", "True", "if", "(", "not", "data", "or", "isinstance", "(", "data", ",", "(", "str", ",", "bytes", ")", ")", "and", "imghdr", ".", "what", "(", "'a'", ",", "data", ")", "in", "accept", ")", ":", "return", "True", "return", "False" ]
check data against the more commonly browser supported mime types
[ "check", "data", "against", "the", "more", "commonly", "browser", "supported", "mime", "types" ]
[ "\"\"\"check data against the more commonly browser supported mime types\n \"\"\"" ]
[ { "param": "data", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import cgi
import imghdr

def image_mime_type_valid(data):
    accept = ['gif', 'jpeg', 'png', 'xbm', 'bmp']
    if (
        isinstance(data, cgi.FieldStorage)
        and data.file
        and imghdr.what('a', data.file.read()) in accept
    ):
        return True
    if (
        not data
        or isinstance(data, (str, bytes))
        and imghdr.what('a', data) in accept
    ):
        return True
    return False
610,532
142
645aeb0a7e86e6962d25dcc22748ebcd763ed7fc
NSNC-Lab/2dReconstruction
main.py
[ "MIT" ]
Python
check_input
null
def check_input(spk, h, env): """ Make sure the dimensions of inputs are correct. :param spk: pytorch tensor, shape (1, channels, time1). Spikes from which env will be reconstructed. :param h: pytorch tensor, requires gradient, shape (1, channels, time2). 2d reconstruction filter to fit. :param env: pytorch tensor, shape (time1, ). Envelope signal that spk and h are attempting to reconstruct. :return: None """ # dimensionality check if len(spk.shape) != 3: raise ValueError('Invalid spk shape, requires shape (1, channels, time)') else: if spk.shape[0] == 1 and spk.shape[1] >= 1 and spk.shape[2] >= 1: pass else: raise ValueError('Invalid spk shape, required shape: (1, channels, time)') if len(h.shape) != 3: raise ValueError('Invalid h shape, requires shape (1, channels, time)') else: if h.shape[0] == 1 and h.shape[1] >= 1 and h.shape[2] >= 1: pass else: raise ValueError('Invalid h shape, required shape: (1, channels, time)') if len(env.shape) != 1: raise ValueError('Invalid env shape, requires shape (time, )')
Make sure the dimensions of inputs are correct. :param spk: pytorch tensor, shape (1, channels, time1). Spikes from which env will be reconstructed. :param h: pytorch tensor, requires gradient, shape (1, channels, time2). 2d reconstruction filter to fit. :param env: pytorch tensor, shape (time1, ). Envelope signal that spk and h are attempting to reconstruct. :return: None
Make sure the dimensions of inputs are correct.
[ "Make", "sure", "the", "dimensions", "of", "inputs", "are", "correct", "." ]
def check_input(spk, h, env): if len(spk.shape) != 3: raise ValueError('Invalid spk shape, requires shape (1, channels, time)') else: if spk.shape[0] == 1 and spk.shape[1] >= 1 and spk.shape[2] >= 1: pass else: raise ValueError('Invalid spk shape, required shape: (1, channels, time)') if len(h.shape) != 3: raise ValueError('Invalid h shape, requires shape (1, channels, time)') else: if h.shape[0] == 1 and h.shape[1] >= 1 and h.shape[2] >= 1: pass else: raise ValueError('Invalid h shape, required shape: (1, channels, time)') if len(env.shape) != 1: raise ValueError('Invalid env shape, requires shape (time, )')
[ "def", "check_input", "(", "spk", ",", "h", ",", "env", ")", ":", "if", "len", "(", "spk", ".", "shape", ")", "!=", "3", ":", "raise", "ValueError", "(", "'Invalid spk shape, requires shape (1, channels, time)'", ")", "else", ":", "if", "spk", ".", "shape", "[", "0", "]", "==", "1", "and", "spk", ".", "shape", "[", "1", "]", ">=", "1", "and", "spk", ".", "shape", "[", "2", "]", ">=", "1", ":", "pass", "else", ":", "raise", "ValueError", "(", "'Invalid spk shape, required shape: (1, channels, time)'", ")", "if", "len", "(", "h", ".", "shape", ")", "!=", "3", ":", "raise", "ValueError", "(", "'Invalid h shape, requires shape (1, channels, time)'", ")", "else", ":", "if", "h", ".", "shape", "[", "0", "]", "==", "1", "and", "h", ".", "shape", "[", "1", "]", ">=", "1", "and", "h", ".", "shape", "[", "2", "]", ">=", "1", ":", "pass", "else", ":", "raise", "ValueError", "(", "'Invalid h shape, required shape: (1, channels, time)'", ")", "if", "len", "(", "env", ".", "shape", ")", "!=", "1", ":", "raise", "ValueError", "(", "'Invalid env shape, requires shape (time, )'", ")" ]
Make sure the dimensions of inputs are correct.
[ "Make", "sure", "the", "dimensions", "of", "inputs", "are", "correct", "." ]
[ "\"\"\"\n Make sure the dimensions of inputs are correct.\n\n :param spk: pytorch tensor, shape (1, channels, time1). Spikes from which env will be reconstructed.\n :param h: pytorch tensor, requires gradient, shape (1, channels, time2). 2d reconstruction filter to fit.\n :param env: pytorch tensor, shape (time1, ). Envelope signal that spk and h are attempting to reconstruct.\n :return: None\n \"\"\"", "# dimensionality check" ]
[ { "param": "spk", "type": null }, { "param": "h", "type": null }, { "param": "env", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "spk", "type": null, "docstring": "pytorch tensor, shape (1, channels, time1). Spikes from which env will be reconstructed.", "docstring_tokens": [ "pytorch", "tensor", "shape", "(", "1", "channels", "time1", ")", ".", "Spikes", "from", "which", "env", "will", "be", "reconstructed", "." ], "default": null, "is_optional": null }, { "identifier": "h", "type": null, "docstring": "pytorch tensor, requires gradient, shape (1, channels, time2). 2d reconstruction filter to fit.", "docstring_tokens": [ "pytorch", "tensor", "requires", "gradient", "shape", "(", "1", "channels", "time2", ")", ".", "2d", "reconstruction", "filter", "to", "fit", "." ], "default": null, "is_optional": null }, { "identifier": "env", "type": null, "docstring": "pytorch tensor, shape (time1, ). Envelope signal that spk and h are attempting to reconstruct.", "docstring_tokens": [ "pytorch", "tensor", "shape", "(", "time1", ")", ".", "Envelope", "signal", "that", "spk", "and", "h", "are", "attempting", "to", "reconstruct", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def check_input(spk, h, env):
    if len(spk.shape) != 3:
        raise ValueError('Invalid spk shape, requires shape (1, channels, time)')
    else:
        if spk.shape[0] == 1 and spk.shape[1] >= 1 and spk.shape[2] >= 1:
            pass
        else:
            raise ValueError('Invalid spk shape, required shape: (1, channels, time)')
    if len(h.shape) != 3:
        raise ValueError('Invalid h shape, requires shape (1, channels, time)')
    else:
        if h.shape[0] == 1 and h.shape[1] >= 1 and h.shape[2] >= 1:
            pass
        else:
            raise ValueError('Invalid h shape, required shape: (1, channels, time)')
    if len(env.shape) != 1:
        raise ValueError('Invalid env shape, requires shape (time, )')
610,533
433
3a0685db114a2d505bfb4d4128705773a3cb9bb3
gikf/advent-of-code
advent-of-code-2017/day 18/main.py
[ "MIT" ]
Python
play_sound
<not_specific>
def play_sound(register, cur_instruction, name): """Play sound/send instruction.""" register['sound'].append(register[name]) register['counter'] += 1 return cur_instruction
Play sound/send instruction.
Play sound/send instruction.
[ "Play", "sound", "/", "send", "instruction", "." ]
def play_sound(register, cur_instruction, name): register['sound'].append(register[name]) register['counter'] += 1 return cur_instruction
[ "def", "play_sound", "(", "register", ",", "cur_instruction", ",", "name", ")", ":", "register", "[", "'sound'", "]", ".", "append", "(", "register", "[", "name", "]", ")", "register", "[", "'counter'", "]", "+=", "1", "return", "cur_instruction" ]
Play sound/send instruction.
[ "Play", "sound", "/", "send", "instruction", "." ]
[ "\"\"\"Play sound/send instruction.\"\"\"" ]
[ { "param": "register", "type": null }, { "param": "cur_instruction", "type": null }, { "param": "name", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "register", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "cur_instruction", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def play_sound(register, cur_instruction, name):
    register['sound'].append(register[name])
    register['counter'] += 1
    return cur_instruction
610,534
819
775b0cbd5af67ca7d1b0585533cf0eaa61f04561
mcfongtw/MkConfig
mkconfig/conf/utils.py
[ "MIT" ]
Python
boolean_to_lowercase_literal
<not_specific>
def boolean_to_lowercase_literal(value): """ Convert the boolean value into a lowercase literal value :param value: a boolean value to convert with :return: a lowercase literal value equivalent to value, if it is of type boolean """ if isinstance(value, bool): value = str(value).lower() return value
Convert the boolean value into a lowercase literal value :param value: a boolean value to convert with :return: a lowercase literal value equivalent to value, if it is of type boolean
Convert the boolean value into a lowercase literal value
[ "Convert", "the", "boolean", "value", "into", "a", "lowercase", "literal", "value" ]
def boolean_to_lowercase_literal(value): if isinstance(value, bool): value = str(value).lower() return value
[ "def", "boolean_to_lowercase_literal", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "value", "=", "str", "(", "value", ")", ".", "lower", "(", ")", "return", "value" ]
Convert the boolean value into a lowercase literal value
[ "Convert", "the", "boolean", "value", "into", "a", "lowercase", "literal", "value" ]
[ "\"\"\"\n Convert the boolean value into a lowercase literal value\n :param value: a boolean value to convert with\n :return: a lowercase literal value equivalent to value, if it is of type boolean\n \"\"\"" ]
[ { "param": "value", "type": null } ]
{ "returns": [ { "docstring": "a lowercase literal value equivalent to value, if it is of type boolean", "docstring_tokens": [ "a", "lowercase", "literal", "value", "equivalent", "to", "value", "if", "it", "is", "of", "type", "boolean" ], "type": null } ], "raises": [], "params": [ { "identifier": "value", "type": null, "docstring": "a boolean value to convert with", "docstring_tokens": [ "a", "boolean", "value", "to", "convert", "with" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def boolean_to_lowercase_literal(value):
    if isinstance(value, bool):
        value = str(value).lower()
    return value
610,536
817
30d3229940adf253ee7ec03e8614de25913470bf
piolug93/king-phisher
king_phisher/utilities.py
[ "BSD-3-Clause" ]
Python
password_is_complex
<not_specific>
def password_is_complex(password, min_len=12): """ Check that the specified string meets standard password complexity requirements. :param str password: The password to validate. :param int min_len: The minimum length the password should be. :return: Whether the strings appears to be complex or not. :rtype: bool """ has_upper = False has_lower = False has_digit = False if len(password) < min_len: return False for char in password: if char.isupper(): has_upper = True if char.islower(): has_lower = True if char.isdigit(): has_digit = True if has_upper and has_lower and has_digit: return True return False
Check that the specified string meets standard password complexity requirements. :param str password: The password to validate. :param int min_len: The minimum length the password should be. :return: Whether the strings appears to be complex or not. :rtype: bool
Check that the specified string meets standard password complexity requirements.
[ "Check", "that", "the", "specified", "string", "meets", "standard", "password", "complexity", "requirements", "." ]
def password_is_complex(password, min_len=12): has_upper = False has_lower = False has_digit = False if len(password) < min_len: return False for char in password: if char.isupper(): has_upper = True if char.islower(): has_lower = True if char.isdigit(): has_digit = True if has_upper and has_lower and has_digit: return True return False
[ "def", "password_is_complex", "(", "password", ",", "min_len", "=", "12", ")", ":", "has_upper", "=", "False", "has_lower", "=", "False", "has_digit", "=", "False", "if", "len", "(", "password", ")", "<", "min_len", ":", "return", "False", "for", "char", "in", "password", ":", "if", "char", ".", "isupper", "(", ")", ":", "has_upper", "=", "True", "if", "char", ".", "islower", "(", ")", ":", "has_lower", "=", "True", "if", "char", ".", "isdigit", "(", ")", ":", "has_digit", "=", "True", "if", "has_upper", "and", "has_lower", "and", "has_digit", ":", "return", "True", "return", "False" ]
Check that the specified string meets standard password complexity requirements.
[ "Check", "that", "the", "specified", "string", "meets", "standard", "password", "complexity", "requirements", "." ]
[ "\"\"\"\n\tCheck that the specified string meets standard password complexity\n\trequirements.\n\t:param str password: The password to validate.\n\t:param int min_len: The minimum length the password should be.\n\t:return: Whether the strings appears to be complex or not.\n\t:rtype: bool\n\t\"\"\"" ]
[ { "param": "password", "type": null }, { "param": "min_len", "type": null } ]
{ "returns": [ { "docstring": "Whether the strings appears to be complex or not.", "docstring_tokens": [ "Whether", "the", "strings", "appears", "to", "be", "complex", "or", "not", "." ], "type": "bool" } ], "raises": [], "params": [ { "identifier": "password", "type": null, "docstring": "The password to validate.", "docstring_tokens": [ "The", "password", "to", "validate", "." ], "default": null, "is_optional": false }, { "identifier": "min_len", "type": null, "docstring": "The minimum length the password should be.", "docstring_tokens": [ "The", "minimum", "length", "the", "password", "should", "be", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def password_is_complex(password, min_len=12):
    has_upper = False
    has_lower = False
    has_digit = False
    if len(password) < min_len:
        return False
    for char in password:
        if char.isupper():
            has_upper = True
        if char.islower():
            has_lower = True
        if char.isdigit():
            has_digit = True
    if has_upper and has_lower and has_digit:
        return True
    return False
610,537
436
4527826c59a74bb9de0e9b24b88bcf6813aca2d4
cilki/super-palm-tree
data/main/sources/util.py
[ "Apache-2.0" ]
Python
xappend
null
def xappend(collection, item): """ Append to the given collection unless the item is already present """ if item not in collection: collection.append(item)
Append to the given collection unless the item is already present
Append to the given collection unless the item is already present
[ "Append", "to", "the", "given", "collection", "unless", "the", "item", "is", "already", "present" ]
def xappend(collection, item): if item not in collection: collection.append(item)
[ "def", "xappend", "(", "collection", ",", "item", ")", ":", "if", "item", "not", "in", "collection", ":", "collection", ".", "append", "(", "item", ")" ]
Append to the given collection unless the item is already present
[ "Append", "to", "the", "given", "collection", "unless", "the", "item", "is", "already", "present" ]
[ "\"\"\"\n Append to the given collection unless the item is already present\n \"\"\"" ]
[ { "param": "collection", "type": null }, { "param": "item", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "collection", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "item", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def xappend(collection, item): if item not in collection: collection.append(item)
610,538
604
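A minimal usage sketch for xappend; the list contents are made up for illustration.
tags = ["rpg", "indie"]
xappend(tags, "indie")     # already present, so the list is left unchanged
xappend(tags, "strategy")  # not present, so it is appended
print(tags)                # ['rpg', 'indie', 'strategy']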
752bb0535b4f2ce1b23c4d74de0f9618d6afeffe
betonr/bdc-collection-builder
bdc_collection_builder/collections/landsat/tasks.py
[ "MIT" ]
Python
download_landsat
<not_specific>
def download_landsat(scene): """Represent a celery task definition for handling Landsat-8 Download files. This celery tasks listen only for queues 'download'. It also retries following errors occurs: - NewConnectionError, MaxRetryError Internet Connection Problem Args: scene (dict): Radcor Activity Returns: Returns processed activity """ return download_landsat.download(scene)
Represent a celery task definition for handling Landsat-8 Download files. This celery tasks listen only for queues 'download'. It also retries following errors occurs: - NewConnectionError, MaxRetryError Internet Connection Problem Args: scene (dict): Radcor Activity Returns: Returns processed activity
Represent a celery task definition for handling Landsat-8 Download files. This celery tasks listen only for queues 'download'. It also retries following errors occurs: NewConnectionError, MaxRetryError Internet Connection Problem
[ "Represent", "a", "celery", "task", "definition", "for", "handling", "Landsat", "-", "8", "Download", "files", ".", "This", "celery", "tasks", "listen", "only", "for", "queues", "'", "download", "'", ".", "It", "also", "retries", "following", "errors", "occurs", ":", "NewConnectionError", "MaxRetryError", "Internet", "Connection", "Problem" ]
def download_landsat(scene): return download_landsat.download(scene)
[ "def", "download_landsat", "(", "scene", ")", ":", "return", "download_landsat", ".", "download", "(", "scene", ")" ]
Represent a celery task definition for handling Landsat-8 Download files.
[ "Represent", "a", "celery", "task", "definition", "for", "handling", "Landsat", "-", "8", "Download", "files", "." ]
[ "\"\"\"Represent a celery task definition for handling Landsat-8 Download files.\n\n This celery tasks listen only for queues 'download'.\n\n It also retries following errors occurs:\n - NewConnectionError, MaxRetryError Internet Connection Problem\n\n Args:\n scene (dict): Radcor Activity\n\n Returns:\n Returns processed activity\n \"\"\"" ]
[ { "param": "scene", "type": null } ]
{ "returns": [ { "docstring": "Returns processed activity", "docstring_tokens": [ "Returns", "processed", "activity" ], "type": null } ], "raises": [], "params": [ { "identifier": "scene", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def download_landsat(scene): return download_landsat.download(scene)
610,539
651
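download_landsat is documented above as a Celery task bound to the 'download' queue, although the captured code omits the task decorator. Assuming the standard Celery task API is available on it, dispatching the task might look like the sketch below; the activity dict keys are assumptions, not taken from the project.
scene = {"sceneid": "LC08_L1TP_001001_20200101", "activity_type": "downloadLC8"}  # hypothetical payload
download_landsat.delay(scene)  # enqueue asynchronously (requires the Celery decorator)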
646cd024aab87cbd3e44fd87195a6af006b45d05
unswpcsoc/pcsoc-verification-bot
iam/verify.py
[ "MIT" ]
Python
proc_request_email
null
async def proc_request_email(db, member): """DM email address request to member and await response. Args: member: Member object to make request to. """ await member.send("What is your email address?")
DM email address request to member and await response. Args: member: Member object to make request to.
DM email address request to member and await response.
[ "DM", "email", "address", "request", "to", "member", "and", "await", "response", "." ]
async def proc_request_email(db, member): await member.send("What is your email address?")
[ "async", "def", "proc_request_email", "(", "db", ",", "member", ")", ":", "await", "member", ".", "send", "(", "\"What is your email address?\"", ")" ]
DM email address request to member and await response.
[ "DM", "email", "address", "request", "to", "member", "and", "await", "response", "." ]
[ "\"\"\"DM email address request to member and await response.\n\n Args:\n member: Member object to make request to.\n \"\"\"" ]
[ { "param": "db", "type": null }, { "param": "member", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "db", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "member", "type": null, "docstring": "Member object to make request to.", "docstring_tokens": [ "Member", "object", "to", "make", "request", "to", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
async def proc_request_email(db, member): await member.send("What is your email address?")
610,540
391
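proc_request_email is a coroutine, so it must be awaited. A hedged sketch of a call site, assuming a discord.py-style context; db and ctx are placeholders for objects created elsewhere in the bot.
# Inside some async command handler:
await proc_request_email(db, ctx.author)  # DMs the email prompt to the invoking member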
db53e213fbc26df1d75e78828bd5ba87c5719b18
beloglazov/openstack-neat
neat/locals/overload/statistics.py
[ "Apache-2.0" ]
Python
tricube_weights
<not_specific>
def tricube_weights(n): """ Generates a list of weights according to the tricube function. :param n: The number of weights to generate. :type n: int :return: A list of generated weights. :rtype: list(float) """ spread = top = float(n - 1) weights = [] for i in range(2, n): weights.append((1 - ((top - i) / spread) ** 3) ** 3) return [weights[0], weights[0]] + weights
Generates a list of weights according to the tricube function. :param n: The number of weights to generate. :type n: int :return: A list of generated weights. :rtype: list(float)
Generates a list of weights according to the tricube function.
[ "Generates", "a", "list", "of", "weights", "according", "to", "the", "tricube", "function", "." ]
def tricube_weights(n): spread = top = float(n - 1) weights = [] for i in range(2, n): weights.append((1 - ((top - i) / spread) ** 3) ** 3) return [weights[0], weights[0]] + weights
[ "def", "tricube_weights", "(", "n", ")", ":", "spread", "=", "top", "=", "float", "(", "n", "-", "1", ")", "weights", "=", "[", "]", "for", "i", "in", "range", "(", "2", ",", "n", ")", ":", "weights", ".", "append", "(", "(", "1", "-", "(", "(", "top", "-", "i", ")", "/", "spread", ")", "**", "3", ")", "**", "3", ")", "return", "[", "weights", "[", "0", "]", ",", "weights", "[", "0", "]", "]", "+", "weights" ]
Generates a list of weights according to the tricube function.
[ "Generates", "a", "list", "of", "weights", "according", "to", "the", "tricube", "function", "." ]
[ "\"\"\" Generates a list of weights according to the tricube function.\n\n :param n: The number of weights to generate.\n :type n: int\n\n :return: A list of generated weights.\n :rtype: list(float)\n \"\"\"" ]
[ { "param": "n", "type": null } ]
{ "returns": [ { "docstring": "A list of generated weights.\n:rtype: list(float)", "docstring_tokens": [ "A", "list", "of", "generated", "weights", ".", ":", "rtype", ":", "list", "(", "float", ")" ], "type": null } ], "raises": [], "params": [ { "identifier": "n", "type": null, "docstring": "The number of weights to generate.\n:type n: int", "docstring_tokens": [ "The", "number", "of", "weights", "to", "generate", ".", ":", "type", "n", ":", "int" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def tricube_weights(n): spread = top = float(n - 1) weights = [] for i in range(2, n): weights.append((1 - ((top - i) / spread) ** 3) ** 3) return [weights[0], weights[0]] + weights
610,541
275
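A short sketch of what tricube_weights returns, to make the shape of the output concrete; the value of n is arbitrary.
w = tricube_weights(5)
print(len(w))  # 5 -- one weight per observation
print(w[-1])   # 1.0 -- the most recent point gets full weight
# The first three entries are equal because weights[0] is prepended twice.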
3d875d222cb0ec217c038346af059138637b1485
XroixHD/PyMastermindGame
src/mastermind.py
[ "MIT" ]
Python
from_config
<not_specific>
def from_config(cls, root, config): """ Generates a new Mastermind instance from a config state :returns: instance """ return cls(root, secret_code=config["last_code"], _i=(config["last_count"] * len(cls.COLORS)), history=config["history"])
Generates a new Mastermind instance from a config state :returns: instance
Generates a new Mastermind instance from a config state
[ "Generates", "a", "new", "Mastermind", "instance", "from", "a", "config", "state" ]
def from_config(cls, root, config): return cls(root, secret_code=config["last_code"], _i=(config["last_count"] * len(cls.COLORS)), history=config["history"])
[ "def", "from_config", "(", "cls", ",", "root", ",", "config", ")", ":", "return", "cls", "(", "root", ",", "secret_code", "=", "config", "[", "\"last_code\"", "]", ",", "_i", "=", "(", "config", "[", "\"last_count\"", "]", "*", "len", "(", "cls", ".", "COLORS", ")", ")", ",", "history", "=", "config", "[", "\"history\"", "]", ")" ]
Generates a new Mastermind instance from a config state
[ "Generates", "a", "new", "Mastermind", "instance", "from", "a", "config", "state" ]
[ "\"\"\" Generates a new Mastermind instance from a config state\n :returns: instance\n \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "root", "type": null }, { "param": "config", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "root", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "config", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def from_config(cls, root, config): return cls(root, secret_code=config["last_code"], _i=(config["last_count"] * len(cls.COLORS)), history=config["history"])
610,542
464
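A usage sketch for from_config. The config keys mirror what the method reads (last_code, last_count, history), but the example values, the root object, and the surrounding Mastermind class are assumptions about the game's setup.
config = {"last_code": [1, 3, 0, 2], "last_count": 2, "history": []}  # hypothetical saved state
game = Mastermind.from_config(root, config)  # root: whatever UI root the game normally receives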
2d91b3ac1c7feb5d55bb57effe3b6ef1c10ec970
FabianGroeger96/deep-embedded-music
src/utils/utils.py
[ "MIT" ]
Python
check_if_path_exists
<not_specific>
def check_if_path_exists(path: Union[str, pathlib.Path]): """ Checks if a given path exists. :param path: the path to check. :return: the path which was checked. :raises: ValueError: if the path is not valid, does not exist. """ path = pathlib.Path(path) if not path.exists(): raise ValueError( "Path {} is not valid. Please provide an existing path".format(path)) return path
Checks if a given path exists. :param path: the path to check. :return: the path which was checked. :raises: ValueError: if the path is not valid, does not exist.
Checks if a given path exists.
[ "Checks", "if", "a", "given", "path", "exists", "." ]
def check_if_path_exists(path: Union[str, pathlib.Path]): path = pathlib.Path(path) if not path.exists(): raise ValueError( "Path {} is not valid. Please provide an existing path".format(path)) return path
[ "def", "check_if_path_exists", "(", "path", ":", "Union", "[", "str", ",", "pathlib", ".", "Path", "]", ")", ":", "path", "=", "pathlib", ".", "Path", "(", "path", ")", "if", "not", "path", ".", "exists", "(", ")", ":", "raise", "ValueError", "(", "\"Path {} is not valid. Please provide an existing path\"", ".", "format", "(", "path", ")", ")", "return", "path" ]
Checks if a given path exists.
[ "Checks", "if", "a", "given", "path", "exists", "." ]
[ "\"\"\"\n Checks if a given path exists.\n\n :param path: the path to check.\n :return: the path which was checked.\n :raises: ValueError: if the path is not valid, does not exist.\n \"\"\"" ]
[ { "param": "path", "type": "Union[str, pathlib.Path]" } ]
{ "returns": [ { "docstring": "the path which was checked.", "docstring_tokens": [ "the", "path", "which", "was", "checked", "." ], "type": null } ], "raises": [ { "docstring": "if the path is not valid, does not exist.", "docstring_tokens": [ "if", "the", "path", "is", "not", "valid", "does", "not", "exist", "." ], "type": null } ], "params": [ { "identifier": "path", "type": "Union[str, pathlib.Path]", "docstring": "the path to check.", "docstring_tokens": [ "the", "path", "to", "check", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import pathlib
from typing import Union

def check_if_path_exists(path: Union[str, pathlib.Path]):
    path = pathlib.Path(path)

    if not path.exists():
        raise ValueError(
            "Path {} is not valid. Please provide an existing path".format(path))

    return path
610,543
905
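A small usage sketch for check_if_path_exists; the directory names are illustrative and assume a typical Unix layout.
existing = check_if_path_exists("/tmp")       # returns pathlib.Path('/tmp') when it exists
try:
    check_if_path_exists("/no/such/dir")
except ValueError as err:
    print(err)  # Path /no/such/dir is not valid. Please provide an existing path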