Dataset schema (column, dtype, observed range):

    repo              stringlengths   7 .. 55
    path              stringlengths   4 .. 223
    url               stringlengths   87 .. 315
    code              stringlengths   75 .. 104k
    code_tokens       sequence
    docstring         stringlengths   1 .. 46.9k
    docstring_tokens  sequence
    language          stringclasses   1 value
    partition         stringclasses   3 values
    avg_line_len      float64         7.91 .. 980
saltstack/salt
salt/modules/boto_ec2.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_ec2.py#L1107-L1128
def create_key(key_name, save_path, region=None, key=None, keyid=None, profile=None):
    '''
    Creates a key and saves it to a given path.
    Returns the private key.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_ec2.create_key mykey /root/
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        key = conn.create_key_pair(key_name)
        log.debug("the key to return is : %s", key)
        key.save(save_path)
        return key.material
    except boto.exception.BotoServerError as e:
        log.debug(e)
        return False
[ "def", "create_key", "(", "key_name", ",", "save_path", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "try", ":", "key", "=", "conn", ".", "create_key_pair", "(", "key_name", ")", "log", ".", "debug", "(", "\"the key to return is : %s\"", ",", "key", ")", "key", ".", "save", "(", "save_path", ")", "return", "key", ".", "material", "except", "boto", ".", "exception", ".", "BotoServerError", "as", "e", ":", "log", ".", "debug", "(", "e", ")", "return", "False" ]
Creates a key and saves it to a given path.
Returns the private key.

CLI Example:

.. code-block:: bash

    salt myminion boto_ec2.create_key mykey /root/
[ "Creates", "a", "key", "and", "saves", "it", "to", "a", "given", "path", ".", "Returns", "the", "private", "key", "." ]
python
train
27.272727
dls-controls/pymalcolm
malcolm/modules/builtin/controllers/managercontroller.py
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/modules/builtin/controllers/managercontroller.py#L419-L423
def save(self, designName=""):
    # type: (ASaveDesign) -> None
    """Save the current design to file"""
    self.try_stateful_function(
        ss.SAVING, ss.READY, self.do_save, designName)
[ "def", "save", "(", "self", ",", "designName", "=", "\"\"", ")", ":", "# type: (ASaveDesign) -> None", "self", ".", "try_stateful_function", "(", "ss", ".", "SAVING", ",", "ss", ".", "READY", ",", "self", ".", "do_save", ",", "designName", ")" ]
Save the current design to file
[ "Save", "the", "current", "design", "to", "file" ]
python
train
41
muatik/flask-profiler
flask_profiler/flask_profiler.py
https://github.com/muatik/flask-profiler/blob/51b4354fc14f8cfdd538ec9db98fa1cf545ccd18/flask_profiler/flask_profiler.py#L167-L241
def registerInternalRouters(app):
    """
    These are the endpoints which are used to display measurements in the
    flask-profiler dashboard.

    Note: these should be defined after wrapping user defined endpoints
    via wrapAppEndpoints()

    :param app: Flask application instance
    :return:
    """
    urlPath = CONF.get("endpointRoot", "flask-profiler")

    fp = Blueprint(
        'flask-profiler', __name__,
        url_prefix="/" + urlPath,
        static_folder="static/dist/", static_url_path='/static/dist')

    @fp.route("/".format(urlPath))
    @auth.login_required
    def index():
        return fp.send_static_file("index.html")

    @fp.route("/api/measurements/".format(urlPath))
    @auth.login_required
    def filterMeasurements():
        args = dict(request.args.items())
        measurements = collection.filter(args)
        return jsonify({"measurements": list(measurements)})

    @fp.route("/api/measurements/grouped".format(urlPath))
    @auth.login_required
    def getMeasurementsSummary():
        args = dict(request.args.items())
        measurements = collection.getSummary(args)
        return jsonify({"measurements": list(measurements)})

    @fp.route("/api/measurements/<measurementId>".format(urlPath))
    @auth.login_required
    def getContext(measurementId):
        return jsonify(collection.get(measurementId))

    @fp.route("/api/measurements/timeseries/".format(urlPath))
    @auth.login_required
    def getRequestsTimeseries():
        args = dict(request.args.items())
        return jsonify({"series": collection.getTimeseries(args)})

    @fp.route("/api/measurements/methodDistribution/".format(urlPath))
    @auth.login_required
    def getMethodDistribution():
        args = dict(request.args.items())
        return jsonify({
            "distribution": collection.getMethodDistribution(args)})

    @fp.route("/db/dumpDatabase")
    @auth.login_required
    def dumpDatabase():
        response = jsonify({
            "summary": collection.getSummary()})
        response.headers["Content-Disposition"] = "attachment; filename=dump.json"
        return response

    @fp.route("/db/deleteDatabase")
    @auth.login_required
    def deleteDatabase():
        response = jsonify({
            "status": collection.truncate()})
        return response

    @fp.after_request
    def x_robots_tag_header(response):
        response.headers['X-Robots-Tag'] = 'noindex, nofollow'
        return response

    app.register_blueprint(fp)
[ "def", "registerInternalRouters", "(", "app", ")", ":", "urlPath", "=", "CONF", ".", "get", "(", "\"endpointRoot\"", ",", "\"flask-profiler\"", ")", "fp", "=", "Blueprint", "(", "'flask-profiler'", ",", "__name__", ",", "url_prefix", "=", "\"/\"", "+", "urlPath", ",", "static_folder", "=", "\"static/dist/\"", ",", "static_url_path", "=", "'/static/dist'", ")", "@", "fp", ".", "route", "(", "\"/\"", ".", "format", "(", "urlPath", ")", ")", "@", "auth", ".", "login_required", "def", "index", "(", ")", ":", "return", "fp", ".", "send_static_file", "(", "\"index.html\"", ")", "@", "fp", ".", "route", "(", "\"/api/measurements/\"", ".", "format", "(", "urlPath", ")", ")", "@", "auth", ".", "login_required", "def", "filterMeasurements", "(", ")", ":", "args", "=", "dict", "(", "request", ".", "args", ".", "items", "(", ")", ")", "measurements", "=", "collection", ".", "filter", "(", "args", ")", "return", "jsonify", "(", "{", "\"measurements\"", ":", "list", "(", "measurements", ")", "}", ")", "@", "fp", ".", "route", "(", "\"/api/measurements/grouped\"", ".", "format", "(", "urlPath", ")", ")", "@", "auth", ".", "login_required", "def", "getMeasurementsSummary", "(", ")", ":", "args", "=", "dict", "(", "request", ".", "args", ".", "items", "(", ")", ")", "measurements", "=", "collection", ".", "getSummary", "(", "args", ")", "return", "jsonify", "(", "{", "\"measurements\"", ":", "list", "(", "measurements", ")", "}", ")", "@", "fp", ".", "route", "(", "\"/api/measurements/<measurementId>\"", ".", "format", "(", "urlPath", ")", ")", "@", "auth", ".", "login_required", "def", "getContext", "(", "measurementId", ")", ":", "return", "jsonify", "(", "collection", ".", "get", "(", "measurementId", ")", ")", "@", "fp", ".", "route", "(", "\"/api/measurements/timeseries/\"", ".", "format", "(", "urlPath", ")", ")", "@", "auth", ".", "login_required", "def", "getRequestsTimeseries", "(", ")", ":", "args", "=", "dict", "(", "request", ".", "args", ".", "items", "(", ")", ")", "return", "jsonify", "(", "{", "\"series\"", ":", "collection", ".", "getTimeseries", "(", "args", ")", "}", ")", "@", "fp", ".", "route", "(", "\"/api/measurements/methodDistribution/\"", ".", "format", "(", "urlPath", ")", ")", "@", "auth", ".", "login_required", "def", "getMethodDistribution", "(", ")", ":", "args", "=", "dict", "(", "request", ".", "args", ".", "items", "(", ")", ")", "return", "jsonify", "(", "{", "\"distribution\"", ":", "collection", ".", "getMethodDistribution", "(", "args", ")", "}", ")", "@", "fp", ".", "route", "(", "\"/db/dumpDatabase\"", ")", "@", "auth", ".", "login_required", "def", "dumpDatabase", "(", ")", ":", "response", "=", "jsonify", "(", "{", "\"summary\"", ":", "collection", ".", "getSummary", "(", ")", "}", ")", "response", ".", "headers", "[", "\"Content-Disposition\"", "]", "=", "\"attachment; filename=dump.json\"", "return", "response", "@", "fp", ".", "route", "(", "\"/db/deleteDatabase\"", ")", "@", "auth", ".", "login_required", "def", "deleteDatabase", "(", ")", ":", "response", "=", "jsonify", "(", "{", "\"status\"", ":", "collection", ".", "truncate", "(", ")", "}", ")", "return", "response", "@", "fp", ".", "after_request", "def", "x_robots_tag_header", "(", "response", ")", ":", "response", ".", "headers", "[", "'X-Robots-Tag'", "]", "=", "'noindex, nofollow'", "return", "response", "app", ".", "register_blueprint", "(", "fp", ")" ]
These are the endpoints which are used to display measurements in the
flask-profiler dashboard.

Note: these should be defined after wrapping user defined endpoints
via wrapAppEndpoints()

:param app: Flask application instance
:return:
[ "These", "are", "the", "endpoints", "which", "are", "used", "to", "display", "measurements", "in", "the", "flask", "-", "profiler", "dashboard", "." ]
python
train
32.36
hyperledger/indy-plenum
plenum/common/stack_manager.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/stack_manager.py#L58-L98
def _parse_pool_transaction_file(
        ledger, nodeReg, cliNodeReg, nodeKeys, activeValidators,
        ledger_size=None):
    """
    helper function for parseLedgerForHaAndKeys
    """
    for _, txn in ledger.getAllTxn(to=ledger_size):
        if get_type(txn) == NODE:
            txn_data = get_payload_data(txn)
            nodeName = txn_data[DATA][ALIAS]
            clientStackName = nodeName + CLIENT_STACK_SUFFIX
            nHa = (txn_data[DATA][NODE_IP], txn_data[DATA][NODE_PORT]) \
                if (NODE_IP in txn_data[DATA] and NODE_PORT in txn_data[DATA]) \
                else None
            cHa = (txn_data[DATA][CLIENT_IP], txn_data[DATA][CLIENT_PORT]) \
                if (CLIENT_IP in txn_data[DATA] and CLIENT_PORT in txn_data[DATA]) \
                else None
            if nHa:
                nodeReg[nodeName] = HA(*nHa)
            if cHa:
                cliNodeReg[clientStackName] = HA(*cHa)

            try:
                # TODO: Need to handle abbreviated verkey
                key_type = 'verkey'
                verkey = cryptonymToHex(str(txn_data[TARGET_NYM]))
                key_type = 'identifier'
                cryptonymToHex(get_from(txn))
            except ValueError:
                logger.exception(
                    'Invalid {}. Rebuild pool transactions.'.format(key_type))
                exit('Invalid {}. Rebuild pool transactions.'.format(key_type))

            nodeKeys[nodeName] = verkey

            services = txn_data[DATA].get(SERVICES)
            if isinstance(services, list):
                if VALIDATOR in services:
                    activeValidators.add(nodeName)
                else:
                    activeValidators.discard(nodeName)
[ "def", "_parse_pool_transaction_file", "(", "ledger", ",", "nodeReg", ",", "cliNodeReg", ",", "nodeKeys", ",", "activeValidators", ",", "ledger_size", "=", "None", ")", ":", "for", "_", ",", "txn", "in", "ledger", ".", "getAllTxn", "(", "to", "=", "ledger_size", ")", ":", "if", "get_type", "(", "txn", ")", "==", "NODE", ":", "txn_data", "=", "get_payload_data", "(", "txn", ")", "nodeName", "=", "txn_data", "[", "DATA", "]", "[", "ALIAS", "]", "clientStackName", "=", "nodeName", "+", "CLIENT_STACK_SUFFIX", "nHa", "=", "(", "txn_data", "[", "DATA", "]", "[", "NODE_IP", "]", ",", "txn_data", "[", "DATA", "]", "[", "NODE_PORT", "]", ")", "if", "(", "NODE_IP", "in", "txn_data", "[", "DATA", "]", "and", "NODE_PORT", "in", "txn_data", "[", "DATA", "]", ")", "else", "None", "cHa", "=", "(", "txn_data", "[", "DATA", "]", "[", "CLIENT_IP", "]", ",", "txn_data", "[", "DATA", "]", "[", "CLIENT_PORT", "]", ")", "if", "(", "CLIENT_IP", "in", "txn_data", "[", "DATA", "]", "and", "CLIENT_PORT", "in", "txn_data", "[", "DATA", "]", ")", "else", "None", "if", "nHa", ":", "nodeReg", "[", "nodeName", "]", "=", "HA", "(", "*", "nHa", ")", "if", "cHa", ":", "cliNodeReg", "[", "clientStackName", "]", "=", "HA", "(", "*", "cHa", ")", "try", ":", "# TODO: Need to handle abbreviated verkey", "key_type", "=", "'verkey'", "verkey", "=", "cryptonymToHex", "(", "str", "(", "txn_data", "[", "TARGET_NYM", "]", ")", ")", "key_type", "=", "'identifier'", "cryptonymToHex", "(", "get_from", "(", "txn", ")", ")", "except", "ValueError", ":", "logger", ".", "exception", "(", "'Invalid {}. Rebuild pool transactions.'", ".", "format", "(", "key_type", ")", ")", "exit", "(", "'Invalid {}. Rebuild pool transactions.'", ".", "format", "(", "key_type", ")", ")", "nodeKeys", "[", "nodeName", "]", "=", "verkey", "services", "=", "txn_data", "[", "DATA", "]", ".", "get", "(", "SERVICES", ")", "if", "isinstance", "(", "services", ",", "list", ")", ":", "if", "VALIDATOR", "in", "services", ":", "activeValidators", ".", "add", "(", "nodeName", ")", "else", ":", "activeValidators", ".", "discard", "(", "nodeName", ")" ]
helper function for parseLedgerForHaAndKeys
[ "helper", "function", "for", "parseLedgerForHaAndKeys" ]
python
train
44.829268
google/transitfeed
feedvalidator.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/feedvalidator.py#L700-L705
def RunValidationFromOptions(feed, options):
    """Validate feed, run in profiler if in options, and return an exit code."""
    if options.performance:
        return ProfileRunValidationOutputFromOptions(feed, options)
    else:
        return RunValidationOutputFromOptions(feed, options)
[ "def", "RunValidationFromOptions", "(", "feed", ",", "options", ")", ":", "if", "options", ".", "performance", ":", "return", "ProfileRunValidationOutputFromOptions", "(", "feed", ",", "options", ")", "else", ":", "return", "RunValidationOutputFromOptions", "(", "feed", ",", "options", ")" ]
Validate feed, run in profiler if in options, and return an exit code.
[ "Validate", "feed", "run", "in", "profiler", "if", "in", "options", "and", "return", "an", "exit", "code", "." ]
python
train
45.5
ethereum/py-evm
eth/db/journal.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/db/journal.py#L372-L377
def discard(self, changeset_id: uuid.UUID) -> None:
    """
    Throws away all journaled data starting at the given changeset
    """
    self._validate_changeset(changeset_id)
    self.journal.pop_changeset(changeset_id)
[ "def", "discard", "(", "self", ",", "changeset_id", ":", "uuid", ".", "UUID", ")", "->", "None", ":", "self", ".", "_validate_changeset", "(", "changeset_id", ")", "self", ".", "journal", ".", "pop_changeset", "(", "changeset_id", ")" ]
Throws away all journaled data starting at the given changeset
[ "Throws", "away", "all", "journaled", "data", "starting", "at", "the", "given", "changeset" ]
python
train
39.5
Richienb/quilt
src/quilt_lang/__init__.py
https://github.com/Richienb/quilt/blob/4a659cac66f5286ad046d54a12fd850be5606643/src/quilt_lang/__init__.py#L817-L829
def textalign(text, maxlength, align='left'):
    """
    Align Text When Given Full Length
    """
    if align == 'left':
        return text
    elif align == 'centre' or align == 'center':
        spaces = ' ' * (int((maxlength - len(text)) / 2))
    elif align == 'right':
        spaces = (maxlength - len(text))
    else:
        raise ValueError("Invalid alignment specified.")
    return spaces + text
[ "def", "textalign", "(", "text", ",", "maxlength", ",", "align", "=", "'left'", ")", ":", "if", "align", "==", "'left'", ":", "return", "text", "elif", "align", "==", "'centre'", "or", "align", "==", "'center'", ":", "spaces", "=", "' '", "*", "(", "int", "(", "(", "maxlength", "-", "len", "(", "text", ")", ")", "/", "2", ")", ")", "elif", "align", "==", "'right'", ":", "spaces", "=", "(", "maxlength", "-", "len", "(", "text", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid alignment specified.\"", ")", "return", "spaces", "+", "text" ]
Align Text When Given Full Length
[ "Align", "Text", "When", "Given", "Full", "Length" ]
python
train
30.615385
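A usage sketch for the textalign record above. The import path is inferred from the record's path field (src/quilt_lang/__init__.py), so treat it as an assumption. Note that, as recorded, the 'right' branch binds spaces to an int, so right alignment raises TypeError on the final spaces + text; ' ' * (maxlength - len(text)) was presumably intended.

```python
# Hypothetical import; path inferred from the record above.
from quilt_lang import textalign

print(repr(textalign('hi', 10)))            # 'hi' -- 'left' is the default, no padding added
print(repr(textalign('hi', 10, 'center')))  # '    hi' -- int((10 - 2) / 2) = 4 leading spaces
# textalign('hi', 10, 'right') raises TypeError as recorded (int + str).
```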
jobovy/galpy
galpy/orbit/planarOrbit.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/planarOrbit.py#L221-L248
def integrate(self,t,pot,method='symplec4_c',dt=None):
    """
    NAME:

       integrate

    PURPOSE:

       integrate the orbit

    INPUT:

       t - list of times at which to output (0 has to be in this!)

       pot - potential instance or list of instances

       method= 'odeint' for scipy's odeint
               'leapfrog' for a simple leapfrog implementation
               'leapfrog_c' for a simple leapfrog implementation in C
               'rk4_c' for a 4th-order Runge-Kutta integrator in C
               'rk6_c' for a 6-th order Runge-Kutta integrator in C
               'dopr54_c' for a Dormand-Prince integrator in C (generally the fastest)

       dt= (None) if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize

    OUTPUT:

       error message number (get the actual orbit using getOrbit()

    HISTORY:

       2010-07-20
    """
    if hasattr(self,'_orbInterp'):
        delattr(self,'_orbInterp')
    if hasattr(self,'rs'):
        delattr(self,'rs')
    thispot= RZToplanarPotential(pot)
    self.t= nu.array(t)
    self._pot= thispot
    self.orbit, msg= _integrateROrbit(self.vxvv,thispot,t,method,dt)
    return msg
[ "def", "integrate", "(", "self", ",", "t", ",", "pot", ",", "method", "=", "'symplec4_c'", ",", "dt", "=", "None", ")", ":", "if", "hasattr", "(", "self", ",", "'_orbInterp'", ")", ":", "delattr", "(", "self", ",", "'_orbInterp'", ")", "if", "hasattr", "(", "self", ",", "'rs'", ")", ":", "delattr", "(", "self", ",", "'rs'", ")", "thispot", "=", "RZToplanarPotential", "(", "pot", ")", "self", ".", "t", "=", "nu", ".", "array", "(", "t", ")", "self", ".", "_pot", "=", "thispot", "self", ".", "orbit", ",", "msg", "=", "_integrateROrbit", "(", "self", ".", "vxvv", ",", "thispot", ",", "t", ",", "method", ",", "dt", ")", "return", "msg" ]
NAME:

   integrate

PURPOSE:

   integrate the orbit

INPUT:

   t - list of times at which to output (0 has to be in this!)

   pot - potential instance or list of instances

   method= 'odeint' for scipy's odeint
           'leapfrog' for a simple leapfrog implementation
           'leapfrog_c' for a simple leapfrog implementation in C
           'rk4_c' for a 4th-order Runge-Kutta integrator in C
           'rk6_c' for a 6-th order Runge-Kutta integrator in C
           'dopr54_c' for a Dormand-Prince integrator in C (generally the fastest)

   dt= (None) if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize

OUTPUT:

   error message number (get the actual orbit using getOrbit()

HISTORY:

   2010-07-20
[ "NAME", ":", "integrate", "PURPOSE", ":", "integrate", "the", "orbit", "INPUT", ":", "t", "-", "list", "of", "times", "at", "which", "to", "output", "(", "0", "has", "to", "be", "in", "this!", ")", "pot", "-", "potential", "instance", "or", "list", "of", "instances", "method", "=", "odeint", "for", "scipy", "s", "odeint", "leapfrog", "for", "a", "simple", "leapfrog", "implementation", "leapfrog_c", "for", "a", "simple", "leapfrog", "implementation", "in", "C", "rk4_c", "for", "a", "4th", "-", "order", "Runge", "-", "Kutta", "integrator", "in", "C", "rk6_c", "for", "a", "6", "-", "th", "order", "Runge", "-", "Kutta", "integrator", "in", "C", "dopr54_c", "for", "a", "Dormand", "-", "Prince", "integrator", "in", "C", "(", "generally", "the", "fastest", ")", "dt", "=", "(", "None", ")", "if", "set", "force", "the", "integrator", "to", "use", "this", "basic", "stepsize", ";", "must", "be", "an", "integer", "divisor", "of", "output", "stepsize", "OUTPUT", ":", "error", "message", "number", "(", "get", "the", "actual", "orbit", "using", "getOrbit", "()", "HISTORY", ":", "2010", "-", "07", "-", "20" ]
python
train
44.821429
MosesSymeonidis/aggregation_builder
aggregation_builder/operators/string.py
https://github.com/MosesSymeonidis/aggregation_builder/blob/a1f4b580401d400c53206e9c020e413166254274/aggregation_builder/operators/string.py#L147-L164
def INDEX_OF_CP(string_expression, substring_expression, start=None, end=None):
    """
    Searches a string for an occurence of a substring and returns the UTF-8 code point index (zero-based) of the first occurence.
    If the substring is not found, returns -1.
    https://docs.mongodb.com/manual/reference/operator/aggregation/indexOfCP/ for more details

    :param string_expression: The string or expression of string
    :param substring_expression: The string or expression of substring
    :param start: A number that can be represented as integers (or expression), that specifies the starting index position for the search.
    :param end: A number that can be represented as integers (or expression), that specifies the ending index position for the search.
    :return: Aggregation operator
    """
    res = [string_expression, substring_expression]
    if start is not None:
        res.append(start)
    if end is not None:
        res.append(end)
    return {'$indexOfCP': res}
[ "def", "INDEX_OF_CP", "(", "string_expression", ",", "substring_expression", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "res", "=", "[", "string_expression", ",", "substring_expression", "]", "if", "start", "is", "not", "None", ":", "res", ".", "append", "(", "start", ")", "if", "end", "is", "not", "None", ":", "res", ".", "append", "(", "end", ")", "return", "{", "'$indexOfCP'", ":", "res", "}" ]
Searches a string for an occurence of a substring and returns the UTF-8 code point index (zero-based) of the first occurence.
If the substring is not found, returns -1.
https://docs.mongodb.com/manual/reference/operator/aggregation/indexOfCP/ for more details

:param string_expression: The string or expression of string
:param substring_expression: The string or expression of substring
:param start: A number that can be represented as integers (or expression), that specifies the starting index position for the search.
:param end: A number that can be represented as integers (or expression), that specifies the ending index position for the search.
:return: Aggregation operator
[ "Searches", "a", "string", "for", "an", "occurence", "of", "a", "substring", "and", "returns", "the", "UTF", "-", "8", "code", "point", "index", "(", "zero", "-", "based", ")", "of", "the", "first", "occurence", ".", "If", "the", "substring", "is", "not", "found", "returns", "-", "1", ".", "https", ":", "//", "docs", ".", "mongodb", ".", "com", "/", "manual", "/", "reference", "/", "operator", "/", "aggregation", "/", "indexOfCP", "/", "for", "more", "details", ":", "param", "string_expression", ":", "The", "string", "or", "expression", "of", "string", ":", "param", "substring_expression", ":", "The", "string", "or", "expression", "of", "substring", ":", "param", "start", ":", "A", "number", "that", "can", "be", "represented", "as", "integers", "(", "or", "expression", ")", "that", "specifies", "the", "starting", "index", "position", "for", "the", "search", ".", ":", "param", "end", ":", "A", "number", "that", "can", "be", "represented", "as", "integers", "(", "or", "expression", ")", "that", "specifies", "the", "ending", "index", "position", "for", "the", "search", ".", ":", "return", ":", "Aggregation", "operator" ]
python
train
54.5
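A usage sketch for the INDEX_OF_CP record above; the function only assembles the aggregation-operator dict, so the expected values follow directly from its body. The import path is inferred from the record's path field, so treat it as an assumption. One caveat worth noting: passing end without start would emit a three-element array that MongoDB reads as [string, substring, start], so callers should supply start whenever they supply end.

```python
# Hypothetical import; module path inferred from the record above.
from aggregation_builder.operators.string import INDEX_OF_CP

print(INDEX_OF_CP('$item', 'foo'))
# {'$indexOfCP': ['$item', 'foo']}
print(INDEX_OF_CP('$item', 'foo', start=2, end=5))
# {'$indexOfCP': ['$item', 'foo', 2, 5]}
```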
numenta/nupic
src/nupic/database/client_jobs_dao.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/client_jobs_dao.py#L2012-L2041
def getFieldsForActiveJobsOfType(self, jobType, fields=[]):
    """ Helper function for querying the models table including relevant job
    info where the job type matches the specified jobType. Only records for
    which there is a matching jobId in both tables is returned, and only the
    requested fields are returned in each result, assuming that there is not
    a conflict. This function is useful, for example, in querying a cluster
    for a list of actively running production models (according to the state
    of the client jobs database). jobType must be one of the JOB_TYPE_XXXX
    enumerations.

    Parameters:
    ----------------------------------------------------------------
    jobType:   jobType enum
    fields:    list of fields to return

    Returns:   List of tuples containing the jobId and requested field values
    """
    dbFields = [self._jobs.pubToDBNameDict[x] for x in fields]
    dbFieldsStr = ','.join(['job_id'] + dbFields)

    with ConnectionFactory.get() as conn:
        query = \
            'SELECT DISTINCT %s ' \
            'FROM %s j ' \
            'LEFT JOIN %s m USING(job_id) ' \
            'WHERE j.status != %%s ' \
            'AND _eng_job_type = %%s' % (dbFieldsStr, self.jobsTableName,
                                         self.modelsTableName)

        conn.cursor.execute(query, [self.STATUS_COMPLETED, jobType])
        return conn.cursor.fetchall()
[ "def", "getFieldsForActiveJobsOfType", "(", "self", ",", "jobType", ",", "fields", "=", "[", "]", ")", ":", "dbFields", "=", "[", "self", ".", "_jobs", ".", "pubToDBNameDict", "[", "x", "]", "for", "x", "in", "fields", "]", "dbFieldsStr", "=", "','", ".", "join", "(", "[", "'job_id'", "]", "+", "dbFields", ")", "with", "ConnectionFactory", ".", "get", "(", ")", "as", "conn", ":", "query", "=", "'SELECT DISTINCT %s '", "'FROM %s j '", "'LEFT JOIN %s m USING(job_id) '", "'WHERE j.status != %%s '", "'AND _eng_job_type = %%s'", "%", "(", "dbFieldsStr", ",", "self", ".", "jobsTableName", ",", "self", ".", "modelsTableName", ")", "conn", ".", "cursor", ".", "execute", "(", "query", ",", "[", "self", ".", "STATUS_COMPLETED", ",", "jobType", "]", ")", "return", "conn", ".", "cursor", ".", "fetchall", "(", ")" ]
Helper function for querying the models table including relevant job
info where the job type matches the specified jobType. Only records for
which there is a matching jobId in both tables is returned, and only the
requested fields are returned in each result, assuming that there is not
a conflict. This function is useful, for example, in querying a cluster
for a list of actively running production models (according to the state
of the client jobs database). jobType must be one of the JOB_TYPE_XXXX
enumerations.

Parameters:
----------------------------------------------------------------
jobType:   jobType enum
fields:    list of fields to return

Returns:   List of tuples containing the jobId and requested field values
[ "Helper", "function", "for", "querying", "the", "models", "table", "including", "relevant", "job", "info", "where", "the", "job", "type", "matches", "the", "specified", "jobType", ".", "Only", "records", "for", "which", "there", "is", "a", "matching", "jobId", "in", "both", "tables", "is", "returned", "and", "only", "the", "requested", "fields", "are", "returned", "in", "each", "result", "assuming", "that", "there", "is", "not", "a", "conflict", ".", "This", "function", "is", "useful", "for", "example", "in", "querying", "a", "cluster", "for", "a", "list", "of", "actively", "running", "production", "models", "(", "according", "to", "the", "state", "of", "the", "client", "jobs", "database", ")", ".", "jobType", "must", "be", "one", "of", "the", "JOB_TYPE_XXXX", "enumerations", "." ]
python
valid
44.666667
saltstack/salt
salt/states/glassfish.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glassfish.py#L52-L71
def _is_updated(old_conf, new_conf):
    '''
    Compare the API results to the current statefile data
    '''
    changed = {}

    # Dirty json hacking to get parameters in the same format
    new_conf = _json_to_unicode(salt.utils.json.loads(
        salt.utils.json.dumps(new_conf, ensure_ascii=False)))
    old_conf = salt.utils.json.loads(salt.utils.json.dumps(old_conf, ensure_ascii=False))

    for key, value in old_conf.items():
        oldval = six.text_type(value).lower()
        if key in new_conf:
            newval = six.text_type(new_conf[key]).lower()
        if oldval == 'null' or oldval == 'none':
            oldval = ''
        if key in new_conf and newval != oldval:
            changed[key] = {'old': oldval, 'new': newval}
    return changed
[ "def", "_is_updated", "(", "old_conf", ",", "new_conf", ")", ":", "changed", "=", "{", "}", "# Dirty json hacking to get parameters in the same format", "new_conf", "=", "_json_to_unicode", "(", "salt", ".", "utils", ".", "json", ".", "loads", "(", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "new_conf", ",", "ensure_ascii", "=", "False", ")", ")", ")", "old_conf", "=", "salt", ".", "utils", ".", "json", ".", "loads", "(", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "old_conf", ",", "ensure_ascii", "=", "False", ")", ")", "for", "key", ",", "value", "in", "old_conf", ".", "items", "(", ")", ":", "oldval", "=", "six", ".", "text_type", "(", "value", ")", ".", "lower", "(", ")", "if", "key", "in", "new_conf", ":", "newval", "=", "six", ".", "text_type", "(", "new_conf", "[", "key", "]", ")", ".", "lower", "(", ")", "if", "oldval", "==", "'null'", "or", "oldval", "==", "'none'", ":", "oldval", "=", "''", "if", "key", "in", "new_conf", "and", "newval", "!=", "oldval", ":", "changed", "[", "key", "]", "=", "{", "'old'", ":", "oldval", ",", "'new'", ":", "newval", "}", "return", "changed" ]
Compare the API results to the current statefile data
[ "Compare", "the", "API", "results", "to", "the", "current", "statefile", "data" ]
python
train
37.5
agile4you/SchemaFactory
schema_factory/nodes.py
https://github.com/agile4you/SchemaFactory/blob/515e3fb84cddf70fc17e5d300c74c3a63539f223/schema_factory/nodes.py#L131-L139
def field_value(self, value):
    """Validate against NodeType.
    """
    if not self.is_array:
        return self.field_type(value)

    if isinstance(value, (list, tuple, set)):
        return [self.field_type(item) for item in value]

    return self.field_type(value)
[ "def", "field_value", "(", "self", ",", "value", ")", ":", "if", "not", "self", ".", "is_array", ":", "return", "self", ".", "field_type", "(", "value", ")", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ",", "set", ")", ")", ":", "return", "[", "self", ".", "field_type", "(", "item", ")", "for", "item", "in", "value", "]", "return", "self", ".", "field_type", "(", "value", ")" ]
Validate against NodeType.
[ "Validate", "against", "NodeType", "." ]
python
train
32.555556
fastai/fastai
fastai/callbacks/tracker.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callbacks/tracker.py#L39-L51
def get_monitor_value(self):
    "Pick the monitored value."
    if self.monitor=='trn_loss' and len(self.learn.recorder.losses) == 0: return None
    elif len(self.learn.recorder.val_losses) == 0: return None
    values = {'train_loss':self.learn.recorder.losses[-1].cpu().numpy(),
              'valid_loss':self.learn.recorder.val_losses[-1]}
    if values['valid_loss'] is None: return
    if self.learn.recorder.metrics:
        for m, n in zip(self.learn.recorder.metrics[-1],self.learn.recorder.names[3:-1]):
            values[n] = m
    if values.get(self.monitor) is None:
        warn(f'{self.__class__} conditioned on metric `{self.monitor}` which is not available. Available metrics are: {", ".join(map(str, self.learn.recorder.names[1:-1]))}')
    return values.get(self.monitor)
[ "def", "get_monitor_value", "(", "self", ")", ":", "if", "self", ".", "monitor", "==", "'trn_loss'", "and", "len", "(", "self", ".", "learn", ".", "recorder", ".", "losses", ")", "==", "0", ":", "return", "None", "elif", "len", "(", "self", ".", "learn", ".", "recorder", ".", "val_losses", ")", "==", "0", ":", "return", "None", "values", "=", "{", "'train_loss'", ":", "self", ".", "learn", ".", "recorder", ".", "losses", "[", "-", "1", "]", ".", "cpu", "(", ")", ".", "numpy", "(", ")", ",", "'valid_loss'", ":", "self", ".", "learn", ".", "recorder", ".", "val_losses", "[", "-", "1", "]", "}", "if", "values", "[", "'valid_loss'", "]", "is", "None", ":", "return", "if", "self", ".", "learn", ".", "recorder", ".", "metrics", ":", "for", "m", ",", "n", "in", "zip", "(", "self", ".", "learn", ".", "recorder", ".", "metrics", "[", "-", "1", "]", ",", "self", ".", "learn", ".", "recorder", ".", "names", "[", "3", ":", "-", "1", "]", ")", ":", "values", "[", "n", "]", "=", "m", "if", "values", ".", "get", "(", "self", ".", "monitor", ")", "is", "None", ":", "warn", "(", "f'{self.__class__} conditioned on metric `{self.monitor}` which is not available. Available metrics are: {\", \".join(map(str, self.learn.recorder.names[1:-1]))}'", ")", "return", "values", ".", "get", "(", "self", ".", "monitor", ")" ]
Pick the monitored value.
[ "Pick", "the", "monitored", "value", "." ]
python
train
63.769231
takuti/flurs
flurs/utils/metric.py
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/metric.py#L159-L189
def ndcg(truth, recommend, k=None):
    """Normalized Discounted Cumulative Grain (NDCG).

    Args:
        truth (numpy 1d array): Set of truth samples.
        recommend (numpy 1d array): Ordered set of recommended samples.
        k (int): Top-k items in `recommend` will be recommended.

    Returns:
        float: NDCG.

    """
    if k is None:
        k = len(recommend)

    def idcg(n_possible_truth):
        res = 0.
        for n in range(n_possible_truth):
            res += 1. / np.log2(n + 2)
        return res

    dcg = 0.
    for n, r in enumerate(recommend[:k]):
        if r not in truth:
            continue
        dcg += 1. / np.log2(n + 2)

    res_idcg = idcg(np.min([truth.size, k]))
    if res_idcg == 0.:
        return 0.

    return dcg / res_idcg
[ "def", "ndcg", "(", "truth", ",", "recommend", ",", "k", "=", "None", ")", ":", "if", "k", "is", "None", ":", "k", "=", "len", "(", "recommend", ")", "def", "idcg", "(", "n_possible_truth", ")", ":", "res", "=", "0.", "for", "n", "in", "range", "(", "n_possible_truth", ")", ":", "res", "+=", "1.", "/", "np", ".", "log2", "(", "n", "+", "2", ")", "return", "res", "dcg", "=", "0.", "for", "n", ",", "r", "in", "enumerate", "(", "recommend", "[", ":", "k", "]", ")", ":", "if", "r", "not", "in", "truth", ":", "continue", "dcg", "+=", "1.", "/", "np", ".", "log2", "(", "n", "+", "2", ")", "res_idcg", "=", "idcg", "(", "np", ".", "min", "(", "[", "truth", ".", "size", ",", "k", "]", ")", ")", "if", "res_idcg", "==", "0.", ":", "return", "0.", "return", "dcg", "/", "res_idcg" ]
Normalized Discounted Cumulative Grain (NDCG).

Args:
    truth (numpy 1d array): Set of truth samples.
    recommend (numpy 1d array): Ordered set of recommended samples.
    k (int): Top-k items in `recommend` will be recommended.

Returns:
    float: NDCG.
[ "Normalized", "Discounted", "Cumulative", "Grain", "(", "NDCG", ")", "." ]
python
train
24.258065
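A worked example for the ndcg record above; the numbers follow directly from the function body, and the import path is inferred from the record's path field (flurs/utils/metric.py), so treat it as an assumption.

```python
import numpy as np
from flurs.utils.metric import ndcg  # path inferred from the record above

truth = np.array([1, 2, 4])          # relevant items
recommend = np.array([4, 1, 3, 2])   # ranked recommendations; hits at ranks 1, 2 and 4

# DCG  = 1/log2(2) + 1/log2(3) + 1/log2(5) ~= 2.0616
# IDCG = 1/log2(2) + 1/log2(3) + 1/log2(4) ~= 2.1309  (3 relevant items, k = 4)
print(ndcg(truth, recommend))        # ~0.967
print(ndcg(truth, recommend, k=2))   # 1.0 -- both of the top-2 items are relevant
```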
jim-easterbrook/pyctools
src/pyctools/core/compound.py
https://github.com/jim-easterbrook/pyctools/blob/2a958665326892f45f249bebe62c2c23f306732b/src/pyctools/core/compound.py#L178-L185
def bind(self, source, dest, destmeth):
    """Guild compatible version of :py:meth:`connect`.

    This allows Pyctools compound components to be used in `Guild
    <https://github.com/sparkslabs/guild>`_ pipelines.

    """
    self.connect(source, getattr(dest, destmeth))
[ "def", "bind", "(", "self", ",", "source", ",", "dest", ",", "destmeth", ")", ":", "self", ".", "connect", "(", "source", ",", "getattr", "(", "dest", ",", "destmeth", ")", ")" ]
Guild compatible version of :py:meth:`connect`. This allows Pyctools compound components to be used in `Guild <https://github.com/sparkslabs/guild>`_ pipelines.
[ "Guild", "compatible", "version", "of", ":", "py", ":", "meth", ":", "connect", "." ]
python
train
36
aws/aws-iot-device-sdk-python
AWSIoTPythonSDK/core/protocol/paho/client.py
https://github.com/aws/aws-iot-device-sdk-python/blob/f0aa2ce34b21dd2e44f4fb7e1d058656aaf2fc62/AWSIoTPythonSDK/core/protocol/paho/client.py#L1257-L1293
def will_set(self, topic, payload=None, qos=0, retain=False):
    """Set a Will to be sent by the broker in case the client disconnects
    unexpectedly.

    This must be called before connect() to have any effect.

    topic: The topic that the will message should be published on.
    payload: The message to send as a will. If not given, or set to None a
    zero length message will be used as the will. Passing an int or float
    will result in the payload being converted to a string representing
    that number. If you wish to send a true int/float, use struct.pack() to
    create the payload you require.
    qos: The quality of service level to use for the will.
    retain: If set to true, the will message will be set as the "last known
    good"/retained message for the topic.

    Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
    zero string length.
    """
    if topic is None or len(topic) == 0:
        raise ValueError('Invalid topic.')
    if qos<0 or qos>2:
        raise ValueError('Invalid QoS level.')
    if isinstance(payload, str):
        self._will_payload = payload.encode('utf-8')
    elif isinstance(payload, bytearray):
        self._will_payload = payload
    elif isinstance(payload, int) or isinstance(payload, float):
        self._will_payload = str(payload)
    elif payload is None:
        self._will_payload = None
    else:
        raise TypeError('payload must be a string, bytearray, int, float or None.')

    self._will = True
    self._will_topic = topic.encode('utf-8')
    self._will_qos = qos
    self._will_retain = retain
[ "def", "will_set", "(", "self", ",", "topic", ",", "payload", "=", "None", ",", "qos", "=", "0", ",", "retain", "=", "False", ")", ":", "if", "topic", "is", "None", "or", "len", "(", "topic", ")", "==", "0", ":", "raise", "ValueError", "(", "'Invalid topic.'", ")", "if", "qos", "<", "0", "or", "qos", ">", "2", ":", "raise", "ValueError", "(", "'Invalid QoS level.'", ")", "if", "isinstance", "(", "payload", ",", "str", ")", ":", "self", ".", "_will_payload", "=", "payload", ".", "encode", "(", "'utf-8'", ")", "elif", "isinstance", "(", "payload", ",", "bytearray", ")", ":", "self", ".", "_will_payload", "=", "payload", "elif", "isinstance", "(", "payload", ",", "int", ")", "or", "isinstance", "(", "payload", ",", "float", ")", ":", "self", ".", "_will_payload", "=", "str", "(", "payload", ")", "elif", "payload", "is", "None", ":", "self", ".", "_will_payload", "=", "None", "else", ":", "raise", "TypeError", "(", "'payload must be a string, bytearray, int, float or None.'", ")", "self", ".", "_will", "=", "True", "self", ".", "_will_topic", "=", "topic", ".", "encode", "(", "'utf-8'", ")", "self", ".", "_will_qos", "=", "qos", "self", ".", "_will_retain", "=", "retain" ]
Set a Will to be sent by the broker in case the client disconnects
unexpectedly.

This must be called before connect() to have any effect.

topic: The topic that the will message should be published on.
payload: The message to send as a will. If not given, or set to None a
zero length message will be used as the will. Passing an int or float
will result in the payload being converted to a string representing
that number. If you wish to send a true int/float, use struct.pack() to
create the payload you require.
qos: The quality of service level to use for the will.
retain: If set to true, the will message will be set as the "last known
good"/retained message for the topic.

Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length.
[ "Set", "a", "Will", "to", "be", "sent", "by", "the", "broker", "in", "case", "the", "client", "disconnects", "unexpectedly", "." ]
python
train
45.756757
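A usage sketch for the will_set record above. The record is the paho MQTT client vendored into the AWS IoT SDK; upstream paho-mqtt exposes the same method with the same semantics, so the example below uses it (paho-mqtt 1.x constructor shown; broker host and topic are illustrative).

```python
import paho.mqtt.client as mqtt  # upstream equivalent of the vendored client

client = mqtt.Client()
# Must be called before connect() to have any effect, per the docstring above.
client.will_set("devices/sensor-1/status", payload="offline", qos=1, retain=True)
client.connect("broker.example.com", 1883)
```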
google/grr
grr/server/grr_response_server/aff4.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4.py#L1880-L1903
def Close(self):
    """Close and destroy the object.

    This is similar to Flush, but does not maintain object validity. Hence the
    object should not be interacted with after Close().

    Raises:
       LockError: The lease for this object has expired.
    """
    if self.locked and self.CheckLease() == 0:
        raise LockError("Can not update lease that has already expired.")

    self._WriteAttributes()

    # Releasing this lock allows another thread to own it.
    if self.locked:
        self.transaction.Release()

    if self.parent:
        self.parent.Close()

    # Interacting with a closed object is a bug. We need to catch this ASAP so
    # we remove all mode permissions from this object.
    self.mode = ""
[ "def", "Close", "(", "self", ")", ":", "if", "self", ".", "locked", "and", "self", ".", "CheckLease", "(", ")", "==", "0", ":", "raise", "LockError", "(", "\"Can not update lease that has already expired.\"", ")", "self", ".", "_WriteAttributes", "(", ")", "# Releasing this lock allows another thread to own it.", "if", "self", ".", "locked", ":", "self", ".", "transaction", ".", "Release", "(", ")", "if", "self", ".", "parent", ":", "self", ".", "parent", ".", "Close", "(", ")", "# Interacting with a closed object is a bug. We need to catch this ASAP so", "# we remove all mode permissions from this object.", "self", ".", "mode", "=", "\"\"" ]
Close and destroy the object.

This is similar to Flush, but does not maintain object validity. Hence the
object should not be interacted with after Close().

Raises:
   LockError: The lease for this object has expired.
[ "Close", "and", "destroy", "the", "object", "." ]
python
train
29.416667
fastai/fastai
fastai/vision/image.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/image.py#L416-L424
def rle_decode(mask_rle:str, shape:Tuple[int,int])->NPArrayMask:
    "Return an image array from run-length encoded string `mask_rle` with `shape`."
    s = mask_rle.split()
    starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
    starts -= 1
    ends = starts + lengths
    img = np.zeros(shape[0]*shape[1], dtype=np.uint)
    for low, up in zip(starts, ends): img[low:up] = 1
    return img.reshape(shape)
[ "def", "rle_decode", "(", "mask_rle", ":", "str", ",", "shape", ":", "Tuple", "[", "int", ",", "int", "]", ")", "->", "NPArrayMask", ":", "s", "=", "mask_rle", ".", "split", "(", ")", "starts", ",", "lengths", "=", "[", "np", ".", "asarray", "(", "x", ",", "dtype", "=", "int", ")", "for", "x", "in", "(", "s", "[", "0", ":", "]", "[", ":", ":", "2", "]", ",", "s", "[", "1", ":", "]", "[", ":", ":", "2", "]", ")", "]", "starts", "-=", "1", "ends", "=", "starts", "+", "lengths", "img", "=", "np", ".", "zeros", "(", "shape", "[", "0", "]", "*", "shape", "[", "1", "]", ",", "dtype", "=", "np", ".", "uint", ")", "for", "low", ",", "up", "in", "zip", "(", "starts", ",", "ends", ")", ":", "img", "[", "low", ":", "up", "]", "=", "1", "return", "img", ".", "reshape", "(", "shape", ")" ]
Return an image array from run-length encoded string `mask_rle` with `shape`.
[ "Return", "an", "image", "array", "from", "run", "-", "length", "encoded", "string", "mask_rle", "with", "shape", "." ]
python
train
47.666667
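A worked example for the rle_decode record above, restated standalone so it runs without fastai installed (the logic is copied from the function body; the dtype is narrowed to uint8 here purely for compact printing). Pairs in the RLE string are 1-based (start, length) offsets into the flattened image.

```python
import numpy as np

def rle_decode(mask_rle, shape):
    s = mask_rle.split()
    starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
    starts -= 1                      # RLE starts are 1-based
    ends = starts + lengths
    img = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for low, up in zip(starts, ends):
        img[low:up] = 1
    return img.reshape(shape)

print(rle_decode("1 3 10 2", (4, 4)))
# [[1 1 1 0]
#  [0 0 0 0]
#  [0 1 1 0]
#  [0 0 0 0]]
```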
noumar/iso639
iso639/iso639.py
https://github.com/noumar/iso639/blob/2175cf04b8b8cec79d99a6c4ad31295d67c22cd6/iso639/iso639.py#L230-L256
def retired(self):
    """
    Function for generating retired languages.
    Returns a dict('code', (datetime, [language, ...], 'description')).
    """
    def gen():
        import csv
        import re
        from datetime import datetime
        from pkg_resources import resource_filename
        with open(resource_filename(__package__, 'iso-639-3_Retirements.tab')) as rf:
            rtd = list(csv.reader(rf, delimiter='\t'))[1:]
        rc = [r[0] for r in rtd]
        for i, _, _, m, s, d in rtd:
            d = datetime.strptime(d, '%Y-%m-%d')
            if not m:
                m = re.findall('\[([a-z]{3})\]', s)
            if m:
                m = [m] if isinstance(m, str) else m
                yield i, (d, [self.get(part3=x) for x in m if x not in rc], s)
            else:
                yield i, (d, [], s)
        yield 'sh', self.get(part3='hbs')  # Add 'sh' as deprecated
    return dict(gen())
[ "def", "retired", "(", "self", ")", ":", "def", "gen", "(", ")", ":", "import", "csv", "import", "re", "from", "datetime", "import", "datetime", "from", "pkg_resources", "import", "resource_filename", "with", "open", "(", "resource_filename", "(", "__package__", ",", "'iso-639-3_Retirements.tab'", ")", ")", "as", "rf", ":", "rtd", "=", "list", "(", "csv", ".", "reader", "(", "rf", ",", "delimiter", "=", "'\\t'", ")", ")", "[", "1", ":", "]", "rc", "=", "[", "r", "[", "0", "]", "for", "r", "in", "rtd", "]", "for", "i", ",", "_", ",", "_", ",", "m", ",", "s", ",", "d", "in", "rtd", ":", "d", "=", "datetime", ".", "strptime", "(", "d", ",", "'%Y-%m-%d'", ")", "if", "not", "m", ":", "m", "=", "re", ".", "findall", "(", "'\\[([a-z]{3})\\]'", ",", "s", ")", "if", "m", ":", "m", "=", "[", "m", "]", "if", "isinstance", "(", "m", ",", "str", ")", "else", "m", "yield", "i", ",", "(", "d", ",", "[", "self", ".", "get", "(", "part3", "=", "x", ")", "for", "x", "in", "m", "if", "x", "not", "in", "rc", "]", ",", "s", ")", "else", ":", "yield", "i", ",", "(", "d", ",", "[", "]", ",", "s", ")", "yield", "'sh'", ",", "self", ".", "get", "(", "part3", "=", "'hbs'", ")", "# Add 'sh' as deprecated", "return", "dict", "(", "gen", "(", ")", ")" ]
Function for generating retired languages. Returns a dict('code', (datetime, [language, ...], 'description')).
[ "Function", "for", "generating", "retired", "languages", ".", "Returns", "a", "dict", "(", "code", "(", "datetime", "[", "language", "...", "]", "description", "))", "." ]
python
train
38.148148
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network.py#L2226-L2268
def add_mvn(self, name, input_name, output_name, across_channels = True,
            normalize_variance = True, epsilon = 1e-5):
    """
    Add an MVN (mean variance normalization) layer. Computes mean, variance
    and normalizes the input.

    Parameters
    ----------
    name: str
        The name of this layer.
    input_name: str
        The input blob name of this layer.
    output_name: str
        The output blob name of this layer.
    across_channels: boolean
        If False, each channel plane is normalized separately
        If True, mean/variance is computed across all C, H and W dimensions
    normalize_variance: boolean
        If False, only mean subtraction is performed.
    epsilon: float
        small bias to avoid division by zero.

    See Also
    --------
    add_l2_normalize, add_lrn
    """
    spec = self.spec
    nn_spec = self.nn_spec

    # Add a new layer
    spec_layer = nn_spec.layers.add()
    spec_layer.name = name
    spec_layer.input.append(input_name)
    spec_layer.output.append(output_name)

    spec_layer_params = spec_layer.mvn
    spec_layer_params.acrossChannels = across_channels
    spec_layer_params.normalizeVariance = normalize_variance
    spec_layer_params.epsilon = epsilon
[ "def", "add_mvn", "(", "self", ",", "name", ",", "input_name", ",", "output_name", ",", "across_channels", "=", "True", ",", "normalize_variance", "=", "True", ",", "epsilon", "=", "1e-5", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "# Add a new layer", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "spec_layer", ".", "input", ".", "append", "(", "input_name", ")", "spec_layer", ".", "output", ".", "append", "(", "output_name", ")", "spec_layer_params", "=", "spec_layer", ".", "mvn", "spec_layer_params", ".", "acrossChannels", "=", "across_channels", "spec_layer_params", ".", "normalizeVariance", "=", "normalize_variance", "spec_layer_params", ".", "epsilon", "=", "epsilon" ]
Add an MVN (mean variance normalization) layer. Computes mean, variance
and normalizes the input.

Parameters
----------
name: str
    The name of this layer.
input_name: str
    The input blob name of this layer.
output_name: str
    The output blob name of this layer.
across_channels: boolean
    If False, each channel plane is normalized separately
    If True, mean/variance is computed across all C, H and W dimensions
normalize_variance: boolean
    If False, only mean subtraction is performed.
epsilon: float
    small bias to avoid division by zero.

See Also
--------
add_l2_normalize, add_lrn
[ "Add", "an", "MVN", "(", "mean", "variance", "normalization", ")", "layer", ".", "Computes", "mean", "variance", "and", "normalizes", "the", "input", "." ]
python
train
30.697674
cloudendpoints/endpoints-management-python
endpoints_management/control/vendor/py3/sched.py
https://github.com/cloudendpoints/endpoints-management-python/blob/ec3c4a330ae9d65738861ce6df4dd6c3cb9f7731/endpoints_management/control/vendor/py3/sched.py#L85-L92
def enter(self, delay, priority, action, argument=(), kwargs=_sentinel):
    """A variant that specifies the time as a relative time.

    This is actually the more commonly used interface.

    """
    time = self.timefunc() + delay
    return self.enterabs(time, priority, action, argument, kwargs)
[ "def", "enter", "(", "self", ",", "delay", ",", "priority", ",", "action", ",", "argument", "=", "(", ")", ",", "kwargs", "=", "_sentinel", ")", ":", "time", "=", "self", ".", "timefunc", "(", ")", "+", "delay", "return", "self", ".", "enterabs", "(", "time", ",", "priority", ",", "action", ",", "argument", ",", "kwargs", ")" ]
A variant that specifies the time as a relative time. This is actually the more commonly used interface.
[ "A", "variant", "that", "specifies", "the", "time", "as", "a", "relative", "time", "." ]
python
train
39.125
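A usage sketch for the enter record above. The vendored class mirrors the standard library's sched.scheduler, so the stdlib version demonstrates the same relative-time interface.

```python
import sched
import time

s = sched.scheduler(time.time, time.sleep)
s.enter(2, 1, print, argument=("fires ~2s after scheduling",))
s.enter(1, 1, print, argument=("fires ~1s after scheduling",))
s.run()  # blocks until both events have fired, shorter delay first
```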
PhilippeFerreiraDeSousa/bitext-matching
lib/enpc_aligner/words_correspondence.py
https://github.com/PhilippeFerreiraDeSousa/bitext-matching/blob/195c3e98775cfa5e63e4bb0bb1da6f741880d980/lib/enpc_aligner/words_correspondence.py#L12-L74
def _alignment(elist, flist, e2f, f2e):
    '''
    elist, flist
        wordlist for each language
    e2f
        translatoin alignment from e to f
        alignment is [(e, f)]
    f2e
        translatoin alignment from f to e
        alignment is [(e, f)]
    return
        alignment: {(f, e)}

             flist
      -----------------
    e |               |
    l |               |
    i |               |
    s |               |
    t |               |
      -----------------
    '''
    neighboring = {(-1, 0), (0, -1), (1, 0), (0, 1),
                   (-1, -1), (-1, 1), (1, -1), (1, 1)}
    e2f = set(e2f)
    f2e = set(f2e)
    m = len(elist)
    n = len(flist)
    alignment = e2f.intersection(f2e)
    # marge with neighborhood
    while True:
        set_len = len(alignment)
        for e_word in range(1, m+1):
            for f_word in range(1, n+1):
                if (e_word, f_word) in alignment:
                    for (e_diff, f_diff) in neighboring:
                        e_new = e_word + e_diff
                        f_new = f_word + f_diff
                        if not alignment:
                            if (e_new, f_new) in e2f.union(f2e):
                                alignment.add((e_new, f_new))
                        else:
                            if ((e_new not in list(zip(*alignment))[0]
                                 or f_new not in list(zip(*alignment))[1])
                                    and (e_new, f_new) in e2f.union(f2e)):
                                alignment.add((e_new, f_new))
        if set_len == len(alignment):
            break
    # finalize
    for e_word in range(1, m+1):
        for f_word in range(1, n+1):
            # for alignment = set([])
            if not alignment:
                if (e_word, f_word) in e2f.union(f2e):
                    alignment.add((e_word, f_word))
            else:
                if ((e_word not in list(zip(*alignment))[0]
                     or f_word not in list(zip(*alignment))[1])
                        and (e_word, f_word) in e2f.union(f2e)):
                    alignment.add((e_word, f_word))
    return alignment
[ "def", "_alignment", "(", "elist", ",", "flist", ",", "e2f", ",", "f2e", ")", ":", "neighboring", "=", "{", "(", "-", "1", ",", "0", ")", ",", "(", "0", ",", "-", "1", ")", ",", "(", "1", ",", "0", ")", ",", "(", "0", ",", "1", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "(", "-", "1", ",", "1", ")", ",", "(", "1", ",", "-", "1", ")", ",", "(", "1", ",", "1", ")", "}", "e2f", "=", "set", "(", "e2f", ")", "f2e", "=", "set", "(", "f2e", ")", "m", "=", "len", "(", "elist", ")", "n", "=", "len", "(", "flist", ")", "alignment", "=", "e2f", ".", "intersection", "(", "f2e", ")", "# marge with neighborhood\r", "while", "True", ":", "set_len", "=", "len", "(", "alignment", ")", "for", "e_word", "in", "range", "(", "1", ",", "m", "+", "1", ")", ":", "for", "f_word", "in", "range", "(", "1", ",", "n", "+", "1", ")", ":", "if", "(", "e_word", ",", "f_word", ")", "in", "alignment", ":", "for", "(", "e_diff", ",", "f_diff", ")", "in", "neighboring", ":", "e_new", "=", "e_word", "+", "e_diff", "f_new", "=", "f_word", "+", "f_diff", "if", "not", "alignment", ":", "if", "(", "e_new", ",", "f_new", ")", "in", "e2f", ".", "union", "(", "f2e", ")", ":", "alignment", ".", "add", "(", "(", "e_new", ",", "f_new", ")", ")", "else", ":", "if", "(", "(", "e_new", "not", "in", "list", "(", "zip", "(", "*", "alignment", ")", ")", "[", "0", "]", "or", "f_new", "not", "in", "list", "(", "zip", "(", "*", "alignment", ")", ")", "[", "1", "]", ")", "and", "(", "e_new", ",", "f_new", ")", "in", "e2f", ".", "union", "(", "f2e", ")", ")", ":", "alignment", ".", "add", "(", "(", "e_new", ",", "f_new", ")", ")", "if", "set_len", "==", "len", "(", "alignment", ")", ":", "break", "# finalize\r", "for", "e_word", "in", "range", "(", "1", ",", "m", "+", "1", ")", ":", "for", "f_word", "in", "range", "(", "1", ",", "n", "+", "1", ")", ":", "# for alignment = set([])\r", "if", "not", "alignment", ":", "if", "(", "e_word", ",", "f_word", ")", "in", "e2f", ".", "union", "(", "f2e", ")", ":", "alignment", ".", "add", "(", "(", "e_word", ",", "f_word", ")", ")", "else", ":", "if", "(", "(", "e_word", "not", "in", "list", "(", "zip", "(", "*", "alignment", ")", ")", "[", "0", "]", "or", "f_word", "not", "in", "list", "(", "zip", "(", "*", "alignment", ")", ")", "[", "1", "]", ")", "and", "(", "e_word", ",", "f_word", ")", "in", "e2f", ".", "union", "(", "f2e", ")", ")", ":", "alignment", ".", "add", "(", "(", "e_word", ",", "f_word", ")", ")", "return", "alignment" ]
elist, flist
    wordlist for each language
e2f
    translatoin alignment from e to f
    alignment is [(e, f)]
f2e
    translatoin alignment from f to e
    alignment is [(e, f)]
return
    alignment: {(f, e)}

         flist
  -----------------
e |               |
l |               |
i |               |
s |               |
t |               |
  -----------------
[ "elist", "flist", "wordlist", "for", "each", "language", "e2f", "translatoin", "alignment", "from", "e", "to", "f", "alignment", "is", "[", "(", "e", "f", ")", "]", "f2e", "translatoin", "alignment", "from", "f", "to", "e", "alignment", "is", "[", "(", "e", "f", ")", "]", "return", "alignment", ":", "{", "(", "f", "e", ")", "}", "flist", "-----------------", "e", "|", "|", "l", "|", "|", "i", "|", "|", "s", "|", "|", "t", "|", "|", "-----------------" ]
python
train
34.984127
edx/i18n-tools
i18n/extract.py
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/extract.py#L184-L216
def fix_header(pofile):
    """
    Replace default headers with edX headers
    """
    # By default, django-admin.py makemessages creates this header:
    #
    #   SOME DESCRIPTIVE TITLE.
    #   Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
    #   This file is distributed under the same license as the PACKAGE package.
    #   FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
    pofile.metadata_is_fuzzy = []   # remove [u'fuzzy']
    header = pofile.header
    fixes = (
        ('SOME DESCRIPTIVE TITLE', EDX_MARKER),
        ('Translations template for PROJECT.', EDX_MARKER),
        ('YEAR', str(datetime.utcnow().year)),
        ('ORGANIZATION', 'edX'),
        ("THE PACKAGE'S COPYRIGHT HOLDER", "EdX"),
        (
            'This file is distributed under the same license as the PROJECT project.',
            'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'
        ),
        (
            'This file is distributed under the same license as the PACKAGE package.',
            'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'
        ),
        ('FIRST AUTHOR <EMAIL@ADDRESS>', 'EdX Team <info@edx.org>'),
    )
    for src, dest in fixes:
        header = header.replace(src, dest)
    pofile.header = header
[ "def", "fix_header", "(", "pofile", ")", ":", "# By default, django-admin.py makemessages creates this header:", "#", "# SOME DESCRIPTIVE TITLE.", "# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER", "# This file is distributed under the same license as the PACKAGE package.", "# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.", "pofile", ".", "metadata_is_fuzzy", "=", "[", "]", "# remove [u'fuzzy']", "header", "=", "pofile", ".", "header", "fixes", "=", "(", "(", "'SOME DESCRIPTIVE TITLE'", ",", "EDX_MARKER", ")", ",", "(", "'Translations template for PROJECT.'", ",", "EDX_MARKER", ")", ",", "(", "'YEAR'", ",", "str", "(", "datetime", ".", "utcnow", "(", ")", ".", "year", ")", ")", ",", "(", "'ORGANIZATION'", ",", "'edX'", ")", ",", "(", "\"THE PACKAGE'S COPYRIGHT HOLDER\"", ",", "\"EdX\"", ")", ",", "(", "'This file is distributed under the same license as the PROJECT project.'", ",", "'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'", ")", ",", "(", "'This file is distributed under the same license as the PACKAGE package.'", ",", "'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'", ")", ",", "(", "'FIRST AUTHOR <EMAIL@ADDRESS>'", ",", "'EdX Team <info@edx.org>'", ")", ",", ")", "for", "src", ",", "dest", "in", "fixes", ":", "header", "=", "header", ".", "replace", "(", "src", ",", "dest", ")", "pofile", ".", "header", "=", "header" ]
Replace default headers with edX headers
[ "Replace", "default", "headers", "with", "edX", "headers" ]
python
train
37.424242
rigetti/grove
grove/pyqaoa/utils.py
https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/pyqaoa/utils.py#L18-L27
def isclose(a, b, rel_tol=1e-10, abs_tol=0.0):
    """
    Compares two parameter values.

    :param a: First parameter
    :param b: Second parameter
    :param rel_tol: Relative tolerance
    :param abs_tol: Absolute tolerance
    :return: Boolean telling whether or not the parameters are close enough to be the same
    """
    return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
[ "def", "isclose", "(", "a", ",", "b", ",", "rel_tol", "=", "1e-10", ",", "abs_tol", "=", "0.0", ")", ":", "return", "abs", "(", "a", "-", "b", ")", "<=", "max", "(", "rel_tol", "*", "max", "(", "abs", "(", "a", ")", ",", "abs", "(", "b", ")", ")", ",", "abs_tol", ")" ]
Compares two parameter values.

:param a: First parameter
:param b: Second parameter
:param rel_tol: Relative tolerance
:param abs_tol: Absolute tolerance
:return: Boolean telling whether or not the parameters are close enough to be the same
[ "Compares", "two", "parameter", "values", ".", ":", "param", "a", ":", "First", "parameter", ":", "param", "b", ":", "Second", "parameter", ":", "param", "rel_tol", ":", "Relative", "tolerance", ":", "param", "abs_tol", ":", "Absolute", "tolerance", ":", "return", ":", "Boolean", "telling", "whether", "or", "not", "the", "parameters", "are", "close", "enough", "to", "be", "the", "same" ]
python
train
38.5
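A usage sketch for the isclose record above; it implements the same formula as math.isclose (PEP 485), only with a tighter default rel_tol of 1e-10. The import path is inferred from the record's path field, so treat it as an assumption.

```python
from grove.pyqaoa.utils import isclose  # path inferred from the record above

print(isclose(1.0, 1.0 + 1e-12))           # True: within the 1e-10 relative tolerance
print(isclose(1.0, 1.001))                 # False
print(isclose(0.0, 1e-12, abs_tol=1e-9))   # True: against 0.0 only abs_tol can match
```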
codelv/enaml-native
src/enamlnative/android/android_popup_window.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_popup_window.py#L70-L80
def create_widget(self):
    """ Create the underlying widget.

    A dialog is not a subclass of view, hence we don't set name as widget
    or children will try to use it as their parent.

    """
    d = self.declaration
    style = d.style or '@style/Widget.DeviceDefault.PopupMenu'
    self.window = PopupWindow(self.get_context(), None, 0, style)
    self.showing = False
[ "def", "create_widget", "(", "self", ")", ":", "d", "=", "self", ".", "declaration", "style", "=", "d", ".", "style", "or", "'@style/Widget.DeviceDefault.PopupMenu'", "self", ".", "window", "=", "PopupWindow", "(", "self", ".", "get_context", "(", ")", ",", "None", ",", "0", ",", "style", ")", "self", ".", "showing", "=", "False" ]
Create the underlying widget. A dialog is not a subclass of view, hence we don't set name as widget or children will try to use it as their parent.
[ "Create", "the", "underlying", "widget", "." ]
python
train
36.272727
cloudera/impyla
impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py
https://github.com/cloudera/impyla/blob/547fa2ba3b6151e2a98b3544301471a643212dc3/impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py#L5147-L5155
def revoke_role(self, role_name, principal_name, principal_type): """ Parameters: - role_name - principal_name - principal_type """ self.send_revoke_role(role_name, principal_name, principal_type) return self.recv_revoke_role()
[ "def", "revoke_role", "(", "self", ",", "role_name", ",", "principal_name", ",", "principal_type", ")", ":", "self", ".", "send_revoke_role", "(", "role_name", ",", "principal_name", ",", "principal_type", ")", "return", "self", ".", "recv_revoke_role", "(", ")" ]
Parameters: - role_name - principal_name - principal_type
[ "Parameters", ":", "-", "role_name", "-", "principal_name", "-", "principal_type" ]
python
train
28.222222
python-diamond/Diamond
src/collectors/endecadgraph/endecadgraph.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/endecadgraph/endecadgraph.py#L61-L72
def get_default_config(self): """ Returns the default collector settings """ config = super(EndecaDgraphCollector, self).get_default_config() config.update({ 'path': 'endeca.dgraph', 'host': 'localhost', 'port': 8080, 'timeout': 1, }) return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "EndecaDgraphCollector", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "'path'", ":", "'endeca.dgraph'", ",", "'host'", ":", "'localhost'", ",", "'port'", ":", "8080", ",", "'timeout'", ":", "1", ",", "}", ")", "return", "config" ]
Returns the default collector settings
[ "Returns", "the", "default", "collector", "settings" ]
python
train
28.416667
senaite/senaite.core
bika/lims/utils/__init__.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/utils/__init__.py#L754-L763
def get_registry_value(key, default=None): """ Gets the utility for IRegistry and returns the value for the key passed in. If there is no value for the key passed in, returns default value :param key: the key in the registry to look for :param default: default value if the key is not registered :return: value in the registry for the key passed in """ registry = queryUtility(IRegistry) return registry.get(key, default)
[ "def", "get_registry_value", "(", "key", ",", "default", "=", "None", ")", ":", "registry", "=", "queryUtility", "(", "IRegistry", ")", "return", "registry", ".", "get", "(", "key", ",", "default", ")" ]
Gets the utility for IRegistry and returns the value for the key passed in. If there is no value for the key passed in, returns default value :param key: the key in the registry to look for :param default: default value if the key is not registered :return: value in the registry for the key passed in
[ "Gets", "the", "utility", "for", "IRegistry", "and", "returns", "the", "value", "for", "the", "key", "passed", "in", ".", "If", "there", "is", "no", "value", "for", "the", "key", "passed", "in", "returns", "default", "value", ":", "param", "key", ":", "the", "key", "in", "the", "registry", "to", "look", "for", ":", "param", "default", ":", "default", "value", "if", "the", "key", "is", "not", "registered", ":", "return", ":", "value", "in", "the", "registry", "for", "the", "key", "passed", "in" ]
python
train
44.8
iotile/coretools
iotileemulate/iotile/emulate/virtual/emulated_tile.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/virtual/emulated_tile.py#L446-L479
def parse_size_name(type_name):
        """Calculate size and encoding from a type name.

        This method takes a C-style type string like uint8_t[10] and
        returns
        - the total size in bytes
        - the unit size of each member (if it's an array)
        - the struct.{pack,unpack} format code for decoding the base type
        - whether it is an array.
        """

        if ' ' in type_name:
            raise ArgumentError("There should not be a space in config variable type specifier", specifier=type_name)

        variable = False
        count = 1
        base_type = type_name

        if type_name[-1] == ']':
            variable = True

            start_index = type_name.find('[')
            if start_index == -1:
                raise ArgumentError("Could not find matching [ for ] character", specifier=type_name)

            count = int(type_name[start_index+1:-1], 0)
            base_type = type_name[:start_index]

        matched_type = TYPE_CODES.get(base_type)
        if matched_type is None:
            raise ArgumentError("Could not find base type name", base_type=base_type, type_string=type_name)

        base_size = struct.calcsize("<%s" % matched_type)
        total_size = base_size*count

        return total_size, base_size, matched_type, variable
[ "def", "parse_size_name", "(", "type_name", ")", ":", "if", "' '", "in", "type_name", ":", "raise", "ArgumentError", "(", "\"There should not be a space in config variable type specifier\"", ",", "specifier", "=", "type_name", ")", "variable", "=", "False", "count", "=", "1", "base_type", "=", "type_name", "if", "type_name", "[", "-", "1", "]", "==", "']'", ":", "variable", "=", "True", "start_index", "=", "type_name", ".", "find", "(", "'['", ")", "if", "start_index", "==", "-", "1", ":", "raise", "ArgumentError", "(", "\"Could not find matching [ for ] character\"", ",", "specifier", "=", "type_name", ")", "count", "=", "int", "(", "type_name", "[", "start_index", "+", "1", ":", "-", "1", "]", ",", "0", ")", "base_type", "=", "type_name", "[", ":", "start_index", "]", "matched_type", "=", "TYPE_CODES", ".", "get", "(", "base_type", ")", "if", "matched_type", "is", "None", ":", "raise", "ArgumentError", "(", "\"Could not find base type name\"", ",", "base_type", "=", "base_type", ",", "type_string", "=", "type_name", ")", "base_size", "=", "struct", ".", "calcsize", "(", "\"<%s\"", "%", "matched_type", ")", "total_size", "=", "base_size", "*", "count", "return", "total_size", ",", "base_size", ",", "matched_type", ",", "variable" ]
Calculate size and encoding from a type name.

        This method takes a C-style type string like uint8_t[10] and
        returns
        - the total size in bytes
        - the unit size of each member (if it's an array)
        - the struct.{pack,unpack} format code for decoding the base type
        - whether it is an array.
[ "Calculate", "size", "and", "encoding", "from", "a", "type", "name", "." ]
python
train
34.294118
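The parse_size_name record above leans on a module-level TYPE_CODES table and iotile's ArgumentError; below is a self-contained sketch of the same parsing idea, with an assumed subset of that table.

# Self-contained sketch of the parsing idea in parse_size_name above.
# TYPE_CODES here is an assumed subset; the real table lives in
# iotile's emulated_tile module.
import struct

TYPE_CODES = {'uint8_t': 'B', 'int8_t': 'b', 'uint16_t': 'H',
              'int16_t': 'h', 'uint32_t': 'L', 'int32_t': 'l'}

def parse_size_name(type_name):
    count, base_type, is_array = 1, type_name, False
    if type_name.endswith(']'):
        is_array = True
        start = type_name.find('[')
        count = int(type_name[start + 1:-1], 0)
        base_type = type_name[:start]
    code = TYPE_CODES[base_type]
    base_size = struct.calcsize('<%s' % code)
    return base_size * count, base_size, code, is_array

print(parse_size_name('uint8_t[10]'))  # (10, 1, 'B', True)
print(parse_size_name('uint16_t'))     # (2, 2, 'H', False)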
drdoctr/doctr
doctr/local.py
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/local.py#L86-L109
def encrypt_to_file(contents, filename): """ Encrypts ``contents`` and writes it to ``filename``. ``contents`` should be a bytes string. ``filename`` should end with ``.enc``. Returns the secret key used for the encryption. Decrypt the file with :func:`doctr.travis.decrypt_file`. """ if not filename.endswith('.enc'): raise ValueError("%s does not end with .enc" % filename) key = Fernet.generate_key() fer = Fernet(key) encrypted_file = fer.encrypt(contents) with open(filename, 'wb') as f: f.write(encrypted_file) return key
[ "def", "encrypt_to_file", "(", "contents", ",", "filename", ")", ":", "if", "not", "filename", ".", "endswith", "(", "'.enc'", ")", ":", "raise", "ValueError", "(", "\"%s does not end with .enc\"", "%", "filename", ")", "key", "=", "Fernet", ".", "generate_key", "(", ")", "fer", "=", "Fernet", "(", "key", ")", "encrypted_file", "=", "fer", ".", "encrypt", "(", "contents", ")", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "encrypted_file", ")", "return", "key" ]
Encrypts ``contents`` and writes it to ``filename``. ``contents`` should be a bytes string. ``filename`` should end with ``.enc``. Returns the secret key used for the encryption. Decrypt the file with :func:`doctr.travis.decrypt_file`.
[ "Encrypts", "contents", "and", "writes", "it", "to", "filename", "." ]
python
train
24.166667
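The docstring above points to doctr.travis.decrypt_file for the reverse direction; here is a hedged round-trip sketch using only the Fernet API the function itself imports. The filename is hypothetical.

# Hedged round-trip sketch for encrypt_to_file above; 'secrets.enc'
# is a hypothetical filename, and only cryptography's Fernet API
# (the one the function already uses) is assumed.
from cryptography.fernet import Fernet

key = encrypt_to_file(b'deploy-token', 'secrets.enc')
with open('secrets.enc', 'rb') as f:
    assert Fernet(key).decrypt(f.read()) == b'deploy-token'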
saltstack/salt
salt/modules/yumpkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/yumpkg.py#L156-L170
def _call_yum(args, **kwargs): ''' Call yum/dnf. ''' params = {'output_loglevel': 'trace', 'python_shell': False, 'env': salt.utils.environment.get_module_environment(globals())} params.update(kwargs) cmd = [] if salt.utils.systemd.has_scope(__context__) and __salt__['config.get']('systemd.scope', True): cmd.extend(['systemd-run', '--scope']) cmd.append(_yum()) cmd.extend(args) return __salt__['cmd.run_all'](cmd, **params)
[ "def", "_call_yum", "(", "args", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'output_loglevel'", ":", "'trace'", ",", "'python_shell'", ":", "False", ",", "'env'", ":", "salt", ".", "utils", ".", "environment", ".", "get_module_environment", "(", "globals", "(", ")", ")", "}", "params", ".", "update", "(", "kwargs", ")", "cmd", "=", "[", "]", "if", "salt", ".", "utils", ".", "systemd", ".", "has_scope", "(", "__context__", ")", "and", "__salt__", "[", "'config.get'", "]", "(", "'systemd.scope'", ",", "True", ")", ":", "cmd", ".", "extend", "(", "[", "'systemd-run'", ",", "'--scope'", "]", ")", "cmd", ".", "append", "(", "_yum", "(", ")", ")", "cmd", ".", "extend", "(", "args", ")", "return", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "*", "*", "params", ")" ]
Call yum/dnf.
[ "Call", "yum", "/", "dnf", "." ]
python
train
32.6
mlperf/training
rnn_translator/pytorch/seq2seq/data/dataset.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/rnn_translator/pytorch/seq2seq/data/dataset.py#L137-L155
def filter_data(self, min_len, max_len):
        """
        Preserves only samples which satisfy the following inequality:
            min_len <= sample sequence length <= max_len

        :param min_len: minimum sequence length
        :param max_len: maximum sequence length
        """
        logging.info(f'Filtering data, min len: {min_len}, max len: {max_len}')

        initial_len = len(self.src)
        filtered_src = []
        for src in self.src:
            if min_len <= len(src) <= max_len:
                filtered_src.append(src)

        self.src = filtered_src
        filtered_len = len(self.src)
        logging.info(f'Pairs before: {initial_len}, after: {filtered_len}')
[ "def", "filter_data", "(", "self", ",", "min_len", ",", "max_len", ")", ":", "logging", ".", "info", "(", "f'Filtering data, min len: {min_len}, max len: {max_len}'", ")", "initial_len", "=", "len", "(", "self", ".", "src", ")", "filtered_src", "=", "[", "]", "for", "src", "in", "self", ".", "src", ":", "if", "min_len", "<=", "len", "(", "src", ")", "<=", "max_len", ":", "filtered_src", ".", "append", "(", "src", ")", "self", ".", "src", "=", "filtered_src", "filtered_len", "=", "len", "(", "self", ".", "src", ")", "logging", ".", "info", "(", "f'Pairs before: {initial_len}, after: {filtered_len}'", ")" ]
Preserves only samples which satisfy the following inequality: min_len <= sample sequence length <= max_len :param min_len: minimum sequence length :param max_len: maximum sequence length
[ "Preserves", "only", "samples", "which", "satisfy", "the", "following", "inequality", ":", "min_len", "<", "=", "sample", "sequence", "length", "<", "=", "max_len" ]
python
train
35.631579
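The core of the filter_data record above is a plain length-window pass over the source list; a standalone sketch with made-up data:

# Standalone sketch of the length-window filtering in filter_data above.
src = [[1], [1, 2, 3], [1, 2, 3, 4, 5]]
min_len, max_len = 2, 4
filtered = [s for s in src if min_len <= len(s) <= max_len]
print(len(src), '->', len(filtered))  # 3 -> 1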
floydhub/floyd-cli
floyd/cli/data.py
https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/cli/data.py#L148-L196
def clone(id, path): """ - Download all files in a dataset or from a Job output Eg: alice/projects/mnist/1/files, alice/projects/mnist/1/output or alice/dataset/mnist-data/1/ Using /output will download the files that are saved at the end of the job. Note: This will download the files that are saved at the end of the job. - Download a directory from a dataset or from Job output Specify the path to a directory and download all its files and subdirectories. Eg: --path models/checkpoint1 """ data_source = get_data_object(id, use_data_config=False) if not data_source: if 'output' in id: floyd_logger.info("Note: You cannot clone the output of a running job. You need to wait for it to finish.") sys.exit() if path: # Download a directory from Dataset or Files # Get the type of data resource from the id (foo/projects/bar/ or foo/datasets/bar/) if '/datasets/' in id: resource_type = 'data' resource_id = data_source.id else: resource_type = 'files' try: experiment = ExperimentClient().get(normalize_job_name(id, use_config=False)) except FloydException: experiment = ExperimentClient().get(id) resource_id = experiment.id data_url = "{}/api/v1/download/artifacts/{}/{}?is_dir=true&path={}".format(floyd.floyd_host, resource_type, resource_id, path) else: # Download the full Dataset data_url = "{}/api/v1/resources/{}?content=true&download=true".format(floyd.floyd_host, data_source.resource_id) DataClient().download_tar(url=data_url, untar=True, delete_after_untar=True)
[ "def", "clone", "(", "id", ",", "path", ")", ":", "data_source", "=", "get_data_object", "(", "id", ",", "use_data_config", "=", "False", ")", "if", "not", "data_source", ":", "if", "'output'", "in", "id", ":", "floyd_logger", ".", "info", "(", "\"Note: You cannot clone the output of a running job. You need to wait for it to finish.\"", ")", "sys", ".", "exit", "(", ")", "if", "path", ":", "# Download a directory from Dataset or Files", "# Get the type of data resource from the id (foo/projects/bar/ or foo/datasets/bar/)", "if", "'/datasets/'", "in", "id", ":", "resource_type", "=", "'data'", "resource_id", "=", "data_source", ".", "id", "else", ":", "resource_type", "=", "'files'", "try", ":", "experiment", "=", "ExperimentClient", "(", ")", ".", "get", "(", "normalize_job_name", "(", "id", ",", "use_config", "=", "False", ")", ")", "except", "FloydException", ":", "experiment", "=", "ExperimentClient", "(", ")", ".", "get", "(", "id", ")", "resource_id", "=", "experiment", ".", "id", "data_url", "=", "\"{}/api/v1/download/artifacts/{}/{}?is_dir=true&path={}\"", ".", "format", "(", "floyd", ".", "floyd_host", ",", "resource_type", ",", "resource_id", ",", "path", ")", "else", ":", "# Download the full Dataset", "data_url", "=", "\"{}/api/v1/resources/{}?content=true&download=true\"", ".", "format", "(", "floyd", ".", "floyd_host", ",", "data_source", ".", "resource_id", ")", "DataClient", "(", ")", ".", "download_tar", "(", "url", "=", "data_url", ",", "untar", "=", "True", ",", "delete_after_untar", "=", "True", ")" ]
- Download all files in a dataset or from a Job output Eg: alice/projects/mnist/1/files, alice/projects/mnist/1/output or alice/dataset/mnist-data/1/ Using /output will download the files that are saved at the end of the job. Note: This will download the files that are saved at the end of the job. - Download a directory from a dataset or from Job output Specify the path to a directory and download all its files and subdirectories. Eg: --path models/checkpoint1
[ "-", "Download", "all", "files", "in", "a", "dataset", "or", "from", "a", "Job", "output" ]
python
train
42.306122
ND-CSE-30151/tock
tock/grammars.py
https://github.com/ND-CSE-30151/tock/blob/b8d21901aaf0e6ac913c2afa855f5b5a882a16c6/tock/grammars.py#L76-L119
def remove_useless(self): """Returns a new grammar containing just useful rules.""" if not self.is_contextfree(): raise ValueError("grammar must be context-free") by_lhs = collections.defaultdict(list) by_rhs = collections.defaultdict(list) for [lhs], rhs in self.rules: by_lhs[lhs].append((lhs, rhs)) for y in rhs: if y in self.nonterminals: by_rhs[y].append((lhs, rhs)) agenda = collections.deque([self.start]) reachable = set() while len(agenda) > 0: x = agenda.popleft() if x in reachable: continue reachable.add(x) for _, rhs in by_lhs[x]: for y in rhs: if y in by_lhs: agenda.append(y) agenda = collections.deque() productive = set() for [lhs], rhs in self.rules: if all(y not in self.nonterminals for y in rhs): agenda.append(lhs) while len(agenda) > 0: y = agenda.popleft() if y in productive: continue productive.add(y) for lhs, rhs in by_rhs[y]: if all(y not in self.nonterminals or y in productive for y in rhs): agenda.append(lhs) g = Grammar() g.set_start(self.start) for [lhs], rhs in self.rules: if (lhs in reachable & productive and all(y not in self.nonterminals or y in reachable & productive for y in rhs)): g.add_rule([lhs], rhs) return g
[ "def", "remove_useless", "(", "self", ")", ":", "if", "not", "self", ".", "is_contextfree", "(", ")", ":", "raise", "ValueError", "(", "\"grammar must be context-free\"", ")", "by_lhs", "=", "collections", ".", "defaultdict", "(", "list", ")", "by_rhs", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "[", "lhs", "]", ",", "rhs", "in", "self", ".", "rules", ":", "by_lhs", "[", "lhs", "]", ".", "append", "(", "(", "lhs", ",", "rhs", ")", ")", "for", "y", "in", "rhs", ":", "if", "y", "in", "self", ".", "nonterminals", ":", "by_rhs", "[", "y", "]", ".", "append", "(", "(", "lhs", ",", "rhs", ")", ")", "agenda", "=", "collections", ".", "deque", "(", "[", "self", ".", "start", "]", ")", "reachable", "=", "set", "(", ")", "while", "len", "(", "agenda", ")", ">", "0", ":", "x", "=", "agenda", ".", "popleft", "(", ")", "if", "x", "in", "reachable", ":", "continue", "reachable", ".", "add", "(", "x", ")", "for", "_", ",", "rhs", "in", "by_lhs", "[", "x", "]", ":", "for", "y", "in", "rhs", ":", "if", "y", "in", "by_lhs", ":", "agenda", ".", "append", "(", "y", ")", "agenda", "=", "collections", ".", "deque", "(", ")", "productive", "=", "set", "(", ")", "for", "[", "lhs", "]", ",", "rhs", "in", "self", ".", "rules", ":", "if", "all", "(", "y", "not", "in", "self", ".", "nonterminals", "for", "y", "in", "rhs", ")", ":", "agenda", ".", "append", "(", "lhs", ")", "while", "len", "(", "agenda", ")", ">", "0", ":", "y", "=", "agenda", ".", "popleft", "(", ")", "if", "y", "in", "productive", ":", "continue", "productive", ".", "add", "(", "y", ")", "for", "lhs", ",", "rhs", "in", "by_rhs", "[", "y", "]", ":", "if", "all", "(", "y", "not", "in", "self", ".", "nonterminals", "or", "y", "in", "productive", "for", "y", "in", "rhs", ")", ":", "agenda", ".", "append", "(", "lhs", ")", "g", "=", "Grammar", "(", ")", "g", ".", "set_start", "(", "self", ".", "start", ")", "for", "[", "lhs", "]", ",", "rhs", "in", "self", ".", "rules", ":", "if", "(", "lhs", "in", "reachable", "&", "productive", "and", "all", "(", "y", "not", "in", "self", ".", "nonterminals", "or", "y", "in", "reachable", "&", "productive", "for", "y", "in", "rhs", ")", ")", ":", "g", ".", "add_rule", "(", "[", "lhs", "]", ",", "rhs", ")", "return", "g" ]
Returns a new grammar containing just useful rules.
[ "Returns", "a", "new", "grammar", "containing", "just", "useful", "rules", "." ]
python
train
36.25
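The remove_useless record above runs two worklist passes (reachable-from-start, then productive) and keeps their intersection; below is a self-contained toy version over plain dicts, independent of the tock Grammar class.

# Self-contained sketch of the reachable/productive analysis in
# remove_useless above, on a toy grammar {lhs: [rhs_tuple, ...]}.
import collections

rules = {'S': [('A', 'b'), ('C',)], 'A': [('a',)], 'C': [('C',)]}
nonterminals = set(rules)

reachable, agenda = set(), collections.deque(['S'])
while agenda:                      # pass 1: reachable from the start symbol
    x = agenda.popleft()
    if x in reachable:
        continue
    reachable.add(x)
    for rhs in rules.get(x, []):
        agenda.extend(y for y in rhs if y in nonterminals)

productive, changed = set(), True
while changed:                     # pass 2: derives some terminal string
    changed = False
    for lhs, rhss in rules.items():
        if lhs not in productive and any(
                all(y not in nonterminals or y in productive for y in rhs)
                for rhs in rhss):
            productive.add(lhs)
            changed = True

print(sorted(reachable & productive))  # ['A', 'S'] -- 'C' is useless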
Chilipp/psyplot
psyplot/project.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/project.py#L1135-L1322
def load_project(cls, fname, auto_update=None, make_plot=True,
                     draw=False, alternative_axes=None, main=False,
                     encoding=None, enable_post=False,
                     new_fig=True, clear=None, **kwargs):
        """
        Load a project from a file or dict

        This classmethod allows to load a project that has been stored using
        the :meth:`save_project` method and reads all the data and creates the
        figures.

        Since the data is stored in external files when saving a project,
        make sure that the data is accessible under the relative paths as
        stored in the file `fname` or from the current working directory if
        `fname` is a dictionary. Alternatively use the `alternative_paths`
        parameter or the `pwd` parameter

        Parameters
        ----------
        fname: str or dict
            The string might be the path to a file created with the
            :meth:`save_project` method, or it might be a dictionary from this
            method
        %(InteractiveBase.parameters.auto_update)s
        %(Project._add_data.parameters.make_plot)s
        %(InteractiveBase.start_update.parameters.draw)s
        alternative_axes: dict, None or list
            alternative axes instances to use

            - If it is None, the axes and figures from the saving point will
              be reproduced.
            - a dictionary should map from array names in the created project
              to matplotlib axes instances
            - a list should contain axes instances that will be used for
              iteration
        main: bool, optional
            If True, a new main project is created and returned. Otherwise
            (by default) the data is added to the current main project.
        encoding: str
            The encoding to use for loading the project. If None, it is
            automatically determined by pickle. Note: Set this to ``'latin1'``
            if using a project created with python2 on python3.
        enable_post: bool
            If True, the :attr:`~psyplot.plotter.Plotter.post` formatoption is
            enabled and post processing scripts are allowed. Do only set this
            parameter to ``True`` if you know you can trust the information in
            `fname`
        new_fig: bool
            If True (default) and `alternative_axes` is None, new figures are
            created if the figure already exists
        %(Project._add_data.parameters.clear)s
        pwd: str or None, optional
            Path to the working directory from where the data can be imported.
            If None and `fname` is the path to a file, `pwd` is set to the
            directory of this file. Otherwise the current working directory is
            used. 
%(ArrayList.from_dict.parameters.no_d|pwd)s Other Parameters ---------------- %(ArrayList.from_dict.parameters)s Returns ------- Project The project in state of the saving point""" from pkg_resources import iter_entry_points def get_ax_base(name, alternatives): ax_base = next(iter(obj(arr_name=name).axes), None) if ax_base is None: ax_base = next(iter(obj(arr_name=alternatives).axes), None) if ax_base is not None: alternatives.difference_update(obj(ax=ax_base).arr_names) return ax_base pwd = kwargs.pop('pwd', None) if isinstance(fname, six.string_types): with open(fname, 'rb') as f: pickle_kws = {} if not encoding else {'encoding': encoding} d = pickle.load(f, **pickle_kws) pwd = pwd or os.path.dirname(fname) else: d = dict(fname) pwd = pwd or getcwd() # check for patches of plugins for ep in iter_entry_points('psyplot', name='patches'): patches = ep.load() for arr_d in d.get('arrays').values(): plotter_cls = arr_d.get('plotter', {}).get('cls') if plotter_cls is not None and plotter_cls in patches: # apply the patch patches[plotter_cls](arr_d['plotter'], d.get('versions', {})) fig_map = {} if alternative_axes is None: for fig_dict in six.itervalues(d.get('figs', {})): orig_num = fig_dict.get('num') or 1 fig_map[orig_num] = _ProjectLoader.load_figure( fig_dict, new_fig=new_fig).number elif not isinstance(alternative_axes, dict): alternative_axes = cycle(iter(alternative_axes)) obj = cls.from_dict(d['arrays'], pwd=pwd, **kwargs) if main: # we create a new project with the project factory to make sure # that everything is handled correctly obj = project(None, obj) axes = {} arr_names = obj.arr_names sharex = defaultdict(set) sharey = defaultdict(set) for arr, (arr_name, arr_dict) in zip( obj, filter(lambda t: t[0] in arr_names, six.iteritems(d['arrays']))): if not arr_dict.get('plotter'): continue plot_dict = arr_dict['plotter'] plotter_cls = getattr( import_module(plot_dict['cls'][0]), plot_dict['cls'][1]) ax = None if alternative_axes is not None: if isinstance(alternative_axes, dict): ax = alternative_axes.get(arr.arr_name) else: ax = next(alternative_axes, None) if ax is None and 'ax' in plot_dict: already_opened = plot_dict['ax'].get( 'shared', set()).intersection(axes) if already_opened: ax = axes[next(iter(already_opened))] else: plot_dict['ax'].pop('shared', None) plot_dict['ax']['fig'] = fig_map[ plot_dict['ax'].get('fig') or 1] if plot_dict['ax'].get('sharex'): sharex[plot_dict['ax'].pop('sharex')].add( arr.psy.arr_name) if plot_dict['ax'].get('sharey'): sharey[plot_dict['ax'].pop('sharey')].add( arr.psy.arr_name) axes[arr.psy.arr_name] = ax = _ProjectLoader.load_axes( plot_dict['ax']) plotter_cls( arr, make_plot=False, draw=False, clear=False, ax=ax, project=obj.main, enable_post=enable_post, **plot_dict['fmt']) # handle shared x and y-axes for key, names in sharex.items(): ax_base = get_ax_base(key, names) if ax_base is not None: ax_base.get_shared_x_axes().join( ax_base, *obj(arr_name=names).axes) for ax in obj(arr_name=names).axes: ax._sharex = ax_base for key, names in sharey.items(): ax_base = get_ax_base(key, names) if ax_base is not None: ax_base.get_shared_y_axes().join( ax_base, *obj(arr_name=names).axes) for ax in obj(arr_name=names).axes: ax._sharey = ax_base for arr in obj.with_plotter: shared = d['arrays'][arr.psy.arr_name]['plotter'].get('shared', {}) for key, arr_names in six.iteritems(shared): arr.psy.plotter.share(obj(arr_name=arr_names).plotters, keys=[key]) if make_plot: for plotter in obj.plotters: plotter.reinit( draw=False, clear=clear or ( clear is None and 
plotter_cls._get_sample_projection() is not None)) if draw is None: draw = rcParams['auto_draw'] if draw: obj.draw() if rcParams['auto_show']: obj.show() if auto_update is None: auto_update = rcParams['lists.auto_update'] if not main: obj._main = gcp(True) obj.main.extend(obj, new_name=True) obj.no_auto_update = not auto_update scp(obj) return obj
[ "def", "load_project", "(", "cls", ",", "fname", ",", "auto_update", "=", "None", ",", "make_plot", "=", "True", ",", "draw", "=", "False", ",", "alternative_axes", "=", "None", ",", "main", "=", "False", ",", "encoding", "=", "None", ",", "enable_post", "=", "False", ",", "new_fig", "=", "True", ",", "clear", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "pkg_resources", "import", "iter_entry_points", "def", "get_ax_base", "(", "name", ",", "alternatives", ")", ":", "ax_base", "=", "next", "(", "iter", "(", "obj", "(", "arr_name", "=", "name", ")", ".", "axes", ")", ",", "None", ")", "if", "ax_base", "is", "None", ":", "ax_base", "=", "next", "(", "iter", "(", "obj", "(", "arr_name", "=", "alternatives", ")", ".", "axes", ")", ",", "None", ")", "if", "ax_base", "is", "not", "None", ":", "alternatives", ".", "difference_update", "(", "obj", "(", "ax", "=", "ax_base", ")", ".", "arr_names", ")", "return", "ax_base", "pwd", "=", "kwargs", ".", "pop", "(", "'pwd'", ",", "None", ")", "if", "isinstance", "(", "fname", ",", "six", ".", "string_types", ")", ":", "with", "open", "(", "fname", ",", "'rb'", ")", "as", "f", ":", "pickle_kws", "=", "{", "}", "if", "not", "encoding", "else", "{", "'encoding'", ":", "encoding", "}", "d", "=", "pickle", ".", "load", "(", "f", ",", "*", "*", "pickle_kws", ")", "pwd", "=", "pwd", "or", "os", ".", "path", ".", "dirname", "(", "fname", ")", "else", ":", "d", "=", "dict", "(", "fname", ")", "pwd", "=", "pwd", "or", "getcwd", "(", ")", "# check for patches of plugins", "for", "ep", "in", "iter_entry_points", "(", "'psyplot'", ",", "name", "=", "'patches'", ")", ":", "patches", "=", "ep", ".", "load", "(", ")", "for", "arr_d", "in", "d", ".", "get", "(", "'arrays'", ")", ".", "values", "(", ")", ":", "plotter_cls", "=", "arr_d", ".", "get", "(", "'plotter'", ",", "{", "}", ")", ".", "get", "(", "'cls'", ")", "if", "plotter_cls", "is", "not", "None", "and", "plotter_cls", "in", "patches", ":", "# apply the patch", "patches", "[", "plotter_cls", "]", "(", "arr_d", "[", "'plotter'", "]", ",", "d", ".", "get", "(", "'versions'", ",", "{", "}", ")", ")", "fig_map", "=", "{", "}", "if", "alternative_axes", "is", "None", ":", "for", "fig_dict", "in", "six", ".", "itervalues", "(", "d", ".", "get", "(", "'figs'", ",", "{", "}", ")", ")", ":", "orig_num", "=", "fig_dict", ".", "get", "(", "'num'", ")", "or", "1", "fig_map", "[", "orig_num", "]", "=", "_ProjectLoader", ".", "load_figure", "(", "fig_dict", ",", "new_fig", "=", "new_fig", ")", ".", "number", "elif", "not", "isinstance", "(", "alternative_axes", ",", "dict", ")", ":", "alternative_axes", "=", "cycle", "(", "iter", "(", "alternative_axes", ")", ")", "obj", "=", "cls", ".", "from_dict", "(", "d", "[", "'arrays'", "]", ",", "pwd", "=", "pwd", ",", "*", "*", "kwargs", ")", "if", "main", ":", "# we create a new project with the project factory to make sure", "# that everything is handled correctly", "obj", "=", "project", "(", "None", ",", "obj", ")", "axes", "=", "{", "}", "arr_names", "=", "obj", ".", "arr_names", "sharex", "=", "defaultdict", "(", "set", ")", "sharey", "=", "defaultdict", "(", "set", ")", "for", "arr", ",", "(", "arr_name", ",", "arr_dict", ")", "in", "zip", "(", "obj", ",", "filter", "(", "lambda", "t", ":", "t", "[", "0", "]", "in", "arr_names", ",", "six", ".", "iteritems", "(", "d", "[", "'arrays'", "]", ")", ")", ")", ":", "if", "not", "arr_dict", ".", "get", "(", "'plotter'", ")", ":", "continue", "plot_dict", "=", "arr_dict", "[", "'plotter'", "]", "plotter_cls", "=", 
"getattr", "(", "import_module", "(", "plot_dict", "[", "'cls'", "]", "[", "0", "]", ")", ",", "plot_dict", "[", "'cls'", "]", "[", "1", "]", ")", "ax", "=", "None", "if", "alternative_axes", "is", "not", "None", ":", "if", "isinstance", "(", "alternative_axes", ",", "dict", ")", ":", "ax", "=", "alternative_axes", ".", "get", "(", "arr", ".", "arr_name", ")", "else", ":", "ax", "=", "next", "(", "alternative_axes", ",", "None", ")", "if", "ax", "is", "None", "and", "'ax'", "in", "plot_dict", ":", "already_opened", "=", "plot_dict", "[", "'ax'", "]", ".", "get", "(", "'shared'", ",", "set", "(", ")", ")", ".", "intersection", "(", "axes", ")", "if", "already_opened", ":", "ax", "=", "axes", "[", "next", "(", "iter", "(", "already_opened", ")", ")", "]", "else", ":", "plot_dict", "[", "'ax'", "]", ".", "pop", "(", "'shared'", ",", "None", ")", "plot_dict", "[", "'ax'", "]", "[", "'fig'", "]", "=", "fig_map", "[", "plot_dict", "[", "'ax'", "]", ".", "get", "(", "'fig'", ")", "or", "1", "]", "if", "plot_dict", "[", "'ax'", "]", ".", "get", "(", "'sharex'", ")", ":", "sharex", "[", "plot_dict", "[", "'ax'", "]", ".", "pop", "(", "'sharex'", ")", "]", ".", "add", "(", "arr", ".", "psy", ".", "arr_name", ")", "if", "plot_dict", "[", "'ax'", "]", ".", "get", "(", "'sharey'", ")", ":", "sharey", "[", "plot_dict", "[", "'ax'", "]", ".", "pop", "(", "'sharey'", ")", "]", ".", "add", "(", "arr", ".", "psy", ".", "arr_name", ")", "axes", "[", "arr", ".", "psy", ".", "arr_name", "]", "=", "ax", "=", "_ProjectLoader", ".", "load_axes", "(", "plot_dict", "[", "'ax'", "]", ")", "plotter_cls", "(", "arr", ",", "make_plot", "=", "False", ",", "draw", "=", "False", ",", "clear", "=", "False", ",", "ax", "=", "ax", ",", "project", "=", "obj", ".", "main", ",", "enable_post", "=", "enable_post", ",", "*", "*", "plot_dict", "[", "'fmt'", "]", ")", "# handle shared x and y-axes", "for", "key", ",", "names", "in", "sharex", ".", "items", "(", ")", ":", "ax_base", "=", "get_ax_base", "(", "key", ",", "names", ")", "if", "ax_base", "is", "not", "None", ":", "ax_base", ".", "get_shared_x_axes", "(", ")", ".", "join", "(", "ax_base", ",", "*", "obj", "(", "arr_name", "=", "names", ")", ".", "axes", ")", "for", "ax", "in", "obj", "(", "arr_name", "=", "names", ")", ".", "axes", ":", "ax", ".", "_sharex", "=", "ax_base", "for", "key", ",", "names", "in", "sharey", ".", "items", "(", ")", ":", "ax_base", "=", "get_ax_base", "(", "key", ",", "names", ")", "if", "ax_base", "is", "not", "None", ":", "ax_base", ".", "get_shared_y_axes", "(", ")", ".", "join", "(", "ax_base", ",", "*", "obj", "(", "arr_name", "=", "names", ")", ".", "axes", ")", "for", "ax", "in", "obj", "(", "arr_name", "=", "names", ")", ".", "axes", ":", "ax", ".", "_sharey", "=", "ax_base", "for", "arr", "in", "obj", ".", "with_plotter", ":", "shared", "=", "d", "[", "'arrays'", "]", "[", "arr", ".", "psy", ".", "arr_name", "]", "[", "'plotter'", "]", ".", "get", "(", "'shared'", ",", "{", "}", ")", "for", "key", ",", "arr_names", "in", "six", ".", "iteritems", "(", "shared", ")", ":", "arr", ".", "psy", ".", "plotter", ".", "share", "(", "obj", "(", "arr_name", "=", "arr_names", ")", ".", "plotters", ",", "keys", "=", "[", "key", "]", ")", "if", "make_plot", ":", "for", "plotter", "in", "obj", ".", "plotters", ":", "plotter", ".", "reinit", "(", "draw", "=", "False", ",", "clear", "=", "clear", "or", "(", "clear", "is", "None", "and", "plotter_cls", ".", "_get_sample_projection", "(", ")", "is", "not", "None", ")", ")", "if", "draw", "is", "None", ":", 
"draw", "=", "rcParams", "[", "'auto_draw'", "]", "if", "draw", ":", "obj", ".", "draw", "(", ")", "if", "rcParams", "[", "'auto_show'", "]", ":", "obj", ".", "show", "(", ")", "if", "auto_update", "is", "None", ":", "auto_update", "=", "rcParams", "[", "'lists.auto_update'", "]", "if", "not", "main", ":", "obj", ".", "_main", "=", "gcp", "(", "True", ")", "obj", ".", "main", ".", "extend", "(", "obj", ",", "new_name", "=", "True", ")", "obj", ".", "no_auto_update", "=", "not", "auto_update", "scp", "(", "obj", ")", "return", "obj" ]
Load a project from a file or dict

        This classmethod allows to load a project that has been stored using
        the :meth:`save_project` method and reads all the data and creates the
        figures.

        Since the data is stored in external files when saving a project,
        make sure that the data is accessible under the relative paths as
        stored in the file `fname` or from the current working directory if
        `fname` is a dictionary. Alternatively use the `alternative_paths`
        parameter or the `pwd` parameter

        Parameters
        ----------
        fname: str or dict
            The string might be the path to a file created with the
            :meth:`save_project` method, or it might be a dictionary from this
            method
        %(InteractiveBase.parameters.auto_update)s
        %(Project._add_data.parameters.make_plot)s
        %(InteractiveBase.start_update.parameters.draw)s
        alternative_axes: dict, None or list
            alternative axes instances to use

            - If it is None, the axes and figures from the saving point will
              be reproduced.
            - a dictionary should map from array names in the created project
              to matplotlib axes instances
            - a list should contain axes instances that will be used for
              iteration
        main: bool, optional
            If True, a new main project is created and returned. Otherwise
            (by default) the data is added to the current main project.
        encoding: str
            The encoding to use for loading the project. If None, it is
            automatically determined by pickle. Note: Set this to ``'latin1'``
            if using a project created with python2 on python3.
        enable_post: bool
            If True, the :attr:`~psyplot.plotter.Plotter.post` formatoption is
            enabled and post processing scripts are allowed. Do only set this
            parameter to ``True`` if you know you can trust the information in
            `fname`
        new_fig: bool
            If True (default) and `alternative_axes` is None, new figures are
            created if the figure already exists
        %(Project._add_data.parameters.clear)s
        pwd: str or None, optional
            Path to the working directory from where the data can be imported.
            If None and `fname` is the path to a file, `pwd` is set to the
            directory of this file. Otherwise the current working directory is
            used.

        %(ArrayList.from_dict.parameters.no_d|pwd)s

        Other Parameters
        ----------------
        %(ArrayList.from_dict.parameters)s

        Returns
        -------
        Project
            The project in state of the saving point
[ "Load", "a", "project", "from", "a", "file", "or", "dict" ]
python
train
44.489362
klmitch/turnstile
turnstile/tools.py
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L712-L803
def turnstile_command(conf_file, command, arguments=[], channel=None, debug=False): """ Issue a command to all running control daemons. :param conf_file: Name of the configuration file. :param command: The command to execute. Note that 'ping' is handled specially; in particular, the "channel" parameter is implied. (A random value will be used for the channel to listen on.) :param arguments: A list of arguments for the command. Note that the colon character (':') cannot be used. :param channel: If not None, specifies the name of a message channel to listen for responses on. Will wait indefinitely; to terminate the listening loop, use the keyboard interrupt sequence. :param debug: If True, debugging messages are emitted while sending the command. """ # Connect to the database... conf = config.Config(conf_file=conf_file) db = conf.get_database() control_channel = conf['control'].get('channel', 'control') # Now, set up the command command = command.lower() ts_conv = False if command == 'ping': # We handle 'ping' specially; first, figure out the channel if arguments: channel = arguments[0] else: channel = str(uuid.uuid4()) arguments = [channel] # Next, add on a timestamp if len(arguments) < 2: arguments.append(time.time()) ts_conv = True # Limit the argument list length arguments = arguments[:2] # OK, the command is all set up. Let us now send the command... if debug: cmd = [command] + arguments print >>sys.stderr, ("Issuing command: %s" % ' '.join(cmd)) database.command(db, control_channel, command, *arguments) # Were we asked to listen on a channel? if not channel: return # OK, let's subscribe to the channel... pubsub = db.pubsub() pubsub.subscribe(channel) # Now we listen... try: count = 0 for msg in pubsub.listen(): # Make sure the message is one we're interested in if debug: formatted = pprint.pformat(msg) print >>sys.stderr, "Received message: %s" % formatted if (msg['type'] not in ('pmessage', 'message') or msg['channel'] != channel): continue count += 1 # Figure out the response response = msg['data'].split(':') # If this is a 'pong' and ts_conv is true, add an RTT to # the response if ts_conv and response[0] == 'pong': try: rtt = (time.time() - float(response[2])) * 100 response.append('(RTT %.2fms)' % rtt) except Exception: # IndexError or ValueError, probably; ignore it pass # Print out the response print "Response % 5d: %s" % (count, ' '.join(response)) except KeyboardInterrupt: # We want to break out of the loop, but not return any error # to the caller... pass
[ "def", "turnstile_command", "(", "conf_file", ",", "command", ",", "arguments", "=", "[", "]", ",", "channel", "=", "None", ",", "debug", "=", "False", ")", ":", "# Connect to the database...", "conf", "=", "config", ".", "Config", "(", "conf_file", "=", "conf_file", ")", "db", "=", "conf", ".", "get_database", "(", ")", "control_channel", "=", "conf", "[", "'control'", "]", ".", "get", "(", "'channel'", ",", "'control'", ")", "# Now, set up the command", "command", "=", "command", ".", "lower", "(", ")", "ts_conv", "=", "False", "if", "command", "==", "'ping'", ":", "# We handle 'ping' specially; first, figure out the channel", "if", "arguments", ":", "channel", "=", "arguments", "[", "0", "]", "else", ":", "channel", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "arguments", "=", "[", "channel", "]", "# Next, add on a timestamp", "if", "len", "(", "arguments", ")", "<", "2", ":", "arguments", ".", "append", "(", "time", ".", "time", "(", ")", ")", "ts_conv", "=", "True", "# Limit the argument list length", "arguments", "=", "arguments", "[", ":", "2", "]", "# OK, the command is all set up. Let us now send the command...", "if", "debug", ":", "cmd", "=", "[", "command", "]", "+", "arguments", "print", ">>", "sys", ".", "stderr", ",", "(", "\"Issuing command: %s\"", "%", "' '", ".", "join", "(", "cmd", ")", ")", "database", ".", "command", "(", "db", ",", "control_channel", ",", "command", ",", "*", "arguments", ")", "# Were we asked to listen on a channel?", "if", "not", "channel", ":", "return", "# OK, let's subscribe to the channel...", "pubsub", "=", "db", ".", "pubsub", "(", ")", "pubsub", ".", "subscribe", "(", "channel", ")", "# Now we listen...", "try", ":", "count", "=", "0", "for", "msg", "in", "pubsub", ".", "listen", "(", ")", ":", "# Make sure the message is one we're interested in", "if", "debug", ":", "formatted", "=", "pprint", ".", "pformat", "(", "msg", ")", "print", ">>", "sys", ".", "stderr", ",", "\"Received message: %s\"", "%", "formatted", "if", "(", "msg", "[", "'type'", "]", "not", "in", "(", "'pmessage'", ",", "'message'", ")", "or", "msg", "[", "'channel'", "]", "!=", "channel", ")", ":", "continue", "count", "+=", "1", "# Figure out the response", "response", "=", "msg", "[", "'data'", "]", ".", "split", "(", "':'", ")", "# If this is a 'pong' and ts_conv is true, add an RTT to", "# the response", "if", "ts_conv", "and", "response", "[", "0", "]", "==", "'pong'", ":", "try", ":", "rtt", "=", "(", "time", ".", "time", "(", ")", "-", "float", "(", "response", "[", "2", "]", ")", ")", "*", "100", "response", ".", "append", "(", "'(RTT %.2fms)'", "%", "rtt", ")", "except", "Exception", ":", "# IndexError or ValueError, probably; ignore it", "pass", "# Print out the response", "print", "\"Response % 5d: %s\"", "%", "(", "count", ",", "' '", ".", "join", "(", "response", ")", ")", "except", "KeyboardInterrupt", ":", "# We want to break out of the loop, but not return any error", "# to the caller...", "pass" ]
Issue a command to all running control daemons. :param conf_file: Name of the configuration file. :param command: The command to execute. Note that 'ping' is handled specially; in particular, the "channel" parameter is implied. (A random value will be used for the channel to listen on.) :param arguments: A list of arguments for the command. Note that the colon character (':') cannot be used. :param channel: If not None, specifies the name of a message channel to listen for responses on. Will wait indefinitely; to terminate the listening loop, use the keyboard interrupt sequence. :param debug: If True, debugging messages are emitted while sending the command.
[ "Issue", "a", "command", "to", "all", "running", "control", "daemons", "." ]
python
train
35.336957
sendwithus/sendwithus_python
sendwithus/__init__.py
https://github.com/sendwithus/sendwithus_python/blob/8ae50d514febd44f7d9be3c838b4d92f99412832/sendwithus/__init__.py#L252-L254
def create_email(self, name, subject, html, text=''):
        """ [DEPRECATED] API call to create an email """
        return self.create_template(name, subject, html, text)
[ "def", "create_email", "(", "self", ",", "name", ",", "subject", ",", "html", ",", "text", "=", "''", ")", ":", "return", "self", ".", "create_template", "(", "name", ",", "subject", ",", "html", ",", "text", ")" ]
[DEPRECATED] API call to create an email
[ "[", "DEPRECATED", "]", "API", "call", "to", "create", "an", "email" ]
python
valid
57.333333
Yubico/python-pyhsm
pyhsm/hmac_cmd.py
https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/hmac_cmd.py#L110-L122
def _raw_pack(key_handle, flags, data): """ Common code for packing payload to YHSM_HMAC_SHA1_GENERATE command. """ # #define YHSM_HMAC_RESET 0x01 // Flag to indicate reset at first packet # #define YHSM_HMAC_FINAL 0x02 // Flag to indicate that the hash shall be calculated # typedef struct { # uint32_t keyHandle; // Key handle # uint8_t flags; // Flags # uint8_t numBytes; // Number of bytes in data packet # uint8_t data[YHSM_MAX_PKT_SIZE - 6]; // Data to be written # } YHSM_HMAC_SHA1_GENERATE_REQ; return struct.pack('<IBB', key_handle, flags, len(data)) + data
[ "def", "_raw_pack", "(", "key_handle", ",", "flags", ",", "data", ")", ":", "# #define YHSM_HMAC_RESET 0x01 // Flag to indicate reset at first packet", "# #define YHSM_HMAC_FINAL 0x02 // Flag to indicate that the hash shall be calculated", "# typedef struct {", "# uint32_t keyHandle; // Key handle", "# uint8_t flags; // Flags", "# uint8_t numBytes; // Number of bytes in data packet", "# uint8_t data[YHSM_MAX_PKT_SIZE - 6]; // Data to be written", "# } YHSM_HMAC_SHA1_GENERATE_REQ;", "return", "struct", ".", "pack", "(", "'<IBB'", ",", "key_handle", ",", "flags", ",", "len", "(", "data", ")", ")", "+", "data" ]
Common code for packing payload to YHSM_HMAC_SHA1_GENERATE command.
[ "Common", "code", "for", "packing", "payload", "to", "YHSM_HMAC_SHA1_GENERATE", "command", "." ]
python
train
53
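A standalone demonstration of the '<IBB' header layout packed by _raw_pack above; the key handle and payload are illustrative values.

# Standalone sketch of the '<IBB' + data layout used by _raw_pack
# above; key handle, flags and payload are illustrative values.
import struct

YHSM_HMAC_RESET, YHSM_HMAC_FINAL = 0x01, 0x02
data = b'hello'
packed = struct.pack('<IBB', 0x1234,
                     YHSM_HMAC_RESET | YHSM_HMAC_FINAL, len(data)) + data

assert len(packed) == 4 + 1 + 1 + len(data)  # uint32 + two uint8s + payload
print(packed.hex())  # 341200000305 followed by the payload bytes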
GNS3/gns3-server
gns3server/compute/vmware/vmware_vm.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/vmware/vmware_vm.py#L399-L411
def _stop_ubridge_capture(self, adapter_number): """ Stop a packet capture in uBridge. :param adapter_number: adapter number """ vnet = "ethernet{}.vnet".format(adapter_number) if vnet not in self._vmx_pairs: raise VMwareError("vnet {} not in VMX file".format(vnet)) if not self._ubridge_hypervisor: raise VMwareError("Cannot stop the packet capture: uBridge is not running") yield from self._ubridge_send("bridge stop_capture {name}".format(name=vnet))
[ "def", "_stop_ubridge_capture", "(", "self", ",", "adapter_number", ")", ":", "vnet", "=", "\"ethernet{}.vnet\"", ".", "format", "(", "adapter_number", ")", "if", "vnet", "not", "in", "self", ".", "_vmx_pairs", ":", "raise", "VMwareError", "(", "\"vnet {} not in VMX file\"", ".", "format", "(", "vnet", ")", ")", "if", "not", "self", ".", "_ubridge_hypervisor", ":", "raise", "VMwareError", "(", "\"Cannot stop the packet capture: uBridge is not running\"", ")", "yield", "from", "self", ".", "_ubridge_send", "(", "\"bridge stop_capture {name}\"", ".", "format", "(", "name", "=", "vnet", ")", ")" ]
Stop a packet capture in uBridge. :param adapter_number: adapter number
[ "Stop", "a", "packet", "capture", "in", "uBridge", "." ]
python
train
40.846154
tensorflow/tensor2tensor
tensor2tensor/models/revnet.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/revnet.py#L386-L400
def revnet_cifar_base(): """Tiny hparams suitable for CIFAR/etc.""" hparams = revnet_base() hparams.num_channels_init_block = 32 hparams.first_batch_norm = [False, True, True] hparams.init_stride = 1 hparams.init_kernel_size = 3 hparams.init_maxpool = False hparams.strides = [1, 2, 2] hparams.batch_size = 128 hparams.weight_decay = 1e-4 hparams.learning_rate = 0.1 hparams.learning_rate_cosine_cycle_steps = 5000 return hparams
[ "def", "revnet_cifar_base", "(", ")", ":", "hparams", "=", "revnet_base", "(", ")", "hparams", ".", "num_channels_init_block", "=", "32", "hparams", ".", "first_batch_norm", "=", "[", "False", ",", "True", ",", "True", "]", "hparams", ".", "init_stride", "=", "1", "hparams", ".", "init_kernel_size", "=", "3", "hparams", ".", "init_maxpool", "=", "False", "hparams", ".", "strides", "=", "[", "1", ",", "2", ",", "2", "]", "hparams", ".", "batch_size", "=", "128", "hparams", ".", "weight_decay", "=", "1e-4", "hparams", ".", "learning_rate", "=", "0.1", "hparams", ".", "learning_rate_cosine_cycle_steps", "=", "5000", "return", "hparams" ]
Tiny hparams suitable for CIFAR/etc.
[ "Tiny", "hparams", "suitable", "for", "CIFAR", "/", "etc", "." ]
python
train
29.466667
demurgos/py-pathmatch
pathmatch/gitmatch.py
https://github.com/demurgos/py-pathmatch/blob/70b3aa99ee34da8b80b6ec6340862b760159d2a1/pathmatch/gitmatch.py#L30-L69
def normalize_path(path, base_path=u'/', is_dir=None): u""" Normalize a path to use it with a gitmatch pattern. This ensures that the separators are forward slashes. If a path is rooted (starts with a slash), it has to be a subdirectory of `base_path`. The path root is then changed to be based of `base_path`. :type path: text_type :param path: A POSIX path to normalize :type base_path: text_type :param base_path: A POSIX path to the base directory, `path` must be inside `base_path`. :type is_dir: text_type :param is_dir: If `true`, adds a trailing slash. If `false` removes any trailing slash. If `None`, keeps the current ending. :return: """ path = posixpath.normpath(path) base_path = posixpath.normpath(base_path) if len(base_path) == 0: raise ValueError(u'`project_root` cannot be an empty string after normalization') if base_path[-1] != u'/': base_path += u'/' if path.startswith(base_path): path = u'/' + posixpath.relpath(path, base_path) elif path.startswith(u'/'): raise ValueError(u'`path` ({}) is absolute but not inside base_path ({})'.format(path, base_path)) if is_dir is None: return path elif is_dir and path[-1:] != u'/': return path + u'/' elif not is_dir and path[-1:] == u'/': return path[:-1] return path
[ "def", "normalize_path", "(", "path", ",", "base_path", "=", "u'/'", ",", "is_dir", "=", "None", ")", ":", "path", "=", "posixpath", ".", "normpath", "(", "path", ")", "base_path", "=", "posixpath", ".", "normpath", "(", "base_path", ")", "if", "len", "(", "base_path", ")", "==", "0", ":", "raise", "ValueError", "(", "u'`project_root` cannot be an empty string after normalization'", ")", "if", "base_path", "[", "-", "1", "]", "!=", "u'/'", ":", "base_path", "+=", "u'/'", "if", "path", ".", "startswith", "(", "base_path", ")", ":", "path", "=", "u'/'", "+", "posixpath", ".", "relpath", "(", "path", ",", "base_path", ")", "elif", "path", ".", "startswith", "(", "u'/'", ")", ":", "raise", "ValueError", "(", "u'`path` ({}) is absolute but not inside base_path ({})'", ".", "format", "(", "path", ",", "base_path", ")", ")", "if", "is_dir", "is", "None", ":", "return", "path", "elif", "is_dir", "and", "path", "[", "-", "1", ":", "]", "!=", "u'/'", ":", "return", "path", "+", "u'/'", "elif", "not", "is_dir", "and", "path", "[", "-", "1", ":", "]", "==", "u'/'", ":", "return", "path", "[", ":", "-", "1", "]", "return", "path" ]
u""" Normalize a path to use it with a gitmatch pattern. This ensures that the separators are forward slashes. If a path is rooted (starts with a slash), it has to be a subdirectory of `base_path`. The path root is then changed to be based of `base_path`. :type path: text_type :param path: A POSIX path to normalize :type base_path: text_type :param base_path: A POSIX path to the base directory, `path` must be inside `base_path`. :type is_dir: text_type :param is_dir: If `true`, adds a trailing slash. If `false` removes any trailing slash. If `None`, keeps the current ending. :return:
[ "u", "Normalize", "a", "path", "to", "use", "it", "with", "a", "gitmatch", "pattern", ".", "This", "ensures", "that", "the", "separators", "are", "forward", "slashes", ".", "If", "a", "path", "is", "rooted", "(", "starts", "with", "a", "slash", ")", "it", "has", "to", "be", "a", "subdirectory", "of", "base_path", ".", "The", "path", "root", "is", "then", "changed", "to", "be", "based", "of", "base_path", "." ]
python
train
36.425
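A few standalone calls against the normalize_path record above; the paths are made up to exercise the rebasing and is_dir branches.

# Usage sketch for normalize_path above; the paths are made up to
# exercise the rebasing and is_dir branches.
print(normalize_path(u'/repo/src/main.py', base_path=u'/repo'))
# -> '/src/main.py'  (rooted path rebased under base_path)
print(normalize_path(u'docs/api', is_dir=True))
# -> 'docs/api/'     (trailing slash added for a directory)
print(normalize_path(u'docs/api/', is_dir=False))
# -> 'docs/api'      (posixpath.normpath drops the trailing slash)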
cytoscape/py2cytoscape
py2cytoscape/cyrest/styles.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/styles.py#L46-L59
def getSingleVisualProperty(self, visualProperty, verbose=None): """ Return the Visual Property specified by the `visualProperty` parameter. Additional details on common Visual Properties can be found in the [Basic Visual Lexicon JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/presentation/property/BasicVisualLexicon.html) :param visualProperty: ID of the Visual Property :param verbose: print more :returns: 200: successful operation """ response=api(url=self.___url+'styles/visualproperties/'+str(visualProperty)+'', method="GET", verbose=verbose, parse_params=False) return response
[ "def", "getSingleVisualProperty", "(", "self", ",", "visualProperty", ",", "verbose", "=", "None", ")", ":", "response", "=", "api", "(", "url", "=", "self", ".", "___url", "+", "'styles/visualproperties/'", "+", "str", "(", "visualProperty", ")", "+", "''", ",", "method", "=", "\"GET\"", ",", "verbose", "=", "verbose", ",", "parse_params", "=", "False", ")", "return", "response" ]
Return the Visual Property specified by the `visualProperty` parameter. Additional details on common Visual Properties can be found in the [Basic Visual Lexicon JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/presentation/property/BasicVisualLexicon.html) :param visualProperty: ID of the Visual Property :param verbose: print more :returns: 200: successful operation
[ "Return", "the", "Visual", "Property", "specified", "by", "the", "visualProperty", "parameter", ".", "Additional", "details", "on", "common", "Visual", "Properties", "can", "be", "found", "in", "the", "[", "Basic", "Visual", "Lexicon", "JavaDoc", "API", "]", "(", "http", ":", "//", "chianti", ".", "ucsd", ".", "edu", "/", "cytoscape", "-", "3", ".", "6", ".", "1", "/", "API", "/", "org", "/", "cytoscape", "/", "view", "/", "presentation", "/", "property", "/", "BasicVisualLexicon", ".", "html", ")" ]
python
train
49
daethnir/authprogs
authprogs/authprogs.py
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L344-L368
def find_match_command(self, rule): """Return a matching (possibly munged) command, if found in rule.""" command_string = rule['command'] command_list = command_string.split() self.logdebug('comparing "%s" to "%s"\n' % (command_list, self.original_command_list)) if rule.get('allow_trailing_args'): self.logdebug('allow_trailing_args is true - comparing initial ' 'list.\n') # Verify the initial arguments are all the same if (self.original_command_list[:len(command_list)] == command_list): self.logdebug('initial list is same\n') return {'command': self.original_command_list} else: self.logdebug('initial list is not same\n') elif rule.get('pcre_match'): if re.search(command_string, self.original_command_string): return {'command': self.original_command_list} elif command_list == self.original_command_list: return {'command': command_list}
[ "def", "find_match_command", "(", "self", ",", "rule", ")", ":", "command_string", "=", "rule", "[", "'command'", "]", "command_list", "=", "command_string", ".", "split", "(", ")", "self", ".", "logdebug", "(", "'comparing \"%s\" to \"%s\"\\n'", "%", "(", "command_list", ",", "self", ".", "original_command_list", ")", ")", "if", "rule", ".", "get", "(", "'allow_trailing_args'", ")", ":", "self", ".", "logdebug", "(", "'allow_trailing_args is true - comparing initial '", "'list.\\n'", ")", "# Verify the initial arguments are all the same", "if", "(", "self", ".", "original_command_list", "[", ":", "len", "(", "command_list", ")", "]", "==", "command_list", ")", ":", "self", ".", "logdebug", "(", "'initial list is same\\n'", ")", "return", "{", "'command'", ":", "self", ".", "original_command_list", "}", "else", ":", "self", ".", "logdebug", "(", "'initial list is not same\\n'", ")", "elif", "rule", ".", "get", "(", "'pcre_match'", ")", ":", "if", "re", ".", "search", "(", "command_string", ",", "self", ".", "original_command_string", ")", ":", "return", "{", "'command'", ":", "self", ".", "original_command_list", "}", "elif", "command_list", "==", "self", ".", "original_command_list", ":", "return", "{", "'command'", ":", "command_list", "}" ]
Return a matching (possibly munged) command, if found in rule.
[ "Return", "a", "matching", "(", "possibly", "munged", ")", "command", "if", "found", "in", "rule", "." ]
python
train
43.44
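The allow_trailing_args branch above reduces to a list-prefix comparison; a standalone sketch of just that test, with made-up commands:

# Standalone sketch of the list-prefix test behind the
# allow_trailing_args branch of find_match_command above.
def is_prefix(rule_command, original_command):
    rule_list = rule_command.split()
    return original_command.split()[:len(rule_list)] == rule_list

print(is_prefix('rsync --server', 'rsync --server -logDtpre.iLs . /backup'))  # True
print(is_prefix('rsync --server', 'scp -t /tmp'))                             # False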
junzis/pyModeS
pyModeS/decoder/adsb.py
https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/decoder/adsb.py#L441-L465
def nac_v(msg): """Calculate NACv, Navigation Accuracy Category - Velocity Args: msg (string): 28 bytes hexadecimal message string, TC = 19 Returns: int or string: 95% horizontal accuracy bounds for velocity, Horizontal Figure of Merit int or string: 95% vertical accuracy bounds for velocity, Vertical Figure of Merit """ tc = typecode(msg) if tc != 19: raise RuntimeError("%s: Not an airborne velocity message, expecting TC = 19" % msg) msgbin = common.hex2bin(msg) NACv = common.bin2int(msgbin[42:45]) try: HFOMr = uncertainty.NACv[NACv]['HFOMr'] VFOMr = uncertainty.NACv[NACv]['VFOMr'] except KeyError: HFOMr, VFOMr = uncertainty.NA, uncertainty.NA return HFOMr, VFOMr
[ "def", "nac_v", "(", "msg", ")", ":", "tc", "=", "typecode", "(", "msg", ")", "if", "tc", "!=", "19", ":", "raise", "RuntimeError", "(", "\"%s: Not an airborne velocity message, expecting TC = 19\"", "%", "msg", ")", "msgbin", "=", "common", ".", "hex2bin", "(", "msg", ")", "NACv", "=", "common", ".", "bin2int", "(", "msgbin", "[", "42", ":", "45", "]", ")", "try", ":", "HFOMr", "=", "uncertainty", ".", "NACv", "[", "NACv", "]", "[", "'HFOMr'", "]", "VFOMr", "=", "uncertainty", ".", "NACv", "[", "NACv", "]", "[", "'VFOMr'", "]", "except", "KeyError", ":", "HFOMr", ",", "VFOMr", "=", "uncertainty", ".", "NA", ",", "uncertainty", ".", "NA", "return", "HFOMr", ",", "VFOMr" ]
Calculate NACv, Navigation Accuracy Category - Velocity Args: msg (string): 28 bytes hexadecimal message string, TC = 19 Returns: int or string: 95% horizontal accuracy bounds for velocity, Horizontal Figure of Merit int or string: 95% vertical accuracy bounds for velocity, Vertical Figure of Merit
[ "Calculate", "NACv", "Navigation", "Accuracy", "Category", "-", "Velocity" ]
python
train
30.16
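A hedged call pattern for the nac_v record above. The message literal is an illustrative airborne-velocity (TC=19) frame, and the guard mirrors the function's own typecode check.

# Hedged usage sketch for nac_v above. MSG is an illustrative
# airborne-velocity (TC=19) frame; any other typecode makes the
# function raise RuntimeError, hence the guard.
import pyModeS as pms

MSG = '8D485020994409940838175B284F'
try:
    hfom, vfom = pms.adsb.nac_v(MSG)
    print('HFOMr:', hfom, 'VFOMr:', vfom)
except RuntimeError as err:
    print('not an airborne velocity message:', err)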
pyBookshelf/bookshelf
bookshelf/api_v1.py
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v1.py#L361-L372
def create_server(cloud, **kwargs): """ Create a new instance """ if cloud == 'ec2': _create_server_ec2(**kwargs) elif cloud == 'rackspace': _create_server_rackspace(**kwargs) elif cloud == 'gce': _create_server_gce(**kwargs) else: raise ValueError("Unknown cloud type: {}".format(cloud))
[ "def", "create_server", "(", "cloud", ",", "*", "*", "kwargs", ")", ":", "if", "cloud", "==", "'ec2'", ":", "_create_server_ec2", "(", "*", "*", "kwargs", ")", "elif", "cloud", "==", "'rackspace'", ":", "_create_server_rackspace", "(", "*", "*", "kwargs", ")", "elif", "cloud", "==", "'gce'", ":", "_create_server_gce", "(", "*", "*", "kwargs", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown cloud type: {}\"", ".", "format", "(", "cloud", ")", ")" ]
Create a new instance
[ "Create", "a", "new", "instance" ]
python
train
28.416667
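A hedged usage sketch only; the keyword arguments below are illustrative guesses, since the parameters each _create_server_* backend expects are not shown in this record.

create_server('ec2', distribution='ubuntu14.04', instance_type='t2.micro')  # hypothetical kwargs
create_server('gce', zone='us-central1-a')                                  # hypothetical kwargs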
emory-libraries/eulcommon
eulcommon/djangoextras/auth/decorators.py
https://github.com/emory-libraries/eulcommon/blob/dc63a9b3b5e38205178235e0d716d1b28158d3a9/eulcommon/djangoextras/auth/decorators.py#L132-L147
def permission_required_with_ajax(perm, login_url=None): """ Decorator for views that checks whether a user has a particular permission enabled, redirecting to the log-in page if necessary, but returns a special response for ajax requests. See :meth:`eulcore.django.auth.decorators.user_passes_test_with_ajax`. Usage is the same as :meth:`django.contrib.auth.decorators.permission_required` :: @permission_required_with_ajax('polls.can_vote', login_url='/loginpage/') def my_view(request): ... """ return user_passes_test_with_ajax(lambda u: u.has_perm(perm), login_url=login_url)
[ "def", "permission_required_with_ajax", "(", "perm", ",", "login_url", "=", "None", ")", ":", "return", "user_passes_test_with_ajax", "(", "lambda", "u", ":", "u", ".", "has_perm", "(", "perm", ")", ",", "login_url", "=", "login_url", ")" ]
Decorator for views that checks whether a user has a particular permission enabled, redirecting to the log-in page if necessary, but returns a special response for ajax requests. See :meth:`eulcore.django.auth.decorators.user_passes_test_with_ajax`. Usage is the same as :meth:`django.contrib.auth.decorators.permission_required` :: @permission_required_with_ajax('polls.can_vote', login_url='/loginpage/') def my_view(request): ...
[ "Decorator", "for", "views", "that", "checks", "whether", "a", "user", "has", "a", "particular", "permission", "enabled", "redirecting", "to", "the", "log", "-", "in", "page", "if", "necessary", "but", "returns", "a", "special", "response", "for", "ajax", "requests", ".", "See", ":", "meth", ":", "eulcore", ".", "django", ".", "auth", ".", "decorators", ".", "user_passes_test_with_ajax", "." ]
python
train
39.5625
yyuu/botornado
boto/s3/bucket.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/s3/bucket.py#L1233-L1242
def get_website_endpoint(self): """ Returns the fully qualified hostname to use if you want to access this bucket as a website. This doesn't validate whether the bucket has been correctly configured as a website or not. """ l = [self.name] l.append(S3WebsiteEndpointTranslate.translate_region(self.get_location())) l.append('.'.join(self.connection.host.split('.')[-2:])) return '.'.join(l)
[ "def", "get_website_endpoint", "(", "self", ")", ":", "l", "=", "[", "self", ".", "name", "]", "l", ".", "append", "(", "S3WebsiteEndpointTranslate", ".", "translate_region", "(", "self", ".", "get_location", "(", ")", ")", ")", "l", ".", "append", "(", "'.'", ".", "join", "(", "self", ".", "connection", ".", "host", ".", "split", "(", "'.'", ")", "[", "-", "2", ":", "]", ")", ")", "return", "'.'", ".", "join", "(", "l", ")" ]
Returns the fully qualified hostname to use if you want to access this bucket as a website. This doesn't validate whether the bucket has been correctly configured as a website or not.
[ "Returns", "the", "fully", "qualified", "hostname", "to", "use", "is", "you", "want", "to", "access", "this", "bucket", "as", "a", "website", ".", "This", "doesn", "t", "validate", "whether", "the", "bucket", "has", "been", "correctly", "configured", "as", "a", "website", "or", "not", "." ]
python
train
45.4
googledatalab/pydatalab
solutionbox/structured_data/mltoolbox/_structured_data/_package.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L722-L739
def batch_predict(training_dir, prediction_input_file, output_dir, mode, batch_size=16, shard_files=True, output_format='csv', cloud=False): """Blocking version of batch_predict. See documentation of batch_predict_async. """ job = batch_predict_async( training_dir=training_dir, prediction_input_file=prediction_input_file, output_dir=output_dir, mode=mode, batch_size=batch_size, shard_files=shard_files, output_format=output_format, cloud=cloud) job.wait() print('Batch predict: ' + str(job.state))
[ "def", "batch_predict", "(", "training_dir", ",", "prediction_input_file", ",", "output_dir", ",", "mode", ",", "batch_size", "=", "16", ",", "shard_files", "=", "True", ",", "output_format", "=", "'csv'", ",", "cloud", "=", "False", ")", ":", "job", "=", "batch_predict_async", "(", "training_dir", "=", "training_dir", ",", "prediction_input_file", "=", "prediction_input_file", ",", "output_dir", "=", "output_dir", ",", "mode", "=", "mode", ",", "batch_size", "=", "batch_size", ",", "shard_files", "=", "shard_files", ",", "output_format", "=", "output_format", ",", "cloud", "=", "cloud", ")", "job", ".", "wait", "(", ")", "print", "(", "'Batch predict: '", "+", "str", "(", "job", ".", "state", ")", ")" ]
Blocking version of batch_predict. See documentation of batch_predict_async.
[ "Blocking", "versoin", "of", "batch_predict", "." ]
python
train
32.388889
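A generic sketch of the blocking-over-async wrapper pattern used above, with a minimal stand-in Job class (hypothetical; the real job object comes from batch_predict_async).

class Job(object):
    def __init__(self):
        self.state = 'running'

    def wait(self):
        # a real job would block here until the work completes
        self.state = 'done'

def work_async():
    return Job()

def work_blocking():
    job = work_async()  # kick off the work
    job.wait()          # block until it finishes
    print('Batch predict: ' + str(job.state))

work_blocking()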
pycontribs/pyrax
pyrax/cloudblockstorage.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudblockstorage.py#L501-L509
def create_snapshot(self, volume, name=None, description=None, force=False): """ Creates a snapshot of the volume, with an optional name and description. Normally snapshots will not happen if the volume is attached. To override this default behavior, pass force=True. """ return self._snapshot_manager.create(volume=volume, name=name, description=description, force=force)
[ "def", "create_snapshot", "(", "self", ",", "volume", ",", "name", "=", "None", ",", "description", "=", "None", ",", "force", "=", "False", ")", ":", "return", "self", ".", "_snapshot_manager", ".", "create", "(", "volume", "=", "volume", ",", "name", "=", "name", ",", "description", "=", "description", ",", "force", "=", "force", ")" ]
Creates a snapshot of the volume, with an optional name and description. Normally snapshots will not happen if the volume is attached. To override this default behavior, pass force=True.
[ "Creates", "a", "snapshot", "of", "the", "volume", "with", "an", "optional", "name", "and", "description", "." ]
python
train
47.666667
spyder-ide/spyder
spyder/plugins/editor/utils/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/utils/editor.py#L618-L622
def clear_selection(self): """Clears text cursor selection.""" text_cursor = self._editor.textCursor() text_cursor.clearSelection() self._editor.setTextCursor(text_cursor)
[ "def", "clear_selection", "(", "self", ")", ":", "text_cursor", "=", "self", ".", "_editor", ".", "textCursor", "(", ")", "text_cursor", ".", "clearSelection", "(", ")", "self", ".", "_editor", ".", "setTextCursor", "(", "text_cursor", ")" ]
Clears text cursor selection.
[ "Clears", "text", "cursor", "selection", "." ]
python
train
39.8
django-blog-zinnia/wordpress2zinnia
zinnia_wordpress/management/commands/wp2zinnia.py
https://github.com/django-blog-zinnia/wordpress2zinnia/blob/656df6d431418a660f0e590d2226af5e6dd7a3e6/zinnia_wordpress/management/commands/wp2zinnia.py#L134-L158
def import_authors(self, tree): """ Retrieve all the authors used in posts and convert them to new or existing authors and return the conversion. """ self.write_out(self.style.STEP('- Importing authors\n')) post_authors = set() for item in tree.findall('channel/item'): post_type = item.find('{%s}post_type' % WP_NS).text if post_type == 'post': post_authors.add(item.find( '{http://purl.org/dc/elements/1.1/}creator').text) self.write_out('> %i authors found.\n' % len(post_authors)) authors = {} for post_author in post_authors: if self.default_author: authors[post_author] = self.default_author else: authors[post_author] = self.migrate_author( post_author.replace(' ', '-')) return authors
[ "def", "import_authors", "(", "self", ",", "tree", ")", ":", "self", ".", "write_out", "(", "self", ".", "style", ".", "STEP", "(", "'- Importing authors\\n'", ")", ")", "post_authors", "=", "set", "(", ")", "for", "item", "in", "tree", ".", "findall", "(", "'channel/item'", ")", ":", "post_type", "=", "item", ".", "find", "(", "'{%s}post_type'", "%", "WP_NS", ")", ".", "text", "if", "post_type", "==", "'post'", ":", "post_authors", ".", "add", "(", "item", ".", "find", "(", "'{http://purl.org/dc/elements/1.1/}creator'", ")", ".", "text", ")", "self", ".", "write_out", "(", "'> %i authors found.\\n'", "%", "len", "(", "post_authors", ")", ")", "authors", "=", "{", "}", "for", "post_author", "in", "post_authors", ":", "if", "self", ".", "default_author", ":", "authors", "[", "post_author", "]", "=", "self", ".", "default_author", "else", ":", "authors", "[", "post_author", "]", "=", "self", ".", "migrate_author", "(", "post_author", ".", "replace", "(", "' '", ",", "'-'", ")", ")", "return", "authors" ]
Retrieve all the authors used in posts and convert them to new or existing authors and return the conversion.
[ "Retrieve", "all", "the", "authors", "used", "in", "posts", "and", "convert", "it", "to", "new", "or", "existing", "author", "and", "return", "the", "conversion", "." ]
python
train
36.04
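A self-contained ElementTree sketch of the namespaced lookups import_authors relies on; the WordPress namespace URI here is an assumption for illustration.

import xml.etree.ElementTree as ET

WP_NS = 'http://wordpress.org/export/1.2/'  # assumed namespace URI
doc = ('<channel xmlns:wp="%s" xmlns:dc="http://purl.org/dc/elements/1.1/">'
       '<item><wp:post_type>post</wp:post_type>'
       '<dc:creator>alice</dc:creator></item></channel>' % WP_NS)
root = ET.fromstring(doc)
for item in root.findall('item'):
    if item.find('{%s}post_type' % WP_NS).text == 'post':
        print(item.find('{http://purl.org/dc/elements/1.1/}creator').text)  # alice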
knipknap/exscript
Exscript/host.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/host.py#L317-L328
def set(self, name, value): """ Stores the given variable/value in the object for later retrieval. :type name: string :param name: The name of the variable. :type value: object :param value: The value of the variable. """ if self.vars is None: self.vars = {} self.vars[name] = value
[ "def", "set", "(", "self", ",", "name", ",", "value", ")", ":", "if", "self", ".", "vars", "is", "None", ":", "self", ".", "vars", "=", "{", "}", "self", ".", "vars", "[", "name", "]", "=", "value" ]
Stores the given variable/value in the object for later retrieval. :type name: string :param name: The name of the variable. :type value: object :param value: The value of the variable.
[ "Stores", "the", "given", "variable", "/", "value", "in", "the", "object", "for", "later", "retrieval", "." ]
python
train
29.833333
heitzmann/gdspy
gdspy/__init__.py
https://github.com/heitzmann/gdspy/blob/2c8d1313248c544e2066d19095b7ad7158c79bc9/gdspy/__init__.py#L1103-L1230
def arc(self, radius, initial_angle, final_angle, number_of_points=0.01, max_points=199, final_width=None, final_distance=None, layer=0, datatype=0): """ Add a curved section to the path. Parameters ---------- radius : number Central radius of the section. initial_angle : number Initial angle of the curve (in *radians*). final_angle : number Final angle of the curve (in *radians*). number_of_points : integer or float If integer: number of vertices that form the object (polygonal approximation). If float: approximate curvature resolution. The actual number of points is automatically calculated. max_points : integer if ``number_of_points > max_points``, the element will be fractured in smaller polygons with at most ``max_points`` each. final_width : number If set, the paths of this segment will have their widths linearly changed from their current value to this one. final_distance : number If set, the distance between paths is linearly change from its current value to this one along this segment. layer : integer, list The GDSII layer numbers for the elements of each path. If the number of layers in the list is less than the number of paths, the list is repeated. datatype : integer, list The GDSII datatype for the elements of each path (between 0 and 255). If the number of datatypes in the list is less than the number of paths, the list is repeated. Returns ------- out : ``Path`` This object. Notes ----- The GDSII specification supports only a maximum of 199 vertices per polygon. """ warn = True cx = self.x - radius * numpy.cos(initial_angle) cy = self.y - radius * numpy.sin(initial_angle) self.x = cx + radius * numpy.cos(final_angle) self.y = cy + radius * numpy.sin(final_angle) if final_angle > initial_angle: self.direction = final_angle + numpy.pi * 0.5 else: self.direction = final_angle - numpy.pi * 0.5 old_w = self.w old_distance = self.distance if final_width is not None: self.w = final_width * 0.5 if final_distance is not None: self.distance = final_distance if isinstance(number_of_points, float): number_of_points = 2 * int( abs((final_angle - initial_angle) * (radius + max(old_distance, self.distance) * (self.n - 1) * 0.5 + max(old_w, self.w)) / number_of_points) + 0.5) + 2 number_of_points = max(number_of_points, 3) pieces = int(numpy.ceil(number_of_points / float(max_points))) number_of_points = number_of_points // pieces widths = numpy.linspace(old_w, self.w, pieces + 1) distances = numpy.linspace(old_distance, self.distance, pieces + 1) angles = numpy.linspace(initial_angle, final_angle, pieces + 1) if (self.w != 0) or (old_w != 0): for jj in range(pieces): for ii in range(self.n): self.polygons.append(numpy.zeros((number_of_points, 2))) r0 = radius + ii * distances[jj + 1] - ( self.n - 1) * distances[jj + 1] * 0.5 old_r0 = radius + ii * distances[jj] - ( self.n - 1) * distances[jj] * 0.5 pts2 = number_of_points // 2 pts1 = number_of_points - pts2 ang = numpy.linspace(angles[jj], angles[jj + 1], pts1) rad = numpy.linspace(old_r0 + widths[jj], r0 + widths[jj + 1], pts1) self.polygons[-1][:pts1, 0] = numpy.cos(ang) * rad + cx self.polygons[-1][:pts1, 1] = numpy.sin(ang) * rad + cy if widths[jj + 1] == 0: pts1 -= 1 pts2 += 1 if widths[jj] == 0: self.polygons[-1][:pts1 - 1, :] = numpy.array( self.polygons[-1][1:pts1, :]) pts1 -= 1 pts2 += 1 ang = numpy.linspace(angles[jj + 1], angles[jj], pts2) rad = numpy.linspace(r0 - widths[jj + 1], old_r0 - widths[jj], pts2) if (rad[0] <= 0 or rad[-1] <= 0) and warn: warnings.warn( "[GDSPY] Path arc with width larger than radius " "created: 
possible self-intersecting polygon.", stacklevel=2) warn = False self.polygons[-1][pts1:, 0] = numpy.cos(ang) * rad + cx self.polygons[-1][pts1:, 1] = numpy.sin(ang) * rad + cy self.length += abs((angles[jj + 1] - angles[jj]) * radius) if isinstance(layer, list): self.layers.extend( (layer * (self.n // len(layer) + 1))[:self.n]) else: self.layers.extend(layer for _ in range(self.n)) if isinstance(datatype, list): self.datatypes.extend( (datatype * (self.n // len(datatype) + 1))[:self.n]) else: self.datatypes.extend(datatype for _ in range(self.n)) return self
[ "def", "arc", "(", "self", ",", "radius", ",", "initial_angle", ",", "final_angle", ",", "number_of_points", "=", "0.01", ",", "max_points", "=", "199", ",", "final_width", "=", "None", ",", "final_distance", "=", "None", ",", "layer", "=", "0", ",", "datatype", "=", "0", ")", ":", "warn", "=", "True", "cx", "=", "self", ".", "x", "-", "radius", "*", "numpy", ".", "cos", "(", "initial_angle", ")", "cy", "=", "self", ".", "y", "-", "radius", "*", "numpy", ".", "sin", "(", "initial_angle", ")", "self", ".", "x", "=", "cx", "+", "radius", "*", "numpy", ".", "cos", "(", "final_angle", ")", "self", ".", "y", "=", "cy", "+", "radius", "*", "numpy", ".", "sin", "(", "final_angle", ")", "if", "final_angle", ">", "initial_angle", ":", "self", ".", "direction", "=", "final_angle", "+", "numpy", ".", "pi", "*", "0.5", "else", ":", "self", ".", "direction", "=", "final_angle", "-", "numpy", ".", "pi", "*", "0.5", "old_w", "=", "self", ".", "w", "old_distance", "=", "self", ".", "distance", "if", "final_width", "is", "not", "None", ":", "self", ".", "w", "=", "final_width", "*", "0.5", "if", "final_distance", "is", "not", "None", ":", "self", ".", "distance", "=", "final_distance", "if", "isinstance", "(", "number_of_points", ",", "float", ")", ":", "number_of_points", "=", "2", "*", "int", "(", "abs", "(", "(", "final_angle", "-", "initial_angle", ")", "*", "(", "radius", "+", "max", "(", "old_distance", ",", "self", ".", "distance", ")", "*", "(", "self", ".", "n", "-", "1", ")", "*", "0.5", "+", "max", "(", "old_w", ",", "self", ".", "w", ")", ")", "/", "number_of_points", ")", "+", "0.5", ")", "+", "2", "number_of_points", "=", "max", "(", "number_of_points", ",", "3", ")", "pieces", "=", "int", "(", "numpy", ".", "ceil", "(", "number_of_points", "/", "float", "(", "max_points", ")", ")", ")", "number_of_points", "=", "number_of_points", "//", "pieces", "widths", "=", "numpy", ".", "linspace", "(", "old_w", ",", "self", ".", "w", ",", "pieces", "+", "1", ")", "distances", "=", "numpy", ".", "linspace", "(", "old_distance", ",", "self", ".", "distance", ",", "pieces", "+", "1", ")", "angles", "=", "numpy", ".", "linspace", "(", "initial_angle", ",", "final_angle", ",", "pieces", "+", "1", ")", "if", "(", "self", ".", "w", "!=", "0", ")", "or", "(", "old_w", "!=", "0", ")", ":", "for", "jj", "in", "range", "(", "pieces", ")", ":", "for", "ii", "in", "range", "(", "self", ".", "n", ")", ":", "self", ".", "polygons", ".", "append", "(", "numpy", ".", "zeros", "(", "(", "number_of_points", ",", "2", ")", ")", ")", "r0", "=", "radius", "+", "ii", "*", "distances", "[", "jj", "+", "1", "]", "-", "(", "self", ".", "n", "-", "1", ")", "*", "distances", "[", "jj", "+", "1", "]", "*", "0.5", "old_r0", "=", "radius", "+", "ii", "*", "distances", "[", "jj", "]", "-", "(", "self", ".", "n", "-", "1", ")", "*", "distances", "[", "jj", "]", "*", "0.5", "pts2", "=", "number_of_points", "//", "2", "pts1", "=", "number_of_points", "-", "pts2", "ang", "=", "numpy", ".", "linspace", "(", "angles", "[", "jj", "]", ",", "angles", "[", "jj", "+", "1", "]", ",", "pts1", ")", "rad", "=", "numpy", ".", "linspace", "(", "old_r0", "+", "widths", "[", "jj", "]", ",", "r0", "+", "widths", "[", "jj", "+", "1", "]", ",", "pts1", ")", "self", ".", "polygons", "[", "-", "1", "]", "[", ":", "pts1", ",", "0", "]", "=", "numpy", ".", "cos", "(", "ang", ")", "*", "rad", "+", "cx", "self", ".", "polygons", "[", "-", "1", "]", "[", ":", "pts1", ",", "1", "]", "=", "numpy", ".", "sin", "(", "ang", ")", "*", "rad", "+", "cy", "if", 
"widths", "[", "jj", "+", "1", "]", "==", "0", ":", "pts1", "-=", "1", "pts2", "+=", "1", "if", "widths", "[", "jj", "]", "==", "0", ":", "self", ".", "polygons", "[", "-", "1", "]", "[", ":", "pts1", "-", "1", ",", ":", "]", "=", "numpy", ".", "array", "(", "self", ".", "polygons", "[", "-", "1", "]", "[", "1", ":", "pts1", ",", ":", "]", ")", "pts1", "-=", "1", "pts2", "+=", "1", "ang", "=", "numpy", ".", "linspace", "(", "angles", "[", "jj", "+", "1", "]", ",", "angles", "[", "jj", "]", ",", "pts2", ")", "rad", "=", "numpy", ".", "linspace", "(", "r0", "-", "widths", "[", "jj", "+", "1", "]", ",", "old_r0", "-", "widths", "[", "jj", "]", ",", "pts2", ")", "if", "(", "rad", "[", "0", "]", "<=", "0", "or", "rad", "[", "-", "1", "]", "<=", "0", ")", "and", "warn", ":", "warnings", ".", "warn", "(", "\"[GDSPY] Path arc with width larger than radius \"", "\"created: possible self-intersecting polygon.\"", ",", "stacklevel", "=", "2", ")", "warn", "=", "False", "self", ".", "polygons", "[", "-", "1", "]", "[", "pts1", ":", ",", "0", "]", "=", "numpy", ".", "cos", "(", "ang", ")", "*", "rad", "+", "cx", "self", ".", "polygons", "[", "-", "1", "]", "[", "pts1", ":", ",", "1", "]", "=", "numpy", ".", "sin", "(", "ang", ")", "*", "rad", "+", "cy", "self", ".", "length", "+=", "abs", "(", "(", "angles", "[", "jj", "+", "1", "]", "-", "angles", "[", "jj", "]", ")", "*", "radius", ")", "if", "isinstance", "(", "layer", ",", "list", ")", ":", "self", ".", "layers", ".", "extend", "(", "(", "layer", "*", "(", "self", ".", "n", "//", "len", "(", "layer", ")", "+", "1", ")", ")", "[", ":", "self", ".", "n", "]", ")", "else", ":", "self", ".", "layers", ".", "extend", "(", "layer", "for", "_", "in", "range", "(", "self", ".", "n", ")", ")", "if", "isinstance", "(", "datatype", ",", "list", ")", ":", "self", ".", "datatypes", ".", "extend", "(", "(", "datatype", "*", "(", "self", ".", "n", "//", "len", "(", "datatype", ")", "+", "1", ")", ")", "[", ":", "self", ".", "n", "]", ")", "else", ":", "self", ".", "datatypes", ".", "extend", "(", "datatype", "for", "_", "in", "range", "(", "self", ".", "n", ")", ")", "return", "self" ]
Add a curved section to the path. Parameters ---------- radius : number Central radius of the section. initial_angle : number Initial angle of the curve (in *radians*). final_angle : number Final angle of the curve (in *radians*). number_of_points : integer or float If integer: number of vertices that form the object (polygonal approximation). If float: approximate curvature resolution. The actual number of points is automatically calculated. max_points : integer if ``number_of_points > max_points``, the element will be fractured in smaller polygons with at most ``max_points`` each. final_width : number If set, the paths of this segment will have their widths linearly changed from their current value to this one. final_distance : number If set, the distance between paths is linearly change from its current value to this one along this segment. layer : integer, list The GDSII layer numbers for the elements of each path. If the number of layers in the list is less than the number of paths, the list is repeated. datatype : integer, list The GDSII datatype for the elements of each path (between 0 and 255). If the number of datatypes in the list is less than the number of paths, the list is repeated. Returns ------- out : ``Path`` This object. Notes ----- The GDSII specification supports only a maximum of 199 vertices per polygon.
[ "Add", "a", "curved", "section", "to", "the", "path", "." ]
python
train
45.1875
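A hedged usage sketch for the arc method, assuming the classic gdspy 1.x Path API; the parameter values are illustrative only.

import math
import gdspy

path = gdspy.Path(width=1.0, initial_point=(0, 0))
path.segment(5, '+x')                       # straight section first
path.arc(radius=3, initial_angle=-0.5 * math.pi, final_angle=0,
         number_of_points=0.01)             # float: curvature resolution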
onnx/onnx
onnx/external_data_helper.py
https://github.com/onnx/onnx/blob/2f7dc10f03a072526d94b6820cedbf2a1ec5a2c4/onnx/external_data_helper.py#L197-L209
def remove_external_data_field(tensor, field_key): # type: (TensorProto, Text) -> None """ Remove a field from a Tensor's external_data key-value store. Modifies tensor object in place. @params tensor: Tensor object from which value will be removed field_key: The key of the field to be removed """ for (i, field) in enumerate(tensor.external_data): if field.key == field_key: del tensor.external_data[i]
[ "def", "remove_external_data_field", "(", "tensor", ",", "field_key", ")", ":", "# type: (TensorProto, Text) -> None", "for", "(", "i", ",", "field", ")", "in", "enumerate", "(", "tensor", ".", "external_data", ")", ":", "if", "field", ".", "key", "==", "field_key", ":", "del", "tensor", ".", "external_data", "[", "i", "]" ]
Remove a field from a Tensor's external_data key-value store. Modifies tensor object in place. @params tensor: Tensor object from which value will be removed field_key: The key of the field to be removed
[ "Remove", "a", "field", "from", "a", "Tensor", "s", "external_data", "key", "-", "value", "store", "." ]
python
train
34.384615
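A short sketch exercising the helper, assuming it is imported from onnx.external_data_helper; the key/value pair is illustrative.

from onnx import TensorProto
from onnx.external_data_helper import remove_external_data_field

tensor = TensorProto()
entry = tensor.external_data.add()      # repeated StringStringEntryProto
entry.key, entry.value = 'location', 'weights.bin'
remove_external_data_field(tensor, 'location')
print(len(tensor.external_data))        # 0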
softlayer/softlayer-python
SoftLayer/CLI/environment.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/environment.py#L45-L47
def err(self, output, newline=True): """Outputs an error string to the console (stderr).""" click.echo(output, nl=newline, err=True)
[ "def", "err", "(", "self", ",", "output", ",", "newline", "=", "True", ")", ":", "click", ".", "echo", "(", "output", ",", "nl", "=", "newline", ",", "err", "=", "True", ")" ]
Outputs an error string to the console (stderr).
[ "Outputs", "an", "error", "string", "to", "the", "console", "(", "stderr", ")", "." ]
python
train
48.666667
kubernetes-client/python
kubernetes/client/apis/rbac_authorization_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/rbac_authorization_v1_api.py#L845-L871
def delete_collection_cluster_role_binding(self, **kwargs): """ delete collection of ClusterRoleBinding This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_cluster_role_binding(async_req=True) >>> result = thread.get() :param async_req bool :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_cluster_role_binding_with_http_info(**kwargs) else: (data) = self.delete_collection_cluster_role_binding_with_http_info(**kwargs) return data
[ "def", "delete_collection_cluster_role_binding", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "delete_collection_cluster_role_binding_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "delete_collection_cluster_role_binding_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
delete collection of ClusterRoleBinding This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_cluster_role_binding(async_req=True) >>> result = thread.get() :param async_req bool :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread.
[ "delete", "collection", "of", "ClusterRoleBinding", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "delete_collection_cluster_role_binding", "(", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
169.111111
CityOfZion/neo-python
neo/Core/Helper.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/Helper.py#L93-L111
def ToStream(value): """ Serialize the given `value` to an array of bytes. Args: value (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin. Returns: bytes: not hexlified """ ms = StreamManager.GetStream() writer = BinaryWriter(ms) value.Serialize(writer) retVal = ms.getvalue() StreamManager.ReleaseStream(ms) return retVal
[ "def", "ToStream", "(", "value", ")", ":", "ms", "=", "StreamManager", ".", "GetStream", "(", ")", "writer", "=", "BinaryWriter", "(", "ms", ")", "value", ".", "Serialize", "(", "writer", ")", "retVal", "=", "ms", ".", "getvalue", "(", ")", "StreamManager", ".", "ReleaseStream", "(", "ms", ")", "return", "retVal" ]
Serialize the given `value` to an array of bytes. Args: value (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin. Returns: bytes: not hexlified
[ "Serialize", "the", "given", "value", "to", "a", "an", "array", "of", "bytes", "." ]
python
train
23.263158
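A dependency-free sketch of the same serialize-to-bytes pattern, with io.BytesIO and struct standing in for StreamManager and BinaryWriter (stand-ins, not the neo API).

import io
import struct

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

    def Serialize(self, stream):
        stream.write(struct.pack('<II', self.x, self.y))

ms = io.BytesIO()
Point(1, 2).Serialize(ms)
print(ms.getvalue())  # b'\x01\x00\x00\x00\x02\x00\x00\x00'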
saltstack/salt
salt/modules/boto_vpc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_vpc.py#L2853-L2888
def describe_vpc_peering_connection(name, region=None, key=None, keyid=None, profile=None): ''' Returns any VPC peering connection id(s) for the given VPC peering connection name. VPC peering connection ids are only returned for connections that are in the ``active``, ``pending-acceptance`` or ``provisioning`` state. .. versionadded:: 2016.11.0 :param name: The string name for this VPC peering connection :param region: The aws region to use :param key: Your aws key :param keyid: The key id associated with this aws account :param profile: The profile to use :return: dict CLI Example: .. code-block:: bash salt myminion boto_vpc.describe_vpc_peering_connection salt-vpc # Specify a region salt myminion boto_vpc.describe_vpc_peering_connection salt-vpc region=us-west-2 ''' conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile) return { 'VPC-Peerings': _get_peering_connection_ids(name, conn) }
[ "def", "describe_vpc_peering_connection", "(", "name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn3", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "return", "{", "'VPC-Peerings'", ":", "_get_peering_connection_ids", "(", "name", ",", "conn", ")", "}" ]
Returns any VPC peering connection id(s) for the given VPC peering connection name. VPC peering connection ids are only returned for connections that are in the ``active``, ``pending-acceptance`` or ``provisioning`` state. .. versionadded:: 2016.11.0 :param name: The string name for this VPC peering connection :param region: The aws region to use :param key: Your aws key :param keyid: The key id associated with this aws account :param profile: The profile to use :return: dict CLI Example: .. code-block:: bash salt myminion boto_vpc.describe_vpc_peering_connection salt-vpc # Specify a region salt myminion boto_vpc.describe_vpc_peering_connection salt-vpc region=us-west-2
[ "Returns", "any", "VPC", "peering", "connection", "id", "(", "s", ")", "for", "the", "given", "VPC", "peering", "connection", "name", "." ]
python
train
32.305556
etalab/cada
cada/commands.py
https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/commands.py#L60-L67
def echo(msg, *args, **kwargs): '''Wraps click.echo, handles formatting and checks encoding''' file = kwargs.pop('file', None) nl = kwargs.pop('nl', True) err = kwargs.pop('err', False) color = kwargs.pop('color', None) msg = safe_unicode(msg).format(*args, **kwargs) click.echo(msg, file=file, nl=nl, err=err, color=color)
[ "def", "echo", "(", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "file", "=", "kwargs", ".", "pop", "(", "'file'", ",", "None", ")", "nl", "=", "kwargs", ".", "pop", "(", "'nl'", ",", "True", ")", "err", "=", "kwargs", ".", "pop", "(", "'err'", ",", "False", ")", "color", "=", "kwargs", ".", "pop", "(", "'color'", ",", "None", ")", "msg", "=", "safe_unicode", "(", "msg", ")", ".", "format", "(", "*", "args", ",", "*", "*", "kwargs", ")", "click", ".", "echo", "(", "msg", ",", "file", "=", "file", ",", "nl", "=", "nl", ",", "err", "=", "err", ",", "color", "=", "color", ")" ]
Wraps click.echo, handles formatting and checks encoding
[ "Wraps", "click", ".", "echo", "handles", "formatting", "and", "check", "encoding" ]
python
train
42.875
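A minimal stand-in showing why echo pops its own keywords first: everything left in kwargs is fed to str.format (print stands in for click.echo here).

def echo(msg, *args, **kwargs):
    err = kwargs.pop('err', False)       # consume echo-specific options first
    print(msg.format(*args, **kwargs))   # remaining kwargs feed str.format

echo('{n} dossiers', n=3, err=True)      # prints: 3 dossiers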
mediaburst/clockwork-python
clockwork/clockwork.py
https://github.com/mediaburst/clockwork-python/blob/7f8368bbed1fcb5218584fbc5094d93c6aa365d1/clockwork/clockwork.py#L69-L115
def send(self, messages): """Send an SMS message, or an array of SMS messages""" tmpSms = SMS(to='', message='') if str(type(messages)) == str(type(tmpSms)): messages = [messages] xml_root = self.__init_xml('Message') wrapper_id = 0 for m in messages: m.wrapper_id = wrapper_id wrapper_id += 1 msg = self.__build_sms_data(m) sms = etree.SubElement(xml_root, 'SMS') for sms_element in msg: element = etree.SubElement(sms, sms_element) element.text = msg[sms_element] # print etree.tostring(xml_root) response = clockwork_http.request(SMS_URL, etree.tostring(xml_root, encoding='utf-8')) response_data = response['data'] # print response_data data_etree = etree.fromstring(response_data) # Check for general error err_desc = data_etree.find('ErrDesc') if err_desc is not None: raise clockwork_exceptions.ApiException(err_desc.text, data_etree.find('ErrNo').text) # Return a consistent object results = [] for sms in data_etree: matching_sms = next((s for s in messages if str(s.wrapper_id) == sms.find('WrapperID').text), None) new_result = SMSResponse( sms = matching_sms, id = '' if sms.find('MessageID') is None else sms.find('MessageID').text, error_code = 0 if sms.find('ErrNo') is None else sms.find('ErrNo').text, error_message = '' if sms.find('ErrDesc') is None else sms.find('ErrDesc').text, success = True if sms.find('ErrNo') is None else (sms.find('ErrNo').text == '0') ) results.append(new_result) if len(results) > 1: return results return results[0]
[ "def", "send", "(", "self", ",", "messages", ")", ":", "tmpSms", "=", "SMS", "(", "to", "=", "''", ",", "message", "=", "''", ")", "if", "str", "(", "type", "(", "messages", ")", ")", "==", "str", "(", "type", "(", "tmpSms", ")", ")", ":", "messages", "=", "[", "messages", "]", "xml_root", "=", "self", ".", "__init_xml", "(", "'Message'", ")", "wrapper_id", "=", "0", "for", "m", "in", "messages", ":", "m", ".", "wrapper_id", "=", "wrapper_id", "msg", "=", "self", ".", "__build_sms_data", "(", "m", ")", "sms", "=", "etree", ".", "SubElement", "(", "xml_root", ",", "'SMS'", ")", "for", "sms_element", "in", "msg", ":", "element", "=", "etree", ".", "SubElement", "(", "sms", ",", "sms_element", ")", "element", ".", "text", "=", "msg", "[", "sms_element", "]", "# print etree.tostring(xml_root)", "response", "=", "clockwork_http", ".", "request", "(", "SMS_URL", ",", "etree", ".", "tostring", "(", "xml_root", ",", "encoding", "=", "'utf-8'", ")", ")", "response_data", "=", "response", "[", "'data'", "]", "# print response_data", "data_etree", "=", "etree", ".", "fromstring", "(", "response_data", ")", "# Check for general error", "err_desc", "=", "data_etree", ".", "find", "(", "'ErrDesc'", ")", "if", "err_desc", "is", "not", "None", ":", "raise", "clockwork_exceptions", ".", "ApiException", "(", "err_desc", ".", "text", ",", "data_etree", ".", "find", "(", "'ErrNo'", ")", ".", "text", ")", "# Return a consistent object", "results", "=", "[", "]", "for", "sms", "in", "data_etree", ":", "matching_sms", "=", "next", "(", "(", "s", "for", "s", "in", "messages", "if", "str", "(", "s", ".", "wrapper_id", ")", "==", "sms", ".", "find", "(", "'WrapperID'", ")", ".", "text", ")", ",", "None", ")", "new_result", "=", "SMSResponse", "(", "sms", "=", "matching_sms", ",", "id", "=", "''", "if", "sms", ".", "find", "(", "'MessageID'", ")", "is", "None", "else", "sms", ".", "find", "(", "'MessageID'", ")", ".", "text", ",", "error_code", "=", "0", "if", "sms", ".", "find", "(", "'ErrNo'", ")", "is", "None", "else", "sms", ".", "find", "(", "'ErrNo'", ")", ".", "text", ",", "error_message", "=", "''", "if", "sms", ".", "find", "(", "'ErrDesc'", ")", "is", "None", "else", "sms", ".", "find", "(", "'ErrDesc'", ")", ".", "text", ",", "success", "=", "True", "if", "sms", ".", "find", "(", "'ErrNo'", ")", "is", "None", "else", "(", "sms", ".", "find", "(", "'ErrNo'", ")", ".", "text", "==", "0", ")", ")", "results", ".", "append", "(", "new_result", ")", "if", "len", "(", "results", ")", ">", "1", ":", "return", "results", "return", "results", "[", "0", "]" ]
Send an SMS message, or an array of SMS messages
[ "Send", "a", "SMS", "message", "or", "an", "array", "of", "SMS", "messages" ]
python
train
38.361702
joke2k/faker
faker/providers/ssn/en_US/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/ssn/en_US/__init__.py#L13-L33
def itin(self): """Generate a random United States Individual Taxpayer Identification Number (ITIN). A United States Individual Taxpayer Identification Number (ITIN) is a tax processing number issued by the Internal Revenue Service. It is a nine-digit number that always begins with the number 9 and has a range of 70-88 in the fourth and fifth digit. Effective April 12, 2011, the range was extended to include 900-70-0000 through 999-88-9999, 900-90-0000 through 999-92-9999 and 900-94-0000 through 999-99-9999. https://www.irs.gov/individuals/international-taxpayers/general-itin-information """ area = self.random_int(min=900, max=999) serial = self.random_int(min=0, max=9999) # The group number must be between 70 and 99 inclusively but not 89 or 93 group = random.choice([x for x in range(70, 100) if x not in [89, 93]]) itin = "{0:03d}-{1:02d}-{2:04d}".format(area, group, serial) return itin
[ "def", "itin", "(", "self", ")", ":", "area", "=", "self", ".", "random_int", "(", "min", "=", "900", ",", "max", "=", "999", ")", "serial", "=", "self", ".", "random_int", "(", "min", "=", "0", ",", "max", "=", "9999", ")", "# The group number must be between 70 and 99 inclusively but not 89 or 93", "group", "=", "random", ".", "choice", "(", "[", "x", "for", "x", "in", "range", "(", "70", ",", "100", ")", "if", "x", "not", "in", "[", "89", ",", "93", "]", "]", ")", "itin", "=", "\"{0:03d}-{1:02d}-{2:04d}\"", ".", "format", "(", "area", ",", "group", ",", "serial", ")", "return", "itin" ]
Generate a random United States Individual Taxpayer Identification Number (ITIN). A United States Individual Taxpayer Identification Number (ITIN) is a tax processing number issued by the Internal Revenue Service. It is a nine-digit number that always begins with the number 9 and has a range of 70-88 in the fourth and fifth digit. Effective April 12, 2011, the range was extended to include 900-70-0000 through 999-88-9999, 900-90-0000 through 999-92-9999 and 900-94-0000 through 999-99-9999. https://www.irs.gov/individuals/international-taxpayers/general-itin-information
[ "Generate", "a", "random", "United", "States", "Individual", "Taxpayer", "Identification", "Number", "(", "ITIN", ")", "." ]
python
train
48.238095
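The same ITIN construction as a standalone snippet (pure random module, no Faker needed):

import random

area = random.randint(900, 999)
group = random.choice([x for x in range(70, 100) if x not in (89, 93)])
serial = random.randint(0, 9999)
print("{0:03d}-{1:02d}-{2:04d}".format(area, group, serial))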
narfman0/helga-markovify
helga_markovify/markov.py
https://github.com/narfman0/helga-markovify/blob/b5a82de070102e6da1fd3f5f81cad12d0a9185d8/helga_markovify/markov.py#L15-L21
def ingest(topic, text, **kwargs): """ Ingest the given text for the topic """ if not text: raise ValueError('No text given to ingest for topic: ' + topic) data = {'topic': topic, 'text': text.strip()} data.update(kwargs) db.markovify.insert(data)
[ "def", "ingest", "(", "topic", ",", "text", ",", "*", "*", "kwargs", ")", ":", "if", "not", "text", ":", "raise", "ValueError", "(", "'No text given to ingest for topic: '", "+", "topic", ")", "data", "=", "{", "'topic'", ":", "topic", ",", "'text'", ":", "text", ".", "strip", "(", ")", "}", "data", ".", "update", "(", "kwargs", ")", "db", ".", "markovify", ".", "insert", "(", "data", ")" ]
Ingest the given text for the topic
[ "Ingest", "the", "given", "text", "for", "the", "topic" ]
python
train
38.428571
codelv/enaml-native
src/enamlnative/android/android_sensors.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_sensors.py#L131-L155
def start(self, callback, rate=SENSOR_DELAY_NORMAL): """ Start listening to sensor events. Sensor event data depends on the type of sensor that was given to Parameters ---------- callback: Callable A callback that takes one argument that will be passed the sensor data. Sensor data is a dict with data based on the type of sensor. rate: Integer How fast to update. One of the Sensor.SENSOR_DELAY values Returns ------- result: Future A future that resolves to whether the register call completed. """ if not self.manager: raise RuntimeError( "Cannot start a sensor without a SensorManager!") self.onSensorChanged.connect(callback) return self.manager.registerListener(self.getId(), self, rate)
[ "def", "start", "(", "self", ",", "callback", ",", "rate", "=", "SENSOR_DELAY_NORMAL", ")", ":", "if", "not", "self", ".", "manager", ":", "raise", "RuntimeError", "(", "\"Cannot start a sensor without a SensorManager!\"", ")", "self", ".", "onSensorChanged", ".", "connect", "(", "callback", ")", "return", "self", ".", "manager", ".", "registerListener", "(", "self", ".", "getId", "(", ")", ",", "self", ",", "rate", ")" ]
Start listening to sensor events. Sensor event data depends on the type of sensor that was given to Parameters ---------- callback: Callable A callback that takes one argument that will be passed the sensor data. Sensor data is a dict with data based on the type of sensor. rate: Integer How fast to update. One of the Sensor.SENSOR_DELAY values Returns ------- result: Future A future that resolves to whether the register call completed.
[ "Start", "listening", "to", "sensor", "events", ".", "Sensor", "event", "data", "depends", "on", "the", "type", "of", "sensor", "that", "was", "given", "to", "Parameters", "----------", "callback", ":", "Callable", "A", "callback", "that", "takes", "one", "argument", "that", "will", "be", "passed", "the", "sensor", "data", ".", "Sensor", "data", "is", "a", "dict", "with", "data", "based", "on", "the", "type", "of", "sensor", ".", "rate", ":", "Integer", "How", "fast", "to", "update", ".", "One", "of", "the", "Sensor", ".", "SENSOR_DELAY", "values", "Returns", "-------", "result", ":", "Future", "A", "future", "that", "resolves", "to", "whether", "the", "register", "call", "completed", "." ]
python
train
37.6
hharnisc/python-ddp
DDPClient.py
https://github.com/hharnisc/python-ddp/blob/00bdf33c20ecba56623890515381154e14c5b757/DDPClient.py#L43-L47
def _debug_log(self, msg): """Debug log messages if debug=True""" if not self.debug: return sys.stderr.write('{}\n'.format(msg))
[ "def", "_debug_log", "(", "self", ",", "msg", ")", ":", "if", "not", "self", ".", "debug", ":", "return", "sys", ".", "stderr", ".", "write", "(", "'{}\\n'", ".", "format", "(", "msg", ")", ")" ]
Debug log messages if debug=True
[ "Debug", "log", "messages", "if", "debug", "=", "True" ]
python
train
32
TrafficSenseMSD/SumoTools
traci/_vehicle.py
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_vehicle.py#L674-L682
def getLaneChangeState(self, vehID, direction): """getLaneChangeState(string, int) -> (int, int) Return the lane change state for the vehicle """ self._connection._beginMessage( tc.CMD_GET_VEHICLE_VARIABLE, tc.CMD_CHANGELANE, vehID, 1 + 4) self._connection._string += struct.pack("!Bi", tc.TYPE_INTEGER, direction) result = self._connection._checkResult(tc.CMD_GET_VEHICLE_VARIABLE, tc.CMD_CHANGELANE, vehID) return result.read("!iBiBi")[2::2]
[ "def", "getLaneChangeState", "(", "self", ",", "vehID", ",", "direction", ")", ":", "self", ".", "_connection", ".", "_beginMessage", "(", "tc", ".", "CMD_GET_VEHICLE_VARIABLE", ",", "tc", ".", "CMD_CHANGELANE", ",", "vehID", ",", "1", "+", "4", ")", "self", ".", "_connection", ".", "_string", "+=", "struct", ".", "pack", "(", "\"!Bi\"", ",", "tc", ".", "TYPE_INTEGER", ",", "direction", ")", "result", "=", "self", ".", "_connection", ".", "_checkResult", "(", "tc", ".", "CMD_GET_VEHICLE_VARIABLE", ",", "tc", ".", "CMD_CHANGELANE", ",", "vehID", ")", "return", "result", ".", "read", "(", "\"!iBiBi\"", ")", "[", "2", ":", ":", "2", "]" ]
getLaneChangeState(string, int) -> (int, int) Return the lane change state for the vehicle
[ "getLaneChangeState", "(", "string", "int", ")", "-", ">", "(", "int", "int", ")", "Return", "the", "lane", "change", "state", "for", "the", "vehicle" ]
python
train
55.888889
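A standalone struct sketch of the [2::2] slice above: the payload carries an int header plus (type byte, int) pairs, and the slice keeps only the two ints.

import struct

payload = struct.pack('!iBiBi', 7, 0x09, 1, 0x09, 2)  # header, then typed ints
values = struct.unpack('!iBiBi', payload)
print(values[2::2])  # (1, 2) -- skips the header and both type bytes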
CivicSpleen/ambry
ambry/bundle/process.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/process.py#L258-L260
def start(self, phase, stage, **kwargs): """Start a new routine, stage or phase""" return ProgressSection(self, self._session, phase, stage, self._logger, **kwargs)
[ "def", "start", "(", "self", ",", "phase", ",", "stage", ",", "*", "*", "kwargs", ")", ":", "return", "ProgressSection", "(", "self", ",", "self", ".", "_session", ",", "phase", ",", "stage", ",", "self", ".", "_logger", ",", "*", "*", "kwargs", ")" ]
Start a new routine, stage or phase
[ "Start", "a", "new", "routine", "stage", "or", "phase" ]
python
train
59.333333
angr/angr
angr/state_plugins/heap/heap_libc.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/heap/heap_libc.py#L27-L36
def calloc(self, sim_nmemb, sim_size): """ A somewhat faithful implementation of libc `calloc`. :param sim_nmemb: the number of elements to allocate :param sim_size: the size of each element (in bytes) :returns: the address of the allocation, or a NULL pointer if the allocation failed """ raise NotImplementedError("%s not implemented for %s" % (self.calloc.__func__.__name__, self.__class__.__name__))
[ "def", "calloc", "(", "self", ",", "sim_nmemb", ",", "sim_size", ")", ":", "raise", "NotImplementedError", "(", "\"%s not implemented for %s\"", "%", "(", "self", ".", "calloc", ".", "__func__", ".", "__name__", ",", "self", ".", "__class__", ".", "__name__", ")", ")" ]
A somewhat faithful implementation of libc `calloc`. :param sim_nmemb: the number of elements to allocate :param sim_size: the size of each element (in bytes) :returns: the address of the allocation, or a NULL pointer if the allocation failed
[ "A", "somewhat", "faithful", "implementation", "of", "libc", "calloc", "." ]
python
train
52.6
fabioz/PyDev.Debugger
_pydev_imps/_pydev_BaseHTTPServer.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydev_imps/_pydev_BaseHTTPServer.py#L302-L335
def handle_one_request(self): """Handle a single HTTP request. You normally don't need to override this method; see the class __doc__ string for information on how to handle specific HTTP commands such as GET and POST. """ try: self.raw_requestline = self.rfile.readline(65537) if len(self.raw_requestline) > 65536: self.requestline = '' self.request_version = '' self.command = '' self.send_error(414) return if not self.raw_requestline: self.close_connection = 1 return if not self.parse_request(): # An error code has been sent, just exit return mname = 'do_' + self.command if not hasattr(self, mname): self.send_error(501, "Unsupported method (%r)" % self.command) return method = getattr(self, mname) method() self.wfile.flush() #actually send the response if not already done. except socket.timeout: #a read or a write timed out. Discard this connection self.log_error("Request timed out: %r", sys.exc_info()[1]) self.close_connection = 1 return
[ "def", "handle_one_request", "(", "self", ")", ":", "try", ":", "self", ".", "raw_requestline", "=", "self", ".", "rfile", ".", "readline", "(", "65537", ")", "if", "len", "(", "self", ".", "raw_requestline", ")", ">", "65536", ":", "self", ".", "requestline", "=", "''", "self", ".", "request_version", "=", "''", "self", ".", "command", "=", "''", "self", ".", "send_error", "(", "414", ")", "return", "if", "not", "self", ".", "raw_requestline", ":", "self", ".", "close_connection", "=", "1", "return", "if", "not", "self", ".", "parse_request", "(", ")", ":", "# An error code has been sent, just exit", "return", "mname", "=", "'do_'", "+", "self", ".", "command", "if", "not", "hasattr", "(", "self", ",", "mname", ")", ":", "self", ".", "send_error", "(", "501", ",", "\"Unsupported method (%r)\"", "%", "self", ".", "command", ")", "return", "method", "=", "getattr", "(", "self", ",", "mname", ")", "method", "(", ")", "self", ".", "wfile", ".", "flush", "(", ")", "#actually send the response if not already done.", "except", "socket", ".", "timeout", ":", "#a read or a write timed out. Discard this connection", "self", ".", "log_error", "(", "\"Request timed out: %r\"", ",", "sys", ".", "exc_info", "(", ")", "[", "1", "]", ")", "self", ".", "close_connection", "=", "1", "return" ]
Handle a single HTTP request. You normally don't need to override this method; see the class __doc__ string for information on how to handle specific HTTP commands such as GET and POST.
[ "Handle", "a", "single", "HTTP", "request", "." ]
python
train
38.529412
dhain/potpy
potpy/wsgi.py
https://github.com/dhain/potpy/blob/e39a5a84f763fbf144b07a620afb02a5ff3741c9/potpy/wsgi.py#L88-L99
def reverse(self, *args, **kwargs): """Look up a path by name and fill in the provided parameters. Example: >>> handler = lambda: None # just a bogus handler >>> router = PathRouter(('post', '/posts/{slug}', handler)) >>> router.reverse('post', slug='my-post') '/posts/my-post' """ (name,) = args return self._templates[name].fill(**kwargs)
[ "def", "reverse", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "(", "name", ",", ")", "=", "args", "return", "self", ".", "_templates", "[", "name", "]", ".", "fill", "(", "*", "*", "kwargs", ")" ]
Look up a path by name and fill in the provided parameters. Example: >>> handler = lambda: None # just a bogus handler >>> router = PathRouter(('post', '/posts/{slug}', handler)) >>> router.reverse('post', slug='my-post') '/posts/my-post'
[ "Look", "up", "a", "path", "by", "name", "and", "fill", "in", "the", "provided", "parameters", "." ]
python
train
35
OSSOS/MOP
src/ossos/utils/get_image_lists.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/utils/get_image_lists.py#L17-L46
def main(): """ Input asteroid family, filter type, and image type to query SSOIS """ parser = argparse.ArgumentParser(description='Run SSOIS and return the available images in a particular filter.') parser.add_argument("--filter", action="store", default='r', dest="filter", choices=['r', 'u'], help="Passband: default is r.") parser.add_argument("--family", '-f', action="store", default=None, help='List of objects to query.') parser.add_argument("--member", '-m', action="store", default=None, help='Member object of family to query.') args = parser.parse_args() if args.family != None and args.member == None: get_family_info(str(args.family), args.filter) elif args.family == None and args.member != None: get_member_info(str(args.member), args.filter) else: print "Please input either a family or single member name"
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Run SSOIS and return the available images in a particular filter.'", ")", "parser", ".", "add_argument", "(", "\"--filter\"", ",", "action", "=", "\"store\"", ",", "default", "=", "'r'", ",", "dest", "=", "\"filter\"", ",", "choices", "=", "[", "'r'", ",", "'u'", "]", ",", "help", "=", "\"Passband: default is r.\"", ")", "parser", ".", "add_argument", "(", "\"--family\"", ",", "'-f'", ",", "action", "=", "\"store\"", ",", "default", "=", "None", ",", "help", "=", "'List of objects to query.'", ")", "parser", ".", "add_argument", "(", "\"--member\"", ",", "'-m'", ",", "action", "=", "\"store\"", ",", "default", "=", "None", ",", "help", "=", "'Member object of family to query.'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "args", ".", "family", "!=", "None", "and", "args", ".", "member", "==", "None", ":", "get_family_info", "(", "str", "(", "args", ".", "family", ")", ",", "args", ".", "filter", ")", "elif", "args", ".", "family", "==", "None", "and", "args", ".", "member", "!=", "None", ":", "get_member_info", "(", "str", "(", "args", ".", "member", ")", ",", "args", ".", "filter", ")", "else", ":", "print", "\"Please input either a family or single member name\"" ]
Input asteroid family, filter type, and image type to query SSOIS
[ "Input", "asteroid", "family", "filter", "type", "and", "image", "type", "to", "query", "SSOIS" ]
python
train
37.666667
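The hand-rolled family-or-member check in main() can also be expressed with argparse's mutually exclusive groups; a hypothetical Python 3 sketch (not part of the OSSOS code):

import argparse

def main():
    parser = argparse.ArgumentParser(
        description='Run SSOIS and return the available images in a particular filter.')
    parser.add_argument('--filter', action='store', default='r',
                        choices=['r', 'u'], help='Passband: default is r.')
    # A mutually exclusive, required group enforces the family-or-member rule
    # that the original function checks by hand; argparse reports the error itself.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--family', '-f', help='List of objects to query.')
    group.add_argument('--member', '-m', help='Member object of family to query.')
    args = parser.parse_args()
    print(args.filter, args.family or args.member)

if __name__ == '__main__':
    main()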
rushter/heamy
heamy/dataset.py
https://github.com/rushter/heamy/blob/c330854cee3c547417eb353a4a4a23331b40b4bc/heamy/dataset.py#L326-L329
def to_csc(self): """Convert Dataset to scipy's Compressed Sparse Column matrix.""" self._X_train = csc_matrix(self._X_train) self._X_test = csc_matrix(self._X_test)
[ "def", "to_csc", "(", "self", ")", ":", "self", ".", "_X_train", "=", "csc_matrix", "(", "self", ".", "_X_train", ")", "self", ".", "_X_test", "=", "csc_matrix", "(", "self", ".", "_X_test", ")" ]
Convert Dataset to scipy's Compressed Sparse Column matrix.
[ "Convert", "Dataset", "to", "scipy", "s", "Compressed", "Sparse", "Column", "matrix", "." ]
python
train
46.5
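to_csc() is a thin wrapper over scipy.sparse; a standalone sketch of the same conversion on illustrative train/test arrays:

import numpy as np
from scipy.sparse import csc_matrix

X_train = np.array([[0., 1., 0.], [2., 0., 0.]])
X_test = np.array([[0., 0., 3.]])

# Column-compressed storage: efficient column slicing, useful for
# estimators that scan features column by column.
X_train_csc = csc_matrix(X_train)
X_test_csc = csc_matrix(X_test)

print(X_train_csc.nnz, X_test_csc.nnz)  # 2 1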
BYU-PCCL/holodeck
holodeck/shmem.py
https://github.com/BYU-PCCL/holodeck/blob/01acd4013f5acbd9f61fbc9caaafe19975e8b121/holodeck/shmem.py#L51-L58
def unlink(self): """unlinks the shared memory""" if os.name == "posix": self.__linux_unlink__() elif os.name == "nt": self.__windows_unlink__() else: raise HolodeckException("Currently unsupported os: " + os.name)
[ "def", "unlink", "(", "self", ")", ":", "if", "os", ".", "name", "==", "\"posix\"", ":", "self", ".", "__linux_unlink__", "(", ")", "elif", "os", ".", "name", "==", "\"nt\"", ":", "self", ".", "__windows_unlink__", "(", ")", "else", ":", "raise", "HolodeckException", "(", "\"Currently unsupported os: \"", "+", "os", ".", "name", ")" ]
unlinks the shared memory
[ "unlinks", "the", "shared", "memory" ]
python
train
34.375
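The platform dispatch above is a plain os.name switch; a generic, self-contained rendering of the pattern (HolodeckException replaced by a stand-in):

import os

class SharedMemoryError(Exception):
    pass

def unlink(posix_cleanup, windows_cleanup):
    # Dispatch on os.name exactly as the snippet does; any other
    # platform is rejected explicitly rather than silently ignored.
    if os.name == "posix":
        posix_cleanup()
    elif os.name == "nt":
        windows_cleanup()
    else:
        raise SharedMemoryError("Currently unsupported os: " + os.name)

unlink(lambda: print("posix unlink"), lambda: print("windows unlink"))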
saltstack/salt
salt/states/zfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zfs.py#L810-L847
def _schedule_snapshot_retrieve(dataset, prefix, snapshots):
    '''
    Update snapshots dict with current snapshots

    dataset: string
        name of filesystem or volume
    prefix : string
        prefix for the snapshots
        e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
    snapshots : OrderedDict
        preseeded OrderedDict with configuration

    '''
    ## NOTE: retrieve all snapshots for the dataset
    for snap in sorted(__salt__['zfs.list'](dataset, **{'recursive': True, 'depth': 1, 'type': 'snapshot'}).keys()):
        ## NOTE: we only want the actual name
        ##       myzpool/data@zbck-20171201_000248 -> zbck-20171201_000248
        snap_name = snap[snap.index('@')+1:]

        ## NOTE: we only want snapshots matching our prefix
        if not snap_name.startswith('{0}-'.format(prefix)):
            continue

        ## NOTE: retrieve the holds for this snapshot
        snap_holds = __salt__['zfs.holds'](snap)

        ## NOTE: this snapshot has no holds, eligible for pruning
        if not snap_holds:
            snapshots['_prunable'].append(snap)

        ## NOTE: update snapshots based on holds (if any)
        ##       we are only interested in the ones from our schedule
        ##       if we find any others we skip them
        for hold in snap_holds:
            if hold in snapshots['_schedule'].keys():
                snapshots[hold].append(snap)

    return snapshots
[ "def", "_schedule_snapshot_retrieve", "(", "dataset", ",", "prefix", ",", "snapshots", ")", ":", "## NOTE: retrieve all snapshots for the dataset", "for", "snap", "in", "sorted", "(", "__salt__", "[", "'zfs.list'", "]", "(", "dataset", ",", "*", "*", "{", "'recursive'", ":", "True", ",", "'depth'", ":", "1", ",", "'type'", ":", "'snapshot'", "}", ")", ".", "keys", "(", ")", ")", ":", "## NOTE: we only want the actualy name", "## myzpool/data@zbck-20171201_000248 -> zbck-20171201_000248", "snap_name", "=", "snap", "[", "snap", ".", "index", "(", "'@'", ")", "+", "1", ":", "]", "## NOTE: we only want snapshots matching our prefix", "if", "not", "snap_name", ".", "startswith", "(", "'{0}-'", ".", "format", "(", "prefix", ")", ")", ":", "continue", "## NOTE: retrieve the holds for this snapshot", "snap_holds", "=", "__salt__", "[", "'zfs.holds'", "]", "(", "snap", ")", "## NOTE: this snapshot has no holds, eligable for pruning", "if", "not", "snap_holds", ":", "snapshots", "[", "'_prunable'", "]", ".", "append", "(", "snap", ")", "## NOTE: update snapshots based on holds (if any)", "## we are only interested in the ones from our schedule", "## if we find any others we skip them", "for", "hold", "in", "snap_holds", ":", "if", "hold", "in", "snapshots", "[", "'_schedule'", "]", ".", "keys", "(", ")", ":", "snapshots", "[", "hold", "]", ".", "append", "(", "snap", ")", "return", "snapshots" ]
Update snapshots dict with current snapshots dataset: string name of filesystem or volume prefix : string prefix for the snapshots e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm' snapshots : OrderedDict preseeded OrderedDict with configuration
[ "Update", "snapshots", "dict", "with", "current", "snapshots" ]
python
train
37.184211
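The prefix filtering and hold bucketing can be exercised without Salt's __salt__ loader; a sketch with hypothetical stand-in inputs for zfs.list and zfs.holds:

from collections import OrderedDict

def bucket_snapshots(all_snaps, holds_by_snap, prefix, snapshots):
    # all_snaps: iterable of 'pool/fs@name' strings; holds_by_snap: dict of
    # snapshot -> list of hold tags. Stand-ins for the __salt__ zfs calls.
    for snap in sorted(all_snaps):
        snap_name = snap[snap.index('@') + 1:]
        if not snap_name.startswith('{0}-'.format(prefix)):
            continue
        snap_holds = holds_by_snap.get(snap, [])
        if not snap_holds:
            snapshots['_prunable'].append(snap)
        for hold in snap_holds:
            if hold in snapshots['_schedule']:
                snapshots[hold].append(snap)
    return snapshots

snaps = OrderedDict([('_schedule', {'daily': None}), ('_prunable', []), ('daily', [])])
result = bucket_snapshots(
    ['tank/data@zbck-20171201_0002', 'tank/data@other-123'],
    {'tank/data@zbck-20171201_0002': ['daily']},
    'zbck', snaps)
print(result['daily'], result['_prunable'])  # ['tank/data@zbck-20171201_0002'] []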
jleclanche/fireplace
fireplace/game.py
https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/game.py#L193-L201
def queue_actions(self, source, actions, event_args=None): """ Queue a list of \a actions for processing from \a source. Triggers an aura refresh afterwards. """ source.event_args = event_args ret = self.trigger_actions(source, actions) source.event_args = None return ret
[ "def", "queue_actions", "(", "self", ",", "source", ",", "actions", ",", "event_args", "=", "None", ")", ":", "source", ".", "event_args", "=", "event_args", "ret", "=", "self", ".", "trigger_actions", "(", "source", ",", "actions", ")", "source", ".", "event_args", "=", "None", "return", "ret" ]
Queue a list of \a actions for processing from \a source. Triggers an aura refresh afterwards.
[ "Queue", "a", "list", "of", "\\", "a", "actions", "for", "processing", "from", "\\", "a", "source", ".", "Triggers", "an", "aura", "refresh", "afterwards", "." ]
python
train
31.111111
pip-services/pip-services-commons-python
pip_services_commons/commands/InterceptedCommand.py
https://github.com/pip-services/pip-services-commons-python/blob/2205b18c45c60372966c62c1f23ac4fbc31e11b3/pip_services_commons/commands/InterceptedCommand.py#L42-L55
def execute(self, correlation_id, args): """ Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: MicroserviceError: when execution fails for whatever reason. """ return self._intercepter.execute(_next, correlation_id, args)
[ "def", "execute", "(", "self", ",", "correlation_id", ",", "args", ")", ":", "return", "self", ".", "_intercepter", ".", "execute", "(", "_next", ",", "correlation_id", ",", "args", ")" ]
Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: MicroserviceError: when execution fails for whatever reason.
[ "Executes", "the", "command", "given", "specific", "arguments", "as", "an", "input", ".", "Args", ":", "correlation_id", ":", "a", "unique", "correlation", "/", "transaction", "id", "args", ":", "command", "arguments", "Returns", ":", "an", "execution", "result", ".", "Raises", ":", "MicroserviceError", ":", "when", "execution", "fails", "for", "whatever", "reason", "." ]
python
valid
33.5
pgmpy/pgmpy
pgmpy/models/LinearGaussianBayesianNetwork.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/models/LinearGaussianBayesianNetwork.py#L129-L189
def to_joint_gaussian(self): """ The linear Gaussian Bayesian Networks are an alternative representation for the class of multivariate Gaussian distributions. This method returns an equivalent joint Gaussian distribution. Returns ------- GaussianDistribution: An equivalent joint Gaussian distribution for the network. Reference --------- Section 7.2, Example 7.3, Probabilistic Graphical Models, Principles and Techniques Examples -------- >>> from pgmpy.models import LinearGaussianBayesianNetwork >>> from pgmpy.factors.continuous import LinearGaussianCPD >>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')]) >>> cpd1 = LinearGaussianCPD('x1', [1], 4) >>> cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1']) >>> cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2']) >>> model.add_cpds(cpd1, cpd2, cpd3) >>> jgd = model.to_joint_gaussian() >>> jgd.variables ['x1', 'x2', 'x3'] >>> jgd.mean array([[ 1. ], [-4.5], [ 8.5]]) >>> jgd.covariance array([[ 4., 2., -2.], [ 2., 5., -5.], [-2., -5., 8.]]) """ variables = nx.topological_sort(self) mean = np.zeros(len(variables)) covariance = np.zeros((len(variables), len(variables))) for node_idx in range(len(variables)): cpd = self.get_cpds(variables[node_idx]) mean[node_idx] = sum([coeff * mean[variables.index(parent)] for coeff, parent in zip(cpd.beta_vector, cpd.evidence)]) + cpd.beta_0 covariance[node_idx, node_idx] = sum( [coeff * coeff * covariance[variables.index(parent), variables.index(parent)] for coeff, parent in zip(cpd.beta_vector, cpd.evidence)]) + cpd.variance for node_i_idx in range(len(variables)): for node_j_idx in range(len(variables)): if covariance[node_j_idx, node_i_idx] != 0: covariance[node_i_idx, node_j_idx] = covariance[node_j_idx, node_i_idx] else: cpd_j = self.get_cpds(variables[node_j_idx]) covariance[node_i_idx, node_j_idx] = sum( [coeff * covariance[node_i_idx, variables.index(parent)] for coeff, parent in zip(cpd_j.beta_vector, cpd_j.evidence)]) return GaussianDistribution(variables, mean, covariance)
[ "def", "to_joint_gaussian", "(", "self", ")", ":", "variables", "=", "nx", ".", "topological_sort", "(", "self", ")", "mean", "=", "np", ".", "zeros", "(", "len", "(", "variables", ")", ")", "covariance", "=", "np", ".", "zeros", "(", "(", "len", "(", "variables", ")", ",", "len", "(", "variables", ")", ")", ")", "for", "node_idx", "in", "range", "(", "len", "(", "variables", ")", ")", ":", "cpd", "=", "self", ".", "get_cpds", "(", "variables", "[", "node_idx", "]", ")", "mean", "[", "node_idx", "]", "=", "sum", "(", "[", "coeff", "*", "mean", "[", "variables", ".", "index", "(", "parent", ")", "]", "for", "coeff", ",", "parent", "in", "zip", "(", "cpd", ".", "beta_vector", ",", "cpd", ".", "evidence", ")", "]", ")", "+", "cpd", ".", "beta_0", "covariance", "[", "node_idx", ",", "node_idx", "]", "=", "sum", "(", "[", "coeff", "*", "coeff", "*", "covariance", "[", "variables", ".", "index", "(", "parent", ")", ",", "variables", ".", "index", "(", "parent", ")", "]", "for", "coeff", ",", "parent", "in", "zip", "(", "cpd", ".", "beta_vector", ",", "cpd", ".", "evidence", ")", "]", ")", "+", "cpd", ".", "variance", "for", "node_i_idx", "in", "range", "(", "len", "(", "variables", ")", ")", ":", "for", "node_j_idx", "in", "range", "(", "len", "(", "variables", ")", ")", ":", "if", "covariance", "[", "node_j_idx", ",", "node_i_idx", "]", "!=", "0", ":", "covariance", "[", "node_i_idx", ",", "node_j_idx", "]", "=", "covariance", "[", "node_j_idx", ",", "node_i_idx", "]", "else", ":", "cpd_j", "=", "self", ".", "get_cpds", "(", "variables", "[", "node_j_idx", "]", ")", "covariance", "[", "node_i_idx", ",", "node_j_idx", "]", "=", "sum", "(", "[", "coeff", "*", "covariance", "[", "node_i_idx", ",", "variables", ".", "index", "(", "parent", ")", "]", "for", "coeff", ",", "parent", "in", "zip", "(", "cpd_j", ".", "beta_vector", ",", "cpd_j", ".", "evidence", ")", "]", ")", "return", "GaussianDistribution", "(", "variables", ",", "mean", ",", "covariance", ")" ]
The linear Gaussian Bayesian Networks are an alternative representation for the class of multivariate Gaussian distributions. This method returns an equivalent joint Gaussian distribution. Returns ------- GaussianDistribution: An equivalent joint Gaussian distribution for the network. Reference --------- Section 7.2, Example 7.3, Probabilistic Graphical Models, Principles and Techniques Examples -------- >>> from pgmpy.models import LinearGaussianBayesianNetwork >>> from pgmpy.factors.continuous import LinearGaussianCPD >>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')]) >>> cpd1 = LinearGaussianCPD('x1', [1], 4) >>> cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1']) >>> cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2']) >>> model.add_cpds(cpd1, cpd2, cpd3) >>> jgd = model.to_joint_gaussian() >>> jgd.variables ['x1', 'x2', 'x3'] >>> jgd.mean array([[ 1. ], [-4.5], [ 8.5]]) >>> jgd.covariance array([[ 4., 2., -2.], [ 2., 5., -5.], [-2., -5., 8.]])
[ "The", "linear", "Gaussian", "Bayesian", "Networks", "are", "an", "alternative", "representation", "for", "the", "class", "of", "multivariate", "Gaussian", "distributions", ".", "This", "method", "returns", "an", "equivalent", "joint", "Gaussian", "distribution", "." ]
python
train
42.377049
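The recursive mean/covariance construction can be checked with plain numpy for the single-parent chain from the docstring (an illustrative index-based encoding, not pgmpy's API):

import numpy as np

# Chain x1 -> x2 -> x3 with the CPDs from the docstring:
# x1 ~ N(1, 4); x2 = -5 + 0.5*x1 + N(0, 4); x3 = 4 - 1*x2 + N(0, 3)
mean = np.zeros(3)
cov = np.zeros((3, 3))

beta0 = [1, -5, 4]            # intercepts
betas = [[], [0.5], [-1.0]]   # coefficient on the single parent
variances = [4, 4, 3]
parents = [[], [0], [1]]      # parent indices, topological order

for i in range(3):
    mean[i] = beta0[i] + sum(b * mean[p] for b, p in zip(betas[i], parents[i]))
    # For a single-parent chain this diagonal update is exact, matching the
    # formula used in to_joint_gaussian().
    cov[i, i] = variances[i] + sum(b * b * cov[p, p]
                                   for b, p in zip(betas[i], parents[i]))
    for j in range(i):
        cov[i, j] = cov[j, i] = sum(b * cov[j, p]
                                    for b, p in zip(betas[i], parents[i]))

print(mean)   # [ 1.  -4.5  8.5]
print(cov)    # [[ 4.  2. -2.] [ 2.  5. -5.] [-2. -5.  8.]]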
saltstack/salt
salt/cli/support/console.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/console.py#L30-L43
def put(self, message, indent=0): ''' Print message with an indent. :param message: :param indent: :return: ''' color = self._colors_conf.get(indent + indent % 2, self._colors_conf.get(0, self._default_color)) for chunk in [' ' * indent, self._colors[color], message, self._colors['ENDC']]: self._device.write(str(chunk)) self._device.write(os.linesep) self._device.flush()
[ "def", "put", "(", "self", ",", "message", ",", "indent", "=", "0", ")", ":", "color", "=", "self", ".", "_colors_conf", ".", "get", "(", "indent", "+", "indent", "%", "2", ",", "self", ".", "_colors_conf", ".", "get", "(", "0", ",", "self", ".", "_default_color", ")", ")", "for", "chunk", "in", "[", "' '", "*", "indent", ",", "self", ".", "_colors", "[", "color", "]", ",", "message", ",", "self", ".", "_colors", "[", "'ENDC'", "]", "]", ":", "self", ".", "_device", ".", "write", "(", "str", "(", "chunk", ")", ")", "self", ".", "_device", ".", "write", "(", "os", ".", "linesep", ")", "self", ".", "_device", ".", "flush", "(", ")" ]
Print message with an indent. :param message: :param indent: :return:
[ "Print", "message", "with", "an", "indent", "." ]
python
train
32.428571
dereneaton/ipyrad
ipyrad/assemble/refmap.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/refmap.py#L116-L324
def mapreads(data, sample, nthreads, force):
    """
    Attempt to map reads to reference sequence. This reads in the fasta
    files (samples.files.edits), and maps each read to the reference. Unmapped
    reads are dropped right back in the de novo pipeline. Reads that map
    successfully are processed and pushed downstream and joined with the rest
    of the data post muscle_align.

    Mapped reads end up in a sam file.
    """
    LOGGER.info("Entering mapreads(): %s %s", sample.name, nthreads)

    ## This is the input derep file, for paired data we need to split the data,
    ## and so we will make sample.files.dereps == [derep1, derep2], but for
    ## SE data we can simply use sample.files.derep == [derepfile].
    derepfile = os.path.join(data.dirs.edits, sample.name+"_derep.fastq")
    sample.files.dereps = [derepfile]

    ## This is the final output files containing merged/concat derep'd refmap'd
    ## reads that did not match to the reference. They will be back in
    ## merge/concat (--nnnnn--) format ready to be input to vsearch, if needed.
    mumapfile = sample.files.unmapped_reads
    umap1file = os.path.join(data.dirs.edits, sample.name+"-tmp-umap1.fastq")
    umap2file = os.path.join(data.dirs.edits, sample.name+"-tmp-umap2.fastq")

    ## split the derepfile into the two handles we designate
    if "pair" in data.paramsdict["datatype"]:
        sample.files.split1 = os.path.join(data.dirs.edits, sample.name+"-split1.fastq")
        sample.files.split2 = os.path.join(data.dirs.edits, sample.name+"-split2.fastq")
        sample.files.dereps = [sample.files.split1, sample.files.split2]
        split_merged_reads(sample.files.dereps, derepfile)

    ## (cmd1) smalt <task> [TASK_OPTIONS] [<index_name> <file_name_A> [<file_name_B>]]
    ## -f sam : Output as sam format, tried :clip: to hard mask output
    ##          but it shreds the unmapped reads (outputs empty fq)
    ## -l [pe,mp,pp]: If paired end select the orientation of each read
    ## -n # : Number of threads to use
    ## -x : Perform a more exhaustive search
    ## -y # : proportion matched to reference (sequence similarity)
    ## -o : output file
    ##    : Reference sequence
    ##    : Input file(s), in a list. One for R1 and one for R2
    ## -c # : proportion of the query read length that must be covered

    ## (cmd1) bwa mem [OPTIONS] <index_name> <file_name_A> [<file_name_B>] > <output_file>
    ## -t # : Number of threads
    ## -M : Mark split alignments as secondary.

    ## (cmd2) samtools view [options] <in.bam>|<in.sam>|<in.cram> [region ...]
    ## -b = write to .bam
    ## -q = Only keep reads with mapq score >= 30 (seems to be pretty standard)
    ## -F = Select all reads that DON'T have these flags.
    ##        0x4 (segment unmapped)
    ##        0x100 (Secondary alignment)
    ##        0x800 (supplementary alignment)
    ## -U = Write out all reads that don't pass the -F filter
    ##      (all unmapped reads go to this file).
    ## TODO: Should eventually add `-q 13` to filter low confidence mapping.
    ## If you do this it will throw away some fraction of reads. Ideally you'd
    ## catch these and throw them in with the rest of the unmapped reads, but
    ## I can't think of a straightforward way of doing that. There should be
    ## a `-Q` flag to only keep reads below the threshold, but i realize that
    ## would be of limited use besides for me.

    ## (cmd3) samtools sort [options...] [in.bam]
    ## -T = Temporary file name, this is required by samtools, ignore it
    ##      Here we hack it to be samhandle.tmp cuz samtools cleans it up
    ## -O = Output file format, in this case bam
    ## -o = Output file name

    if "smalt" in data._hackersonly["aligner"]:
        ## The output SAM data is written to file (-o)
        ## input is either (derep) or (derep-split1, derep-split2)
        cmd1 = [ipyrad.bins.smalt, "map",
                "-f", "sam",
                "-n", str(max(1, nthreads)),
                "-y", str(data.paramsdict['clust_threshold']),
                "-o", os.path.join(data.dirs.refmapping, sample.name+".sam"),
                "-x",
                data.paramsdict['reference_sequence']
                ] + sample.files.dereps
        cmd1_stdout = sps.PIPE
        cmd1_stderr = sps.STDOUT
    else:
        cmd1 = [ipyrad.bins.bwa, "mem",
                "-t", str(max(1, nthreads)),
                "-M",
                data.paramsdict['reference_sequence']
                ] + sample.files.dereps

        ## Insert optional flags for bwa
        try:
            bwa_args = data._hackersonly["bwa_args"].split()
            bwa_args.reverse()
            for arg in bwa_args:
                cmd1.insert(2, arg)
        except KeyError:
            ## Do nothing
            pass

        cmd1_stdout = open(os.path.join(data.dirs.refmapping, sample.name+".sam"), 'w')
        cmd1_stderr = None

    ## Reads in the SAM file from cmd1. It writes the unmapped data to file
    ## and it pipes the mapped data to be used in cmd3
    cmd2 = [ipyrad.bins.samtools, "view", "-b",
            ## TODO: This introduces a bug with PE right now. Think about the case where
            ## R1 has low qual mapping and R2 has high. You get different numbers
            ## of reads in the unmapped tmp files. FML.
            #"-q", "30",
            "-F", "0x904",
            "-U", os.path.join(data.dirs.refmapping, sample.name+"-unmapped.bam"),
            os.path.join(data.dirs.refmapping, sample.name+".sam")]

    ## this is gonna catch mapped bam output from cmd2 and write to file
    cmd3 = [ipyrad.bins.samtools, "sort",
            "-T", os.path.join(data.dirs.refmapping, sample.name+".sam.tmp"),
            "-O", "bam",
            "-o", sample.files.mapped_reads]

    ## TODO: Unnecessary?
    ## this is gonna read the sorted BAM file and index it. only for pileup?
    cmd4 = [ipyrad.bins.samtools, "index", sample.files.mapped_reads]

    ## this is gonna read in the unmapped files, args are added below,
    ## and it will output fastq formatted unmapped reads for merging.
    ## -v 45 sets the default qscore arbitrarily high
    cmd5 = [ipyrad.bins.samtools, "bam2fq", "-v 45",
            os.path.join(data.dirs.refmapping, sample.name+"-unmapped.bam")]

    ## Insert additional arguments for paired data to the commands.
    ## We assume Illumina paired end reads for the orientation
    ## of mate pairs (orientation: ---> <----).
    if 'pair' in data.paramsdict["datatype"]:
        if "smalt" in data._hackersonly["aligner"]:
            ## add paired flag (-l pe) to cmd1 right after (smalt map ...)
            cmd1.insert(2, "pe")
            cmd1.insert(2, "-l")
        else:
            ## No special PE flags for bwa
            pass

        ## add samtools filter for only keep if both pairs hit
        ## 0x1 - Read is paired
        ## 0x2 - Each read properly aligned
        cmd2.insert(2, "0x3")
        cmd2.insert(2, "-f")

        ## tell bam2fq that there are output files for each read pair
        cmd5.insert(2, umap1file)
        cmd5.insert(2, "-1")
        cmd5.insert(2, umap2file)
        cmd5.insert(2, "-2")
    else:
        cmd5.insert(2, mumapfile)
        cmd5.insert(2, "-0")

    ## Running cmd1 creates ref_mapping/sname.sam,
    LOGGER.debug(" ".join(cmd1))
    proc1 = sps.Popen(cmd1, stderr=cmd1_stderr, stdout=cmd1_stdout)

    ## This is really long running job so we wrap it to ensure it dies.
    try:
        error1 = proc1.communicate()[0]
    except KeyboardInterrupt:
        proc1.kill()

    ## raise error if one occurred in smalt
    if proc1.returncode:
        raise IPyradWarningExit(error1)

    ## Running cmd2 writes to ref_mapping/sname.unmapped.bam, and
    ## fills the pipe with mapped BAM data
    LOGGER.debug(" ".join(cmd2))
    proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE)

    ## Running cmd3 pulls mapped BAM from pipe and writes to
    ## ref_mapping/sname.mapped-sorted.bam.
    ## Because proc2 pipes to proc3 we just communicate this to run both.
    LOGGER.debug(" ".join(cmd3))
    proc3 = sps.Popen(cmd3, stderr=sps.STDOUT, stdout=sps.PIPE, stdin=proc2.stdout)
    error3 = proc3.communicate()[0]
    if proc3.returncode:
        raise IPyradWarningExit(error3)
    proc2.stdout.close()

    ## Later we're gonna use samtools to grab out regions using 'view', and to
    ## do that we need it to be indexed. Let's index it now.
    LOGGER.debug(" ".join(cmd4))
    proc4 = sps.Popen(cmd4, stderr=sps.STDOUT, stdout=sps.PIPE)
    error4 = proc4.communicate()[0]
    if proc4.returncode:
        raise IPyradWarningExit(error4)

    ## Running cmd5 writes to either edits/sname-refmap_derep.fastq for SE
    ## or it makes edits/sname-tmp-umap{12}.fastq for paired data, which
    ## will then need to be merged.
    LOGGER.debug(" ".join(cmd5))
    proc5 = sps.Popen(cmd5, stderr=sps.STDOUT, stdout=sps.PIPE)
    error5 = proc5.communicate()[0]
    if proc5.returncode:
        raise IPyradWarningExit(error5)

    ## Finally, merge the unmapped reads, which is what cluster()
    ## expects. If SE, just rename the outfile. In the end
    ## <sample>-refmap_derep.fq will be the final output
    if 'pair' in data.paramsdict["datatype"]:
        LOGGER.info("Merging unmapped reads {} {}".format(umap1file, umap2file))
        merge_pairs_after_refmapping(data, [(umap1file, umap2file)], mumapfile)
[ "def", "mapreads", "(", "data", ",", "sample", ",", "nthreads", ",", "force", ")", ":", "LOGGER", ".", "info", "(", "\"Entering mapreads(): %s %s\"", ",", "sample", ".", "name", ",", "nthreads", ")", "## This is the input derep file, for paired data we need to split the data, ", "## and so we will make sample.files.dereps == [derep1, derep2], but for ", "## SE data we can simply use sample.files.derep == [derepfile].", "derepfile", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "edits", ",", "sample", ".", "name", "+", "\"_derep.fastq\"", ")", "sample", ".", "files", ".", "dereps", "=", "[", "derepfile", "]", "## This is the final output files containing merged/concat derep'd refmap'd ", "## reads that did not match to the reference. They will be back in ", "## merge/concat (--nnnnn--) format ready to be input to vsearch, if needed. ", "mumapfile", "=", "sample", ".", "files", ".", "unmapped_reads", "umap1file", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "edits", ",", "sample", ".", "name", "+", "\"-tmp-umap1.fastq\"", ")", "umap2file", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "edits", ",", "sample", ".", "name", "+", "\"-tmp-umap2.fastq\"", ")", "## split the derepfile into the two handles we designate", "if", "\"pair\"", "in", "data", ".", "paramsdict", "[", "\"datatype\"", "]", ":", "sample", ".", "files", ".", "split1", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "edits", ",", "sample", ".", "name", "+", "\"-split1.fastq\"", ")", "sample", ".", "files", ".", "split2", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "edits", ",", "sample", ".", "name", "+", "\"-split2.fastq\"", ")", "sample", ".", "files", ".", "dereps", "=", "[", "sample", ".", "files", ".", "split1", ",", "sample", ".", "files", ".", "split2", "]", "split_merged_reads", "(", "sample", ".", "files", ".", "dereps", ",", "derepfile", ")", "## (cmd1) smalt <task> [TASK_OPTIONS] [<index_name> <file_name_A> [<file_name_B>]]", "## -f sam : Output as sam format, tried :clip: to hard mask output ", "## but it shreds the unmapped reads (outputs empty fq)", "## -l [pe,mp,pp]: If paired end select the orientation of each read", "## -n # : Number of threads to use", "## -x : Perform a more exhaustive search", "## -y # : proportion matched to reference (sequence similarity)", "## -o : output file", "## : Reference sequence", "## : Input file(s), in a list. One for R1 and one for R2", "## -c # : proportion of the query read length that must be covered", "## (cmd1) bwa mem [OPTIONS] <index_name> <file_name_A> [<file_name_B>] > <output_file>", "## -t # : Number of threads", "## -M : Mark split alignments as secondary.", "## (cmd2) samtools view [options] <in.bam>|<in.sam>|<in.cram> [region ...] ", "## -b = write to .bam", "## -q = Only keep reads with mapq score >= 30 (seems to be pretty standard)", "## -F = Select all reads that DON'T have these flags. ", "## 0x4 (segment unmapped)", "## 0x100 (Secondary alignment)", "## 0x800 (supplementary alignment)", "## -U = Write out all reads that don't pass the -F filter ", "## (all unmapped reads go to this file).", "## TODO: Should eventually add `-q 13` to filter low confidence mapping.", "## If you do this it will throw away some fraction of reads. Ideally you'd", "## catch these and throw them in with the rest of the unmapped reads, but", "## I can't think of a straightforward way of doing that. 
There should be ", "## a `-Q` flag to only keep reads below the threshold, but i realize that", "## would be of limited use besides for me.", "## (cmd3) samtools sort [options...] [in.bam]", "## -T = Temporary file name, this is required by samtools, ignore it", "## Here we hack it to be samhandle.tmp cuz samtools cleans it up", "## -O = Output file format, in this case bam", "## -o = Output file name", "if", "\"smalt\"", "in", "data", ".", "_hackersonly", "[", "\"aligner\"", "]", ":", "## The output SAM data is written to file (-o)", "## input is either (derep) or (derep-split1, derep-split2)", "cmd1", "=", "[", "ipyrad", ".", "bins", ".", "smalt", ",", "\"map\"", ",", "\"-f\"", ",", "\"sam\"", ",", "\"-n\"", ",", "str", "(", "max", "(", "1", ",", "nthreads", ")", ")", ",", "\"-y\"", ",", "str", "(", "data", ".", "paramsdict", "[", "'clust_threshold'", "]", ")", ",", "\"-o\"", ",", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "refmapping", ",", "sample", ".", "name", "+", "\".sam\"", ")", ",", "\"-x\"", ",", "data", ".", "paramsdict", "[", "'reference_sequence'", "]", "]", "+", "sample", ".", "files", ".", "dereps", "cmd1_stdout", "=", "sps", ".", "PIPE", "cmd1_stderr", "=", "sps", ".", "STDOUT", "else", ":", "cmd1", "=", "[", "ipyrad", ".", "bins", ".", "bwa", ",", "\"mem\"", ",", "\"-t\"", ",", "str", "(", "max", "(", "1", ",", "nthreads", ")", ")", ",", "\"-M\"", ",", "data", ".", "paramsdict", "[", "'reference_sequence'", "]", "]", "+", "sample", ".", "files", ".", "dereps", "## Insert optional flags for bwa", "try", ":", "bwa_args", "=", "data", ".", "_hackersonly", "[", "\"bwa_args\"", "]", ".", "split", "(", ")", "bwa_args", ".", "reverse", "(", ")", "for", "arg", "in", "bwa_args", ":", "cmd1", ".", "insert", "(", "2", ",", "arg", ")", "except", "KeyError", ":", "## Do nothing", "pass", "cmd1_stdout", "=", "open", "(", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "refmapping", ",", "sample", ".", "name", "+", "\".sam\"", ")", ",", "'w'", ")", "cmd1_stderr", "=", "None", "## Reads in the SAM file from cmd1. It writes the unmapped data to file", "## and it pipes the mapped data to be used in cmd3", "cmd2", "=", "[", "ipyrad", ".", "bins", ".", "samtools", ",", "\"view\"", ",", "\"-b\"", ",", "## TODO: This introduces a bug with PE right now. Think about the case where", "## R1 has low qual mapping and R2 has high. You get different numbers", "## of reads in the unmapped tmp files. FML.", "#\"-q\", \"30\",", "\"-F\"", ",", "\"0x904\"", ",", "\"-U\"", ",", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "refmapping", ",", "sample", ".", "name", "+", "\"-unmapped.bam\"", ")", ",", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "refmapping", ",", "sample", ".", "name", "+", "\".sam\"", ")", "]", "## this is gonna catch mapped bam output from cmd2 and write to file", "cmd3", "=", "[", "ipyrad", ".", "bins", ".", "samtools", ",", "\"sort\"", ",", "\"-T\"", ",", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "refmapping", ",", "sample", ".", "name", "+", "\".sam.tmp\"", ")", ",", "\"-O\"", ",", "\"bam\"", ",", "\"-o\"", ",", "sample", ".", "files", ".", "mapped_reads", "]", "## TODO: Unnecessary?", "## this is gonna read the sorted BAM file and index it. 
only for pileup?", "cmd4", "=", "[", "ipyrad", ".", "bins", ".", "samtools", ",", "\"index\"", ",", "sample", ".", "files", ".", "mapped_reads", "]", "## this is gonna read in the unmapped files, args are added below, ", "## and it will output fastq formatted unmapped reads for merging.", "## -v 45 sets the default qscore arbitrarily high", "cmd5", "=", "[", "ipyrad", ".", "bins", ".", "samtools", ",", "\"bam2fq\"", ",", "\"-v 45\"", ",", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "refmapping", ",", "sample", ".", "name", "+", "\"-unmapped.bam\"", ")", "]", "## Insert additional arguments for paired data to the commands.", "## We assume Illumina paired end reads for the orientation ", "## of mate pairs (orientation: ---> <----). ", "if", "'pair'", "in", "data", ".", "paramsdict", "[", "\"datatype\"", "]", ":", "if", "\"smalt\"", "in", "data", ".", "_hackersonly", "[", "\"aligner\"", "]", ":", "## add paired flag (-l pe) to cmd1 right after (smalt map ...)", "cmd1", ".", "insert", "(", "2", ",", "\"pe\"", ")", "cmd1", ".", "insert", "(", "2", ",", "\"-l\"", ")", "else", ":", "## No special PE flags for bwa", "pass", "## add samtools filter for only keep if both pairs hit", "## 0x1 - Read is paired", "## 0x2 - Each read properly aligned", "cmd2", ".", "insert", "(", "2", ",", "\"0x3\"", ")", "cmd2", ".", "insert", "(", "2", ",", "\"-f\"", ")", "## tell bam2fq that there are output files for each read pair", "cmd5", ".", "insert", "(", "2", ",", "umap1file", ")", "cmd5", ".", "insert", "(", "2", ",", "\"-1\"", ")", "cmd5", ".", "insert", "(", "2", ",", "umap2file", ")", "cmd5", ".", "insert", "(", "2", ",", "\"-2\"", ")", "else", ":", "cmd5", ".", "insert", "(", "2", ",", "mumapfile", ")", "cmd5", ".", "insert", "(", "2", ",", "\"-0\"", ")", "## Running cmd1 creates ref_mapping/sname.sam, ", "LOGGER", ".", "debug", "(", "\" \"", ".", "join", "(", "cmd1", ")", ")", "proc1", "=", "sps", ".", "Popen", "(", "cmd1", ",", "stderr", "=", "cmd1_stderr", ",", "stdout", "=", "cmd1_stdout", ")", "## This is really long running job so we wrap it to ensure it dies. ", "try", ":", "error1", "=", "proc1", ".", "communicate", "(", ")", "[", "0", "]", "except", "KeyboardInterrupt", ":", "proc1", ".", "kill", "(", ")", "## raise error if one occurred in smalt", "if", "proc1", ".", "returncode", ":", "raise", "IPyradWarningExit", "(", "error1", ")", "## Running cmd2 writes to ref_mapping/sname.unmapped.bam, and ", "## fills the pipe with mapped BAM data", "LOGGER", ".", "debug", "(", "\" \"", ".", "join", "(", "cmd2", ")", ")", "proc2", "=", "sps", ".", "Popen", "(", "cmd2", ",", "stderr", "=", "sps", ".", "STDOUT", ",", "stdout", "=", "sps", ".", "PIPE", ")", "## Running cmd3 pulls mapped BAM from pipe and writes to ", "## ref_mapping/sname.mapped-sorted.bam. ", "## Because proc2 pipes to proc3 we just communicate this to run both.", "LOGGER", ".", "debug", "(", "\" \"", ".", "join", "(", "cmd3", ")", ")", "proc3", "=", "sps", ".", "Popen", "(", "cmd3", ",", "stderr", "=", "sps", ".", "STDOUT", ",", "stdout", "=", "sps", ".", "PIPE", ",", "stdin", "=", "proc2", ".", "stdout", ")", "error3", "=", "proc3", ".", "communicate", "(", ")", "[", "0", "]", "if", "proc3", ".", "returncode", ":", "raise", "IPyradWarningExit", "(", "error3", ")", "proc2", ".", "stdout", ".", "close", "(", ")", "## Later we're gonna use samtools to grab out regions using 'view', and to", "## do that we need it to be indexed. Let's index it now. 
", "LOGGER", ".", "debug", "(", "\" \"", ".", "join", "(", "cmd4", ")", ")", "proc4", "=", "sps", ".", "Popen", "(", "cmd4", ",", "stderr", "=", "sps", ".", "STDOUT", ",", "stdout", "=", "sps", ".", "PIPE", ")", "error4", "=", "proc4", ".", "communicate", "(", ")", "[", "0", "]", "if", "proc4", ".", "returncode", ":", "raise", "IPyradWarningExit", "(", "error4", ")", "## Running cmd5 writes to either edits/sname-refmap_derep.fastq for SE", "## or it makes edits/sname-tmp-umap{12}.fastq for paired data, which ", "## will then need to be merged.", "LOGGER", ".", "debug", "(", "\" \"", ".", "join", "(", "cmd5", ")", ")", "proc5", "=", "sps", ".", "Popen", "(", "cmd5", ",", "stderr", "=", "sps", ".", "STDOUT", ",", "stdout", "=", "sps", ".", "PIPE", ")", "error5", "=", "proc5", ".", "communicate", "(", ")", "[", "0", "]", "if", "proc5", ".", "returncode", ":", "raise", "IPyradWarningExit", "(", "error5", ")", "## Finally, merge the unmapped reads, which is what cluster()", "## expects. If SE, just rename the outfile. In the end", "## <sample>-refmap_derep.fq will be the final output", "if", "'pair'", "in", "data", ".", "paramsdict", "[", "\"datatype\"", "]", ":", "LOGGER", ".", "info", "(", "\"Merging unmapped reads {} {}\"", ".", "format", "(", "umap1file", ",", "umap2file", ")", ")", "merge_pairs_after_refmapping", "(", "data", ",", "[", "(", "umap1file", ",", "umap2file", ")", "]", ",", "mumapfile", ")" ]
Attempt to map reads to reference sequence. This reads in the fasta files (samples.files.edits), and maps each read to the reference. Unmapped reads are dropped right back in the de novo pipeline. Reads that map successfully are processed and pushed downstream and joined with the rest of the data post muscle_align. Mapped reads end up in a sam file.
[ "Attempt", "to", "map", "reads", "to", "reference", "sequence", ".", "This", "reads", "in", "the", "fasta", "files", "(", "samples", ".", "files", ".", "edits", ")", "and", "maps", "each", "read", "to", "the", "reference", ".", "Unmapped", "reads", "are", "dropped", "right", "back", "in", "the", "de", "novo", "pipeline", ".", "Reads", "that", "map", "successfully", "are", "processed", "and", "pushed", "downstream", "and", "joined", "with", "the", "rest", "of", "the", "data", "post", "muscle_align", "." ]
python
valid
44.966507
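The proc2 -> proc3 chaining in mapreads() is standard subprocess piping; a self-contained sketch with generic commands in place of samtools:

import subprocess as sps
import sys

# Chain two processes the way mapreads() chains samtools view into samtools
# sort: the upstream stdout becomes the downstream stdin, only the downstream
# process is communicate()'d, then the upstream pipe is closed.
proc_a = sps.Popen([sys.executable, "-c", "print('b'); print('a'); print('c')"],
                   stdout=sps.PIPE)
proc_b = sps.Popen(["sort"], stdin=proc_a.stdout, stdout=sps.PIPE)
out = proc_b.communicate()[0]
proc_a.stdout.close()   # lets proc_a see a closed pipe if proc_b exits early
if proc_b.returncode:
    raise RuntimeError(out)
print(out.decode())     # a, b, c on separate lines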
cuihantao/andes
andes/utils/solver.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/utils/solver.py#L17-L38
def symbolic(self, A):
    """
    Return the symbolic factorization of sparse matrix ``A``

    Parameters
    ----------
    sparselib
        Library name in ``umfpack`` and ``klu``
    A
        Sparse matrix

    Returns
    -------
    symbolic factorization
    """
    if self.sparselib == 'umfpack':
        return umfpack.symbolic(A)

    elif self.sparselib == 'klu':
        return klu.symbolic(A)
[ "def", "symbolic", "(", "self", ",", "A", ")", ":", "if", "self", ".", "sparselib", "==", "'umfpack'", ":", "return", "umfpack", ".", "symbolic", "(", "A", ")", "elif", "self", ".", "sparselib", "==", "'klu'", ":", "return", "klu", ".", "symbolic", "(", "A", ")" ]
Return the symbolic factorization of sparse matrix ``A`` Parameters ---------- sparselib Library name in ``umfpack`` and ``klu`` A Sparse matrix Returns ------- symbolic factorization
[ "Return", "the", "symbolic", "factorization", "of", "sparse", "matrix", "A" ]
python
train
20.636364
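With sparselib set to 'umfpack' this delegates to CVXOPT's UMFPACK bindings; a sketch of the full symbolic/numeric/solve cycle on a small system (assuming the cvxopt package is available):

from cvxopt import matrix, spmatrix, umfpack

# 2x2 sparse system A x = b; A = [[4, 0], [1, 3]] in triplet form.
A = spmatrix([4.0, 1.0, 3.0], [0, 1, 1], [0, 0, 1])
b = matrix([8.0, 11.0])

Fs = umfpack.symbolic(A)     # analyze the sparsity pattern once
Fn = umfpack.numeric(A, Fs)  # factor the actual values
umfpack.solve(A, Fn, b)      # overwrites b with the solution
print(list(b))               # [2.0, 3.0]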
materialsproject/pymatgen
pymatgen/analysis/graphs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/graphs.py#L2272-L2320
def find_rings(self, including=None): """ Find ring structures in the MoleculeGraph. :param including: list of site indices. If including is not None, then find_rings will only return those rings including the specified sites. By default, this parameter is None, and all rings will be returned. :return: dict {index:cycle}. Each entry will be a ring (cycle, in graph theory terms) including the index found in the Molecule. If there is no cycle including an index, the value will be an empty list. """ # Copies self.graph such that all edges (u, v) matched by edges (v, u) undirected = self.graph.to_undirected() directed = undirected.to_directed() cycles_nodes = [] cycles_edges = [] # Remove all two-edge cycles all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2] # Using to_directed() will mean that each cycle always appears twice # So, we must also remove duplicates unique_sorted = [] unique_cycles = [] for cycle in all_cycles: if sorted(cycle) not in unique_sorted: unique_sorted.append(sorted(cycle)) unique_cycles.append(cycle) if including is None: cycles_nodes = unique_cycles else: for i in including: for cycle in unique_cycles: if i in cycle and cycle not in cycles_nodes: cycles_nodes.append(cycle) for cycle in cycles_nodes: edges = [] for i, e in enumerate(cycle): edges.append((cycle[i-1], e)) cycles_edges.append(edges) return cycles_edges
[ "def", "find_rings", "(", "self", ",", "including", "=", "None", ")", ":", "# Copies self.graph such that all edges (u, v) matched by edges (v, u)", "undirected", "=", "self", ".", "graph", ".", "to_undirected", "(", ")", "directed", "=", "undirected", ".", "to_directed", "(", ")", "cycles_nodes", "=", "[", "]", "cycles_edges", "=", "[", "]", "# Remove all two-edge cycles", "all_cycles", "=", "[", "c", "for", "c", "in", "nx", ".", "simple_cycles", "(", "directed", ")", "if", "len", "(", "c", ")", ">", "2", "]", "# Using to_directed() will mean that each cycle always appears twice", "# So, we must also remove duplicates", "unique_sorted", "=", "[", "]", "unique_cycles", "=", "[", "]", "for", "cycle", "in", "all_cycles", ":", "if", "sorted", "(", "cycle", ")", "not", "in", "unique_sorted", ":", "unique_sorted", ".", "append", "(", "sorted", "(", "cycle", ")", ")", "unique_cycles", ".", "append", "(", "cycle", ")", "if", "including", "is", "None", ":", "cycles_nodes", "=", "unique_cycles", "else", ":", "for", "i", "in", "including", ":", "for", "cycle", "in", "unique_cycles", ":", "if", "i", "in", "cycle", "and", "cycle", "not", "in", "cycles_nodes", ":", "cycles_nodes", ".", "append", "(", "cycle", ")", "for", "cycle", "in", "cycles_nodes", ":", "edges", "=", "[", "]", "for", "i", ",", "e", "in", "enumerate", "(", "cycle", ")", ":", "edges", ".", "append", "(", "(", "cycle", "[", "i", "-", "1", "]", ",", "e", ")", ")", "cycles_edges", ".", "append", "(", "edges", ")", "return", "cycles_edges" ]
Find ring structures in the MoleculeGraph. :param including: list of site indices. If including is not None, then find_rings will only return those rings including the specified sites. By default, this parameter is None, and all rings will be returned. :return: dict {index:cycle}. Each entry will be a ring (cycle, in graph theory terms) including the index found in the Molecule. If there is no cycle including an index, the value will be an empty list.
[ "Find", "ring", "structures", "in", "the", "MoleculeGraph", "." ]
python
train
35.469388
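The directed-copy-plus-dedup idea in find_rings() works on any networkx graph; a sketch on a bare 4-ring:

import networkx as nx

# Undirected -> directed trick from find_rings(): every undirected cycle shows
# up in simple_cycles() of the directed copy, alongside 2-node artifacts and
# reversed duplicates, which are filtered out below.
g = nx.cycle_graph(4)                      # a single 4-ring: 0-1-2-3-0
directed = g.to_directed()

all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2]

unique_sorted, unique_cycles = [], []
for cycle in all_cycles:
    if sorted(cycle) not in unique_sorted:
        unique_sorted.append(sorted(cycle))
        unique_cycles.append(cycle)

print(unique_cycles)   # one orientation of the 4-ring, e.g. [[0, 1, 2, 3]]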
skggm/skggm
examples/estimator_suite_spark.py
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/examples/estimator_suite_spark.py#L232-L237
def empirical(X): """Compute empirical covariance as baseline estimator. """ print("Empirical") cov = np.dot(X.T, X) / n_samples return cov, np.linalg.inv(cov)
[ "def", "empirical", "(", "X", ")", ":", "print", "(", "\"Empirical\"", ")", "cov", "=", "np", ".", "dot", "(", "X", ".", "T", ",", "X", ")", "/", "n_samples", "return", "cov", ",", "np", ".", "linalg", ".", "inv", "(", "cov", ")" ]
Compute empirical covariance as baseline estimator.
[ "Compute", "empirical", "covariance", "as", "baseline", "estimator", "." ]
python
train
29
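A self-contained version with n_samples derived from X rather than captured from an enclosing scope; note the estimator assumes centered data:

import numpy as np

def empirical(X):
    # Maximum-likelihood covariance of (assumed centered) data, plus its
    # inverse as the precision-matrix baseline.
    n_samples = X.shape[0]
    cov = np.dot(X.T, X) / n_samples
    return cov, np.linalg.inv(cov)

rng = np.random.RandomState(0)
X = rng.randn(500, 3)
X -= X.mean(axis=0)          # center, since the estimator skips the mean
cov, prec = empirical(X)
print(np.allclose(cov @ prec, np.eye(3)))   # True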
numenta/nupic
src/nupic/data/generators/pattern_machine.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/pattern_machine.py#L202-L213
def _generate(self): """ Generates set of consecutive patterns. """ n = self._n w = self._w assert type(w) is int, "List for w not supported" for i in xrange(n / w): pattern = set(xrange(i * w, (i+1) * w)) self._patterns[i] = pattern
[ "def", "_generate", "(", "self", ")", ":", "n", "=", "self", ".", "_n", "w", "=", "self", ".", "_w", "assert", "type", "(", "w", ")", "is", "int", ",", "\"List for w not supported\"", "for", "i", "in", "xrange", "(", "n", "/", "w", ")", ":", "pattern", "=", "set", "(", "xrange", "(", "i", "*", "w", ",", "(", "i", "+", "1", ")", "*", "w", ")", ")", "self", ".", "_patterns", "[", "i", "]", "=", "pattern" ]
Generates set of consecutive patterns.
[ "Generates", "set", "of", "consecutive", "patterns", "." ]
python
valid
22
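A Python 3 rendering of the same consecutive-pattern generator (the original targets Python 2's xrange and integer division):

def generate_consecutive_patterns(n, w):
    # Split [0, n) into n // w consecutive, non-overlapping patterns of w bits.
    assert type(w) is int, "List for w not supported"
    patterns = {}
    for i in range(n // w):
        patterns[i] = set(range(i * w, (i + 1) * w))
    return patterns

print(generate_consecutive_patterns(10, 5))
# {0: {0, 1, 2, 3, 4}, 1: {5, 6, 7, 8, 9}}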
thomasleese/mo
mo/steps.py
https://github.com/thomasleese/mo/blob/b757f52b42e51ad19c14724ceb7c5db5d52abaea/mo/steps.py#L96-L113
def help(project, task, step, variables): """Run a help step.""" task_name = step.args or variables['task'] try: task = project.find_task(task_name) except NoSuchTaskError as e: yield events.task_not_found(task_name, e.similarities) raise StopTask text = f'# {task.name}\n' text += '\n' text += task.description text += '\n\n' text += 'Variables: {}'.format(', '.join(task.variables)) yield events.help_output(text)
[ "def", "help", "(", "project", ",", "task", ",", "step", ",", "variables", ")", ":", "task_name", "=", "step", ".", "args", "or", "variables", "[", "'task'", "]", "try", ":", "task", "=", "project", ".", "find_task", "(", "task_name", ")", "except", "NoSuchTaskError", "as", "e", ":", "yield", "events", ".", "task_not_found", "(", "task_name", ",", "e", ".", "similarities", ")", "raise", "StopTask", "text", "=", "f'# {task.name}\\n'", "text", "+=", "'\\n'", "text", "+=", "task", ".", "description", "text", "+=", "'\\n\\n'", "text", "+=", "'Variables: {}'", ".", "format", "(", "', '", ".", "join", "(", "task", ".", "variables", ")", ")", "yield", "events", ".", "help_output", "(", "text", ")" ]
Run a help step.
[ "Run", "a", "help", "step", "." ]
python
train
25.888889
saltstack/salt
salt/cloud/clouds/proxmox.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L275-L289
def _lookup_proxmox_task(upid): ''' Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed. ''' log.debug('Getting creation status for upid: %s', upid) tasks = query('get', 'cluster/tasks') if tasks: for task in tasks: if task['upid'] == upid: log.debug('Found upid task: %s', task) return task return False
[ "def", "_lookup_proxmox_task", "(", "upid", ")", ":", "log", ".", "debug", "(", "'Getting creation status for upid: %s'", ",", "upid", ")", "tasks", "=", "query", "(", "'get'", ",", "'cluster/tasks'", ")", "if", "tasks", ":", "for", "task", "in", "tasks", ":", "if", "task", "[", "'upid'", "]", "==", "upid", ":", "log", ".", "debug", "(", "'Found upid task: %s'", ",", "task", ")", "return", "task", "return", "False" ]
Retrieve the (latest) logs and retrieve the status for a UPID. This can be used to verify whether a task has completed.
[ "Retrieve", "the", "(", "latest", ")", "logs", "and", "retrieve", "the", "status", "for", "a", "UPID", ".", "This", "can", "be", "used", "to", "verify", "whether", "a", "task", "has", "completed", "." ]
python
train
29.533333
mozilla-iot/webthing-python
webthing/server.py
https://github.com/mozilla-iot/webthing-python/blob/65d467c89ed79d0bbc42b8b3c8f9e5a320edd237/webthing/server.py#L116-L122
def set_default_headers(self, *args, **kwargs): """Set the default headers for all requests.""" self.set_header('Access-Control-Allow-Origin', '*') self.set_header('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept') self.set_header('Access-Control-Allow-Methods', 'GET, HEAD, PUT, POST, DELETE')
[ "def", "set_default_headers", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "set_header", "(", "'Access-Control-Allow-Origin'", ",", "'*'", ")", "self", ".", "set_header", "(", "'Access-Control-Allow-Headers'", ",", "'Origin, X-Requested-With, Content-Type, Accept'", ")", "self", ".", "set_header", "(", "'Access-Control-Allow-Methods'", ",", "'GET, HEAD, PUT, POST, DELETE'", ")" ]
Set the default headers for all requests.
[ "Set", "the", "default", "headers", "for", "all", "requests", "." ]
python
test
57
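set_default_headers is a standard tornado.web.RequestHandler hook; a minimal standalone handler using the same headers (illustrative, not the webthing server):

import tornado.web

class CorsHandler(tornado.web.RequestHandler):
    def set_default_headers(self, *args, **kwargs):
        # Same permissive CORS headers as the snippet, applied to every
        # response this handler produces, including errors.
        self.set_header('Access-Control-Allow-Origin', '*')
        self.set_header('Access-Control-Allow-Headers',
                        'Origin, X-Requested-With, Content-Type, Accept')
        self.set_header('Access-Control-Allow-Methods',
                        'GET, HEAD, PUT, POST, DELETE')

    def get(self):
        self.write({'ok': True})

app = tornado.web.Application([(r'/', CorsHandler)])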
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/tiger.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/tiger.py#L344-L369
def get_subordinate_clauses(tiger_docgraph): """ given a document graph of a TIGER syntax tree, return all node IDs of nodes representing subordinate clause constituents. Parameters ---------- tiger_docgraph : DiscourseDocumentGraph or TigerDocumentGraph document graph from which subordinate clauses will be extracted Returns ------- subord_clause_nodes : list(str) list of node IDs of nodes directly dominating subordinate clauses """ subord_clause_rels = \ dg.select_edges_by_attribute( tiger_docgraph, attribute='tiger:label', value=['MO', 'RC', 'SB']) subord_clause_nodes = [] for src_id, target_id in subord_clause_rels: src_cat = tiger_docgraph.node[src_id].get('tiger:cat') if src_cat == 'S' and not dg.istoken(tiger_docgraph, target_id): subord_clause_nodes.append(target_id) return subord_clause_nodes
[ "def", "get_subordinate_clauses", "(", "tiger_docgraph", ")", ":", "subord_clause_rels", "=", "dg", ".", "select_edges_by_attribute", "(", "tiger_docgraph", ",", "attribute", "=", "'tiger:label'", ",", "value", "=", "[", "'MO'", ",", "'RC'", ",", "'SB'", "]", ")", "subord_clause_nodes", "=", "[", "]", "for", "src_id", ",", "target_id", "in", "subord_clause_rels", ":", "src_cat", "=", "tiger_docgraph", ".", "node", "[", "src_id", "]", ".", "get", "(", "'tiger:cat'", ")", "if", "src_cat", "==", "'S'", "and", "not", "dg", ".", "istoken", "(", "tiger_docgraph", ",", "target_id", ")", ":", "subord_clause_nodes", ".", "append", "(", "target_id", ")", "return", "subord_clause_nodes" ]
given a document graph of a TIGER syntax tree, return all node IDs of nodes representing subordinate clause constituents. Parameters ---------- tiger_docgraph : DiscourseDocumentGraph or TigerDocumentGraph document graph from which subordinate clauses will be extracted Returns ------- subord_clause_nodes : list(str) list of node IDs of nodes directly dominating subordinate clauses
[ "given", "a", "document", "graph", "of", "a", "TIGER", "syntax", "tree", "return", "all", "node", "IDs", "of", "nodes", "representing", "subordinate", "clause", "constituents", "." ]
python
train
35.423077
srossross/rpmfile
rpmfile/__init__.py
https://github.com/srossross/rpmfile/blob/3ab96f211da7b56f5e99d8cc248f714a6e542d31/rpmfile/__init__.py#L149-L158
def extractfile(self, member): ''' Extract a member from the archive as a file object. `member' may be a filename or an RPMInfo object. The file-like object is read-only and provides the following methods: read(), readline(), readlines(), seek() and tell() ''' if not isinstance(member, RPMInfo): member = self.getmember(member) return _SubFile(self.data_file, member.file_start, member.size)
[ "def", "extractfile", "(", "self", ",", "member", ")", ":", "if", "not", "isinstance", "(", "member", ",", "RPMInfo", ")", ":", "member", "=", "self", ".", "getmember", "(", "member", ")", "return", "_SubFile", "(", "self", ".", "data_file", ",", "member", ".", "file_start", ",", "member", ".", "size", ")" ]
Extract a member from the archive as a file object. `member' may be a filename or an RPMInfo object. The file-like object is read-only and provides the following methods: read(), readline(), readlines(), seek() and tell()
[ "Extract", "a", "member", "from", "the", "archive", "as", "a", "file", "object", ".", "member", "may", "be", "a", "filename", "or", "an", "RPMInfo", "object", ".", "The", "file", "-", "like", "object", "is", "read", "-", "only", "and", "provides", "the", "following", "methods", ":", "read", "()", "readline", "()", "readlines", "()", "seek", "()", "and", "tell", "()" ]
python
train
45.9
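Typical usage per the rpmfile package's documented API; the archive path and member attributes shown are illustrative:

import rpmfile

with rpmfile.open('package.rpm') as rpm:
    for member in rpm.getmembers():
        print(member.name, member.size)
        fobj = rpm.extractfile(member)   # filename or RPMInfo both work
        print(fobj.read(16))             # first bytes of the payload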
helixyte/everest
everest/representers/base.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/representers/base.py#L294-L302
def with_updated_configuration(self, options=None, attribute_options=None): """ Returns a context in which this representer is updated with the given options and attribute options. """ return self._mapping.with_updated_configuration(options=options, attribute_options= attribute_options)
[ "def", "with_updated_configuration", "(", "self", ",", "options", "=", "None", ",", "attribute_options", "=", "None", ")", ":", "return", "self", ".", "_mapping", ".", "with_updated_configuration", "(", "options", "=", "options", ",", "attribute_options", "=", "attribute_options", ")" ]
Returns a context in which this representer is updated with the given options and attribute options.
[ "Returns", "a", "context", "in", "which", "this", "representer", "is", "updated", "with", "the", "given", "options", "and", "attribute", "options", "." ]
python
train
52.222222
pydata/xarray
xarray/core/variable.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/variable.py#L1896-L1927
def copy(self, deep=True, data=None): """Returns a copy of this object. `deep` is ignored since data is stored in the form of pandas.Index, which is already immutable. Dimensions, attributes and encodings are always copied. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, optional Deep is always ignored. data : array_like, optional Data to use in the new object. Must have same shape as original. Returns ------- object : Variable New object with dimensions, attributes, encodings, and optionally data copied from original. """ if data is None: data = self._data else: data = as_compatible_data(data) if self.shape != data.shape: raise ValueError("Data shape {} must match shape of object {}" .format(data.shape, self.shape)) return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)
[ "def", "copy", "(", "self", ",", "deep", "=", "True", ",", "data", "=", "None", ")", ":", "if", "data", "is", "None", ":", "data", "=", "self", ".", "_data", "else", ":", "data", "=", "as_compatible_data", "(", "data", ")", "if", "self", ".", "shape", "!=", "data", ".", "shape", ":", "raise", "ValueError", "(", "\"Data shape {} must match shape of object {}\"", ".", "format", "(", "data", ".", "shape", ",", "self", ".", "shape", ")", ")", "return", "type", "(", "self", ")", "(", "self", ".", "dims", ",", "data", ",", "self", ".", "_attrs", ",", "self", ".", "_encoding", ",", "fastpath", "=", "True", ")" ]
Returns a copy of this object. `deep` is ignored since data is stored in the form of pandas.Index, which is already immutable. Dimensions, attributes and encodings are always copied. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, optional Deep is always ignored. data : array_like, optional Data to use in the new object. Must have same shape as original. Returns ------- object : Variable New object with dimensions, attributes, encodings, and optionally data copied from original.
[ "Returns", "a", "copy", "of", "this", "object", "." ]
python
train
36.03125
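From the public xarray API, the data= path looks like this (plain Variable shown; the snippet above is the pandas.Index-backed variant):

import xarray as xr

var = xr.Variable(('x',), [1, 2, 3], attrs={'units': 'm'})
copied = var.copy(data=[4, 5, 6])    # same dims/attrs/encoding, new values
print(copied.values, copied.attrs)   # [4 5 6] {'units': 'm'}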
UCL-INGI/INGInious
inginious/frontend/pages/course.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/course.py#L40-L74
def show_page(self, course): """ Prepares and shows the course page """ username = self.user_manager.session_username() if not self.user_manager.course_is_open_to_user(course, lti=False): return self.template_helper.get_renderer().course_unavailable() else: tasks = course.get_tasks() last_submissions = self.submission_manager.get_user_last_submissions(5, {"courseid": course.get_id(), "taskid": {"$in": list(tasks.keys())}}) for submission in last_submissions: submission["taskname"] = tasks[submission['taskid']].get_name(self.user_manager.session_language()) tasks_data = {} user_tasks = self.database.user_tasks.find({"username": username, "courseid": course.get_id(), "taskid": {"$in": list(tasks.keys())}}) is_admin = self.user_manager.has_staff_rights_on_course(course, username) tasks_score = [0.0, 0.0] for taskid, task in tasks.items(): tasks_data[taskid] = {"visible": task.get_accessible_time().after_start() or is_admin, "succeeded": False, "grade": 0.0} tasks_score[1] += task.get_grading_weight() if tasks_data[taskid]["visible"] else 0 for user_task in user_tasks: tasks_data[user_task["taskid"]]["succeeded"] = user_task["succeeded"] tasks_data[user_task["taskid"]]["grade"] = user_task["grade"] weighted_score = user_task["grade"]*tasks[user_task["taskid"]].get_grading_weight() tasks_score[0] += weighted_score if tasks_data[user_task["taskid"]]["visible"] else 0 course_grade = round(tasks_score[0]/tasks_score[1]) if tasks_score[1] > 0 else 0 tag_list = course.get_all_tags_names_as_list(is_admin, self.user_manager.session_language()) user_info = self.database.users.find_one({"username": username}) return self.template_helper.get_renderer().course(user_info, course, last_submissions, tasks, tasks_data, course_grade, tag_list)
[ "def", "show_page", "(", "self", ",", "course", ")", ":", "username", "=", "self", ".", "user_manager", ".", "session_username", "(", ")", "if", "not", "self", ".", "user_manager", ".", "course_is_open_to_user", "(", "course", ",", "lti", "=", "False", ")", ":", "return", "self", ".", "template_helper", ".", "get_renderer", "(", ")", ".", "course_unavailable", "(", ")", "else", ":", "tasks", "=", "course", ".", "get_tasks", "(", ")", "last_submissions", "=", "self", ".", "submission_manager", ".", "get_user_last_submissions", "(", "5", ",", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"taskid\"", ":", "{", "\"$in\"", ":", "list", "(", "tasks", ".", "keys", "(", ")", ")", "}", "}", ")", "for", "submission", "in", "last_submissions", ":", "submission", "[", "\"taskname\"", "]", "=", "tasks", "[", "submission", "[", "'taskid'", "]", "]", ".", "get_name", "(", "self", ".", "user_manager", ".", "session_language", "(", ")", ")", "tasks_data", "=", "{", "}", "user_tasks", "=", "self", ".", "database", ".", "user_tasks", ".", "find", "(", "{", "\"username\"", ":", "username", ",", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"taskid\"", ":", "{", "\"$in\"", ":", "list", "(", "tasks", ".", "keys", "(", ")", ")", "}", "}", ")", "is_admin", "=", "self", ".", "user_manager", ".", "has_staff_rights_on_course", "(", "course", ",", "username", ")", "tasks_score", "=", "[", "0.0", ",", "0.0", "]", "for", "taskid", ",", "task", "in", "tasks", ".", "items", "(", ")", ":", "tasks_data", "[", "taskid", "]", "=", "{", "\"visible\"", ":", "task", ".", "get_accessible_time", "(", ")", ".", "after_start", "(", ")", "or", "is_admin", ",", "\"succeeded\"", ":", "False", ",", "\"grade\"", ":", "0.0", "}", "tasks_score", "[", "1", "]", "+=", "task", ".", "get_grading_weight", "(", ")", "if", "tasks_data", "[", "taskid", "]", "[", "\"visible\"", "]", "else", "0", "for", "user_task", "in", "user_tasks", ":", "tasks_data", "[", "user_task", "[", "\"taskid\"", "]", "]", "[", "\"succeeded\"", "]", "=", "user_task", "[", "\"succeeded\"", "]", "tasks_data", "[", "user_task", "[", "\"taskid\"", "]", "]", "[", "\"grade\"", "]", "=", "user_task", "[", "\"grade\"", "]", "weighted_score", "=", "user_task", "[", "\"grade\"", "]", "*", "tasks", "[", "user_task", "[", "\"taskid\"", "]", "]", ".", "get_grading_weight", "(", ")", "tasks_score", "[", "0", "]", "+=", "weighted_score", "if", "tasks_data", "[", "user_task", "[", "\"taskid\"", "]", "]", "[", "\"visible\"", "]", "else", "0", "course_grade", "=", "round", "(", "tasks_score", "[", "0", "]", "/", "tasks_score", "[", "1", "]", ")", "if", "tasks_score", "[", "1", "]", ">", "0", "else", "0", "tag_list", "=", "course", ".", "get_all_tags_names_as_list", "(", "is_admin", ",", "self", ".", "user_manager", ".", "session_language", "(", ")", ")", "user_info", "=", "self", ".", "database", ".", "users", ".", "find_one", "(", "{", "\"username\"", ":", "username", "}", ")", "return", "self", ".", "template_helper", ".", "get_renderer", "(", ")", ".", "course", "(", "user_info", ",", "course", ",", "last_submissions", ",", "tasks", ",", "tasks_data", ",", "course_grade", ",", "tag_list", ")" ]
Prepares and shows the course page
[ "Prepares", "and", "shows", "the", "course", "page" ]
python
train
59.428571
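The course grade computed above is a weighted average over the tasks visible to the user. A minimal standalone sketch of that aggregation (the function and variable names here are illustrative, not INGInious API):

def weighted_course_grade(grades, weights, visible):
    # grades, weights, visible are parallel per-task lists
    total = sum(w for w, v in zip(weights, visible) if v)
    earned = sum(g * w for g, w, v in zip(grades, weights, visible) if v)
    return round(earned / total) if total > 0 else 0

# weighted_course_grade([80.0, 100.0], [1.0, 3.0], [True, True]) -> 95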
DistrictDataLabs/yellowbrick
yellowbrick/classifier/base.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/classifier/base.py#L84-L111
def fit(self, X, y=None, **kwargs):
    """
    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features

    y : ndarray or Series of length n
        An array or series of target or class values

    kwargs: keyword arguments passed to Scikit-Learn API.

    Returns
    -------
    self : instance
        Returns the instance of the classification score visualizer
    """
    # Fit the inner estimator
    self.estimator.fit(X, y)

    # Extract the classes from the estimator
    if self.classes_ is None:
        self.classes_ = self.estimator.classes_

    # Always return self from fit
    return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Fit the inner estimator", "self", ".", "estimator", ".", "fit", "(", "X", ",", "y", ")", "# Extract the classes from the estimator", "if", "self", ".", "classes_", "is", "None", ":", "self", ".", "classes_", "=", "self", ".", "estimator", ".", "classes_", "# Always return self from fit", "return", "self" ]
Parameters
----------
X : ndarray or DataFrame of shape n x m
    A matrix of n instances with m features

y : ndarray or Series of length n
    An array or series of target or class values

kwargs: keyword arguments passed to Scikit-Learn API.

Returns
-------
self : instance
    Returns the instance of the classification score visualizer
[ "Parameters", "----------" ]
python
train
25.928571
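A hedged usage sketch for this fit (assumes a concrete yellowbrick visualizer such as ClassificationReport wraps it; poof() was the render call in this era of the API):

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from yellowbrick.classifier import ClassificationReport

X, y = make_classification(n_samples=200, n_features=5, random_state=0)
viz = ClassificationReport(LogisticRegression())
viz.fit(X, y)    # delegates to the wrapped estimator, then copies classes_
viz.score(X, y)
viz.poof()       # renders the report figure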
google/transitfeed
gtfsscheduleviewer/marey_graph.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/gtfsscheduleviewer/marey_graph.py#L380-L398
def _DrawHours(self):
  """Generates svg to show a vertical hour and sub-hour grid

  Returns:
    # A string containing a polyline tag for each grid line
    " <polyline class="FullHour" points="20,0 ..."
  """
  tmpstrs = []
  for i in range(0, self._gwidth, self._min_grid):
    if i % self._hour_grid == 0:
      tmpstrs.append('<polyline class="FullHour" points="%d,%d, %d,%d" />' \
                     % (i + .5 + 20, 20, i + .5 + 20, self._gheight))
      tmpstrs.append('<text class="Label" x="%d" y="%d">%d</text>'
                     % (i + 20, 20, (i / self._hour_grid + self._offset) % 24))
    else:
      tmpstrs.append('<polyline class="SubHour" points="%d,%d,%d,%d" />' \
                     % (i + .5 + 20, 20, i + .5 + 20, self._gheight))
  return "".join(tmpstrs)
[ "def", "_DrawHours", "(", "self", ")", ":", "tmpstrs", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "self", ".", "_gwidth", ",", "self", ".", "_min_grid", ")", ":", "if", "i", "%", "self", ".", "_hour_grid", "==", "0", ":", "tmpstrs", ".", "append", "(", "'<polyline class=\"FullHour\" points=\"%d,%d, %d,%d\" />'", "%", "(", "i", "+", ".5", "+", "20", ",", "20", ",", "i", "+", ".5", "+", "20", ",", "self", ".", "_gheight", ")", ")", "tmpstrs", ".", "append", "(", "'<text class=\"Label\" x=\"%d\" y=\"%d\">%d</text>'", "%", "(", "i", "+", "20", ",", "20", ",", "(", "i", "/", "self", ".", "_hour_grid", "+", "self", ".", "_offset", ")", "%", "24", ")", ")", "else", ":", "tmpstrs", ".", "append", "(", "'<polyline class=\"SubHour\" points=\"%d,%d,%d,%d\" />'", "%", "(", "i", "+", ".5", "+", "20", ",", "20", ",", "i", "+", ".5", "+", "20", ",", "self", ".", "_gheight", ")", ")", "return", "\"\"", ".", "join", "(", "tmpstrs", ")" ]
Generates svg to show a vertical hour and sub-hour grid

Returns:
  # A string containing a polyline tag for each grid line
  " <polyline class="FullHour" points="20,0 ..."
[ "Generates", "svg", "to", "show", "a", "vertical", "hour", "and", "sub", "-", "hour", "grid" ]
python
train
43.368421
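To make the grid output concrete, a standalone illustration of what one full-hour iteration emits (the gheight, offset, and hour_grid values are made up):

gheight, offset, hour_grid = 400, 6, 60
i = 0
print('<polyline class="FullHour" points="%d,%d, %d,%d" />'
      % (i + .5 + 20, 20, i + .5 + 20, gheight))
print('<text class="Label" x="%d" y="%d">%d</text>'
      % (i + 20, 20, (i // hour_grid + offset) % 24))
# -> <polyline class="FullHour" points="20,20, 20,400" />
# -> <text class="Label" x="20" y="20">6</text>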
gabstopper/smc-python
smc/base/util.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/base/util.py#L106-L131
def merge_dicts(dict1, dict2, append_lists=False):
    """
    Merge the second dict into the first
    Not intended to merge list of dicts.

    :param append_lists: If true, instead of clobbering a list with the new
        value, append all of the new values onto the original list.
    """
    for key in dict2:
        if isinstance(dict2[key], dict):
            if key in dict1 and key in dict2:
                merge_dicts(dict1[key], dict2[key], append_lists)
            else:
                dict1[key] = dict2[key]
        # If the value is a list and the ``append_lists`` flag is set,
        # append the new values onto the original list
        elif isinstance(dict2[key], list) and append_lists:
            # The value in dict1 must be a list in order to append new
            # values onto it. Don't add duplicates.
            if key in dict1 and isinstance(dict1[key], list):
                dict1[key].extend(
                    [k for k in dict2[key] if k not in dict1[key]])
            else:
                dict1[key] = dict2[key]
        else:
            dict1[key] = dict2[key]
[ "def", "merge_dicts", "(", "dict1", ",", "dict2", ",", "append_lists", "=", "False", ")", ":", "for", "key", "in", "dict2", ":", "if", "isinstance", "(", "dict2", "[", "key", "]", ",", "dict", ")", ":", "if", "key", "in", "dict1", "and", "key", "in", "dict2", ":", "merge_dicts", "(", "dict1", "[", "key", "]", ",", "dict2", "[", "key", "]", ",", "append_lists", ")", "else", ":", "dict1", "[", "key", "]", "=", "dict2", "[", "key", "]", "# If the value is a list and the ``append_lists`` flag is set,", "# append the new values onto the original list", "elif", "isinstance", "(", "dict2", "[", "key", "]", ",", "list", ")", "and", "append_lists", ":", "# The value in dict1 must be a list in order to append new", "# values onto it. Don't add duplicates.", "if", "key", "in", "dict1", "and", "isinstance", "(", "dict1", "[", "key", "]", ",", "list", ")", ":", "dict1", "[", "key", "]", ".", "extend", "(", "[", "k", "for", "k", "in", "dict2", "[", "key", "]", "if", "k", "not", "in", "dict1", "[", "key", "]", "]", ")", "else", ":", "dict1", "[", "key", "]", "=", "dict2", "[", "key", "]", "else", ":", "dict1", "[", "key", "]", "=", "dict2", "[", "key", "]" ]
Merge the second dict into the first
Not intended to merge list of dicts.

:param append_lists: If true, instead of clobbering a list with the new
    value, append all of the new values onto the original list.
[ "Merge", "the", "second", "dict", "into", "the", "first", "Not", "intended", "to", "merge", "list", "of", "dicts", "." ]
python
train
41.653846
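A short usage sketch of merge_dicts with list appending enabled (the values are illustrative):

base = {'log': {'level': 'info'}, 'hosts': ['a']}
update = {'log': {'file': '/tmp/x.log'}, 'hosts': ['b', 'a']}
merge_dicts(base, update, append_lists=True)
# base -> {'log': {'level': 'info', 'file': '/tmp/x.log'}, 'hosts': ['a', 'b']}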
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/flask/app.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/flask/app.py#L1441-L1461
def dispatch_request(self):
    """Does the request dispatching.  Matches the URL and returns the
    return value of the view or error handler.  This does not have to
    be a response object.  In order to convert the return value to a
    proper response object, call :func:`make_response`.

    .. versionchanged:: 0.7
       This no longer does the exception handling, this code was
       moved to the new :meth:`full_dispatch_request`.
    """
    req = _request_ctx_stack.top.request
    if req.routing_exception is not None:
        self.raise_routing_exception(req)
    rule = req.url_rule
    # if we provide automatic options for this URL and the
    # request came with the OPTIONS method, reply automatically
    if getattr(rule, 'provide_automatic_options', False) \
       and req.method == 'OPTIONS':
        return self.make_default_options_response()
    # otherwise dispatch to the handler for that endpoint
    return self.view_functions[rule.endpoint](**req.view_args)
[ "def", "dispatch_request", "(", "self", ")", ":", "req", "=", "_request_ctx_stack", ".", "top", ".", "request", "if", "req", ".", "routing_exception", "is", "not", "None", ":", "self", ".", "raise_routing_exception", "(", "req", ")", "rule", "=", "req", ".", "url_rule", "# if we provide automatic options for this URL and the", "# request came with the OPTIONS method, reply automatically", "if", "getattr", "(", "rule", ",", "'provide_automatic_options'", ",", "False", ")", "and", "req", ".", "method", "==", "'OPTIONS'", ":", "return", "self", ".", "make_default_options_response", "(", ")", "# otherwise dispatch to the handler for that endpoint", "return", "self", ".", "view_functions", "[", "rule", ".", "endpoint", "]", "(", "*", "*", "req", ".", "view_args", ")" ]
Does the request dispatching.  Matches the URL and returns the
return value of the view or error handler.  This does not have to
be a response object.  In order to convert the return value to a
proper response object, call :func:`make_response`.

.. versionchanged:: 0.7
   This no longer does the exception handling, this code was
   moved to the new :meth:`full_dispatch_request`.
[ "Does", "the", "request", "dispatching", ".", "Matches", "the", "URL", "and", "returns", "the", "return", "value", "of", "the", "view", "or", "error", "handler", ".", "This", "does", "not", "have", "to", "be", "a", "response", "object", ".", "In", "order", "to", "convert", "the", "return", "value", "to", "a", "proper", "response", "object", "call", ":", "func", ":", "make_response", "." ]
python
test
49.761905
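The automatic-OPTIONS branch can be exercised from user code through Flask's public test client; a sketch (the route and the exact Allow value are illustrative):

from flask import Flask

app = Flask(__name__)

@app.route('/ping')           # GET only; Flask adds OPTIONS automatically
def ping():
    return 'pong'

with app.test_client() as client:
    resp = client.open('/ping', method='OPTIONS')
    print(resp.headers.get('Allow'))   # e.g. 'GET, HEAD, OPTIONS'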
uber/multidimensional_urlencode
multidimensional_urlencode/urlencoder.py
https://github.com/uber/multidimensional_urlencode/blob/f626528bc3535503fa1557a53bbfacaa29920251/multidimensional_urlencode/urlencoder.py#L41-L54
def parametrize(params):
    """Return list of params as params.

    >>> parametrize(['a'])
    'a'
    >>> parametrize(['a', 'b'])
    'a[b]'
    >>> parametrize(['a', 'b', 'c'])
    'a[b][c]'
    """
    returned = str(params[0])
    returned += "".join("[" + str(p) + "]" for p in params[1:])
    return returned
[ "def", "parametrize", "(", "params", ")", ":", "returned", "=", "str", "(", "params", "[", "0", "]", ")", "returned", "+=", "\"\"", ".", "join", "(", "\"[\"", "+", "str", "(", "p", ")", "+", "\"]\"", "for", "p", "in", "params", "[", "1", ":", "]", ")", "return", "returned" ]
Return list of params as params.

>>> parametrize(['a'])
'a'
>>> parametrize(['a', 'b'])
'a[b]'
>>> parametrize(['a', 'b', 'c'])
'a[b][c]'
[ "Return", "list", "of", "params", "as", "params", "." ]
python
train
21.714286
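A usage sketch showing how the bracketed key feeds a query string, given parametrize above (the quoting step is an assumption about how callers typically combine it):

from urllib.parse import quote

key = parametrize(['user', 'address', 'city'])     # 'user[address][city]'
print(quote(key) + '=' + quote('Paris'))           # user%5Baddress%5D%5Bcity%5D=Paris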
saltstack/salt
salt/spm/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L569-L618
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies
    '''
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}

    can_has = {}
    cant_has = []
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue

        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)

    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')

    inspected = []
    to_inspect = can_has.copy()
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]

        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)

        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})

        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))

    return can_has, cant_has, optional, recommended
[ "def", "_resolve_deps", "(", "self", ",", "formula_def", ")", ":", "pkg_info", "=", "self", ".", "pkgdb", "[", "'{0}.info'", ".", "format", "(", "self", ".", "db_prov", ")", "]", "(", "formula_def", "[", "'name'", "]", ")", "if", "not", "isinstance", "(", "pkg_info", ",", "dict", ")", ":", "pkg_info", "=", "{", "}", "can_has", "=", "{", "}", "cant_has", "=", "[", "]", "if", "'dependencies'", "in", "formula_def", "and", "formula_def", "[", "'dependencies'", "]", "is", "None", ":", "formula_def", "[", "'dependencies'", "]", "=", "''", "for", "dep", "in", "formula_def", ".", "get", "(", "'dependencies'", ",", "''", ")", ".", "split", "(", "','", ")", ":", "dep", "=", "dep", ".", "strip", "(", ")", "if", "not", "dep", ":", "continue", "if", "self", ".", "pkgdb", "[", "'{0}.info'", ".", "format", "(", "self", ".", "db_prov", ")", "]", "(", "dep", ")", ":", "continue", "if", "dep", "in", "self", ".", "avail_pkgs", ":", "can_has", "[", "dep", "]", "=", "self", ".", "avail_pkgs", "[", "dep", "]", "else", ":", "cant_has", ".", "append", "(", "dep", ")", "optional", "=", "formula_def", ".", "get", "(", "'optional'", ",", "''", ")", ".", "split", "(", "','", ")", "recommended", "=", "formula_def", ".", "get", "(", "'recommended'", ",", "''", ")", ".", "split", "(", "','", ")", "inspected", "=", "[", "]", "to_inspect", "=", "can_has", ".", "copy", "(", ")", "while", "to_inspect", ":", "dep", "=", "next", "(", "six", ".", "iterkeys", "(", "to_inspect", ")", ")", "del", "to_inspect", "[", "dep", "]", "# Don't try to resolve the same package more than once", "if", "dep", "in", "inspected", ":", "continue", "inspected", ".", "append", "(", "dep", ")", "repo_contents", "=", "self", ".", "repo_metadata", ".", "get", "(", "can_has", "[", "dep", "]", ",", "{", "}", ")", "repo_packages", "=", "repo_contents", ".", "get", "(", "'packages'", ",", "{", "}", ")", "dep_formula", "=", "repo_packages", ".", "get", "(", "dep", ",", "{", "}", ")", ".", "get", "(", "'info'", ",", "{", "}", ")", "also_can", ",", "also_cant", ",", "opt_dep", ",", "rec_dep", "=", "self", ".", "_resolve_deps", "(", "dep_formula", ")", "can_has", ".", "update", "(", "also_can", ")", "cant_has", "=", "sorted", "(", "set", "(", "cant_has", "+", "also_cant", ")", ")", "optional", "=", "sorted", "(", "set", "(", "optional", "+", "opt_dep", ")", ")", "recommended", "=", "sorted", "(", "set", "(", "recommended", "+", "rec_dep", ")", ")", "return", "can_has", ",", "cant_has", ",", "optional", ",", "recommended" ]
Return a list of packages which need to be installed, to resolve all dependencies
[ "Return", "a", "list", "of", "packages", "which", "need", "to", "be", "installed", "to", "resolve", "all", "dependencies" ]
python
train
36.56
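Stripped of the SPM database and repository plumbing, the traversal above is a work-list walk over an availability map. A minimal standalone sketch (all names are illustrative):

def resolve(name, deps_of, available, resolved=None, missing=None):
    # deps_of: {pkg: [dep, ...]}; available: set of installable packages
    resolved = set() if resolved is None else resolved
    missing = set() if missing is None else missing
    for dep in deps_of.get(name, []):
        if dep in resolved or dep in missing:
            continue
        if dep in available:
            resolved.add(dep)
            resolve(dep, deps_of, available, resolved, missing)
        else:
            missing.add(dep)
    return resolved, missing

# resolve('app', {'app': ['lib', 'gone'], 'lib': ['base']}, {'lib', 'base'})
# -> ({'lib', 'base'}, {'gone'})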
mbedmicro/pyOCD
pyocd/probe/pydapaccess/dap_access_cmsis_dap.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L939-L992
def _write(self, dap_index, transfer_count, transfer_request, transfer_data):
    """
    Write one or more commands
    """
    assert dap_index == 0  # dap index currently unsupported
    assert isinstance(transfer_count, six.integer_types)
    assert isinstance(transfer_request, six.integer_types)
    assert transfer_data is None or len(transfer_data) > 0

    # Create transfer and add to transfer list
    transfer = None
    if transfer_request & READ:
        transfer = _Transfer(self, dap_index, transfer_count,
                             transfer_request, transfer_data)
        self._transfer_list.append(transfer)

    # Build physical packet by adding it to command
    cmd = self._crnt_cmd
    is_read = transfer_request & READ
    size_to_transfer = transfer_count
    trans_data_pos = 0
    while size_to_transfer > 0:
        # Get the size remaining in the current packet for the given request.
        size = cmd.get_request_space(size_to_transfer, transfer_request, dap_index)

        # This request doesn't fit in the packet so send it.
        if size == 0:
            if LOG_PACKET_BUILDS:
                self._logger.debug("_write: send packet [size==0]")
            self._send_packet()
            cmd = self._crnt_cmd
            continue

        # Add request to packet.
        if transfer_data is None:
            data = None
        else:
            data = transfer_data[trans_data_pos:trans_data_pos + size]
        cmd.add(size, transfer_request, data, dap_index)
        size_to_transfer -= size
        trans_data_pos += size

        # Packet has been filled so send it
        if cmd.get_full():
            if LOG_PACKET_BUILDS:
                self._logger.debug("_write: send packet [full]")
            self._send_packet()
            cmd = self._crnt_cmd

    if not self._deferred_transfer:
        self.flush()

    return transfer
[ "def", "_write", "(", "self", ",", "dap_index", ",", "transfer_count", ",", "transfer_request", ",", "transfer_data", ")", ":", "assert", "dap_index", "==", "0", "# dap index currently unsupported", "assert", "isinstance", "(", "transfer_count", ",", "six", ".", "integer_types", ")", "assert", "isinstance", "(", "transfer_request", ",", "six", ".", "integer_types", ")", "assert", "transfer_data", "is", "None", "or", "len", "(", "transfer_data", ")", ">", "0", "# Create transfer and add to transfer list", "transfer", "=", "None", "if", "transfer_request", "&", "READ", ":", "transfer", "=", "_Transfer", "(", "self", ",", "dap_index", ",", "transfer_count", ",", "transfer_request", ",", "transfer_data", ")", "self", ".", "_transfer_list", ".", "append", "(", "transfer", ")", "# Build physical packet by adding it to command", "cmd", "=", "self", ".", "_crnt_cmd", "is_read", "=", "transfer_request", "&", "READ", "size_to_transfer", "=", "transfer_count", "trans_data_pos", "=", "0", "while", "size_to_transfer", ">", "0", ":", "# Get the size remaining in the current packet for the given request.", "size", "=", "cmd", ".", "get_request_space", "(", "size_to_transfer", ",", "transfer_request", ",", "dap_index", ")", "# This request doesn't fit in the packet so send it.", "if", "size", "==", "0", ":", "if", "LOG_PACKET_BUILDS", ":", "self", ".", "_logger", ".", "debug", "(", "\"_write: send packet [size==0]\"", ")", "self", ".", "_send_packet", "(", ")", "cmd", "=", "self", ".", "_crnt_cmd", "continue", "# Add request to packet.", "if", "transfer_data", "is", "None", ":", "data", "=", "None", "else", ":", "data", "=", "transfer_data", "[", "trans_data_pos", ":", "trans_data_pos", "+", "size", "]", "cmd", ".", "add", "(", "size", ",", "transfer_request", ",", "data", ",", "dap_index", ")", "size_to_transfer", "-=", "size", "trans_data_pos", "+=", "size", "# Packet has been filled so send it", "if", "cmd", ".", "get_full", "(", ")", ":", "if", "LOG_PACKET_BUILDS", ":", "self", ".", "_logger", ".", "debug", "(", "\"_write: send packet [full]\"", ")", "self", ".", "_send_packet", "(", ")", "cmd", "=", "self", ".", "_crnt_cmd", "if", "not", "self", ".", "_deferred_transfer", ":", "self", ".", "flush", "(", ")", "return", "transfer" ]
Write one or more commands
[ "Write", "one", "or", "more", "commands" ]
python
train
37.537037
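The core of _write is the chunking loop; a standalone sketch of that pattern (the capacity and payload are made up, and plain lists stand in for CMSIS-DAP command packets):

def chunk(payload, capacity):
    packets, pos = [], 0
    while pos < len(payload):
        size = min(capacity, len(payload) - pos)   # cf. cmd.get_request_space(...)
        packets.append(payload[pos:pos + size])    # cf. cmd.add(...)
        pos += size                                # send when the packet fills
    return packets

# chunk(list(range(10)), 4) -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]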
zerotk/easyfs
zerotk/easyfs/_easyfs.py
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L1654-L1682
def Split(cls, extended_path_mask):
    '''
    Splits the given path into their components: recursive, dirname, in_filters and out_filters

    :param str extended_path_mask:
        The "extended path mask" to split

    :rtype: tuple(bool,bool,str,list(str),list(str))
    :returns:
        Returns the extended path 5 components:
        - The tree-recurse flag
        - The flat-recurse flag
        - The actual path
        - A list of masks to include
        - A list of masks to exclude
    '''
    import os.path
    r_tree_recurse = extended_path_mask[0] in '+-'
    r_flat_recurse = extended_path_mask[0] in '-'

    r_dirname, r_filters = os.path.split(extended_path_mask)
    if r_tree_recurse:
        r_dirname = r_dirname[1:]

    filters = r_filters.split(';')
    r_in_filters = [i for i in filters if not i.startswith('!')]
    r_out_filters = [i[1:] for i in filters if i.startswith('!')]

    return r_tree_recurse, r_flat_recurse, r_dirname, r_in_filters, r_out_filters
[ "def", "Split", "(", "cls", ",", "extended_path_mask", ")", ":", "import", "os", ".", "path", "r_tree_recurse", "=", "extended_path_mask", "[", "0", "]", "in", "'+-'", "r_flat_recurse", "=", "extended_path_mask", "[", "0", "]", "in", "'-'", "r_dirname", ",", "r_filters", "=", "os", ".", "path", ".", "split", "(", "extended_path_mask", ")", "if", "r_tree_recurse", ":", "r_dirname", "=", "r_dirname", "[", "1", ":", "]", "filters", "=", "r_filters", ".", "split", "(", "';'", ")", "r_in_filters", "=", "[", "i", "for", "i", "in", "filters", "if", "not", "i", ".", "startswith", "(", "'!'", ")", "]", "r_out_filters", "=", "[", "i", "[", "1", ":", "]", "for", "i", "in", "filters", "if", "i", ".", "startswith", "(", "'!'", ")", "]", "return", "r_tree_recurse", ",", "r_flat_recurse", ",", "r_dirname", ",", "r_in_filters", ",", "r_out_filters" ]
Splits the given path into their components: recursive, dirname, in_filters and out_filters

:param str extended_path_mask:
    The "extended path mask" to split

:rtype: tuple(bool,bool,str,list(str),list(str))
:returns:
    Returns the extended path 5 components:
    - The tree-recurse flag
    - The flat-recurse flag
    - The actual path
    - A list of masks to include
    - A list of masks to exclude
[ "Splits", "the", "given", "path", "into", "their", "components", ":", "recursive", "dirname", "in_filters", "and", "out_filters" ]
python
valid
36.517241
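A usage sketch of Split ('+' requests tree recursion and '!' marks exclusion masks; the class hosting this classmethod is left implicit, so the call is shown unqualified):

# called via its class, e.g. SomeClass.Split(...)
tree, flat, dirname, ins, outs = Split('+/tmp/src/*.py;!*_test.py')
# -> (True, False, '/tmp/src', ['*.py'], ['*_test.py'])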
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L199-L204
def quaternion(self):
    """:obj:`numpy.ndarray` of float: A quaternion vector in wxyz layout.
    """
    q_xyzw = transformations.quaternion_from_matrix(self.matrix)
    q_wxyz = np.roll(q_xyzw, 1)
    return q_wxyz
[ "def", "quaternion", "(", "self", ")", ":", "q_xyzw", "=", "transformations", ".", "quaternion_from_matrix", "(", "self", ".", "matrix", ")", "q_wxyz", "=", "np", ".", "roll", "(", "q_xyzw", ",", "1", ")", "return", "q_wxyz" ]
:obj:`numpy.ndarray` of float: A quaternion vector in wxyz layout.
[ ":", "obj", ":", "numpy", ".", "ndarray", "of", "float", ":", "A", "quaternion", "vector", "in", "wxyz", "layout", "." ]
python
train
38.833333
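A worked example, assuming autolab_core's RigidTransform with default frames: a 90-degree rotation about z should come back in (w, x, y, z) layout with w = z = sqrt(2)/2:

import numpy as np
from autolab_core import RigidTransform

R = np.array([[0., -1., 0.],
              [1.,  0., 0.],
              [0.,  0., 1.]])
T = RigidTransform(rotation=R)
print(T.quaternion)   # approx [0.7071, 0., 0., 0.7071]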
facelessuser/pyspelling
pyspelling/filters/odf.py
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/odf.py#L139-L147
def get_sub_node(self, node):
    """Extract node from document if desired."""

    subnode = node.find('office:document')
    if subnode:
        mimetype = subnode.attrs['office:mimetype']
        self.type = MIMEMAP[mimetype]
        node = node.find('office:body')
    return node
[ "def", "get_sub_node", "(", "self", ",", "node", ")", ":", "subnode", "=", "node", ".", "find", "(", "'office:document'", ")", "if", "subnode", ":", "mimetype", "=", "subnode", ".", "attrs", "[", "'office:mimetype'", "]", "self", ".", "type", "=", "MIMEMAP", "[", "mimetype", "]", "node", "=", "node", ".", "find", "(", "'office:body'", ")", "return", "node" ]
Extract node from document if desired.
[ "Extract", "node", "from", "document", "if", "desired", "." ]
python
train
33.777778
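A rough sketch of what the hook does for a flat-XML ODF document parsed with BeautifulSoup (the markup and parser choice are assumptions; internally MIMEMAP maps the mimetype to a short type tag):

from bs4 import BeautifulSoup

xml = ('<office:document office:mimetype='
       '"application/vnd.oasis.opendocument.text">'
       '<office:body><office:text>Hello</office:text></office:body>'
       '</office:document>')
node = BeautifulSoup(xml, 'html.parser')
body = node.find('office:document').find('office:body')  # what get_sub_node narrows to
print(body.get_text())   # Hello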