column                 dtype      range
id_within_dataset      int64      values 1 to 55.5k
snippet                string     lengths 19 to 14.2k
tokens                 sequence   lengths 6 to 1.63k
nl                     string     lengths 6 to 352
split_within_dataset   string     1 class
is_duplicated          bool       2 classes
540
def exec_query(cmd, fields): if (conf.prog.powershell is None): return _exec_query_vbs(cmd, fields) return _exec_query_ps(cmd, fields)
[ "def", "exec_query", "(", "cmd", ",", "fields", ")", ":", "if", "(", "conf", ".", "prog", ".", "powershell", "is", "None", ")", ":", "return", "_exec_query_vbs", "(", "cmd", ",", "fields", ")", "return", "_exec_query_ps", "(", "cmd", ",", "fields", ")" ]
execute a system query using powershell if it is available .
train
false
543
def _execute5(*args, **kargs): cmd = (args[1:(-3)] if (args[0] == 'raidcom') else args) result = EXECUTE_TABLE5.get(cmd, CMD_SUCCEED) return result
[ "def", "_execute5", "(", "*", "args", ",", "**", "kargs", ")", ":", "cmd", "=", "(", "args", "[", "1", ":", "(", "-", "3", ")", "]", "if", "(", "args", "[", "0", "]", "==", "'raidcom'", ")", "else", "args", ")", "result", "=", "EXECUTE_TABLE5", ".", "get", "(", "cmd", ",", "CMD_SUCCEED", ")", "return", "result" ]
return predefined results based on execute_table5 .
train
false
544
@pytest.mark.django_db def test_file_extension_instance(): ext = FileExtension.objects.create(name='foo') assert (ext.name == 'foo') assert (str(ext) == 'foo')
[ "@", "pytest", ".", "mark", ".", "django_db", "def", "test_file_extension_instance", "(", ")", ":", "ext", "=", "FileExtension", ".", "objects", ".", "create", "(", "name", "=", "'foo'", ")", "assert", "(", "ext", ".", "name", "==", "'foo'", ")", "assert", "(", "str", "(", "ext", ")", "==", "'foo'", ")" ]
tests the creation of a file extension .
train
false
545
def _get_cart_quotation(party=None): if (not party): party = get_party() quotation = frappe.get_all(u'Quotation', fields=[u'name'], filters={party.doctype.lower(): party.name, u'order_type': u'Shopping Cart', u'docstatus': 0}, order_by=u'modified desc', limit_page_length=1) if quotation: qdoc = frappe.get_doc(u'Quotation', quotation[0].name) else: qdoc = frappe.get_doc({u'doctype': u'Quotation', u'naming_series': (get_shopping_cart_settings().quotation_series or u'QTN-CART-'), u'quotation_to': party.doctype, u'company': frappe.db.get_value(u'Shopping Cart Settings', None, u'company'), u'order_type': u'Shopping Cart', u'status': u'Draft', u'docstatus': 0, u'__islocal': 1, party.doctype.lower(): party.name}) qdoc.contact_person = frappe.db.get_value(u'Contact', {u'email_id': frappe.session.user}) qdoc.contact_email = frappe.session.user qdoc.flags.ignore_permissions = True qdoc.run_method(u'set_missing_values') apply_cart_settings(party, qdoc) return qdoc
[ "def", "_get_cart_quotation", "(", "party", "=", "None", ")", ":", "if", "(", "not", "party", ")", ":", "party", "=", "get_party", "(", ")", "quotation", "=", "frappe", ".", "get_all", "(", "u'Quotation'", ",", "fields", "=", "[", "u'name'", "]", ",", "filters", "=", "{", "party", ".", "doctype", ".", "lower", "(", ")", ":", "party", ".", "name", ",", "u'order_type'", ":", "u'Shopping Cart'", ",", "u'docstatus'", ":", "0", "}", ",", "order_by", "=", "u'modified desc'", ",", "limit_page_length", "=", "1", ")", "if", "quotation", ":", "qdoc", "=", "frappe", ".", "get_doc", "(", "u'Quotation'", ",", "quotation", "[", "0", "]", ".", "name", ")", "else", ":", "qdoc", "=", "frappe", ".", "get_doc", "(", "{", "u'doctype'", ":", "u'Quotation'", ",", "u'naming_series'", ":", "(", "get_shopping_cart_settings", "(", ")", ".", "quotation_series", "or", "u'QTN-CART-'", ")", ",", "u'quotation_to'", ":", "party", ".", "doctype", ",", "u'company'", ":", "frappe", ".", "db", ".", "get_value", "(", "u'Shopping Cart Settings'", ",", "None", ",", "u'company'", ")", ",", "u'order_type'", ":", "u'Shopping Cart'", ",", "u'status'", ":", "u'Draft'", ",", "u'docstatus'", ":", "0", ",", "u'__islocal'", ":", "1", ",", "party", ".", "doctype", ".", "lower", "(", ")", ":", "party", ".", "name", "}", ")", "qdoc", ".", "contact_person", "=", "frappe", ".", "db", ".", "get_value", "(", "u'Contact'", ",", "{", "u'email_id'", ":", "frappe", ".", "session", ".", "user", "}", ")", "qdoc", ".", "contact_email", "=", "frappe", ".", "session", ".", "user", "qdoc", ".", "flags", ".", "ignore_permissions", "=", "True", "qdoc", ".", "run_method", "(", "u'set_missing_values'", ")", "apply_cart_settings", "(", "party", ",", "qdoc", ")", "return", "qdoc" ]
return the open quotation of type "shopping cart" or make a new one .
train
false
546
@contextmanager def safe_concurrent_creation(target_path): safe_mkdir_for(target_path) tmp_path = u'{}.tmp.{}'.format(target_path, uuid.uuid4().hex) try: (yield tmp_path) finally: if os.path.exists(tmp_path): safe_concurrent_rename(tmp_path, target_path)
[ "@", "contextmanager", "def", "safe_concurrent_creation", "(", "target_path", ")", ":", "safe_mkdir_for", "(", "target_path", ")", "tmp_path", "=", "u'{}.tmp.{}'", ".", "format", "(", "target_path", ",", "uuid", ".", "uuid4", "(", ")", ".", "hex", ")", "try", ":", "(", "yield", "tmp_path", ")", "finally", ":", "if", "os", ".", "path", ".", "exists", "(", "tmp_path", ")", ":", "safe_concurrent_rename", "(", "tmp_path", ",", "target_path", ")" ]
a contextmanager that yields a temporary path and renames it to a final target path when the contextmanager exits .
train
false
547
@app.route('/scans/<int:scan_id>/traffic/<int:traffic_id>', methods=['GET']) @requires_auth def get_traffic_details(scan_id, traffic_id): scan_info = get_scan_info_from_id(scan_id) if (scan_info is None): abort(404, 'Scan not found') history_db = HistoryItem() try: details = history_db.read(traffic_id) except DBException: msg = 'Failed to retrieve request with id %s from DB.' abort(404, msg) return data = {'request': b64encode(details.request.dump()), 'response': b64encode(details.response.dump())} return jsonify(data)
[ "@", "app", ".", "route", "(", "'/scans/<int:scan_id>/traffic/<int:traffic_id>'", ",", "methods", "=", "[", "'GET'", "]", ")", "@", "requires_auth", "def", "get_traffic_details", "(", "scan_id", ",", "traffic_id", ")", ":", "scan_info", "=", "get_scan_info_from_id", "(", "scan_id", ")", "if", "(", "scan_info", "is", "None", ")", ":", "abort", "(", "404", ",", "'Scan not found'", ")", "history_db", "=", "HistoryItem", "(", ")", "try", ":", "details", "=", "history_db", ".", "read", "(", "traffic_id", ")", "except", "DBException", ":", "msg", "=", "'Failed to retrieve request with id %s from DB.'", "abort", "(", "404", ",", "msg", ")", "return", "data", "=", "{", "'request'", ":", "b64encode", "(", "details", ".", "request", ".", "dump", "(", ")", ")", ",", "'response'", ":", "b64encode", "(", "details", ".", "response", ".", "dump", "(", ")", ")", "}", "return", "jsonify", "(", "data", ")" ]
the http request and response associated with a vulnerability .
train
false
548
@with_open_mode('r') @with_sizes('medium') def seek_forward_bytewise(f): f.seek(0, 2) size = f.tell() f.seek(0, 0) for i in xrange(0, (size - 1)): f.seek(i, 0)
[ "@", "with_open_mode", "(", "'r'", ")", "@", "with_sizes", "(", "'medium'", ")", "def", "seek_forward_bytewise", "(", "f", ")", ":", "f", ".", "seek", "(", "0", ",", "2", ")", "size", "=", "f", ".", "tell", "(", ")", "f", ".", "seek", "(", "0", ",", "0", ")", "for", "i", "in", "xrange", "(", "0", ",", "(", "size", "-", "1", ")", ")", ":", "f", ".", "seek", "(", "i", ",", "0", ")" ]
seek forward one unit at a time .
train
false
550
def safe_iterator(node, tag=None): if (node is None): return [] if hasattr(node, 'iter'): return node.iter(tag) else: return node.getiterator(tag)
[ "def", "safe_iterator", "(", "node", ",", "tag", "=", "None", ")", ":", "if", "(", "node", "is", "None", ")", ":", "return", "[", "]", "if", "hasattr", "(", "node", ",", "'iter'", ")", ":", "return", "node", ".", "iter", "(", "tag", ")", "else", ":", "return", "node", ".", "getiterator", "(", "tag", ")" ]
return an iterator that is compatible with python 2 .
train
false
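A minimal usage sketch for the safe_iterator snippet above, assuming Python 3's xml.etree.ElementTree, where the modern iter branch is taken (getiterator was the old Python 2-era spelling):

import xml.etree.ElementTree as ET

def safe_iterator(node, tag=None):
    # Prefer the modern Element.iter API; fall back to the legacy getiterator.
    if node is None:
        return []
    if hasattr(node, 'iter'):
        return node.iter(tag)
    return node.getiterator(tag)

root = ET.fromstring('<a><b/><c><b/></c></a>')
print([el.tag for el in safe_iterator(root, 'b')])  # ['b', 'b']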
555
def Poisson(name, lamda): return rv(name, PoissonDistribution, lamda)
[ "def", "Poisson", "(", "name", ",", "lamda", ")", ":", "return", "rv", "(", "name", ",", "PoissonDistribution", ",", "lamda", ")" ]
create a discrete random variable with a poisson distribution .
train
false
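A short usage sketch, assuming the snippet lives in sympy.stats (Poisson, density and E are public sympy.stats API):

from sympy.stats import Poisson, density, E

X = Poisson('X', 3)    # discrete random variable with rate lamda = 3
print(density(X)(2))   # 9*exp(-3)/2
print(E(X))            # 3: the mean of a Poisson equals its rate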
556
def SetNodeAnnotation(node, annotation, value): setattr(node, (_NODE_ANNOTATION_PREFIX + annotation), value)
[ "def", "SetNodeAnnotation", "(", "node", ",", "annotation", ",", "value", ")", ":", "setattr", "(", "node", ",", "(", "_NODE_ANNOTATION_PREFIX", "+", "annotation", ")", ",", "value", ")" ]
set annotation value on a node .
train
false
557
def rastrigin_skew(individual): N = len(individual) return (((10 * N) + sum((((((10 * x) if (x > 0) else x) ** 2) - (10 * cos(((2 * pi) * ((10 * x) if (x > 0) else x))))) for x in individual))),)
[ "def", "rastrigin_skew", "(", "individual", ")", ":", "N", "=", "len", "(", "individual", ")", "return", "(", "(", "(", "10", "*", "N", ")", "+", "sum", "(", "(", "(", "(", "(", "(", "10", "*", "x", ")", "if", "(", "x", ">", "0", ")", "else", "x", ")", "**", "2", ")", "-", "(", "10", "*", "cos", "(", "(", "(", "2", "*", "pi", ")", "*", "(", "(", "10", "*", "x", ")", "if", "(", "x", ">", "0", ")", "else", "x", ")", ")", ")", ")", ")", "for", "x", "in", "individual", ")", ")", ")", ",", ")" ]
skewed rastrigin test objective function .
train
false
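A runnable restatement of the rastrigin_skew objective with the one-liner unpacked, assuming cos and pi come from the math module as in DEAP's benchmarks; the skew multiplies positive coordinates by 10, and the origin remains the global minimum:

from math import cos, pi

def rastrigin_skew(individual):
    # Positive coordinates are scaled by 10 before the usual Rastrigin terms.
    N = len(individual)
    skewed = [10 * x if x > 0 else x for x in individual]
    return (10 * N + sum(x ** 2 - 10 * cos(2 * pi * x) for x in skewed),)

print(rastrigin_skew([0.0, 0.0]))   # (0.0,) at the global minimum
print(rastrigin_skew([0.5, -0.5]))  # the skew makes +/-0.5 contribute differently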
558
def user_add(cursor, user, password, role_attr_flags, encrypted, expires): query_password_data = dict(password=password, expires=expires) query = [('CREATE USER %(user)s' % {'user': pg_quote_identifier(user, 'role')})] if (password is not None): query.append(('WITH %(crypt)s' % {'crypt': encrypted})) query.append('PASSWORD %(password)s') if (expires is not None): query.append('VALID UNTIL %(expires)s') query.append(role_attr_flags) query = ' '.join(query) cursor.execute(query, query_password_data) return True
[ "def", "user_add", "(", "cursor", ",", "user", ",", "password", ",", "role_attr_flags", ",", "encrypted", ",", "expires", ")", ":", "query_password_data", "=", "dict", "(", "password", "=", "password", ",", "expires", "=", "expires", ")", "query", "=", "[", "(", "'CREATE USER %(user)s'", "%", "{", "'user'", ":", "pg_quote_identifier", "(", "user", ",", "'role'", ")", "}", ")", "]", "if", "(", "password", "is", "not", "None", ")", ":", "query", ".", "append", "(", "(", "'WITH %(crypt)s'", "%", "{", "'crypt'", ":", "encrypted", "}", ")", ")", "query", ".", "append", "(", "'PASSWORD %(password)s'", ")", "if", "(", "expires", "is", "not", "None", ")", ":", "query", ".", "append", "(", "'VALID UNTIL %(expires)s'", ")", "query", ".", "append", "(", "role_attr_flags", ")", "query", "=", "' '", ".", "join", "(", "query", ")", "cursor", ".", "execute", "(", "query", ",", "query_password_data", ")", "return", "True" ]
user add .
train
false
559
def find_referenced_templates(ast): for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)): if (isinstance(node.template, nodes.Const) and isinstance(node.template.value, basestring)): (yield node.template.value) else: (yield None)
[ "def", "find_referenced_templates", "(", "ast", ")", ":", "for", "node", "in", "ast", ".", "find_all", "(", "(", "nodes", ".", "Extends", ",", "nodes", ".", "FromImport", ",", "nodes", ".", "Import", ",", "nodes", ".", "Include", ")", ")", ":", "if", "(", "isinstance", "(", "node", ".", "template", ",", "nodes", ".", "Const", ")", "and", "isinstance", "(", "node", ".", "template", ".", "value", ",", "basestring", ")", ")", ":", "(", "yield", "node", ".", "template", ".", "value", ")", "else", ":", "(", "yield", "None", ")" ]
finds all the referenced templates from the ast .
train
false
560
@must_have_permission('write') @must_have_addon(SHORT_NAME, 'user') @must_have_addon(SHORT_NAME, 'node') @must_be_addon_authorizer(SHORT_NAME) def dataverse_set_config(node_addon, auth, **kwargs): user_settings = node_addon.user_settings user = auth.user if (user_settings and (user_settings.owner != user)): raise HTTPError(http.FORBIDDEN) try: assert_clean(request.json) except AssertionError: raise HTTPError(http.NOT_ACCEPTABLE) alias = request.json.get('dataverse', {}).get('alias') doi = request.json.get('dataset', {}).get('doi') if ((doi is None) or (alias is None)): return HTTPError(http.BAD_REQUEST) connection = client.connect_from_settings(node_addon) dataverse = client.get_dataverse(connection, alias) dataset = client.get_dataset(dataverse, doi) node_addon.set_folder(dataverse, dataset, auth) return ({'dataverse': dataverse.title, 'dataset': dataset.title}, http.OK)
[ "@", "must_have_permission", "(", "'write'", ")", "@", "must_have_addon", "(", "SHORT_NAME", ",", "'user'", ")", "@", "must_have_addon", "(", "SHORT_NAME", ",", "'node'", ")", "@", "must_be_addon_authorizer", "(", "SHORT_NAME", ")", "def", "dataverse_set_config", "(", "node_addon", ",", "auth", ",", "**", "kwargs", ")", ":", "user_settings", "=", "node_addon", ".", "user_settings", "user", "=", "auth", ".", "user", "if", "(", "user_settings", "and", "(", "user_settings", ".", "owner", "!=", "user", ")", ")", ":", "raise", "HTTPError", "(", "http", ".", "FORBIDDEN", ")", "try", ":", "assert_clean", "(", "request", ".", "json", ")", "except", "AssertionError", ":", "raise", "HTTPError", "(", "http", ".", "NOT_ACCEPTABLE", ")", "alias", "=", "request", ".", "json", ".", "get", "(", "'dataverse'", ",", "{", "}", ")", ".", "get", "(", "'alias'", ")", "doi", "=", "request", ".", "json", ".", "get", "(", "'dataset'", ",", "{", "}", ")", ".", "get", "(", "'doi'", ")", "if", "(", "(", "doi", "is", "None", ")", "or", "(", "alias", "is", "None", ")", ")", ":", "return", "HTTPError", "(", "http", ".", "BAD_REQUEST", ")", "connection", "=", "client", ".", "connect_from_settings", "(", "node_addon", ")", "dataverse", "=", "client", ".", "get_dataverse", "(", "connection", ",", "alias", ")", "dataset", "=", "client", ".", "get_dataset", "(", "dataverse", ",", "doi", ")", "node_addon", ".", "set_folder", "(", "dataverse", ",", "dataset", ",", "auth", ")", "return", "(", "{", "'dataverse'", ":", "dataverse", ".", "title", ",", "'dataset'", ":", "dataset", ".", "title", "}", ",", "http", ".", "OK", ")" ]
saves selected dataverse and dataset to node settings .
train
false
561
def getBoundingBoxByPaths(elementNode): transformedPaths = elementNode.xmlObject.getTransformedPaths() maximum = euclidean.getMaximumByVector3Paths(transformedPaths) minimum = euclidean.getMinimumByVector3Paths(transformedPaths) return [minimum, maximum]
[ "def", "getBoundingBoxByPaths", "(", "elementNode", ")", ":", "transformedPaths", "=", "elementNode", ".", "xmlObject", ".", "getTransformedPaths", "(", ")", "maximum", "=", "euclidean", ".", "getMaximumByVector3Paths", "(", "transformedPaths", ")", "minimum", "=", "euclidean", ".", "getMinimumByVector3Paths", "(", "transformedPaths", ")", "return", "[", "minimum", ",", "maximum", "]" ]
get bounding box of the transformed paths of the xmlobject of the elementnode .
train
false
563
def mapping_lines_from_collapsed_df(collapsed_df): lines = [] lines.append(' DCTB '.join((['#SampleID', 'original-sample-ids'] + list(collapsed_df.columns)[1:]))) for r in collapsed_df.iterrows(): if isinstance(r[0], tuple): new_idx = '.'.join(map(str, r[0])) else: new_idx = str(r[0]) new_values = [] for e in r[1]: if (len(set(e)) == 1): new_values.append(str(e[0])) else: new_values.append(('(%s)' % ', '.join(map(str, e)))) lines.append(' DCTB '.join(([new_idx] + new_values))) return lines
[ "def", "mapping_lines_from_collapsed_df", "(", "collapsed_df", ")", ":", "lines", "=", "[", "]", "lines", ".", "append", "(", "' DCTB '", ".", "join", "(", "(", "[", "'#SampleID'", ",", "'original-sample-ids'", "]", "+", "list", "(", "collapsed_df", ".", "columns", ")", "[", "1", ":", "]", ")", ")", ")", "for", "r", "in", "collapsed_df", ".", "iterrows", "(", ")", ":", "if", "isinstance", "(", "r", "[", "0", "]", ",", "tuple", ")", ":", "new_idx", "=", "'.'", ".", "join", "(", "map", "(", "str", ",", "r", "[", "0", "]", ")", ")", "else", ":", "new_idx", "=", "str", "(", "r", "[", "0", "]", ")", "new_values", "=", "[", "]", "for", "e", "in", "r", "[", "1", "]", ":", "if", "(", "len", "(", "set", "(", "e", ")", ")", "==", "1", ")", ":", "new_values", ".", "append", "(", "str", "(", "e", "[", "0", "]", ")", ")", "else", ":", "new_values", ".", "append", "(", "(", "'(%s)'", "%", "', '", ".", "join", "(", "map", "(", "str", ",", "e", ")", ")", ")", ")", "lines", ".", "append", "(", "' DCTB '", ".", "join", "(", "(", "[", "new_idx", "]", "+", "new_values", ")", ")", ")", "return", "lines" ]
formats a multi-index dataframe as lines of a qiime mapping file .
train
false
564
def stored_cookie_messages_count(storage, response): cookie = response.cookies.get(storage.cookie_name) if ((not cookie) or (cookie['max-age'] == 0)): return 0 data = storage._decode(cookie.value) if (not data): return 0 if (data[(-1)] == CookieStorage.not_finished): data.pop() return len(data)
[ "def", "stored_cookie_messages_count", "(", "storage", ",", "response", ")", ":", "cookie", "=", "response", ".", "cookies", ".", "get", "(", "storage", ".", "cookie_name", ")", "if", "(", "(", "not", "cookie", ")", "or", "(", "cookie", "[", "'max-age'", "]", "==", "0", ")", ")", ":", "return", "0", "data", "=", "storage", ".", "_decode", "(", "cookie", ".", "value", ")", "if", "(", "not", "data", ")", ":", "return", "0", "if", "(", "data", "[", "(", "-", "1", ")", "]", "==", "CookieStorage", ".", "not_finished", ")", ":", "data", ".", "pop", "(", ")", "return", "len", "(", "data", ")" ]
return an integer containing the number of messages stored .
train
false
565
def data_fun(times): global n n_samp = len(times) window = np.zeros(n_samp) (start, stop) = [int(((ii * float(n_samp)) / (2 * n_dipoles))) for ii in ((2 * n), ((2 * n) + 1))] window[start:stop] = 1.0 n += 1 data = (2.5e-08 * np.sin(((((2.0 * np.pi) * 10.0) * n) * times))) data *= window return data
[ "def", "data_fun", "(", "times", ")", ":", "global", "n", "n_samp", "=", "len", "(", "times", ")", "window", "=", "np", ".", "zeros", "(", "n_samp", ")", "(", "start", ",", "stop", ")", "=", "[", "int", "(", "(", "(", "ii", "*", "float", "(", "n_samp", ")", ")", "/", "(", "2", "*", "n_dipoles", ")", ")", ")", "for", "ii", "in", "(", "(", "2", "*", "n", ")", ",", "(", "(", "2", "*", "n", ")", "+", "1", ")", ")", "]", "window", "[", "start", ":", "stop", "]", "=", "1.0", "n", "+=", "1", "data", "=", "(", "2.5e-08", "*", "np", ".", "sin", "(", "(", "(", "(", "(", "2.0", "*", "np", ".", "pi", ")", "*", "10.0", ")", "*", "n", ")", "*", "times", ")", ")", ")", "data", "*=", "window", "return", "data" ]
function to generate random source time courses .
train
false
566
def umc_module_for_edit(module, object_dn, superordinate=None): mod = module_by_name(module) objects = get_umc_admin_objects() position = position_base_dn() position.setDn(ldap_dn_tree_parent(object_dn)) obj = objects.get(mod, config(), uldap(), position=position, superordinate=superordinate, dn=object_dn) obj.open() return obj
[ "def", "umc_module_for_edit", "(", "module", ",", "object_dn", ",", "superordinate", "=", "None", ")", ":", "mod", "=", "module_by_name", "(", "module", ")", "objects", "=", "get_umc_admin_objects", "(", ")", "position", "=", "position_base_dn", "(", ")", "position", ".", "setDn", "(", "ldap_dn_tree_parent", "(", "object_dn", ")", ")", "obj", "=", "objects", ".", "get", "(", "mod", ",", "config", "(", ")", ",", "uldap", "(", ")", ",", "position", "=", "position", ",", "superordinate", "=", "superordinate", ",", "dn", "=", "object_dn", ")", "obj", ".", "open", "(", ")", "return", "obj" ]
returns an umc module object prepared for editing an existing entry .
train
false
568
def __tweak_field(fields, field_name, attribute, value): for x in fields: if (x['name'] == field_name): x[attribute] = value
[ "def", "__tweak_field", "(", "fields", ",", "field_name", ",", "attribute", ",", "value", ")", ":", "for", "x", "in", "fields", ":", "if", "(", "x", "[", "'name'", "]", "==", "field_name", ")", ":", "x", "[", "attribute", "]", "=", "value" ]
helper function to insert extra data into the field list .
train
false
569
def extract_context(): for frame in inspect.stack(): arguments = frame[0].f_code.co_varnames if (not arguments): continue self_argument = arguments[0] if (not (self_argument in frame[0].f_locals)): continue instance = frame[0].f_locals[self_argument] if (hasattr(instance, 'context') and isinstance(instance.context, PluginContext)): return instance.context
[ "def", "extract_context", "(", ")", ":", "for", "frame", "in", "inspect", ".", "stack", "(", ")", ":", "arguments", "=", "frame", "[", "0", "]", ".", "f_code", ".", "co_varnames", "if", "(", "not", "arguments", ")", ":", "continue", "self_argument", "=", "arguments", "[", "0", "]", "if", "(", "not", "(", "self_argument", "in", "frame", "[", "0", "]", ".", "f_locals", ")", ")", ":", "continue", "instance", "=", "frame", "[", "0", "]", ".", "f_locals", "[", "self_argument", "]", "if", "(", "hasattr", "(", "instance", ",", "'context'", ")", "and", "isinstance", "(", "instance", ".", "context", ",", "PluginContext", ")", ")", ":", "return", "instance", ".", "context" ]
walk the call stack to find an object whose context attribute is a plugincontext, and return that context .
train
false
572
def allocate_eip_address(domain=None, region=None, key=None, keyid=None, profile=None): if (domain and (domain != 'vpc')): raise SaltInvocationError("The only permitted value for the 'domain' param is 'vpc'.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: address = conn.allocate_address(domain=domain) except boto.exception.BotoServerError as e: log.error(e) return False interesting = ['allocation_id', 'association_id', 'domain', 'instance_id', 'network_interface_id', 'network_interface_owner_id', 'public_ip', 'private_ip_address'] return dict([(x, getattr(address, x)) for x in interesting])
[ "def", "allocate_eip_address", "(", "domain", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "if", "(", "domain", "and", "(", "domain", "!=", "'vpc'", ")", ")", ":", "raise", "SaltInvocationError", "(", "\"The only permitted value for the 'domain' param is 'vpc'.\"", ")", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "try", ":", "address", "=", "conn", ".", "allocate_address", "(", "domain", "=", "domain", ")", "except", "boto", ".", "exception", ".", "BotoServerError", "as", "e", ":", "log", ".", "error", "(", "e", ")", "return", "False", "interesting", "=", "[", "'allocation_id'", ",", "'association_id'", ",", "'domain'", ",", "'instance_id'", ",", "'network_interface_id'", ",", "'network_interface_owner_id'", ",", "'public_ip'", ",", "'private_ip_address'", "]", "return", "dict", "(", "[", "(", "x", ",", "getattr", "(", "address", ",", "x", ")", ")", "for", "x", "in", "interesting", "]", ")" ]
allocate an eip from your eip pool .
train
true
574
def test_calibration_curve(): y_true = np.array([0, 0, 0, 1, 1, 1]) y_pred = np.array([0.0, 0.1, 0.2, 0.8, 0.9, 1.0]) (prob_true, prob_pred) = calibration_curve(y_true, y_pred, n_bins=2) (prob_true_unnormalized, prob_pred_unnormalized) = calibration_curve(y_true, (y_pred * 2), n_bins=2, normalize=True) assert_equal(len(prob_true), len(prob_pred)) assert_equal(len(prob_true), 2) assert_almost_equal(prob_true, [0, 1]) assert_almost_equal(prob_pred, [0.1, 0.9]) assert_almost_equal(prob_true, prob_true_unnormalized) assert_almost_equal(prob_pred, prob_pred_unnormalized) assert_raises(ValueError, calibration_curve, [1.1], [(-0.1)], normalize=False)
[ "def", "test_calibration_curve", "(", ")", ":", "y_true", "=", "np", ".", "array", "(", "[", "0", ",", "0", ",", "0", ",", "1", ",", "1", ",", "1", "]", ")", "y_pred", "=", "np", ".", "array", "(", "[", "0.0", ",", "0.1", ",", "0.2", ",", "0.8", ",", "0.9", ",", "1.0", "]", ")", "(", "prob_true", ",", "prob_pred", ")", "=", "calibration_curve", "(", "y_true", ",", "y_pred", ",", "n_bins", "=", "2", ")", "(", "prob_true_unnormalized", ",", "prob_pred_unnormalized", ")", "=", "calibration_curve", "(", "y_true", ",", "(", "y_pred", "*", "2", ")", ",", "n_bins", "=", "2", ",", "normalize", "=", "True", ")", "assert_equal", "(", "len", "(", "prob_true", ")", ",", "len", "(", "prob_pred", ")", ")", "assert_equal", "(", "len", "(", "prob_true", ")", ",", "2", ")", "assert_almost_equal", "(", "prob_true", ",", "[", "0", ",", "1", "]", ")", "assert_almost_equal", "(", "prob_pred", ",", "[", "0.1", ",", "0.9", "]", ")", "assert_almost_equal", "(", "prob_true", ",", "prob_true_unnormalized", ")", "assert_almost_equal", "(", "prob_pred", ",", "prob_pred_unnormalized", ")", "assert_raises", "(", "ValueError", ",", "calibration_curve", ",", "[", "1.1", "]", ",", "[", "(", "-", "0.1", ")", "]", ",", "normalize", "=", "False", ")" ]
check calibration_curve function .
train
false
575
def is_valid_model_filters(model, filters, exclude_list=None): for key in filters.keys(): if (exclude_list and (key in exclude_list)): continue try: getattr(model, key) except AttributeError: LOG.debug("'%s' filter key is not valid.", key) return False return True
[ "def", "is_valid_model_filters", "(", "model", ",", "filters", ",", "exclude_list", "=", "None", ")", ":", "for", "key", "in", "filters", ".", "keys", "(", ")", ":", "if", "(", "exclude_list", "and", "(", "key", "in", "exclude_list", ")", ")", ":", "continue", "try", ":", "getattr", "(", "model", ",", "key", ")", "except", "AttributeError", ":", "LOG", ".", "debug", "(", "\"'%s' filter key is not valid.\"", ",", "key", ")", "return", "False", "return", "True" ]
return true if filter values exist on the model .
train
false
576
@contextmanager def fake_spawn(): greenlets = [] def _inner_fake_spawn(func, *a, **kw): gt = greenthread.spawn(func, *a, **kw) greenlets.append(gt) return gt object_server.spawn = _inner_fake_spawn with mock.patch('swift.obj.server.spawn', _inner_fake_spawn): try: (yield) finally: for gt in greenlets: gt.wait()
[ "@", "contextmanager", "def", "fake_spawn", "(", ")", ":", "greenlets", "=", "[", "]", "def", "_inner_fake_spawn", "(", "func", ",", "*", "a", ",", "**", "kw", ")", ":", "gt", "=", "greenthread", ".", "spawn", "(", "func", ",", "*", "a", ",", "**", "kw", ")", "greenlets", ".", "append", "(", "gt", ")", "return", "gt", "object_server", ".", "spawn", "=", "_inner_fake_spawn", "with", "mock", ".", "patch", "(", "'swift.obj.server.spawn'", ",", "_inner_fake_spawn", ")", ":", "try", ":", "(", "yield", ")", "finally", ":", "for", "gt", "in", "greenlets", ":", "gt", ".", "wait", "(", ")" ]
spawn and capture the result so we can later wait on it .
train
false
577
def _ignore_keys(endpoint_props): return dict(((prop_name, prop_val) for (prop_name, prop_val) in six.iteritems(endpoint_props) if (prop_name not in _DO_NOT_COMPARE_FIELDS)))
[ "def", "_ignore_keys", "(", "endpoint_props", ")", ":", "return", "dict", "(", "(", "(", "prop_name", ",", "prop_val", ")", "for", "(", "prop_name", ",", "prop_val", ")", "in", "six", ".", "iteritems", "(", "endpoint_props", ")", "if", "(", "prop_name", "not", "in", "_DO_NOT_COMPARE_FIELDS", ")", ")", ")" ]
ignore keys that might differ without carrying any important information .
train
true
578
def createAppendByText(parentNode, xmlText): monad = OpenMonad(parentNode) for character in xmlText: monad = monad.getNextMonad(character)
[ "def", "createAppendByText", "(", "parentNode", ",", "xmlText", ")", ":", "monad", "=", "OpenMonad", "(", "parentNode", ")", "for", "character", "in", "xmlText", ":", "monad", "=", "monad", ".", "getNextMonad", "(", "character", ")" ]
create and append the child nodes from the xmltext .
train
false
579
def plug_l2_gw_service(cluster, lswitch_id, lport_id, gateway_id, vlan_id=None): att_obj = {'type': 'L2GatewayAttachment', 'l2_gateway_service_uuid': gateway_id} if vlan_id: att_obj['vlan_id'] = vlan_id return _plug_interface(cluster, lswitch_id, lport_id, att_obj)
[ "def", "plug_l2_gw_service", "(", "cluster", ",", "lswitch_id", ",", "lport_id", ",", "gateway_id", ",", "vlan_id", "=", "None", ")", ":", "att_obj", "=", "{", "'type'", ":", "'L2GatewayAttachment'", ",", "'l2_gateway_service_uuid'", ":", "gateway_id", "}", "if", "vlan_id", ":", "att_obj", "[", "'vlan_id'", "]", "=", "vlan_id", "return", "_plug_interface", "(", "cluster", ",", "lswitch_id", ",", "lport_id", ",", "att_obj", ")" ]
plug a layer-2 gateway attachment object in a logical port .
train
false
580
def record_magic(dct, magic_kind, magic_name, func): if (magic_kind == 'line_cell'): dct['line'][magic_name] = dct['cell'][magic_name] = func else: dct[magic_kind][magic_name] = func
[ "def", "record_magic", "(", "dct", ",", "magic_kind", ",", "magic_name", ",", "func", ")", ":", "if", "(", "magic_kind", "==", "'line_cell'", ")", ":", "dct", "[", "'line'", "]", "[", "magic_name", "]", "=", "dct", "[", "'cell'", "]", "[", "magic_name", "]", "=", "func", "else", ":", "dct", "[", "magic_kind", "]", "[", "magic_name", "]", "=", "func" ]
utility function to store a function as a magic of a specific kind .
train
true
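A tiny sketch of the record_magic bookkeeping, using a dict shaped like IPython's magics table ({'line': {...}, 'cell': {...}}; that shape is an assumption drawn from the snippet itself):

def record_magic(dct, magic_kind, magic_name, func):
    # 'line_cell' magics register under both kinds in one assignment.
    if magic_kind == 'line_cell':
        dct['line'][magic_name] = dct['cell'][magic_name] = func
    else:
        dct[magic_kind][magic_name] = func

magics = {'line': {}, 'cell': {}}
record_magic(magics, 'line_cell', 'time', lambda arg: arg)
print('time' in magics['line'] and 'time' in magics['cell'])  # True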
581
@skip('silverlight', 'multiple_execute') def test_package_back_patching(): try: mod_backup = dict(sys.modules) _f_dir = path_combine(testpath.public_testdir, 'the_dir') _f_init = path_combine(_f_dir, '__init__.py') _f_pkg_abc = path_combine(_f_dir, 'abc1.py') _f_pkg_xyz = path_combine(_f_dir, 'xyz1.py') ensure_directory_present(_f_dir) write_to_file(_f_init, 'import abc1') write_to_file(_f_pkg_abc, 'import xyz1') write_to_file(_f_pkg_xyz, 'import sys\nsys.foo = "xyz"') import the_dir (x, y) = (the_dir.abc1, the_dir.xyz1) from the_dir import abc1 from the_dir import xyz1 AreEqual(x, abc1) AreEqual(y, xyz1) AreEqual(sys.foo, 'xyz') del sys.foo finally: sys.modules = mod_backup os.unlink(_f_init) os.unlink(_f_pkg_abc) os.unlink(_f_pkg_xyz)
[ "@", "skip", "(", "'silverlight'", ",", "'multiple_execute'", ")", "def", "test_package_back_patching", "(", ")", ":", "try", ":", "mod_backup", "=", "dict", "(", "sys", ".", "modules", ")", "_f_dir", "=", "path_combine", "(", "testpath", ".", "public_testdir", ",", "'the_dir'", ")", "_f_init", "=", "path_combine", "(", "_f_dir", ",", "'__init__.py'", ")", "_f_pkg_abc", "=", "path_combine", "(", "_f_dir", ",", "'abc1.py'", ")", "_f_pkg_xyz", "=", "path_combine", "(", "_f_dir", ",", "'xyz1.py'", ")", "ensure_directory_present", "(", "_f_dir", ")", "write_to_file", "(", "_f_init", ",", "'import abc1'", ")", "write_to_file", "(", "_f_pkg_abc", ",", "'import xyz1'", ")", "write_to_file", "(", "_f_pkg_xyz", ",", "'import sys\\nsys.foo = \"xyz\"'", ")", "import", "the_dir", "(", "x", ",", "y", ")", "=", "(", "the_dir", ".", "abc1", ",", "the_dir", ".", "xyz1", ")", "from", "the_dir", "import", "abc1", "from", "the_dir", "import", "xyz1", "AreEqual", "(", "x", ",", "abc1", ")", "AreEqual", "(", "y", ",", "xyz1", ")", "AreEqual", "(", "sys", ".", "foo", ",", "'xyz'", ")", "del", "sys", ".", "foo", "finally", ":", "sys", ".", "modules", "=", "mod_backup", "os", ".", "unlink", "(", "_f_init", ")", "os", ".", "unlink", "(", "_f_pkg_abc", ")", "os", ".", "unlink", "(", "_f_pkg_xyz", ")" ]
when importing a package item the package should be updated with the child .
train
false
583
def _FormatHash(h): return ('%s_%s_%s_%s_%s' % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40]))
[ "def", "_FormatHash", "(", "h", ")", ":", "return", "(", "'%s_%s_%s_%s_%s'", "%", "(", "h", "[", "0", ":", "8", "]", ",", "h", "[", "8", ":", "16", "]", ",", "h", "[", "16", ":", "24", "]", ",", "h", "[", "24", ":", "32", "]", ",", "h", "[", "32", ":", "40", "]", ")", ")" ]
return a string representation of a hash .
train
false
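A quick sketch of _FormatHash, assuming h is a 40-character hex digest such as SHA-1 produces:

import hashlib

def _FormatHash(h):
    # Break a 40-char hex digest into five underscore-separated 8-char groups.
    return '%s_%s_%s_%s_%s' % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40])

digest = hashlib.sha1(b'example').hexdigest()
print(_FormatHash(digest))  # the digest split into five 8-character groups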
584
def PackIntSet(ints): if (len(ints) > 15): return ('\xff\xff\xff\xff' + zlib.compress(intlist_to_bitmask(ints))) else: return struct.pack(('<' + ('I' * len(ints))), *ints)
[ "def", "PackIntSet", "(", "ints", ")", ":", "if", "(", "len", "(", "ints", ")", ">", "15", ")", ":", "return", "(", "'\\xff\\xff\\xff\\xff'", "+", "zlib", ".", "compress", "(", "intlist_to_bitmask", "(", "ints", ")", ")", ")", "else", ":", "return", "struct", ".", "pack", "(", "(", "'<'", "+", "(", "'I'", "*", "len", "(", "ints", ")", ")", ")", ",", "*", "ints", ")" ]
pack a set of ints to a compact string .
train
false
586
@db_api.api_context_manager.reader def _ensure_rc_cache(ctx): global _RC_CACHE if (_RC_CACHE is not None): return _RC_CACHE = rc_cache.ResourceClassCache(ctx)
[ "@", "db_api", ".", "api_context_manager", ".", "reader", "def", "_ensure_rc_cache", "(", "ctx", ")", ":", "global", "_RC_CACHE", "if", "(", "_RC_CACHE", "is", "not", "None", ")", ":", "return", "_RC_CACHE", "=", "rc_cache", ".", "ResourceClassCache", "(", "ctx", ")" ]
ensures that a singleton resource class cache has been created in the module's scope .
train
false
587
def _indentln(string): return _indent((string + '\n'))
[ "def", "_indentln", "(", "string", ")", ":", "return", "_indent", "(", "(", "string", "+", "'\\n'", ")", ")" ]
return the indented parameter with newline .
train
false
588
def _key_press(event, params): if (event.key == 'left'): params['pause'] = True params['frame'] = max((params['frame'] - 1), 0) elif (event.key == 'right'): params['pause'] = True params['frame'] = min((params['frame'] + 1), (len(params['frames']) - 1))
[ "def", "_key_press", "(", "event", ",", "params", ")", ":", "if", "(", "event", ".", "key", "==", "'left'", ")", ":", "params", "[", "'pause'", "]", "=", "True", "params", "[", "'frame'", "]", "=", "max", "(", "(", "params", "[", "'frame'", "]", "-", "1", ")", ",", "0", ")", "elif", "(", "event", ".", "key", "==", "'right'", ")", ":", "params", "[", "'pause'", "]", "=", "True", "params", "[", "'frame'", "]", "=", "min", "(", "(", "params", "[", "'frame'", "]", "+", "1", ")", ",", "(", "len", "(", "params", "[", "'frames'", "]", ")", "-", "1", ")", ")" ]
function for handling key presses for the animation .
train
false
590
def _createEncoder(): encoder = MultiEncoder() encoder.addMultipleEncoders({'timestamp': dict(fieldname='timestamp', type='DateEncoder', timeOfDay=(5, 5), forced=True), 'attendeeCount': dict(fieldname='attendeeCount', type='ScalarEncoder', name='attendeeCount', minval=0, maxval=270, clipInput=True, w=5, resolution=10, forced=True), 'consumption': dict(fieldname='consumption', type='ScalarEncoder', name='consumption', minval=0, maxval=115, clipInput=True, w=5, resolution=5, forced=True)}) return encoder
[ "def", "_createEncoder", "(", ")", ":", "encoder", "=", "MultiEncoder", "(", ")", "encoder", ".", "addMultipleEncoders", "(", "{", "'timestamp'", ":", "dict", "(", "fieldname", "=", "'timestamp'", ",", "type", "=", "'DateEncoder'", ",", "timeOfDay", "=", "(", "5", ",", "5", ")", ",", "forced", "=", "True", ")", ",", "'attendeeCount'", ":", "dict", "(", "fieldname", "=", "'attendeeCount'", ",", "type", "=", "'ScalarEncoder'", ",", "name", "=", "'attendeeCount'", ",", "minval", "=", "0", ",", "maxval", "=", "270", ",", "clipInput", "=", "True", ",", "w", "=", "5", ",", "resolution", "=", "10", ",", "forced", "=", "True", ")", ",", "'consumption'", ":", "dict", "(", "fieldname", "=", "'consumption'", ",", "type", "=", "'ScalarEncoder'", ",", "name", "=", "'consumption'", ",", "minval", "=", "0", ",", "maxval", "=", "115", ",", "clipInput", "=", "True", ",", "w", "=", "5", ",", "resolution", "=", "5", ",", "forced", "=", "True", ")", "}", ")", "return", "encoder" ]
create the encoder instance for our test and return it .
train
false
591
def entity_to_protobuf(entity): entity_pb = _entity_pb2.Entity() if (entity.key is not None): key_pb = entity.key.to_protobuf() entity_pb.key.CopyFrom(key_pb) for (name, value) in entity.items(): value_is_list = isinstance(value, list) if (value_is_list and (len(value) == 0)): continue value_pb = _new_value_pb(entity_pb, name) _set_protobuf_value(value_pb, value) if (name in entity.exclude_from_indexes): if (not value_is_list): value_pb.exclude_from_indexes = True for sub_value in value_pb.array_value.values: sub_value.exclude_from_indexes = True _set_pb_meaning_from_entity(entity, name, value, value_pb, is_list=value_is_list) return entity_pb
[ "def", "entity_to_protobuf", "(", "entity", ")", ":", "entity_pb", "=", "_entity_pb2", ".", "Entity", "(", ")", "if", "(", "entity", ".", "key", "is", "not", "None", ")", ":", "key_pb", "=", "entity", ".", "key", ".", "to_protobuf", "(", ")", "entity_pb", ".", "key", ".", "CopyFrom", "(", "key_pb", ")", "for", "(", "name", ",", "value", ")", "in", "entity", ".", "items", "(", ")", ":", "value_is_list", "=", "isinstance", "(", "value", ",", "list", ")", "if", "(", "value_is_list", "and", "(", "len", "(", "value", ")", "==", "0", ")", ")", ":", "continue", "value_pb", "=", "_new_value_pb", "(", "entity_pb", ",", "name", ")", "_set_protobuf_value", "(", "value_pb", ",", "value", ")", "if", "(", "name", "in", "entity", ".", "exclude_from_indexes", ")", ":", "if", "(", "not", "value_is_list", ")", ":", "value_pb", ".", "exclude_from_indexes", "=", "True", "for", "sub_value", "in", "value_pb", ".", "array_value", ".", "values", ":", "sub_value", ".", "exclude_from_indexes", "=", "True", "_set_pb_meaning_from_entity", "(", "entity", ",", "name", ",", "value", ",", "value_pb", ",", "is_list", "=", "value_is_list", ")", "return", "entity_pb" ]
converts an entity into a protobuf .
train
true
592
def status_to_ec2_attach_status(volume): volume_status = volume.get('status') attach_status = volume.get('attach_status') if (volume_status in ('attaching', 'detaching')): ec2_attach_status = volume_status elif (attach_status in ('attached', 'detached')): ec2_attach_status = attach_status else: msg = (_('Unacceptable attach status:%s for ec2 API.') % attach_status) raise exception.Invalid(msg) return ec2_attach_status
[ "def", "status_to_ec2_attach_status", "(", "volume", ")", ":", "volume_status", "=", "volume", ".", "get", "(", "'status'", ")", "attach_status", "=", "volume", ".", "get", "(", "'attach_status'", ")", "if", "(", "volume_status", "in", "(", "'attaching'", ",", "'detaching'", ")", ")", ":", "ec2_attach_status", "=", "volume_status", "elif", "(", "attach_status", "in", "(", "'attached'", ",", "'detached'", ")", ")", ":", "ec2_attach_status", "=", "attach_status", "else", ":", "msg", "=", "(", "_", "(", "'Unacceptable attach status:%s for ec2 API.'", ")", "%", "attach_status", ")", "raise", "exception", ".", "Invalid", "(", "msg", ")", "return", "ec2_attach_status" ]
get the corresponding ec2 attachment state .
train
false
593
def is_low_contrast(image, fraction_threshold=0.05, lower_percentile=1, upper_percentile=99, method='linear'): image = np.asanyarray(image) if ((image.ndim == 3) and (image.shape[2] in [3, 4])): image = rgb2gray(image) dlimits = dtype_limits(image, clip_negative=False) limits = np.percentile(image, [lower_percentile, upper_percentile]) ratio = ((limits[1] - limits[0]) / (dlimits[1] - dlimits[0])) return (ratio < fraction_threshold)
[ "def", "is_low_contrast", "(", "image", ",", "fraction_threshold", "=", "0.05", ",", "lower_percentile", "=", "1", ",", "upper_percentile", "=", "99", ",", "method", "=", "'linear'", ")", ":", "image", "=", "np", ".", "asanyarray", "(", "image", ")", "if", "(", "(", "image", ".", "ndim", "==", "3", ")", "and", "(", "image", ".", "shape", "[", "2", "]", "in", "[", "3", ",", "4", "]", ")", ")", ":", "image", "=", "rgb2gray", "(", "image", ")", "dlimits", "=", "dtype_limits", "(", "image", ",", "clip_negative", "=", "False", ")", "limits", "=", "np", ".", "percentile", "(", "image", ",", "[", "lower_percentile", ",", "upper_percentile", "]", ")", "ratio", "=", "(", "(", "limits", "[", "1", "]", "-", "limits", "[", "0", "]", ")", "/", "(", "dlimits", "[", "1", "]", "-", "dlimits", "[", "0", "]", ")", ")", "return", "(", "ratio", "<", "fraction_threshold", ")" ]
determine if an image is low contrast .
train
false
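is_low_contrast is public scikit-image API (skimage.exposure); a minimal check on a flat versus a full-range image, assuming float inputs in [0, 1]:

import numpy as np
from skimage.exposure import is_low_contrast

flat = np.full((8, 8), 0.5)                 # almost no dynamic range
ramp = np.linspace(0, 1, 64).reshape(8, 8)  # spans the full [0, 1] range
print(is_low_contrast(flat))  # True
print(is_low_contrast(ramp))  # False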
594
def get_sources(zone, permanent=True): cmd = '--zone={0} --list-sources'.format(zone) if permanent: cmd += ' --permanent' return __firewall_cmd(cmd).split()
[ "def", "get_sources", "(", "zone", ",", "permanent", "=", "True", ")", ":", "cmd", "=", "'--zone={0} --list-sources'", ".", "format", "(", "zone", ")", "if", "permanent", ":", "cmd", "+=", "' --permanent'", "return", "__firewall_cmd", "(", "cmd", ")", ".", "split", "(", ")" ]
return the sources bound to a zone .
train
true
595
def _set_pb_meaning_from_entity(entity, name, value, value_pb, is_list=False): if (name not in entity._meanings): return (meaning, orig_value) = entity._meanings[name] if (orig_value is not value): return if is_list: if (not isinstance(meaning, list)): meaning = itertools.repeat(meaning) val_iter = six.moves.zip(value_pb.array_value.values, meaning) for (sub_value_pb, sub_meaning) in val_iter: if (sub_meaning is not None): sub_value_pb.meaning = sub_meaning else: value_pb.meaning = meaning
[ "def", "_set_pb_meaning_from_entity", "(", "entity", ",", "name", ",", "value", ",", "value_pb", ",", "is_list", "=", "False", ")", ":", "if", "(", "name", "not", "in", "entity", ".", "_meanings", ")", ":", "return", "(", "meaning", ",", "orig_value", ")", "=", "entity", ".", "_meanings", "[", "name", "]", "if", "(", "orig_value", "is", "not", "value", ")", ":", "return", "if", "is_list", ":", "if", "(", "not", "isinstance", "(", "meaning", ",", "list", ")", ")", ":", "meaning", "=", "itertools", ".", "repeat", "(", "meaning", ")", "val_iter", "=", "six", ".", "moves", ".", "zip", "(", "value_pb", ".", "array_value", ".", "values", ",", "meaning", ")", "for", "(", "sub_value_pb", ",", "sub_meaning", ")", "in", "val_iter", ":", "if", "(", "sub_meaning", "is", "not", "None", ")", ":", "sub_value_pb", ".", "meaning", "=", "sub_meaning", "else", ":", "value_pb", ".", "meaning", "=", "meaning" ]
add meaning information to a protobuf .
train
true
596
def _partition_of_index_value(divisions, val): if (divisions[0] is None): msg = 'Can not use loc on DataFrame without known divisions' raise ValueError(msg) val = _coerce_loc_index(divisions, val) i = bisect.bisect_right(divisions, val) return min((len(divisions) - 2), max(0, (i - 1)))
[ "def", "_partition_of_index_value", "(", "divisions", ",", "val", ")", ":", "if", "(", "divisions", "[", "0", "]", "is", "None", ")", ":", "msg", "=", "'Can not use loc on DataFrame without known divisions'", "raise", "ValueError", "(", "msg", ")", "val", "=", "_coerce_loc_index", "(", "divisions", ",", "val", ")", "i", "=", "bisect", ".", "bisect_right", "(", "divisions", ",", "val", ")", "return", "min", "(", "(", "len", "(", "divisions", ")", "-", "2", ")", ",", "max", "(", "0", ",", "(", "i", "-", "1", ")", ")", ")" ]
in which partition does this value lie?
train
false
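A standalone sketch of the bisect logic in _partition_of_index_value, dropping the dask-specific _coerce_loc_index coercion (assumed context): n divisions delimit n-1 partitions, boundary values fall into the partition to their right, and out-of-range values are clipped:

import bisect

def partition_of_index_value(divisions, val):
    # divisions are sorted partition boundaries; len(divisions)-1 partitions.
    if divisions[0] is None:
        raise ValueError('Can not use loc on DataFrame without known divisions')
    i = bisect.bisect_right(divisions, val)
    return min(len(divisions) - 2, max(0, i - 1))

divisions = [0, 10, 20, 30]
print(partition_of_index_value(divisions, 5))   # 0
print(partition_of_index_value(divisions, 10))  # 1: boundary values go right
print(partition_of_index_value(divisions, 99))  # 2: clipped to last partition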
597
def get_completed_programs(student): meter = ProgramProgressMeter(student, use_catalog=True) return meter.completed_programs
[ "def", "get_completed_programs", "(", "student", ")", ":", "meter", "=", "ProgramProgressMeter", "(", "student", ",", "use_catalog", "=", "True", ")", "return", "meter", ".", "completed_programs" ]
given a set of completed courses, determine which programs are completed .
train
false
598
@treeio_login_required def ajax_ticket_lookup(request, response_format='html'): tickets = [] if (request.GET and ('term' in request.GET)): tickets = Ticket.objects.filter(name__icontains=request.GET['term'])[:10] return render_to_response('services/ajax_ticket_lookup', {'tickets': tickets}, context_instance=RequestContext(request), response_format=response_format)
[ "@", "treeio_login_required", "def", "ajax_ticket_lookup", "(", "request", ",", "response_format", "=", "'html'", ")", ":", "tickets", "=", "[", "]", "if", "(", "request", ".", "GET", "and", "(", "'term'", "in", "request", ".", "GET", ")", ")", ":", "tickets", "=", "Ticket", ".", "objects", ".", "filter", "(", "name__icontains", "=", "request", ".", "GET", "[", "'term'", "]", ")", "[", ":", "10", "]", "return", "render_to_response", "(", "'services/ajax_ticket_lookup'", ",", "{", "'tickets'", ":", "tickets", "}", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ",", "response_format", "=", "response_format", ")" ]
returns a list of matching tickets .
train
false
599
def _is_astropy_setup(): main_mod = sys.modules.get('__main__') if (not main_mod): return False return (getattr(main_mod, '__file__', False) and (os.path.basename(main_mod.__file__).rstrip('co') == 'setup.py') and _is_astropy_source(main_mod.__file__))
[ "def", "_is_astropy_setup", "(", ")", ":", "main_mod", "=", "sys", ".", "modules", ".", "get", "(", "'__main__'", ")", "if", "(", "not", "main_mod", ")", ":", "return", "False", "return", "(", "getattr", "(", "main_mod", ",", "'__file__'", ",", "False", ")", "and", "(", "os", ".", "path", ".", "basename", "(", "main_mod", ".", "__file__", ")", ".", "rstrip", "(", "'co'", ")", "==", "'setup.py'", ")", "and", "_is_astropy_source", "(", "main_mod", ".", "__file__", ")", ")" ]
returns whether we are currently being imported in the context of running astropy's setup .
train
false
600
def InitUser(): result = AppUser.query((AppUser.user == users.get_current_user())).fetch() if result: app_user = result[0] else: app_user = AppUser(user=users.get_current_user(), email=users.get_current_user().email()) app_user.put() return app_user
[ "def", "InitUser", "(", ")", ":", "result", "=", "AppUser", ".", "query", "(", "(", "AppUser", ".", "user", "==", "users", ".", "get_current_user", "(", ")", ")", ")", ".", "fetch", "(", ")", "if", "result", ":", "app_user", "=", "result", "[", "0", "]", "else", ":", "app_user", "=", "AppUser", "(", "user", "=", "users", ".", "get_current_user", "(", ")", ",", "email", "=", "users", ".", "get_current_user", "(", ")", ".", "email", "(", ")", ")", "app_user", ".", "put", "(", ")", "return", "app_user" ]
initialize application user .
train
true
601
def summary_urls(resource, url, filters): links = {} get_vars = S3URLQuery.parse_url(url) get_vars.pop('t', None) get_vars.pop('w', None) get_vars.update(filters) list_vars = [] for (k, v) in get_vars.items(): if (v is None): continue values = (v if (type(v) is list) else [v]) for value in values: if (value is not None): list_vars.append((k, value)) base_url = url.split('?', 1)[0] summary_config = S3Summary._get_config(resource) tab_idx = 0 for section in summary_config: if section.get('common'): continue tab_vars = (list_vars + [('t', str(tab_idx))]) links[section['name']] = ('%s?%s' % (base_url, urlencode(tab_vars))) tab_idx += 1 return links
[ "def", "summary_urls", "(", "resource", ",", "url", ",", "filters", ")", ":", "links", "=", "{", "}", "get_vars", "=", "S3URLQuery", ".", "parse_url", "(", "url", ")", "get_vars", ".", "pop", "(", "'t'", ",", "None", ")", "get_vars", ".", "pop", "(", "'w'", ",", "None", ")", "get_vars", ".", "update", "(", "filters", ")", "list_vars", "=", "[", "]", "for", "(", "k", ",", "v", ")", "in", "get_vars", ".", "items", "(", ")", ":", "if", "(", "v", "is", "None", ")", ":", "continue", "values", "=", "(", "v", "if", "(", "type", "(", "v", ")", "is", "list", ")", "else", "[", "v", "]", ")", "for", "value", "in", "values", ":", "if", "(", "value", "is", "not", "None", ")", ":", "list_vars", ".", "append", "(", "(", "k", ",", "value", ")", ")", "base_url", "=", "url", ".", "split", "(", "'?'", ",", "1", ")", "[", "0", "]", "summary_config", "=", "S3Summary", ".", "_get_config", "(", "resource", ")", "tab_idx", "=", "0", "for", "section", "in", "summary_config", ":", "if", "section", ".", "get", "(", "'common'", ")", ":", "continue", "tab_vars", "=", "(", "list_vars", "+", "[", "(", "'t'", ",", "str", "(", "tab_idx", ")", ")", "]", ")", "links", "[", "section", "[", "'name'", "]", "]", "=", "(", "'%s?%s'", "%", "(", "base_url", ",", "urlencode", "(", "tab_vars", ")", ")", ")", "tab_idx", "+=", "1", "return", "links" ]
helper to get urls for summary tabs to use as actions for a saved filter .
train
false
604
def get_signatured_headers(headers): d = {} msg = '' for (k, v) in headers: d[k] = v msg += (k + v) secret = os.environ.get('SECRET_KEY', '') d['Signature'] = get_signature(secret, msg) return d
[ "def", "get_signatured_headers", "(", "headers", ")", ":", "d", "=", "{", "}", "msg", "=", "''", "for", "(", "k", ",", "v", ")", "in", "headers", ":", "d", "[", "k", "]", "=", "v", "msg", "+=", "(", "k", "+", "v", ")", "secret", "=", "os", ".", "environ", ".", "get", "(", "'SECRET_KEY'", ",", "''", ")", "d", "[", "'Signature'", "]", "=", "get_signature", "(", "secret", ",", "msg", ")", "return", "d" ]
given a list of headers, return them as a dict with a computed signature added .
train
false
605
def init_config(): config = (os.path.dirname(__file__) + '/colorset/config') try: data = load_config(config) for d in data: c[d] = data[d] except: pass rainbow_config = ((os.path.expanduser('~') + os.sep) + '.rainbow_config.json') try: data = load_config(rainbow_config) for d in data: c[d] = data[d] except (IOError, ValueError) as e: c['USER_JSON_ERROR'] = str(e) theme_file = (((os.path.dirname(__file__) + '/colorset/') + c['THEME']) + '.json') try: data = load_config(theme_file) for d in data: c[d] = data[d] except: pass
[ "def", "init_config", "(", ")", ":", "config", "=", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "+", "'/colorset/config'", ")", "try", ":", "data", "=", "load_config", "(", "config", ")", "for", "d", "in", "data", ":", "c", "[", "d", "]", "=", "data", "[", "d", "]", "except", ":", "pass", "rainbow_config", "=", "(", "(", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "+", "os", ".", "sep", ")", "+", "'.rainbow_config.json'", ")", "try", ":", "data", "=", "load_config", "(", "rainbow_config", ")", "for", "d", "in", "data", ":", "c", "[", "d", "]", "=", "data", "[", "d", "]", "except", "(", "IOError", ",", "ValueError", ")", "as", "e", ":", "c", "[", "'USER_JSON_ERROR'", "]", "=", "str", "(", "e", ")", "theme_file", "=", "(", "(", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "+", "'/colorset/'", ")", "+", "c", "[", "'THEME'", "]", ")", "+", "'.json'", ")", "try", ":", "data", "=", "load_config", "(", "theme_file", ")", "for", "d", "in", "data", ":", "c", "[", "d", "]", "=", "data", "[", "d", "]", "except", ":", "pass" ]
init configuration .
train
false
606
def _ctl_cmd(cmd, name, conf_file, bin_env): ret = [_get_supervisorctl_bin(bin_env)] if (conf_file is not None): ret += ['-c', conf_file] ret.append(cmd) if name: ret.append(name) return ret
[ "def", "_ctl_cmd", "(", "cmd", ",", "name", ",", "conf_file", ",", "bin_env", ")", ":", "ret", "=", "[", "_get_supervisorctl_bin", "(", "bin_env", ")", "]", "if", "(", "conf_file", "is", "not", "None", ")", ":", "ret", "+=", "[", "'-c'", ",", "conf_file", "]", "ret", ".", "append", "(", "cmd", ")", "if", "name", ":", "ret", ".", "append", "(", "name", ")", "return", "ret" ]
return the command list to use .
train
true
607
def _arma_predict_in_sample(start, end, endog, resid, k_ar, method): if ('mle' in method): fittedvalues = (endog - resid) else: fittedvalues = (endog[k_ar:] - resid) fv_start = start if ('mle' not in method): fv_start -= k_ar fv_end = min(len(fittedvalues), (end + 1)) return fittedvalues[fv_start:fv_end]
[ "def", "_arma_predict_in_sample", "(", "start", ",", "end", ",", "endog", ",", "resid", ",", "k_ar", ",", "method", ")", ":", "if", "(", "'mle'", "in", "method", ")", ":", "fittedvalues", "=", "(", "endog", "-", "resid", ")", "else", ":", "fittedvalues", "=", "(", "endog", "[", "k_ar", ":", "]", "-", "resid", ")", "fv_start", "=", "start", "if", "(", "'mle'", "not", "in", "method", ")", ":", "fv_start", "-=", "k_ar", "fv_end", "=", "min", "(", "len", "(", "fittedvalues", ")", ",", "(", "end", "+", "1", ")", ")", "return", "fittedvalues", "[", "fv_start", ":", "fv_end", "]" ]
pre- and in-sample fitting for arma .
train
false
611
def gitCommitId(path, ref): cmd = (gitCmdBase(path) + ['show', ref]) try: output = runSubprocess(cmd, stderr=None, universal_newlines=True) except sp.CalledProcessError: print cmd raise NameError(("Unknown git reference '%s'" % ref)) commit = output.split('\n')[0] assert (commit[:7] == 'commit ') return commit[7:]
[ "def", "gitCommitId", "(", "path", ",", "ref", ")", ":", "cmd", "=", "(", "gitCmdBase", "(", "path", ")", "+", "[", "'show'", ",", "ref", "]", ")", "try", ":", "output", "=", "runSubprocess", "(", "cmd", ",", "stderr", "=", "None", ",", "universal_newlines", "=", "True", ")", "except", "sp", ".", "CalledProcessError", ":", "print", "cmd", "raise", "NameError", "(", "(", "\"Unknown git reference '%s'\"", "%", "ref", ")", ")", "commit", "=", "output", ".", "split", "(", "'\\n'", ")", "[", "0", "]", "assert", "(", "commit", "[", ":", "7", "]", "==", "'commit '", ")", "return", "commit", "[", "7", ":", "]" ]
return the commit id of *ref* in the git repository at *path* .
train
false
612
@with_setup(step_runner_environ) def test_failing_behave_as_step_fails(): runnable_step = Step.from_string('Given I have a step which calls the "other step fails" step with behave_as') try: runnable_step.run(True) except: pass assert runnable_step.failed
[ "@", "with_setup", "(", "step_runner_environ", ")", "def", "test_failing_behave_as_step_fails", "(", ")", ":", "runnable_step", "=", "Step", ".", "from_string", "(", "'Given I have a step which calls the \"other step fails\" step with behave_as'", ")", "try", ":", "runnable_step", ".", "run", "(", "True", ")", "except", ":", "pass", "assert", "runnable_step", ".", "failed" ]
when a step definition calls another step definition with behave_as and the inner step fails, the calling step should fail as well .
train
false
613
def get_exclude_string(client_dir): exclude_string = '--exclude=deps/* --exclude=tests/* --exclude=site_tests/*' prof_dir = os.path.join(client_dir, 'profilers') for f in os.listdir(prof_dir): if os.path.isdir(os.path.join(prof_dir, f)): exclude_string += (' --exclude=profilers/%s' % f) return exclude_string
[ "def", "get_exclude_string", "(", "client_dir", ")", ":", "exclude_string", "=", "'--exclude=deps/* --exclude=tests/* --exclude=site_tests/*'", "prof_dir", "=", "os", ".", "path", ".", "join", "(", "client_dir", ",", "'profilers'", ")", "for", "f", "in", "os", ".", "listdir", "(", "prof_dir", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "prof_dir", ",", "f", ")", ")", ":", "exclude_string", "+=", "(", "' --exclude=profilers/%s'", "%", "f", ")", "return", "exclude_string" ]
get the exclude string for the tar command to exclude specific subdirectories inside client_dir .
train
false
615
def getProcessValue(executable, args=(), env={}, path=None, reactor=None): return _callProtocolWithDeferred(_ValueGetter, executable, args, env, path, reactor)
[ "def", "getProcessValue", "(", "executable", ",", "args", "=", "(", ")", ",", "env", "=", "{", "}", ",", "path", "=", "None", ",", "reactor", "=", "None", ")", ":", "return", "_callProtocolWithDeferred", "(", "_ValueGetter", ",", "executable", ",", "args", ",", "env", ",", "path", ",", "reactor", ")" ]
spawn a process and return its exit code as a deferred .
train
false
616
def cloud_cancel_task(cookie, tokens, task_id): url = ''.join([const.PAN_URL, 'rest/2.0/services/cloud_dl', '?bdstoken=', tokens['bdstoken'], '&task_id=', str(task_id), '&method=cancel_task&app_id=250528', '&t=', util.timestamp(), '&channel=chunlei&clienttype=0&web=1']) req = net.urlopen(url, headers={'Cookie': cookie.header_output()}) if req: content = req.data return json.loads(content.decode()) else: return None
[ "def", "cloud_cancel_task", "(", "cookie", ",", "tokens", ",", "task_id", ")", ":", "url", "=", "''", ".", "join", "(", "[", "const", ".", "PAN_URL", ",", "'rest/2.0/services/cloud_dl'", ",", "'?bdstoken='", ",", "tokens", "[", "'bdstoken'", "]", ",", "'&task_id='", ",", "str", "(", "task_id", ")", ",", "'&method=cancel_task&app_id=250528'", ",", "'&t='", ",", "util", ".", "timestamp", "(", ")", ",", "'&channel=chunlei&clienttype=0&web=1'", "]", ")", "req", "=", "net", ".", "urlopen", "(", "url", ",", "headers", "=", "{", "'Cookie'", ":", "cookie", ".", "header_output", "(", ")", "}", ")", "if", "req", ":", "content", "=", "req", ".", "data", "return", "json", ".", "loads", "(", "content", ".", "decode", "(", ")", ")", "else", ":", "return", "None" ]
task_id - the task id obtained when the offline download task was created .
train
false
617
def _paths_from_ls(recs): ret = {} last_nm = '' level = 0 path = [] ret = [] for ln in recs.splitlines(): (nm, val) = ln.rstrip().split(' = ') barename = nm.lstrip() this_level = (len(nm) - len(barename)) if (this_level == 0): ret.append(barename) level = 0 path = [] elif (this_level == level): ret.append(('%s/%s' % ('/'.join(path), barename))) elif (this_level > level): path.append(last_nm) ret.append(('%s/%s' % ('/'.join(path), barename))) level = this_level elif (this_level < level): path = path[:this_level] ret.append(('%s/%s' % ('/'.join(path), barename))) level = this_level last_nm = barename return ret
[ "def", "_paths_from_ls", "(", "recs", ")", ":", "ret", "=", "{", "}", "last_nm", "=", "''", "level", "=", "0", "path", "=", "[", "]", "ret", "=", "[", "]", "for", "ln", "in", "recs", ".", "splitlines", "(", ")", ":", "(", "nm", ",", "val", ")", "=", "ln", ".", "rstrip", "(", ")", ".", "split", "(", "' = '", ")", "barename", "=", "nm", ".", "lstrip", "(", ")", "this_level", "=", "(", "len", "(", "nm", ")", "-", "len", "(", "barename", ")", ")", "if", "(", "this_level", "==", "0", ")", ":", "ret", ".", "append", "(", "barename", ")", "level", "=", "0", "path", "=", "[", "]", "elif", "(", "this_level", "==", "level", ")", ":", "ret", ".", "append", "(", "(", "'%s/%s'", "%", "(", "'/'", ".", "join", "(", "path", ")", ",", "barename", ")", ")", ")", "elif", "(", "this_level", ">", "level", ")", ":", "path", ".", "append", "(", "last_nm", ")", "ret", ".", "append", "(", "(", "'%s/%s'", "%", "(", "'/'", ".", "join", "(", "path", ")", ",", "barename", ")", ")", ")", "level", "=", "this_level", "elif", "(", "this_level", "<", "level", ")", ":", "path", "=", "path", "[", ":", "this_level", "]", "ret", ".", "append", "(", "(", "'%s/%s'", "%", "(", "'/'", ".", "join", "(", "path", ")", ",", "barename", ")", ")", ")", "level", "=", "this_level", "last_nm", "=", "barename", "return", "ret" ]
the xenstore-ls command returns a listing that isn't terribly useful .
train
false
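To make record 617's flattening concrete, here is a sketch with an invented listing in the 'name = value' shape the parser splits on; indentation depth encodes nesting:

recs = 'vm = ""\n name = "instance-1"\n image = ""\n  os-type = "linux"\ndevice = ""'
print(_paths_from_ls(recs))
# -> ['vm', 'vm/name', 'vm/image', 'vm/image/os-type', 'device']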
620
def solve_sylvester(a, b, q): (r, u) = schur(a, output='real') (s, v) = schur(b.conj().transpose(), output='real') f = np.dot(np.dot(u.conj().transpose(), q), v) (trsyl,) = get_lapack_funcs(('trsyl',), (r, s, f)) if (trsyl is None): raise RuntimeError('LAPACK implementation does not contain a proper Sylvester equation solver (TRSYL)') (y, scale, info) = trsyl(r, s, f, tranb='C') y = (scale * y) if (info < 0): raise LinAlgError(('Illegal value encountered in the %d term' % ((- info),))) return np.dot(np.dot(u, y), v.conj().transpose())
[ "def", "solve_sylvester", "(", "a", ",", "b", ",", "q", ")", ":", "(", "r", ",", "u", ")", "=", "schur", "(", "a", ",", "output", "=", "'real'", ")", "(", "s", ",", "v", ")", "=", "schur", "(", "b", ".", "conj", "(", ")", ".", "transpose", "(", ")", ",", "output", "=", "'real'", ")", "f", "=", "np", ".", "dot", "(", "np", ".", "dot", "(", "u", ".", "conj", "(", ")", ".", "transpose", "(", ")", ",", "q", ")", ",", "v", ")", "(", "trsyl", ",", ")", "=", "get_lapack_funcs", "(", "(", "'trsyl'", ",", ")", ",", "(", "r", ",", "s", ",", "f", ")", ")", "if", "(", "trsyl", "is", "None", ")", ":", "raise", "RuntimeError", "(", "'LAPACK implementation does not contain a proper Sylvester equation solver (TRSYL)'", ")", "(", "y", ",", "scale", ",", "info", ")", "=", "trsyl", "(", "r", ",", "s", ",", "f", ",", "tranb", "=", "'C'", ")", "y", "=", "(", "scale", "*", "y", ")", "if", "(", "info", "<", "0", ")", ":", "raise", "LinAlgError", "(", "(", "'Illegal value encountered in the %d term'", "%", "(", "(", "-", "info", ")", ",", ")", ")", ")", "return", "np", ".", "dot", "(", "np", ".", "dot", "(", "u", ",", "y", ")", ",", "v", ".", "conj", "(", ")", ".", "transpose", "(", ")", ")" ]
computes a solution (x) to the sylvester equation :math:`AX + XB = Q` .
train
false
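Record 620 has the shape of scipy.linalg.solve_sylvester, so a round-trip check is a reasonable sketch: solve AX + XB = Q, then confirm the residual vanishes.

import numpy as np

a = np.array([[-3.0, -2.0], [-1.0, -1.0]])
b = np.array([[2.0]])                       # a 1x1 B keeps the smoke test tiny
q = np.array([[1.0], [2.0]])
x = solve_sylvester(a, b, q)                # the function from this record
print(np.allclose(a.dot(x) + x.dot(b), q))  # expect True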
621
def ignore_errors(conn, fun=None, *args, **kwargs): if fun: with _ignore_errors(conn): return fun(*args, **kwargs) return _ignore_errors(conn)
[ "def", "ignore_errors", "(", "conn", ",", "fun", "=", "None", ",", "*", "args", ",", "**", "kwargs", ")", ":", "if", "fun", ":", "with", "_ignore_errors", "(", "conn", ")", ":", "return", "fun", "(", "*", "args", ",", "**", "kwargs", ")", "return", "_ignore_errors", "(", "conn", ")" ]
ignore connection and channel errors .
train
false
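Record 621 follows kombu's ignore_errors shape, which gives two call styles; the conn and channel objects below are assumed to exist already.

# Direct form: errors conn classifies as connection/channel errors are swallowed.
ignore_errors(conn, channel.close)

# Context-manager form: the same suppression for everything inside the block.
with ignore_errors(conn):
    channel.close()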
622
def test_ast_good_defclass(): can_compile(u'(defclass a)') can_compile(u'(defclass a [])')
[ "def", "test_ast_good_defclass", "(", ")", ":", "can_compile", "(", "u'(defclass a)'", ")", "can_compile", "(", "u'(defclass a [])'", ")" ]
make sure ast can compile valid defclass .
train
false
624
def delete_files_for_obj(sender, **kwargs): obj = kwargs.pop('instance') for field_name in sender._meta.get_all_field_names(): if (not hasattr(obj, field_name)): continue try: field_class = sender._meta.get_field(field_name) except models.FieldDoesNotExist: continue field_value = getattr(obj, field_name) if (isinstance(field_class, models.FileField) and field_value): field_value.delete()
[ "def", "delete_files_for_obj", "(", "sender", ",", "**", "kwargs", ")", ":", "obj", "=", "kwargs", ".", "pop", "(", "'instance'", ")", "for", "field_name", "in", "sender", ".", "_meta", ".", "get_all_field_names", "(", ")", ":", "if", "(", "not", "hasattr", "(", "obj", ",", "field_name", ")", ")", ":", "continue", "try", ":", "field_class", "=", "sender", ".", "_meta", ".", "get_field", "(", "field_name", ")", "except", "models", ".", "FieldDoesNotExist", ":", "continue", "field_value", "=", "getattr", "(", "obj", ",", "field_name", ")", "if", "(", "isinstance", "(", "field_class", ",", "models", ".", "FileField", ")", "and", "field_value", ")", ":", "field_value", ".", "delete", "(", ")" ]
signal receiver that deletes the files referenced by a deleted model instance's filefield values .
train
false
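Record 624 is written as a signal receiver, so a plausible wiring is Django's post_delete; the Document model is an invented stand-in, not from the source.

from django.db import models
from django.db.models.signals import post_delete

class Document(models.Model):  # hypothetical model with one FileField
    attachment = models.FileField(upload_to='docs/')

# Delete the stored file whenever a Document row is deleted.
post_delete.connect(delete_files_for_obj, sender=Document)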
625
def test_scenarios_parsing(): feature = Feature.from_string(FEATURE15) scenarios_and_tags = [(s.name, s.tags) for s in feature.scenarios] scenarios_and_tags.should.equal([('Bootstraping Redis role', []), ('Restart scalarizr', []), ('Rebundle server', [u'rebundle']), ('Use new role', [u'rebundle']), ('Restart scalarizr after bundling', [u'rebundle']), ('Bundling data', []), ('Modifying data', []), ('Reboot server', []), ('Backuping data on Master', []), ('Setup replication', []), ('Restart scalarizr in slave', []), ('Slave force termination', []), ('Slave delete EBS', [u'ec2']), ('Setup replication for EBS test', [u'ec2']), ('Writing on Master, reading on Slave', []), ('Slave -> Master promotion', []), ('Restart farm', [u'restart_farm'])])
[ "def", "test_scenarios_parsing", "(", ")", ":", "feature", "=", "Feature", ".", "from_string", "(", "FEATURE15", ")", "scenarios_and_tags", "=", "[", "(", "s", ".", "name", ",", "s", ".", "tags", ")", "for", "s", "in", "feature", ".", "scenarios", "]", "scenarios_and_tags", ".", "should", ".", "equal", "(", "[", "(", "'Bootstraping Redis role'", ",", "[", "]", ")", ",", "(", "'Restart scalarizr'", ",", "[", "]", ")", ",", "(", "'Rebundle server'", ",", "[", "u'rebundle'", "]", ")", ",", "(", "'Use new role'", ",", "[", "u'rebundle'", "]", ")", ",", "(", "'Restart scalarizr after bundling'", ",", "[", "u'rebundle'", "]", ")", ",", "(", "'Bundling data'", ",", "[", "]", ")", ",", "(", "'Modifying data'", ",", "[", "]", ")", ",", "(", "'Reboot server'", ",", "[", "]", ")", ",", "(", "'Backuping data on Master'", ",", "[", "]", ")", ",", "(", "'Setup replication'", ",", "[", "]", ")", ",", "(", "'Restart scalarizr in slave'", ",", "[", "]", ")", ",", "(", "'Slave force termination'", ",", "[", "]", ")", ",", "(", "'Slave delete EBS'", ",", "[", "u'ec2'", "]", ")", ",", "(", "'Setup replication for EBS test'", ",", "[", "u'ec2'", "]", ")", ",", "(", "'Writing on Master, reading on Slave'", ",", "[", "]", ")", ",", "(", "'Slave -> Master promotion'", ",", "[", "]", ")", ",", "(", "'Restart farm'", ",", "[", "u'restart_farm'", "]", ")", "]", ")" ]
scenario names and their tags are parsed correctly .
train
false
627
def _check_binary_probabilistic_predictions(y_true, y_prob): check_consistent_length(y_true, y_prob) labels = np.unique(y_true) if (len(labels) > 2): raise ValueError(('Only binary classification is supported. Provided labels %s.' % labels)) if (y_prob.max() > 1): raise ValueError('y_prob contains values greater than 1.') if (y_prob.min() < 0): raise ValueError('y_prob contains values less than 0.') return label_binarize(y_true, labels)[:, 0]
[ "def", "_check_binary_probabilistic_predictions", "(", "y_true", ",", "y_prob", ")", ":", "check_consistent_length", "(", "y_true", ",", "y_prob", ")", "labels", "=", "np", ".", "unique", "(", "y_true", ")", "if", "(", "len", "(", "labels", ")", ">", "2", ")", ":", "raise", "ValueError", "(", "(", "'Only binary classification is supported. Provided labels %s.'", "%", "labels", ")", ")", "if", "(", "y_prob", ".", "max", "(", ")", ">", "1", ")", ":", "raise", "ValueError", "(", "'y_prob contains values greater than 1.'", ")", "if", "(", "y_prob", ".", "min", "(", ")", "<", "0", ")", ":", "raise", "ValueError", "(", "'y_prob contains values less than 0.'", ")", "return", "label_binarize", "(", "y_true", ",", "labels", ")", "[", ":", ",", "0", "]" ]
check that y_true is binary and y_prob contains valid probabilities .
train
false
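A sketch of record 627's validation with toy arrays: valid inputs come back as a 0/1 label vector, while an out-of-range probability raises.

import numpy as np

y_true = np.array([0, 1, 1, 0])
y_prob = np.array([0.1, 0.9, 0.8, 0.3])
print(_check_binary_probabilistic_predictions(y_true, y_prob))  # -> [0 1 1 0]

try:
    _check_binary_probabilistic_predictions(y_true, np.array([0.1, 1.2, 0.8, 0.3]))
except ValueError as err:
    print(err)  # y_prob contains values greater than 1.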
628
def housing_type(): return s3_rest_controller()
[ "def", "housing_type", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
housing types: restful crud controller .
train
false
629
def ebSentMessage(err): err.printTraceback() reactor.stop()
[ "def", "ebSentMessage", "(", "err", ")", ":", "err", ".", "printTraceback", "(", ")", "reactor", ".", "stop", "(", ")" ]
called if the message cannot be sent .
train
false
631
@addon_view @non_atomic_requests def usage_breakdown_series(request, addon, group, start, end, format, field): date_range = check_series_params_or_404(group, start, end, format) check_stats_permission(request, addon) fields = {'applications': 'apps', 'locales': 'locales', 'oses': 'os', 'versions': 'versions', 'statuses': 'status'} series = get_series(UpdateCount, source=fields[field], addon=addon.id, date__range=date_range) if (field == 'locales'): series = process_locales(series) if (format == 'csv'): if (field == 'applications'): series = flatten_applications(series) (series, fields) = csv_fields(series) return render_csv(request, addon, series, (['date', 'count'] + list(fields))) elif (format == 'json'): return render_json(request, addon, series)
[ "@", "addon_view", "@", "non_atomic_requests", "def", "usage_breakdown_series", "(", "request", ",", "addon", ",", "group", ",", "start", ",", "end", ",", "format", ",", "field", ")", ":", "date_range", "=", "check_series_params_or_404", "(", "group", ",", "start", ",", "end", ",", "format", ")", "check_stats_permission", "(", "request", ",", "addon", ")", "fields", "=", "{", "'applications'", ":", "'apps'", ",", "'locales'", ":", "'locales'", ",", "'oses'", ":", "'os'", ",", "'versions'", ":", "'versions'", ",", "'statuses'", ":", "'status'", "}", "series", "=", "get_series", "(", "UpdateCount", ",", "source", "=", "fields", "[", "field", "]", ",", "addon", "=", "addon", ".", "id", ",", "date__range", "=", "date_range", ")", "if", "(", "field", "==", "'locales'", ")", ":", "series", "=", "process_locales", "(", "series", ")", "if", "(", "format", "==", "'csv'", ")", ":", "if", "(", "field", "==", "'applications'", ")", ":", "series", "=", "flatten_applications", "(", "series", ")", "(", "series", ",", "fields", ")", "=", "csv_fields", "(", "series", ")", "return", "render_csv", "(", "request", ",", "addon", ",", "series", ",", "(", "[", "'date'", ",", "'count'", "]", "+", "list", "(", "fields", ")", ")", ")", "elif", "(", "format", "==", "'json'", ")", ":", "return", "render_json", "(", "request", ",", "addon", ",", "series", ")" ]
generate adu breakdown of field .
train
false
632
@apply_to_text_file def php_template_injection(data): import re template = re.search('<\\!-- __NIKOLA_PHP_TEMPLATE_INJECTION source\\:(.*) checksum\\:(.*)__ -->', data) if template: source = template.group(1) with io.open(source, 'r', encoding='utf-8') as in_file: phpdata = in_file.read() _META_SEPARATOR = (((((('(' + (os.linesep * 2)) + '|') + ('\n' * 2)) + '|') + ('\r\n' * 2)) + ')') phpdata = re.split(_META_SEPARATOR, phpdata, maxsplit=1)[(-1)] phpdata = re.sub(template.group(0), phpdata, data) return phpdata else: return data
[ "@", "apply_to_text_file", "def", "php_template_injection", "(", "data", ")", ":", "import", "re", "template", "=", "re", ".", "search", "(", "'<\\\\!-- __NIKOLA_PHP_TEMPLATE_INJECTION source\\\\:(.*) checksum\\\\:(.*)__ -->'", ",", "data", ")", "if", "template", ":", "source", "=", "template", ".", "group", "(", "1", ")", "with", "io", ".", "open", "(", "source", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "in_file", ":", "phpdata", "=", "in_file", ".", "read", "(", ")", "_META_SEPARATOR", "=", "(", "(", "(", "(", "(", "(", "'('", "+", "(", "os", ".", "linesep", "*", "2", ")", ")", "+", "'|'", ")", "+", "(", "'\\n'", "*", "2", ")", ")", "+", "'|'", ")", "+", "(", "'\\r\\n'", "*", "2", ")", ")", "+", "')'", ")", "phpdata", "=", "re", ".", "split", "(", "_META_SEPARATOR", ",", "phpdata", ",", "maxsplit", "=", "1", ")", "[", "(", "-", "1", ")", "]", "phpdata", "=", "re", ".", "sub", "(", "template", ".", "group", "(", "0", ")", ",", "phpdata", ",", "data", ")", "return", "phpdata", "else", ":", "return", "data" ]
insert php code into nikola templates .
train
false
633
def get_volume_drivers(): _ensure_loaded('cinder/volume/drivers') return [DriverInfo(x) for x in interface._volume_register]
[ "def", "get_volume_drivers", "(", ")", ":", "_ensure_loaded", "(", "'cinder/volume/drivers'", ")", "return", "[", "DriverInfo", "(", "x", ")", "for", "x", "in", "interface", ".", "_volume_register", "]" ]
get a list of all volume drivers .
train
false
634
def cleanup_value(v): v = str(v) if v.startswith('\\??\\'): v = v[4:] return v
[ "def", "cleanup_value", "(", "v", ")", ":", "v", "=", "str", "(", "v", ")", "if", "v", ".", "startswith", "(", "'\\\\??\\\\'", ")", ":", "v", "=", "v", "[", "4", ":", "]", "return", "v" ]
cleanup utility function .
train
false
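Record 634 is a one-liner, so a single illustrative call suffices; the path is made up.

print(cleanup_value('\\??\\C:\\Windows\\system32\\cmd.exe'))  # -> C:\Windows\system32\cmd.exe
print(cleanup_value(42))  # non-strings are coerced to str first: -> 42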
637
def reissue(csr_file, certificate_id, web_server_type, approver_email=None, http_dc_validation=False, **kwargs): return __get_certificates('namecheap.ssl.reissue', 'SSLReissueResult', csr_file, certificate_id, web_server_type, approver_email, http_dc_validation, kwargs)
[ "def", "reissue", "(", "csr_file", ",", "certificate_id", ",", "web_server_type", ",", "approver_email", "=", "None", ",", "http_dc_validation", "=", "False", ",", "**", "kwargs", ")", ":", "return", "__get_certificates", "(", "'namecheap.ssl.reissue'", ",", "'SSLReissueResult'", ",", "csr_file", ",", "certificate_id", ",", "web_server_type", ",", "approver_email", ",", "http_dc_validation", ",", "kwargs", ")" ]
reissues an existing certificate .
train
true
638
def get_availability_zone(vm_): avz = config.get_cloud_config_value('availability_zone', vm_, __opts__, search_global=False) if (avz is None): return None zones = _list_availability_zones(vm_) if (avz not in zones): raise SaltCloudException("The specified availability zone isn't valid in this region: {0}\n".format(avz)) elif (zones[avz] != 'available'): raise SaltCloudException("The specified availability zone isn't currently available: {0}\n".format(avz)) return avz
[ "def", "get_availability_zone", "(", "vm_", ")", ":", "avz", "=", "config", ".", "get_cloud_config_value", "(", "'availability_zone'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", "False", ")", "if", "(", "avz", "is", "None", ")", ":", "return", "None", "zones", "=", "_list_availability_zones", "(", "vm_", ")", "if", "(", "avz", "not", "in", "zones", ")", ":", "raise", "SaltCloudException", "(", "\"The specified availability zone isn't valid in this region: {0}\\n\"", ".", "format", "(", "avz", ")", ")", "elif", "(", "zones", "[", "avz", "]", "!=", "'available'", ")", ":", "raise", "SaltCloudException", "(", "\"The specified availability zone isn't currently available: {0}\\n\"", ".", "format", "(", "avz", ")", ")", "return", "avz" ]
return the availability zone to use .
train
true
639
def metric_cleanup(): logging.debug('metric_cleanup') pass
[ "def", "metric_cleanup", "(", ")", ":", "logging", ".", "debug", "(", "'metric_cleanup'", ")", "pass" ]
clean up the metric module .
train
false
640
def _error_page(request, status): return render(request, ('%d.html' % status), status=status)
[ "def", "_error_page", "(", "request", ",", "status", ")", ":", "return", "render", "(", "request", ",", "(", "'%d.html'", "%", "status", ")", ",", "status", "=", "status", ")" ]
render error pages with jinja2 .
train
false
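Record 640 implies per-status templates; a plausible wiring, assuming Django's handler hooks and templates named 404.html and 500.html, might look like this.

# Hypothetical urls.py hooks; Django invokes these on unhandled errors.
def handler404(request, exception):
    return _error_page(request, 404)

def handler500(request):
    return _error_page(request, 500)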
641
def get_a_ssh_config(box_name): output = subprocess.check_output(['vagrant', 'ssh-config', box_name]) config = SSHConfig() config.parse(StringIO(output)) host_config = config.lookup(box_name) for id in host_config['identityfile']: if os.path.isfile(id): host_config['identityfile'] = id return dict(((v, host_config[k]) for (k, v) in _ssh_to_ansible))
[ "def", "get_a_ssh_config", "(", "box_name", ")", ":", "output", "=", "subprocess", ".", "check_output", "(", "[", "'vagrant'", ",", "'ssh-config'", ",", "box_name", "]", ")", "config", "=", "SSHConfig", "(", ")", "config", ".", "parse", "(", "StringIO", "(", "output", ")", ")", "host_config", "=", "config", ".", "lookup", "(", "box_name", ")", "for", "id", "in", "host_config", "[", "'identityfile'", "]", ":", "if", "os", ".", "path", ".", "isfile", "(", "id", ")", ":", "host_config", "[", "'identityfile'", "]", "=", "id", "return", "dict", "(", "(", "(", "v", ",", "host_config", "[", "k", "]", ")", "for", "(", "k", ",", "v", ")", "in", "_ssh_to_ansible", ")", ")" ]
gives back a map of all the machine's ssh configurations .
train
false
644
def workers(profile='default'): config = get_running(profile) lbn = config['worker.list'].split(',') worker_list = [] ret = {} for lb in lbn: try: worker_list.extend(config['worker.{0}.balance_workers'.format(lb)].split(',')) except KeyError: pass worker_list = list(set(worker_list)) for worker in worker_list: ret[worker] = {'activation': config['worker.{0}.activation'.format(worker)], 'state': config['worker.{0}.state'.format(worker)]} return ret
[ "def", "workers", "(", "profile", "=", "'default'", ")", ":", "config", "=", "get_running", "(", "profile", ")", "lbn", "=", "config", "[", "'worker.list'", "]", ".", "split", "(", "','", ")", "worker_list", "=", "[", "]", "ret", "=", "{", "}", "for", "lb", "in", "lbn", ":", "try", ":", "worker_list", ".", "extend", "(", "config", "[", "'worker.{0}.balance_workers'", ".", "format", "(", "lb", ")", "]", ".", "split", "(", "','", ")", ")", "except", "KeyError", ":", "pass", "worker_list", "=", "list", "(", "set", "(", "worker_list", ")", ")", "for", "worker", "in", "worker_list", ":", "ret", "[", "worker", "]", "=", "{", "'activation'", ":", "config", "[", "'worker.{0}.activation'", ".", "format", "(", "worker", ")", "]", ",", "'state'", ":", "config", "[", "'worker.{0}.state'", ".", "format", "(", "worker", ")", "]", "}", "return", "ret" ]
return a list of member workers and their status .
train
true
645
def warn_or_error(removal_version, deprecated_entity_description, hint=None, stacklevel=3): removal_semver = validate_removal_semver(removal_version) msg = u'DEPRECATED: {} {} removed in version {}.'.format(deprecated_entity_description, get_deprecated_tense(removal_version), removal_version) if hint: msg += u'\n {}'.format(hint) if (removal_semver > PANTS_SEMVER): warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel) else: raise CodeRemovedError(msg)
[ "def", "warn_or_error", "(", "removal_version", ",", "deprecated_entity_description", ",", "hint", "=", "None", ",", "stacklevel", "=", "3", ")", ":", "removal_semver", "=", "validate_removal_semver", "(", "removal_version", ")", "msg", "=", "u'DEPRECATED: {} {} removed in version {}.'", ".", "format", "(", "deprecated_entity_description", ",", "get_deprecated_tense", "(", "removal_version", ")", ",", "removal_version", ")", "if", "hint", ":", "msg", "+=", "u'\\n {}'", ".", "format", "(", "hint", ")", "if", "(", "removal_semver", ">", "PANTS_SEMVER", ")", ":", "warnings", ".", "warn", "(", "msg", ",", "DeprecationWarning", ",", "stacklevel", "=", "stacklevel", ")", "else", ":", "raise", "CodeRemovedError", "(", "msg", ")" ]
check the removal_version against the current pants version .
train
false
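A sketch of invoking record 645's deprecation helper; the version string and flag name are invented for illustration, and the exact version format accepted by validate_removal_semver is an assumption.

# Warns while removal_version is still ahead of the running pants version,
# and raises CodeRemovedError once that version has been reached.
warn_or_error(removal_version='1.5.0.dev0',
              deprecated_entity_description='the --foo flag',
              hint='Use --bar instead.')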
646
def get_version_number(): config_parser = ConfigParser.RawConfigParser() config_file = os.path.join(os.path.dirname(__file__), os.pardir, 'res', 'roboto.cfg') config_parser.read(config_file) version_number = config_parser.get('main', 'version') assert re.match('[0-9]+\\.[0-9]{3}', version_number) return version_number
[ "def", "get_version_number", "(", ")", ":", "config_parser", "=", "ConfigParser", ".", "RawConfigParser", "(", ")", "config_file", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "os", ".", "pardir", ",", "'res'", ",", "'roboto.cfg'", ")", "config_parser", ".", "read", "(", "config_file", ")", "version_number", "=", "config_parser", ".", "get", "(", "'main'", ",", "'version'", ")", "assert", "re", ".", "match", "(", "'[0-9]+\\\\.[0-9]{3}'", ",", "version_number", ")", "return", "version_number" ]
returns the version number as a string .
train
false
647
def DeleteCommitInformation(rebalance): loc = data_store.DB.Location() if (not os.path.exists(loc)): return False if (not os.path.isdir(loc)): return False tempdir = _GetTransactionDirectory(loc, rebalance.id) tempfile = utils.JoinPath(tempdir, constants.TRANSACTION_FILENAME) try: os.unlink(tempfile) except OSError: pass return True
[ "def", "DeleteCommitInformation", "(", "rebalance", ")", ":", "loc", "=", "data_store", ".", "DB", ".", "Location", "(", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "loc", ")", ")", ":", "return", "False", "if", "(", "not", "os", ".", "path", ".", "isdir", "(", "loc", ")", ")", ":", "return", "False", "tempdir", "=", "_GetTransactionDirectory", "(", "loc", ",", "rebalance", ".", "id", ")", "tempfile", "=", "utils", ".", "JoinPath", "(", "tempdir", ",", "constants", ".", "TRANSACTION_FILENAME", ")", "try", ":", "os", ".", "unlink", "(", "tempfile", ")", "except", "OSError", ":", "pass", "return", "True" ]
remove file with rebalance information .
train
false
650
def get_account_info(env, app, swift_source=None): (version, account, _junk, _junk) = split_path(env['PATH_INFO'], 2, 4, True) info = _get_info_from_caches(app, env, account) if (not info): env.setdefault('swift.infocache', {}) req = _prepare_pre_auth_info_request(env, ('/%s/%s' % (version, account)), (swift_source or 'GET_ACCOUNT_INFO')) resp = req.get_response(app) info = _get_info_from_infocache(env, account) if (info is None): info = set_info_cache(app, env, account, None, resp) if info: info = info.copy() else: info = headers_to_account_info({}, 0) for field in ('container_count', 'bytes', 'total_object_count'): if (info.get(field) is None): info[field] = 0 else: info[field] = int(info[field]) return info
[ "def", "get_account_info", "(", "env", ",", "app", ",", "swift_source", "=", "None", ")", ":", "(", "version", ",", "account", ",", "_junk", ",", "_junk", ")", "=", "split_path", "(", "env", "[", "'PATH_INFO'", "]", ",", "2", ",", "4", ",", "True", ")", "info", "=", "_get_info_from_caches", "(", "app", ",", "env", ",", "account", ")", "if", "(", "not", "info", ")", ":", "env", ".", "setdefault", "(", "'swift.infocache'", ",", "{", "}", ")", "req", "=", "_prepare_pre_auth_info_request", "(", "env", ",", "(", "'/%s/%s'", "%", "(", "version", ",", "account", ")", ")", ",", "(", "swift_source", "or", "'GET_ACCOUNT_INFO'", ")", ")", "resp", "=", "req", ".", "get_response", "(", "app", ")", "info", "=", "_get_info_from_infocache", "(", "env", ",", "account", ")", "if", "(", "info", "is", "None", ")", ":", "info", "=", "set_info_cache", "(", "app", ",", "env", ",", "account", ",", "None", ",", "resp", ")", "if", "info", ":", "info", "=", "info", ".", "copy", "(", ")", "else", ":", "info", "=", "headers_to_account_info", "(", "{", "}", ",", "0", ")", "for", "field", "in", "(", "'container_count'", ",", "'bytes'", ",", "'total_object_count'", ")", ":", "if", "(", "info", ".", "get", "(", "field", ")", "is", "None", ")", ":", "info", "[", "field", "]", "=", "0", "else", ":", "info", "[", "field", "]", "=", "int", "(", "info", "[", "field", "]", ")", "return", "info" ]
get the info structure for an account .
train
false
652
def _render_report_form(start_str, end_str, start_letter, end_letter, report_type, total_count_error=False, date_fmt_error=False): context = {'total_count_error': total_count_error, 'date_fmt_error': date_fmt_error, 'start_date': start_str, 'end_date': end_str, 'start_letter': start_letter, 'end_letter': end_letter, 'requested_report': report_type} return render_to_response('shoppingcart/download_report.html', context)
[ "def", "_render_report_form", "(", "start_str", ",", "end_str", ",", "start_letter", ",", "end_letter", ",", "report_type", ",", "total_count_error", "=", "False", ",", "date_fmt_error", "=", "False", ")", ":", "context", "=", "{", "'total_count_error'", ":", "total_count_error", ",", "'date_fmt_error'", ":", "date_fmt_error", ",", "'start_date'", ":", "start_str", ",", "'end_date'", ":", "end_str", ",", "'start_letter'", ":", "start_letter", ",", "'end_letter'", ":", "end_letter", ",", "'requested_report'", ":", "report_type", "}", "return", "render_to_response", "(", "'shoppingcart/download_report.html'", ",", "context", ")" ]
helper function that renders the report request form .
train
false
655
def _read_output(commandstring): import contextlib try: import tempfile fp = tempfile.NamedTemporaryFile() except ImportError: fp = open(('/tmp/_osx_support.%s' % (os.getpid(),)), 'w+b') with contextlib.closing(fp) as fp: cmd = ("%s 2>/dev/null >'%s'" % (commandstring, fp.name)) return (fp.read().strip() if (not os.system(cmd)) else None)
[ "def", "_read_output", "(", "commandstring", ")", ":", "import", "contextlib", "try", ":", "import", "tempfile", "fp", "=", "tempfile", ".", "NamedTemporaryFile", "(", ")", "except", "ImportError", ":", "fp", "=", "open", "(", "(", "'/tmp/_osx_support.%s'", "%", "(", "os", ".", "getpid", "(", ")", ",", ")", ")", ",", "'w+b'", ")", "with", "contextlib", ".", "closing", "(", "fp", ")", "as", "fp", ":", "cmd", "=", "(", "\"%s 2>/dev/null >'%s'\"", "%", "(", "commandstring", ",", "fp", ".", "name", ")", ")", "return", "(", "fp", ".", "read", "(", ")", ".", "strip", "(", ")", "if", "(", "not", "os", ".", "system", "(", "cmd", ")", ")", "else", "None", ")" ]
output from successful command execution or none .
train
false
656
def _check_fun(fun, d, *args, **kwargs): want_shape = d.shape d = fun(d, *args, **kwargs) if (not isinstance(d, np.ndarray)): raise TypeError('Return value must be an ndarray') if (d.shape != want_shape): raise ValueError(('Return data must have shape %s not %s' % (want_shape, d.shape))) return d
[ "def", "_check_fun", "(", "fun", ",", "d", ",", "*", "args", ",", "**", "kwargs", ")", ":", "want_shape", "=", "d", ".", "shape", "d", "=", "fun", "(", "d", ",", "*", "args", ",", "**", "kwargs", ")", "if", "(", "not", "isinstance", "(", "d", ",", "np", ".", "ndarray", ")", ")", ":", "raise", "TypeError", "(", "'Return value must be an ndarray'", ")", "if", "(", "d", ".", "shape", "!=", "want_shape", ")", ":", "raise", "ValueError", "(", "(", "'Return data must have shape %s not %s'", "%", "(", "want_shape", ",", "d", ".", "shape", ")", ")", ")", "return", "d" ]
apply fun to d and check that it returns an ndarray with the same shape .
train
false
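Record 656's contract is easiest to see with toy inputs: a shape-preserving fun passes through, a shape-changing one raises.

import numpy as np

d = np.ones((2, 3))
print(_check_fun(lambda x: x * 2.0, d))  # same (2, 3) shape, returned doubled

try:
    _check_fun(lambda x: x.ravel(), d)   # flattens to (6,), so it must fail
except ValueError as err:
    print(err)  # Return data must have shape (2, 3) not (6,)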
657
def history_remove_completed(): logging.info('Scheduled removal of all completed jobs') history_db = HistoryDB() history_db.remove_completed() history_db.close() del history_db
[ "def", "history_remove_completed", "(", ")", ":", "logging", ".", "info", "(", "'Scheduled removal of all completed jobs'", ")", "history_db", "=", "HistoryDB", "(", ")", "history_db", ".", "remove_completed", "(", ")", "history_db", ".", "close", "(", ")", "del", "history_db" ]
remove all completed jobs from history .
train
false
658
def buf_to_color_str(buf): space = ' ' pix = '{{bg#{0:02x}{1:02x}{2:02x}}} ' pixels = [] for h in range(buf.shape[0]): last = None for w in range(buf.shape[1]): rgb = buf[(h, w)] if ((last is not None) and (last == rgb).all()): pixels.append(space) else: pixels.append(pix.format(*rgb)) last = rgb pixels.append('{NO_COLOR}\n') pixels[(-1)] = pixels[(-1)].rstrip() return ''.join(pixels)
[ "def", "buf_to_color_str", "(", "buf", ")", ":", "space", "=", "' '", "pix", "=", "'{{bg#{0:02x}{1:02x}{2:02x}}} '", "pixels", "=", "[", "]", "for", "h", "in", "range", "(", "buf", ".", "shape", "[", "0", "]", ")", ":", "last", "=", "None", "for", "w", "in", "range", "(", "buf", ".", "shape", "[", "1", "]", ")", ":", "rgb", "=", "buf", "[", "(", "h", ",", "w", ")", "]", "if", "(", "(", "last", "is", "not", "None", ")", "and", "(", "last", "==", "rgb", ")", ".", "all", "(", ")", ")", ":", "pixels", ".", "append", "(", "space", ")", "else", ":", "pixels", ".", "append", "(", "pix", ".", "format", "(", "*", "rgb", ")", ")", "last", "=", "rgb", "pixels", ".", "append", "(", "'{NO_COLOR}\\n'", ")", "pixels", "[", "(", "-", "1", ")", "]", "=", "pixels", "[", "(", "-", "1", ")", "]", ".", "rstrip", "(", ")", "return", "''", ".", "join", "(", "pixels", ")" ]
converts an rgb array to a xonsh color string .
train
false
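A tiny buffer makes record 658's run-length trick visible: a repeated pixel collapses to a bare space. The (rows, cols, 3) uint8 layout is inferred from the snippet's indexing.

import numpy as np

buf = np.array([[[255, 0, 0], [255, 0, 0], [0, 255, 0]]], dtype=np.uint8)
print(buf_to_color_str(buf))
# -> {bg#ff0000}  {bg#00ff00} {NO_COLOR}   (the second red pixel became a space)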
659
def setup_hass_instance(emulated_hue_config): hass = get_test_home_assistant() run_coroutine_threadsafe(core_components.async_setup(hass, {core.DOMAIN: {}}), hass.loop).result() bootstrap.setup_component(hass, http.DOMAIN, {http.DOMAIN: {http.CONF_SERVER_PORT: HTTP_SERVER_PORT}}) bootstrap.setup_component(hass, emulated_hue.DOMAIN, emulated_hue_config) return hass
[ "def", "setup_hass_instance", "(", "emulated_hue_config", ")", ":", "hass", "=", "get_test_home_assistant", "(", ")", "run_coroutine_threadsafe", "(", "core_components", ".", "async_setup", "(", "hass", ",", "{", "core", ".", "DOMAIN", ":", "{", "}", "}", ")", ",", "hass", ".", "loop", ")", ".", "result", "(", ")", "bootstrap", ".", "setup_component", "(", "hass", ",", "http", ".", "DOMAIN", ",", "{", "http", ".", "DOMAIN", ":", "{", "http", ".", "CONF_SERVER_PORT", ":", "HTTP_SERVER_PORT", "}", "}", ")", "bootstrap", ".", "setup_component", "(", "hass", ",", "emulated_hue", ".", "DOMAIN", ",", "emulated_hue_config", ")", "return", "hass" ]
set up the home assistant instance to test .
train
false
660
def test_url_completion(qtmodeltester, config_stub, web_history, quickmarks, bookmarks): config_stub.data['completion'] = {'timestamp-format': '%Y-%m-%d', 'web-history-max-items': 2} model = urlmodel.UrlCompletionModel() qtmodeltester.data_display_may_return_none = True qtmodeltester.check(model) _check_completions(model, {'Quickmarks': [('https://wiki.archlinux.org', 'aw', ''), ('https://duckduckgo.com', 'ddg', ''), ('https://wikipedia.org', 'wiki', '')], 'Bookmarks': [('https://github.com', 'GitHub', ''), ('https://python.org', 'Welcome to Python.org', ''), ('http://qutebrowser.org', 'qutebrowser | qutebrowser', '')], 'History': [('https://python.org', 'Welcome to Python.org', '2016-03-08'), ('https://github.com', 'GitHub', '2016-05-01')]})
[ "def", "test_url_completion", "(", "qtmodeltester", ",", "config_stub", ",", "web_history", ",", "quickmarks", ",", "bookmarks", ")", ":", "config_stub", ".", "data", "[", "'completion'", "]", "=", "{", "'timestamp-format'", ":", "'%Y-%m-%d'", ",", "'web-history-max-items'", ":", "2", "}", "model", "=", "urlmodel", ".", "UrlCompletionModel", "(", ")", "qtmodeltester", ".", "data_display_may_return_none", "=", "True", "qtmodeltester", ".", "check", "(", "model", ")", "_check_completions", "(", "model", ",", "{", "'Quickmarks'", ":", "[", "(", "'https://wiki.archlinux.org'", ",", "'aw'", ",", "''", ")", ",", "(", "'https://duckduckgo.com'", ",", "'ddg'", ",", "''", ")", ",", "(", "'https://wikipedia.org'", ",", "'wiki'", ",", "''", ")", "]", ",", "'Bookmarks'", ":", "[", "(", "'https://github.com'", ",", "'GitHub'", ",", "''", ")", ",", "(", "'https://python.org'", ",", "'Welcome to Python.org'", ",", "''", ")", ",", "(", "'http://qutebrowser.org'", ",", "'qutebrowser | qutebrowser'", ",", "''", ")", "]", ",", "'History'", ":", "[", "(", "'https://python.org'", ",", "'Welcome to Python.org'", ",", "'2016-03-08'", ")", ",", "(", "'https://github.com'", ",", "'GitHub'", ",", "'2016-05-01'", ")", "]", "}", ")" ]
test the results of url completion .
train
false
661
def dummyModelParams(perm): errScore = 50 if (perm['modelParams']['sensorParams']['encoders']['address'] is not None): errScore -= 20 if (perm['modelParams']['sensorParams']['encoders']['gym'] is not None): errScore -= 10 if (perm['modelParams']['sensorParams']['encoders']['timestamp_dayOfWeek'] is not None): errScore += 30 if (perm['modelParams']['sensorParams']['encoders']['timestamp_timeOfDay'] is not None): errScore += 40 dummyModelParams = dict(metricValue=errScore, iterations=int(os.environ.get('NTA_TEST_numIterations', '1')), waitTime=None, sysExitModelRange=os.environ.get('NTA_TEST_sysExitModelRange', None), errModelRange=os.environ.get('NTA_TEST_errModelRange', None), jobFailErr=bool(os.environ.get('NTA_TEST_jobFailErr', False))) return dummyModelParams
[ "def", "dummyModelParams", "(", "perm", ")", ":", "errScore", "=", "50", "if", "(", "perm", "[", "'modelParams'", "]", "[", "'sensorParams'", "]", "[", "'encoders'", "]", "[", "'address'", "]", "is", "not", "None", ")", ":", "errScore", "-=", "20", "if", "(", "perm", "[", "'modelParams'", "]", "[", "'sensorParams'", "]", "[", "'encoders'", "]", "[", "'gym'", "]", "is", "not", "None", ")", ":", "errScore", "-=", "10", "if", "(", "perm", "[", "'modelParams'", "]", "[", "'sensorParams'", "]", "[", "'encoders'", "]", "[", "'timestamp_dayOfWeek'", "]", "is", "not", "None", ")", ":", "errScore", "+=", "30", "if", "(", "perm", "[", "'modelParams'", "]", "[", "'sensorParams'", "]", "[", "'encoders'", "]", "[", "'timestamp_timeOfDay'", "]", "is", "not", "None", ")", ":", "errScore", "+=", "40", "dummyModelParams", "=", "dict", "(", "metricValue", "=", "errScore", ",", "iterations", "=", "int", "(", "os", ".", "environ", ".", "get", "(", "'NTA_TEST_numIterations'", ",", "'1'", ")", ")", ",", "waitTime", "=", "None", ",", "sysExitModelRange", "=", "os", ".", "environ", ".", "get", "(", "'NTA_TEST_sysExitModelRange'", ",", "None", ")", ",", "errModelRange", "=", "os", ".", "environ", ".", "get", "(", "'NTA_TEST_errModelRange'", ",", "None", ")", ",", "jobFailErr", "=", "bool", "(", "os", ".", "environ", ".", "get", "(", "'NTA_TEST_jobFailErr'", ",", "False", ")", ")", ")", "return", "dummyModelParams" ]
this function can be used for hypersearch algorithm development .
train
false
662
def _gluster(cmd): return _gluster_ok(_gluster_xml(cmd))
[ "def", "_gluster", "(", "cmd", ")", ":", "return", "_gluster_ok", "(", "_gluster_xml", "(", "cmd", ")", ")" ]
perform a gluster command and return a boolean status .
train
false
663
def keyvaluesplit(s): if (u'=' not in s): raise TypeError(u'Option must look like option=json_value') if (s[0] == u'_'): raise ValueError(u"Option names must not start with `_'") idx = s.index(u'=') o = s[:idx] val = parse_value(s[(idx + 1):]) return (o, val)
[ "def", "keyvaluesplit", "(", "s", ")", ":", "if", "(", "u'='", "not", "in", "s", ")", ":", "raise", "TypeError", "(", "u'Option must look like option=json_value'", ")", "if", "(", "s", "[", "0", "]", "==", "u'_'", ")", ":", "raise", "ValueError", "(", "u\"Option names must not start with `_'\"", ")", "idx", "=", "s", ".", "index", "(", "u'='", ")", "o", "=", "s", "[", ":", "idx", "]", "val", "=", "parse_value", "(", "s", "[", "(", "idx", "+", "1", ")", ":", "]", ")", "return", "(", "o", ",", "val", ")" ]
split an 'option=json_value' string into an (option, parsed value) pair .
train
false
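Record 663 splits on the first '=' and hands the right side to parse_value; assuming parse_value behaves like a JSON parser (it is defined elsewhere in the source), calls would look like this.

print(keyvaluesplit(u'threshold=0.5'))    # -> (u'threshold', 0.5)
print(keyvaluesplit(u'tags=["a", "b"]'))  # -> (u'tags', [u'a', u'b'])
keyvaluesplit(u'_hidden=1')               # raises ValueError: names must not start with `_'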
664
def libvlc_audio_set_channel(p_mi, channel): f = (_Cfunctions.get('libvlc_audio_set_channel', None) or _Cfunction('libvlc_audio_set_channel', ((1,), (1,)), None, ctypes.c_int, MediaPlayer, ctypes.c_int)) return f(p_mi, channel)
[ "def", "libvlc_audio_set_channel", "(", "p_mi", ",", "channel", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_audio_set_channel'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_audio_set_channel'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ")", ",", "None", ",", "ctypes", ".", "c_int", ",", "MediaPlayer", ",", "ctypes", ".", "c_int", ")", ")", "return", "f", "(", "p_mi", ",", "channel", ")" ]
set current audio channel .
train
true
665
def arccoth(val): return numpy.arctanh((1.0 / val))
[ "def", "arccoth", "(", "val", ")", ":", "return", "numpy", ".", "arctanh", "(", "(", "1.0", "/", "val", ")", ")" ]
inverse hyperbolic cotangent .
train
false
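Record 665 is a one-line identity, so a numeric check against the closed form arccoth(x) = 0.5 * ln((x + 1) / (x - 1)) doubles as a worked example.

import numpy as np

x = 2.0
print(arccoth(x))                           # arctanh(0.5) ~= 0.5493
print(0.5 * np.log((x + 1.0) / (x - 1.0)))  # same value from the closed form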
666
def RunInstaller(): try: os.makedirs(os.path.dirname(config_lib.CONFIG['Installer.logfile'])) except OSError: pass handler = logging.FileHandler(config_lib.CONFIG['Installer.logfile'], mode='wb') handler.setLevel(logging.DEBUG) logging.getLogger().addHandler(handler) config_lib.CONFIG.Initialize(filename=flags.FLAGS.config, reset=True) config_lib.CONFIG.AddContext('Installer Context', 'Context applied when we run the client installer.') logging.warn('Starting installation procedure for GRR client.') try: Installer().Init() except Exception as e: logging.exception('Installation failed: %s', e) InstallerNotifyServer() sys.exit((-1)) sys.exit(0)
[ "def", "RunInstaller", "(", ")", ":", "try", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "config_lib", ".", "CONFIG", "[", "'Installer.logfile'", "]", ")", ")", "except", "OSError", ":", "pass", "handler", "=", "logging", ".", "FileHandler", "(", "config_lib", ".", "CONFIG", "[", "'Installer.logfile'", "]", ",", "mode", "=", "'wb'", ")", "handler", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "logging", ".", "getLogger", "(", ")", ".", "addHandler", "(", "handler", ")", "config_lib", ".", "CONFIG", ".", "Initialize", "(", "filename", "=", "flags", ".", "FLAGS", ".", "config", ",", "reset", "=", "True", ")", "config_lib", ".", "CONFIG", ".", "AddContext", "(", "'Installer Context'", ",", "'Context applied when we run the client installer.'", ")", "logging", ".", "warn", "(", "'Starting installation procedure for GRR client.'", ")", "try", ":", "Installer", "(", ")", ".", "Init", "(", ")", "except", "Exception", "as", "e", ":", "logging", ".", "exception", "(", "'Installation failed: %s'", ",", "e", ")", "InstallerNotifyServer", "(", ")", "sys", ".", "exit", "(", "(", "-", "1", ")", ")", "sys", ".", "exit", "(", "0", ")" ]
run all registered installers .
train
true
668
def whichall(command, path=None, verbose=0, exts=None): return list(whichgen(command, path, verbose, exts))
[ "def", "whichall", "(", "command", ",", "path", "=", "None", ",", "verbose", "=", "0", ",", "exts", "=", "None", ")", ":", "return", "list", "(", "whichgen", "(", "command", ",", "path", ",", "verbose", ",", "exts", ")", ")" ]
return a list of full paths to all matches of the given command on the path .
train
true
669
def upload_may_enroll_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name): start_time = time() start_date = datetime.now(UTC) num_reports = 1 task_progress = TaskProgress(action_name, num_reports, start_time) current_step = {'step': 'Calculating info about students who may enroll'} task_progress.update_task_state(extra_meta=current_step) query_features = task_input.get('features') student_data = list_may_enroll(course_id, query_features) (header, rows) = format_dictlist(student_data, query_features) task_progress.attempted = task_progress.succeeded = len(rows) task_progress.skipped = (task_progress.total - task_progress.attempted) rows.insert(0, header) current_step = {'step': 'Uploading CSV'} task_progress.update_task_state(extra_meta=current_step) upload_csv_to_report_store(rows, 'may_enroll_info', course_id, start_date) return task_progress.update_task_state(extra_meta=current_step)
[ "def", "upload_may_enroll_csv", "(", "_xmodule_instance_args", ",", "_entry_id", ",", "course_id", ",", "task_input", ",", "action_name", ")", ":", "start_time", "=", "time", "(", ")", "start_date", "=", "datetime", ".", "now", "(", "UTC", ")", "num_reports", "=", "1", "task_progress", "=", "TaskProgress", "(", "action_name", ",", "num_reports", ",", "start_time", ")", "current_step", "=", "{", "'step'", ":", "'Calculating info about students who may enroll'", "}", "task_progress", ".", "update_task_state", "(", "extra_meta", "=", "current_step", ")", "query_features", "=", "task_input", ".", "get", "(", "'features'", ")", "student_data", "=", "list_may_enroll", "(", "course_id", ",", "query_features", ")", "(", "header", ",", "rows", ")", "=", "format_dictlist", "(", "student_data", ",", "query_features", ")", "task_progress", ".", "attempted", "=", "task_progress", ".", "succeeded", "=", "len", "(", "rows", ")", "task_progress", ".", "skipped", "=", "(", "task_progress", ".", "total", "-", "task_progress", ".", "attempted", ")", "rows", ".", "insert", "(", "0", ",", "header", ")", "current_step", "=", "{", "'step'", ":", "'Uploading CSV'", "}", "task_progress", ".", "update_task_state", "(", "extra_meta", "=", "current_step", ")", "upload_csv_to_report_store", "(", "rows", ",", "'may_enroll_info'", ",", "course_id", ",", "start_date", ")", "return", "task_progress", ".", "update_task_state", "(", "extra_meta", "=", "current_step", ")" ]
for a given course_id, generate a csv file of students who may enroll but have not yet done so, and store it for download .
train
false
670
@login_required def display_person_edit_name_do(request): user = request.user new_first = request.POST['first_name'] new_last = request.POST['last_name'] user.first_name = new_first user.last_name = new_last user.save() return HttpResponseRedirect(('/people/%s' % urllib.quote(user.username)))
[ "@", "login_required", "def", "display_person_edit_name_do", "(", "request", ")", ":", "user", "=", "request", ".", "user", "new_first", "=", "request", ".", "POST", "[", "'first_name'", "]", "new_last", "=", "request", ".", "POST", "[", "'last_name'", "]", "user", ".", "first_name", "=", "new_first", "user", ".", "last_name", "=", "new_last", "user", ".", "save", "(", ")", "return", "HttpResponseRedirect", "(", "(", "'/people/%s'", "%", "urllib", ".", "quote", "(", "user", ".", "username", ")", ")", ")" ]
take the new first name and last name out of the post data and save them on the user .
train
false
671
def _fsck_ext(device): msgs = {0: 'No errors', 1: 'Filesystem errors corrected', 2: 'System should be rebooted', 4: 'Filesystem errors left uncorrected', 8: 'Operational error', 16: 'Usage or syntax error', 32: 'Fsck canceled by user request', 128: 'Shared-library error'} return msgs.get(__salt__['cmd.run_all']('fsck -f -n {0}'.format(device))['retcode'], 'Unknown error')
[ "def", "_fsck_ext", "(", "device", ")", ":", "msgs", "=", "{", "0", ":", "'No errors'", ",", "1", ":", "'Filesystem errors corrected'", ",", "2", ":", "'System should be rebooted'", ",", "4", ":", "'Filesystem errors left uncorrected'", ",", "8", ":", "'Operational error'", ",", "16", ":", "'Usage or syntax error'", ",", "32", ":", "'Fsck canceled by user request'", ",", "128", ":", "'Shared-library error'", "}", "return", "msgs", ".", "get", "(", "__salt__", "[", "'cmd.run_all'", "]", "(", "'fsck -f -n {0}'", ".", "format", "(", "device", ")", ")", "[", "'retcode'", "]", ",", "'Unknown error'", ")" ]
check an ext2/ext3/ext4 file system .
train
false
672
def is_loaded(): global _lib return (_lib is not None)
[ "def", "is_loaded", "(", ")", ":", "global", "_lib", "return", "(", "_lib", "is", "not", "None", ")" ]
check to see if the specified kernel module is loaded .
train
false
673
def _get_soup(url, backend='selenium'): if (backend == 'requests'): req = requests.get(url, headers={'User-Agent': UA}) html_doc = req.text soup = BeautifulSoup(html_doc) if (soup.find('div', attrs={'id': 'gs_ab_md'}) is None): print 'Falling back on to selenium backend due to captcha.' backend = 'selenium' if (backend == 'selenium'): from selenium import webdriver import selenium.webdriver.support.ui as ui driver = webdriver.Firefox() wait = ui.WebDriverWait(driver, 200) driver.get(url) wait.until((lambda driver: driver.find_elements_by_id('gs_ab_md'))) html_doc = driver.page_source soup = BeautifulSoup(html_doc) driver.close() return soup
[ "def", "_get_soup", "(", "url", ",", "backend", "=", "'selenium'", ")", ":", "if", "(", "backend", "==", "'requests'", ")", ":", "req", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "{", "'User-Agent'", ":", "UA", "}", ")", "html_doc", "=", "req", ".", "text", "soup", "=", "BeautifulSoup", "(", "html_doc", ")", "if", "(", "soup", ".", "find", "(", "'div'", ",", "attrs", "=", "{", "'id'", ":", "'gs_ab_md'", "}", ")", "is", "None", ")", ":", "print", "'Falling back on to selenium backend due to captcha.'", "backend", "=", "'selenium'", "if", "(", "backend", "==", "'selenium'", ")", ":", "from", "selenium", "import", "webdriver", "import", "selenium", ".", "webdriver", ".", "support", ".", "ui", "as", "ui", "driver", "=", "webdriver", ".", "Firefox", "(", ")", "wait", "=", "ui", ".", "WebDriverWait", "(", "driver", ",", "200", ")", "driver", ".", "get", "(", "url", ")", "wait", ".", "until", "(", "(", "lambda", "driver", ":", "driver", ".", "find_elements_by_id", "(", "'gs_ab_md'", ")", ")", ")", "html_doc", "=", "driver", ".", "page_source", "soup", "=", "BeautifulSoup", "(", "html_doc", ")", "driver", ".", "close", "(", ")", "return", "soup" ]
get beautifulsoup object from url .
train
false
675
def test_request_prompts(): def run(txt, prompts): with settings(prompts=prompts): ol = OutputLooper(str, 'upper', None, list(txt), None) return ol._get_prompt_response() prompts = {'prompt2': 'response2', 'prompt1': 'response1', 'prompt': 'response'} eq_(run('this is a prompt for prompt1', prompts), ('prompt1', 'response1')) eq_(run('this is a prompt for prompt2', prompts), ('prompt2', 'response2')) eq_(run('this is a prompt for promptx:', prompts), (None, None)) eq_(run('prompt for promp', prompts), (None, None))
[ "def", "test_request_prompts", "(", ")", ":", "def", "run", "(", "txt", ",", "prompts", ")", ":", "with", "settings", "(", "prompts", "=", "prompts", ")", ":", "ol", "=", "OutputLooper", "(", "str", ",", "'upper'", ",", "None", ",", "list", "(", "txt", ")", ",", "None", ")", "return", "ol", ".", "_get_prompt_response", "(", ")", "prompts", "=", "{", "'prompt2'", ":", "'response2'", ",", "'prompt1'", ":", "'response1'", ",", "'prompt'", ":", "'response'", "}", "eq_", "(", "run", "(", "'this is a prompt for prompt1'", ",", "prompts", ")", ",", "(", "'prompt1'", ",", "'response1'", ")", ")", "eq_", "(", "run", "(", "'this is a prompt for prompt2'", ",", "prompts", ")", ",", "(", "'prompt2'", ",", "'response2'", ")", ")", "eq_", "(", "run", "(", "'this is a prompt for promptx:'", ",", "prompts", ")", ",", "(", "None", ",", "None", ")", ")", "eq_", "(", "run", "(", "'prompt for promp'", ",", "prompts", ")", ",", "(", "None", ",", "None", ")", ")" ]
test valid responses from prompts .
train
false
676
def ensure_sep(sep, s, n=2): return (s + (sep * (n - s.count(sep))))
[ "def", "ensure_sep", "(", "sep", ",", "s", ",", "n", "=", "2", ")", ":", "return", "(", "s", "+", "(", "sep", "*", "(", "n", "-", "s", ".", "count", "(", "sep", ")", ")", ")", ")" ]
ensure text s ends in separator sep .
train
false
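Record 676 tops the separator count up to n rather than checking how s ends, which a few calls make plain.

print(repr(ensure_sep('\n', 'hello')))      # -> 'hello\n\n'   (0 present, 2 appended)
print(repr(ensure_sep('\n', 'hello\n')))    # -> 'hello\n\n'   (1 present, 1 appended)
print(repr(ensure_sep('\n', 'a\nb\nc\n')))  # -> 'a\nb\nc\n'   (already >= 2, nothing added)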