Dataset columns:
- id_within_dataset: int64 (values 1 to 55.5k)
- snippet: string (lengths 19 to 14.2k)
- tokens: string sequence (lengths 6 to 1.63k)
- nl: string (lengths 6 to 352)
- split_within_dataset: string (1 class)
- is_duplicated: bool (2 classes)

Each record below gives: id_within_dataset, snippet, nl, split_within_dataset, is_duplicated.
1,468
def BeginThreadsSimpleMarshal(numThreads, cookie):
    ret = []
    for i in range(numThreads):
        hEvent = win32event.CreateEvent(None, 0, 0, None)
        thread.start_new(TestInterpInThread, (hEvent, cookie))
        ret.append(hEvent)
    return ret
creates multiple threads using simple marshalling .
train
false
1,470
@set_database
def get_download_youtube_ids(paths=None, downloaded=False, **kwargs):
    if paths:
        youtube_ids = dict()
        for path in paths:
            selector = (((Item.kind != 'Topic') & Item.path.contains(path)) & Item.youtube_id.is_null(False))
            if downloaded:
                selector &= (Item.files_complete > 0)
            else:
                selector &= (Item.files_complete == 0)
            youtube_ids.update(dict([item for item in Item.select(Item.youtube_id, Item.title).where(selector).tuples() if item[0]]))
        return youtube_ids
convenience function for taking a list of content ids and returning all associated youtube_ids for downloads .
train
false
1,471
def precision_recall_curve(y_true, probas_pred, pos_label=None, sample_weight=None):
    (fps, tps, thresholds) = _binary_clf_curve(y_true, probas_pred, pos_label=pos_label, sample_weight=sample_weight)
    precision = (tps / (tps + fps))
    recall = (tps / tps[(-1)])
    last_ind = tps.searchsorted(tps[(-1)])
    sl = slice(last_ind, None, (-1))
    return (np.r_[(precision[sl], 1)], np.r_[(recall[sl], 0)], thresholds[sl])
compute precision-recall pairs for different probability thresholds note: this implementation is restricted to the binary classification task .
train
false
1,472
def find_value(stdout, key):
    match = _HORCM_PATTERNS[key]['pattern'].search(stdout)
    if match:
        if (_HORCM_PATTERNS[key]['type'] is list):
            return [value.strip() for value in LDEV_SEP_PATTERN.split(match.group(key))]
        return _HORCM_PATTERNS[key]['type'](match.group(key))
    return None
return the first match from the given raidcom command output .
train
false
1,474
@treeio_login_required
@handle_response_format
def currency_delete(request, currency_id, response_format='html'):
    currency = get_object_or_404(Currency, pk=currency_id)
    if (not request.user.profile.has_permission(currency, mode='w')):
        return user_denied(request, "You don't have access to this Currency", response_format)
    if currency.is_default:
        return user_denied(request, 'You cannot delete the Base Currency', response_format)
    if request.POST:
        if ('delete' in request.POST):
            if ('trash' in request.POST):
                currency.trash = True
                currency.save()
            else:
                currency.delete()
            return HttpResponseRedirect(reverse('finance_settings_view'))
        elif ('cancel' in request.POST):
            return HttpResponseRedirect(reverse('finance_currency_view', args=[currency.id]))
    return render_to_response('finance/currency_delete', {'currency': currency}, context_instance=RequestContext(request), response_format=response_format)
currency delete .
train
false
1,475
def MakeUniformPmf(low, high, n):
    pmf = Pmf()
    for x in np.linspace(low, high, n):
        pmf.Set(x, 1)
    pmf.Normalize()
    return pmf
make a uniform pmf .
train
true
1,476
def change_process_owner(uid, gid):
    try:
        os.setgid(gid)
        os.setuid(uid)
    except Exception as exc:
        error = DaemonOSEnvironmentError(('Unable to change file creation mask (%(exc)s)' % vars()))
        raise error
change the owning uid and gid of this process .
train
false
1,479
def list_bucket_files(bucket_name, prefix, max_keys=1000):
    scope = 'https://www.googleapis.com/auth/devstorage.read_only'
    url = ('https://%s.commondatastorage.googleapis.com/?' % bucket_name)
    query = [('max-keys', max_keys)]
    if prefix:
        query.append(('prefix', prefix))
    url += urllib.urlencode(query)
    (auth_token, _) = app_identity.get_access_token(scope)
    result = urlfetch.fetch(url, method=urlfetch.GET, headers={'Authorization': ('OAuth %s' % auth_token), 'x-goog-api-version': '2'})
    if (result and (result.status_code == 200)):
        doc = xml.dom.minidom.parseString(result.content)
        return [node.childNodes[0].data for node in doc.getElementsByTagName('Key')]
    raise BackupValidationException('Request to Google Cloud Storage failed')
returns a listing of a bucket that matches the given prefix .
train
false
1,480
def runmodule(name='__main__', **kw):
    main(defaultTest=name, **kw)
collect and run tests in a single module only .
train
false
1,481
def plot_histograms(ax, prng, nb_samples=10000):
    params = ((10, 10), (4, 12), (50, 12), (6, 55))
    for (a, b) in params:
        values = prng.beta(a, b, size=nb_samples)
        ax.hist(values, histtype='stepfilled', bins=30, alpha=0.8, normed=True)
    ax.annotate('Annotation', xy=(0.25, 4.25), xycoords='data', xytext=(0.9, 0.9), textcoords='axes fraction', va='top', ha='right', bbox=dict(boxstyle='round', alpha=0.2), arrowprops=dict(arrowstyle='->', connectionstyle='angle,angleA=-95,angleB=35,rad=10'))
    return ax
plot 4 histograms and a text annotation .
train
false
1,482
def is_centos_or_rhel(distribution):
    return distribution.startswith(('centos-', 'rhel-'))
determine whether the named distribution is a version of centos or rhel .
train
false
1,483
def nested_view(request):
    c = Client()
    c.get('/no_template_view/')
    return render(request, 'base.html', {'nested': 'yes'})
a view that uses test client to call another view .
train
false
1,485
def id_(reset=False):
    if reset:
        id_pattern = re.compile('Monit id (?P<id>[^ ]+)')
        cmd = 'echo y|monit -r'
        out = __salt__['cmd.run_all'](cmd, python_shell=True)
        ret = id_pattern.search(out['stdout']).group('id')
        return (ret if ret else False)
    else:
        cmd = 'monit -i'
        out = __salt__['cmd.run'](cmd)
        ret = out.split(':')[(-1)].strip()
        return ret
return the id .
train
true
1,486
@pytest.fixture
def small_push_dir(tmpdir):
    contents = ('abcdefghijlmnopqrstuvwxyz\n' * 10000)
    push_dir = tmpdir.join('push-from').ensure(dir=True)
    push_dir.join('arbitrary-file').write(contents)
    push_dir.join('pg_xlog').mksymlinkto('/tmp/wal-e-test-must-not-exist')
    push_dir.join('holy-smokes').ensure()
    return push_dir
create a small pg data directory-alike .
train
false
1,488
def test_found():
    with pytest.raises(falcon.http_status.HTTPStatus) as redirect:
        hug.redirect.found('/')
    assert ('302' in redirect.value.status)
test to ensure function causes a redirect with http 302 status code .
train
false
1,490
def rgb_to_ints(rgb):
    if (len(rgb) == 6):
        return tuple([int(h, 16) for h in RE_RGB6.split(rgb)[1:4]])
    else:
        return tuple([int((h * 2), 16) for h in RE_RGB3.split(rgb)[1:4]])
converts an rgb string into a tuple of ints .
train
false
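A quick check of rgb_to_ints above; the RE_RGB6/RE_RGB3 patterns are not part of this record, so the definitions below are plausible stand-ins, not the originals:

import re

# hypothetical patterns whose capturing groups split the hex string into channels
RE_RGB6 = re.compile(r'([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})')
RE_RGB3 = re.compile(r'([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])')

assert rgb_to_ints('ff8000') == (255, 128, 0)
assert rgb_to_ints('f80') == (255, 136, 0)  # each 3-digit nibble is doubled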
1,491
def _process_quantiles(x, dim):
    x = np.asarray(x, dtype=float)
    if (x.ndim == 0):
        x = x[np.newaxis]
    elif (x.ndim == 1):
        if (dim == 1):
            x = x[:, np.newaxis]
        else:
            x = x[np.newaxis, :]
    return x
adjust quantiles array so that last axis labels the components of each data point .
train
false
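A minimal sketch of the shape handling, assuming only numpy and the snippet above:

import numpy as np

assert _process_quantiles(3.0, dim=2).shape == (1,)           # scalar gains an axis
assert _process_quantiles([1.0, 2.0], dim=2).shape == (1, 2)  # one 2-d point
assert _process_quantiles([1.0, 2.0], dim=1).shape == (2, 1)  # two 1-d points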
1,492
def deserialize(collection, topological=True):
    datastruct = deserialize_raw(collection.collection_type())
    if (topological and (type(datastruct) == list)):
        datastruct.sort(__depth_cmp)
    if (type(datastruct) == dict):
        collection.from_dict(datastruct)
    elif (type(datastruct) == list):
        collection.from_list(datastruct)
deserialize any string or stream like object into a python data structure .
train
false
1,494
def test_ada_sk_estimator():
    check_estimator(ADASYN)
test the sklearn estimator compatibility .
train
false
1,495
def gcg(seq):
    try:
        seq = str(seq)
    except AttributeError:
        pass
    index = checksum = 0
    for char in seq:
        index += 1
        checksum += (index * ord(char.upper()))
        if (index == 57):
            index = 0
    return (checksum % 10000)
returns the gcg checksum for a sequence .
train
false
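The checksum is a position-weighted sum whose index wraps every 57 characters (the GCG line width); a tiny worked example:

# 1 * ord('A') + 2 * ord('C') = 65 + 134 = 199, and 199 % 10000 = 199
assert gcg('AC') == 199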
1,496
def ramp(x=None, v_min=0, v_max=1, name=None):
    return tf.clip_by_value(x, clip_value_min=v_min, clip_value_max=v_max, name=name)
the ramp activation function .
train
true
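Usage sketch, assuming TensorFlow 2 eager execution:

import tensorflow as tf

x = tf.constant([-1.0, 0.5, 2.0])
print(ramp(x).numpy())  # [0.  0.5 1. ] -- values clipped into [0, 1]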
1,497
def extract_line(text, index):
    p = (text.rfind('\n', 0, index) + 1)
    q = text.find('\n', index)
    if (q < 0):
        q = len(text)
    return text[p:q]
text may be a multiline string; extract only the line containing the given character index .
train
false
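For example:

text = 'first\nsecond\nthird'
assert extract_line(text, 7) == 'second'   # index 7 falls inside "second"
assert extract_line(text, 13) == 'third'   # last line has no trailing newline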
1,498
def parse_ref(container, refspec):
    refspec = to_bytes(refspec)
    possible_refs = [refspec, ('refs/' + refspec), ('refs/tags/' + refspec), ('refs/heads/' + refspec), ('refs/remotes/' + refspec), (('refs/remotes/' + refspec) + '/HEAD')]
    for ref in possible_refs:
        if (ref in container):
            return ref
    else:
        raise KeyError(refspec)
parse a string referring to a reference .
train
false
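Usage sketch against a dict-like ref container, assuming the to_bytes helper (not shown in this record) returns a str unchanged, as in the Python 2 style of the snippet:

refs = {'refs/heads/master': 'abc123'}
assert parse_ref(refs, 'master') == 'refs/heads/master'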
1,499
@newrelic.agent.function_trace()
@allow_CORS_GET
@prevent_indexing
def _document_deleted(request, deletion_logs):
    if (request.user and request.user.has_perm('wiki.restore_document')):
        deletion_log = deletion_logs.order_by('-pk')[0]
        context = {'deletion_log': deletion_log}
        return render(request, 'wiki/deletion_log.html', context, status=404)
    raise Http404
when a document has been deleted return a 404 .
train
false
1,500
def check_arg_errcode(result, func, cargs):
    check_err(arg_byref(cargs))
    return result
the error code is returned in the last argument .
train
false
1,501
def ascent():
    import pickle
    import os
    fname = os.path.join(os.path.dirname(__file__), 'ascent.dat')
    with open(fname, 'rb') as f:
        ascent = array(pickle.load(f))
    return ascent
get an 8-bit grayscale bit-depth .
train
false
1,502
def addToNamePathDictionary(directoryPath, namePathDictionary):
    pluginFileNames = getPluginFileNamesFromDirectoryPath(directoryPath)
    for pluginFileName in pluginFileNames:
        namePathDictionary[pluginFileName.replace('_', '')] = os.path.join(directoryPath, pluginFileName)
add to the name path dictionary .
train
false
1,503
def _module(language):
    return _modules.setdefault(language, __import__(language, globals(), {}, [], (-1)))
returns the given language module .
train
false
1,505
def reset_ignored():
    cmd = ['softwareupdate', '--reset-ignored']
    salt.utils.mac_utils.execute_return_success(cmd)
    return (list_ignored() == [])
make sure the ignored updates are not ignored anymore .
train
false
1,506
def percentiles(a, pcts, axis=None):
    scores = []
    try:
        n = len(pcts)
    except TypeError:
        pcts = [pcts]
        n = 0
    for (i, p) in enumerate(pcts):
        if (axis is None):
            score = stats.scoreatpercentile(a.ravel(), p)
        else:
            score = np.apply_along_axis(stats.scoreatpercentile, axis, a, p)
        scores.append(score)
    scores = np.asarray(scores)
    if (not n):
        scores = scores.squeeze()
    return scores
like scoreatpercentile but can take and return array of percentiles .
train
true
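For example, assuming the numpy/scipy imports the snippet relies on:

import numpy as np
from scipy import stats

a = np.arange(101, dtype=float)
assert percentiles(a, 50) == 50.0  # a scalar percentile comes back squeezed
assert np.allclose(percentiles(a, [25, 50, 75]), [25.0, 50.0, 75.0])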
1,507
def p_field_id(p):
    p[0] = p[1]
field_id : intconstant : .
train
false
1,508
def from_xml(xml):
    if (xml is None):
        return None
    deserializer = XmlDeserializer()
    return deserializer(xml)
deserialize xml to a dictionary .
train
false
1,510
def gamma_correct(cs, c):
    gamma = cs.gamma
    if (gamma == GAMMA_REC709):
        cc = 0.018
        if (c < cc):
            c = (((1.099 * math.pow(cc, 0.45)) - 0.099) / cc)
        else:
            c = ((1.099 * math.pow(c, 0.45)) - 0.099)
    else:
        c = math.pow(c, (1.0 / gamma))
    return c
transform linear rgb values to nonlinear rgb values .
train
false
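A small usage sketch; GAMMA_REC709 and the color-space object are not part of this record, so the stand-ins below are hypothetical:

import math

GAMMA_REC709 = 0.0  # hypothetical sentinel; the real constant is not shown

class ColorSpace(object):
    gamma = 2.2  # plain power-law encoding

print(gamma_correct(ColorSpace(), 0.5))  # ~0.7297, i.e. 0.5 ** (1 / 2.2)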
1,512
def JoinLists(**args):
    out = {}
    for (key, val) in args.items():
        if val:
            out[key] = StrJoin(JOIN_DELIMS[key], val)
        else:
            out[key] = ''
    return out
take a dictionary of {long_name:values} .
train
false
1,513
def safe_dump_all(documents, stream=None, **kwds):
    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
serialize a sequence of python objects into a yaml stream .
train
true
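This mirrors PyYAML's public helper of the same name; for example:

import yaml

print(yaml.safe_dump_all([{'a': 1}, {'b': 2}]))
# a: 1
# ---
# b: 2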
1,514
def find_bucket_key(s3_path):
    s3_components = s3_path.split('/')
    bucket = s3_components[0]
    s3_key = ''
    if (len(s3_components) > 1):
        s3_key = '/'.join(s3_components[1:])
    return (bucket, s3_key)
this is a helper function that, given an s3 path of the form bucket/key, will return the bucket and the key represented by the s3 path .
train
false
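For example:

assert find_bucket_key('mybucket/some/deep/key') == ('mybucket', 'some/deep/key')
assert find_bucket_key('mybucket') == ('mybucket', '')  # no key portion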
1,515
def _textlist(self, _addtail=False):
    result = []
    if ((not _addtail) and (self.text is not None)):
        result.append(self.text)
    for elem in self:
        result.extend(elem.textlist(True))
    if (_addtail and (self.tail is not None)):
        result.append(self.tail)
    return result
returns a list of text strings contained within an element and its sub-elements .
train
true
1,516
def columnate(l, prefix):
    if (not l):
        return ''
    l = l[:]
    clen = max((len(s) for s in l))
    ncols = ((tty_width() - len(prefix)) / (clen + 2))
    if (ncols <= 1):
        ncols = 1
        clen = 0
    cols = []
    while (len(l) % ncols):
        l.append('')
    rows = (len(l) / ncols)
    for s in range(0, len(l), rows):
        cols.append(l[s:(s + rows)])
    out = ''
    for row in zip(*cols):
        out += ((prefix + ''.join((('%-*s' % ((clen + 2), s)) for s in row))) + '\n')
    return out
format elements of l in columns with prefix leading each line .
train
false
1,517
def _pb_timestamp_to_datetime(timestamp_pb):
    return (_EPOCH + datetime.timedelta(seconds=timestamp_pb.seconds, microseconds=(timestamp_pb.nanos / 1000.0)))
convert a timestamp protobuf to a datetime object .
train
true
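A runnable sketch, assuming _EPOCH is the Unix epoch in UTC (Python 3 shown):

import datetime

_EPOCH = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)  # assumed

class FakeTimestamp(object):
    seconds = 1500000000
    nanos = 250000000

print(_pb_timestamp_to_datetime(FakeTimestamp()))
# 2017-07-14 02:40:00.250000+00:00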
1,518
def get_effective_domain_id(request):
    default_domain = get_default_domain(request)
    domain_id = default_domain.get('id')
    domain_name = default_domain.get('name')
    return (None if (domain_name == DEFAULT_DOMAIN) else domain_id)
gets the id of the default domain to use when creating identity objects .
train
true
1,519
def mulmatmat(matlist1, matlist2, K):
    matcol = [list(i) for i in zip(*matlist2)]
    result = []
    for row in matlist1:
        result.append([mulrowcol(row, col, K) for col in matcol])
    return result
multiplies two matrices by multiplying each row with each column at a time .
train
false
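A self-contained check with a minimal stand-in for the mulrowcol helper the snippet assumes (K, the coefficient domain, is ignored by the stand-in):

def mulrowcol(row, col, K):
    # dot product of one row with one column
    return sum(r * c for (r, c) in zip(row, col))

A = [[1, 2], [3, 4]]
B = [[5, 6], [7, 8]]
assert mulmatmat(A, B, None) == [[19, 22], [43, 50]]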
1,520
def total_result_to_dict(total):
    return {'meta': extract_meta(total), 'duration': _get_duration(world), 'features': [extract_feature_data(feature_result) for feature_result in total.feature_results]}
transform a totalresult to a json-serializable python dictionary .
train
false
1,521
def test_determinism_even_sequences():
    rng = np.random.RandomState(123)
    lengths = rng.randint(1, 10, 100)
    data = [(['w'] * l) for l in lengths]
    batch_size = 5
    my_iter = EvenSequencesSubsetIterator(data, batch_size)
    visited1 = ([0] * len(data))
    for (b_ind, ind_list) in enumerate(my_iter):
        assert [(len(data[i]) == len(data[ind_list[0]])) for i in ind_list]
        for i in ind_list:
            visited1[i] = b_ind
    my_iter = EvenSequencesSubsetIterator(data, batch_size)
    visited2 = ([0] * len(data))
    for (b_ind, ind_list) in enumerate(my_iter):
        assert [(len(data[i]) == len(data[ind_list[0]])) for i in ind_list]
        for i in ind_list:
            visited2[i] = b_ind
    assert np.all((np.asarray(visited1) == np.asarray(visited2)))
check that evensequencessubsetiterator deterministically visits entries of a dataset of sequence data .
train
false
1,523
def visit_binary_product(fn, expr):
    stack = []

    def visit(element):
        if isinstance(element, ScalarSelect):
            (yield element)
        elif ((element.__visit_name__ == 'binary') and operators.is_comparison(element.operator)):
            stack.insert(0, element)
            for l in visit(element.left):
                for r in visit(element.right):
                    fn(stack[0], l, r)
            stack.pop(0)
            for elem in element.get_children():
                visit(elem)
        else:
            if isinstance(element, ColumnClause):
                (yield element)
            for elem in element.get_children():
                for e in visit(elem):
                    (yield e)
    list(visit(expr))
produce a traversal of the given expression .
train
false
1,525
@pytest.fixture(autouse=True)
def init_fake_clipboard(quteproc):
    quteproc.send_cmd(':debug-set-fake-clipboard')
make sure the fake clipboard will be used .
train
false
1,527
def mock_responses(resps):
    def wrapper(func):
        @responses.activate
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            for resp in resps:
                responses.add(*resp.args, **resp.kwargs)
            return func(*args, **kwargs)
        return wrapped
    return wrapper
mock responses for sleepiq .
train
false
1,528
def _restore_int(name, value):
    if ((name == 'http01_port') and (value == 'None')):
        logger.info('updating legacy http01_port value')
        return cli.flag_default('http01_port')
    try:
        return int(value)
    except ValueError:
        raise errors.Error('Expected a numeric value for {0}'.format(name))
restores an integer key-value pair from a renewal config file .
train
false
1,529
def context_get(stack, name):
    return stack.get(name)
find and return a name from a contextstack instance .
train
false
1,532
def _get_cols_m2m(cls, k, child, fk_left_col_name, fk_right_col_name, fk_left_deferrable, fk_left_initially, fk_right_deferrable, fk_right_initially, fk_left_ondelete, fk_left_onupdate, fk_right_ondelete, fk_right_onupdate):
    (col_info, left_col) = _get_col_o2m(cls, fk_left_col_name, ondelete=fk_left_ondelete, onupdate=fk_left_onupdate, deferrable=fk_left_deferrable, initially=fk_left_initially)
    right_col = _get_col_o2o(cls, k, child, fk_right_col_name, ondelete=fk_right_ondelete, onupdate=fk_right_onupdate, deferrable=fk_right_deferrable, initially=fk_right_initially)
    left_col.primary_key = right_col.primary_key = True
    return (left_col, right_col)
gets the parent and child classes and returns foreign keys to both tables .
train
false
1,533
def _valid_code(seed, drift=0):
    return totp(key=seed, t=(int(time.time()) + (drift * 30)))
generate a valid code .
train
false
1,534
def assert_equal_steps(test_case, expected, actual):
    expected_steps = getattr(expected, 'steps')
    actual_steps = getattr(actual, 'steps')
    if (None in (expected_steps, actual_steps)):
        test_case.assertEqual(expected, actual)
    else:
        mismatch_steps = []
        missing_steps = []
        index = 0
        for (index, expected_step) in enumerate(expected_steps):
            try:
                actual_step = actual_steps[index]
            except IndexError:
                missing_steps = expected_steps[index:]
                break
            if (expected_step != actual_step):
                mismatch_steps.append('* expected: {} !=\n actual: {}'.format(expected_step, actual_step))
        extra_steps = actual_steps[(index + 1):]
        if (mismatch_steps or missing_steps or extra_steps):
            test_case.fail('Step Mismatch\nMismatch:\n{}\nMissing:\n{}\nExtra:\n{}'.format('\n'.join(mismatch_steps), missing_steps, extra_steps))
assert that the list of provided steps are the same .
train
false
1,535
def set_global_options(options):
    global _global_options
    _global_options = dict(options)
sets the global options used as defaults for web server execution .
train
false
1,536
def get_pairs(all_files, read1_indicator, read2_indicator, match_barcodes=False, barcode_indicator='_I1_'):
    pairs = {}
    bc_pairs = {}
    read1_files = []
    read2_files = []
    bc_files = []
    for curr_file in all_files:
        curr_file_string_r1 = curr_file.split(read1_indicator)
        curr_file_string_r2 = curr_file.split(read2_indicator)
        if match_barcodes:
            curr_file_string_bc = curr_file.split(barcode_indicator)
        if (len(curr_file_string_r1) == 2):
            read1_files.append(curr_file_string_r1)
        elif (len(curr_file_string_r2) == 2):
            read2_files.append(curr_file_string_r2)
        elif (match_barcodes and (len(curr_file_string_bc) == 2)):
            bc_files.append(curr_file_string_bc)
        else:
            raise ValueError, ((('Invalid filename found for splitting on input ' + ('for file %s, ' % curr_file)) + 'check input read1_indicator ') + 'and read2_indicator parameters as well.')
    for curr_read1 in read1_files:
        for curr_read2 in read2_files:
            if (curr_read1 == curr_read2):
                pairs[read1_indicator.join(curr_read1)] = read2_indicator.join(curr_read2)
    if match_barcodes:
        for curr_read1 in read1_files:
            for curr_bc in bc_files:
                if (curr_read1 == curr_bc):
                    bc_pairs[read1_indicator.join(curr_read1)] = barcode_indicator.join(curr_bc)
        forward_reads = set(pairs.keys())
        bc_reads = set(bc_pairs.keys())
        non_matching_f_reads = (forward_reads - bc_reads)
        if non_matching_f_reads:
            raise ValueError, ('Found forward reads without matching barcodes file: %s' % non_matching_f_reads)
    return (pairs, bc_pairs)
get the list of callback/errback pairs from the user .
train
false
1,537
def _json_decode_datetime(d):
    return datetime.datetime.strptime(d['isostr'], _DATETIME_FORMAT)
converts a dict of json primitives to a datetime object .
train
false
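Usage sketch with an assumed _DATETIME_FORMAT (the real constant is not in this record):

import datetime

_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'  # hypothetical format string

print(_json_decode_datetime({'isostr': '2020-01-02T03:04:05.000006'}))
# 2020-01-02 03:04:05.000006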
1,538
def solow_jacobian(t, k, g, n, s, alpha, delta):
    jac = (((s * alpha) * (k ** (alpha - 1))) - ((g + n) + delta))
    return jac
jacobian matrix for the solow model .
train
false
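For context, a one-line derivation: the Solow accumulation equation is kdot = s * k**alpha - (g + n + delta) * k, so differentiating with respect to k gives s * alpha * k**(alpha - 1) - (g + n + delta), exactly the value returned above.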
1,540
def _get_all_permissions(opts):
    builtin = _get_builtin_permissions(opts)
    custom = list(opts.permissions)
    return (builtin + custom)
returns for all permissions in the given opts .
train
false
1,541
def no_setting_conf_directly_in_tests(logical_line, filename):
    if ('nova/tests/' in filename):
        res = conf_attribute_set_re.match(logical_line)
        if res:
            (yield (0, 'N320: Setting CONF.* attributes directly in tests is forbidden. Use self.flags(option=value) instead'))
check for setting conf .
train
false
1,542
def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):
    if (not self.account):
        raise ValueError('Attempting to create a new database with no account set')
    self.create_container_table(conn)
    self.create_account_stat_table(conn, put_timestamp)
the accountbroker initialze() function before we added the policy stat table .
train
false
1,543
def stats_aggregate():
    return s3_rest_controller()
restful crud controller .
train
false
1,544
def numeric_mixing_matrix(G, attribute, nodes=None, normalized=True):
    d = attribute_mixing_dict(G, attribute, nodes)
    s = set(d.keys())
    for (k, v) in d.items():
        s.update(v.keys())
    m = max(s)
    mapping = dict(zip(range((m + 1)), range((m + 1))))
    a = dict_to_numpy_array(d, mapping=mapping)
    if normalized:
        a = (a / a.sum())
    return a
return numeric mixing matrix for attribute .
train
false
1,545
def _write_proxy_conf(proxyfile):
    msg = 'Invalid value for proxy file provided!, Supplied value = {0}'.format(proxyfile)
    log.trace('Salt Proxy Module: write proxy conf')
    if proxyfile:
        log.debug('Writing proxy conf file')
        with salt.utils.fopen(proxyfile, 'w') as proxy_conf:
            proxy_conf.write('master = {0}'.format(__grains__['master']))
        msg = 'Wrote proxy file {0}'.format(proxyfile)
        log.debug(msg)
    return msg
write to file .
train
true
1,547
def _get_config_errors(request, cache=True):
    global _CONFIG_ERROR_LIST
    if ((not cache) or (_CONFIG_ERROR_LIST is None)):
        error_list = []
        for module in appmanager.DESKTOP_MODULES:
            try:
                validator = getattr(module.conf, CONFIG_VALIDATOR)
            except AttributeError:
                continue
            if (not callable(validator)):
                LOG.warn(('Auto config validation: %s.%s is not a function' % (module.conf.__name__, CONFIG_VALIDATOR)))
                continue
            try:
                for (confvar, error) in validator(request.user):
                    error = {'name': (confvar if isinstance(confvar, str) else confvar.get_fully_qualifying_key()), 'message': error}
                    if isinstance(confvar, BoundConfig):
                        error['value'] = confvar.get()
                    error_list.append(error)
            except Exception as ex:
                LOG.exception(('Error in config validation by %s: %s' % (module.nice_name, ex)))
        _CONFIG_ERROR_LIST = error_list
    return _CONFIG_ERROR_LIST
returns a list of tuples .
train
false
1,548
def find_room(name, api_url=None, api_key=None, api_version=None):
    rooms = list_rooms(api_url=api_url, api_key=api_key, api_version=api_version)
    if rooms:
        for x in range(0, len(rooms)):
            if (rooms[x]['name'] == name):
                return rooms[x]
    return False
find a room by name and return it .
train
false
1,549
def _get_object(obj_ref):
    return _db_content[obj_ref.type][obj_ref]
helper function to retrieve an object from the fake db content by its reference .
train
false
1,551
def country_codes(): create_country_codes() try: country_codes = tk.get_action('tag_list')(data_dict={'vocabulary_id': 'country_codes'}) return country_codes except tk.ObjectNotFound: return None
[ "def", "country_codes", "(", ")", ":", "create_country_codes", "(", ")", "try", ":", "country_codes", "=", "tk", ".", "get_action", "(", "'tag_list'", ")", "(", "data_dict", "=", "{", "'vocabulary_id'", ":", "'country_codes'", "}", ")", "return", "country_codes", "except", "tk", ".", "ObjectNotFound", ":", "return", "None" ]
return the list of country codes from the country codes vocabulary .
train
false
1,552
def reset_orig(): mpl.rcParams.update(_orig_rc_params)
[ "def", "reset_orig", "(", ")", ":", "mpl", ".", "rcParams", ".", "update", "(", "_orig_rc_params", ")" ]
restore all rc params to original settings .
train
false
1,554
def run_network(filename, n, eta): random.seed(12345678) np.random.seed(12345678) (training_data, validation_data, test_data) = mnist_loader.load_data_wrapper() net = network2.Network([784, n, 10], cost=network2.CrossEntropyCost) print 'Train the network using the default starting weights.' (default_vc, default_va, default_tc, default_ta) = net.SGD(training_data, 30, 10, eta, lmbda=5.0, evaluation_data=validation_data, monitor_evaluation_accuracy=True) print 'Train the network using the large starting weights.' net.large_weight_initializer() (large_vc, large_va, large_tc, large_ta) = net.SGD(training_data, 30, 10, eta, lmbda=5.0, evaluation_data=validation_data, monitor_evaluation_accuracy=True) f = open(filename, 'w') json.dump({'default_weight_initialization': [default_vc, default_va, default_tc, default_ta], 'large_weight_initialization': [large_vc, large_va, large_tc, large_ta]}, f) f.close()
[ "def", "run_network", "(", "filename", ",", "n", ",", "eta", ")", ":", "random", ".", "seed", "(", "12345678", ")", "np", ".", "random", ".", "seed", "(", "12345678", ")", "(", "training_data", ",", "validation_data", ",", "test_data", ")", "=", "mnist_loader", ".", "load_data_wrapper", "(", ")", "net", "=", "network2", ".", "Network", "(", "[", "784", ",", "n", ",", "10", "]", ",", "cost", "=", "network2", ".", "CrossEntropyCost", ")", "print", "'Train the network using the default starting weights.'", "(", "default_vc", ",", "default_va", ",", "default_tc", ",", "default_ta", ")", "=", "net", ".", "SGD", "(", "training_data", ",", "30", ",", "10", ",", "eta", ",", "lmbda", "=", "5.0", ",", "evaluation_data", "=", "validation_data", ",", "monitor_evaluation_accuracy", "=", "True", ")", "print", "'Train the network using the large starting weights.'", "net", ".", "large_weight_initializer", "(", ")", "(", "large_vc", ",", "large_va", ",", "large_tc", ",", "large_ta", ")", "=", "net", ".", "SGD", "(", "training_data", ",", "30", ",", "10", ",", "eta", ",", "lmbda", "=", "5.0", ",", "evaluation_data", "=", "validation_data", ",", "monitor_evaluation_accuracy", "=", "True", ")", "f", "=", "open", "(", "filename", ",", "'w'", ")", "json", ".", "dump", "(", "{", "'default_weight_initialization'", ":", "[", "default_vc", ",", "default_va", ",", "default_tc", ",", "default_ta", "]", ",", "'large_weight_initialization'", ":", "[", "large_vc", ",", "large_va", ",", "large_tc", ",", "large_ta", "]", "}", ",", "f", ")", "f", ".", "close", "(", ")" ]
train the network twice , once with the default and once with the large starting weights , and store the accuracies in filename .
train
false
1,556
def barycenter_kneighbors_graph(X, n_neighbors, reg=0.001, n_jobs=1): knn = NearestNeighbors((n_neighbors + 1), n_jobs=n_jobs).fit(X) X = knn._fit_X n_samples = X.shape[0] ind = knn.kneighbors(X, return_distance=False)[:, 1:] data = barycenter_weights(X, X[ind], reg=reg) indptr = np.arange(0, ((n_samples * n_neighbors) + 1), n_neighbors) return csr_matrix((data.ravel(), ind.ravel(), indptr), shape=(n_samples, n_samples))
[ "def", "barycenter_kneighbors_graph", "(", "X", ",", "n_neighbors", ",", "reg", "=", "0.001", ",", "n_jobs", "=", "1", ")", ":", "knn", "=", "NearestNeighbors", "(", "(", "n_neighbors", "+", "1", ")", ",", "n_jobs", "=", "n_jobs", ")", ".", "fit", "(", "X", ")", "X", "=", "knn", ".", "_fit_X", "n_samples", "=", "X", ".", "shape", "[", "0", "]", "ind", "=", "knn", ".", "kneighbors", "(", "X", ",", "return_distance", "=", "False", ")", "[", ":", ",", "1", ":", "]", "data", "=", "barycenter_weights", "(", "X", ",", "X", "[", "ind", "]", ",", "reg", "=", "reg", ")", "indptr", "=", "np", ".", "arange", "(", "0", ",", "(", "(", "n_samples", "*", "n_neighbors", ")", "+", "1", ")", ",", "n_neighbors", ")", "return", "csr_matrix", "(", "(", "data", ".", "ravel", "(", ")", ",", "ind", ".", "ravel", "(", ")", ",", "indptr", ")", ",", "shape", "=", "(", "n_samples", ",", "n_samples", ")", ")" ]
computes the barycenter weighted graph of k-neighbors for points in x .
train
false
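A usage sketch, assuming this snippet and its barycenter_weights helper are in scope (they come from scikit-learn's locally linear embedding internals):

import numpy as np

X = np.random.RandomState(0).rand(20, 2)
W = barycenter_kneighbors_graph(X, n_neighbors=5)
print(W.shape)     # (20, 20) sparse CSR matrix
print(W[0].sum())  # each row's reconstruction weights sum to ~1.0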
1,557
def staticfile(filename, root=None, match='', content_types=None, debug=False): request = cherrypy.serving.request if (request.method not in ('GET', 'HEAD')): if debug: cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICFILE') return False if (match and (not re.search(match, request.path_info))): if debug: cherrypy.log(('request.path_info %r does not match pattern %r' % (request.path_info, match)), 'TOOLS.STATICFILE') return False if (not os.path.isabs(filename)): if (not root): msg = ("Static tool requires an absolute filename (got '%s')." % filename) if debug: cherrypy.log(msg, 'TOOLS.STATICFILE') raise ValueError(msg) filename = os.path.join(root, filename) return _attempt(filename, content_types, debug=debug)
[ "def", "staticfile", "(", "filename", ",", "root", "=", "None", ",", "match", "=", "''", ",", "content_types", "=", "None", ",", "debug", "=", "False", ")", ":", "request", "=", "cherrypy", ".", "serving", ".", "request", "if", "(", "request", ".", "method", "not", "in", "(", "'GET'", ",", "'HEAD'", ")", ")", ":", "if", "debug", ":", "cherrypy", ".", "log", "(", "'request.method not GET or HEAD'", ",", "'TOOLS.STATICFILE'", ")", "return", "False", "if", "(", "match", "and", "(", "not", "re", ".", "search", "(", "match", ",", "request", ".", "path_info", ")", ")", ")", ":", "if", "debug", ":", "cherrypy", ".", "log", "(", "(", "'request.path_info %r does not match pattern %r'", "%", "(", "request", ".", "path_info", ",", "match", ")", ")", ",", "'TOOLS.STATICFILE'", ")", "return", "False", "if", "(", "not", "os", ".", "path", ".", "isabs", "(", "filename", ")", ")", ":", "if", "(", "not", "root", ")", ":", "msg", "=", "(", "\"Static tool requires an absolute filename (got '%s').\"", "%", "filename", ")", "if", "debug", ":", "cherrypy", ".", "log", "(", "msg", ",", "'TOOLS.STATICFILE'", ")", "raise", "ValueError", "(", "msg", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", "return", "_attempt", "(", "filename", ",", "content_types", ",", "debug", "=", "debug", ")" ]
serve a static resource from the given filename .
train
false
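The tool is normally switched on through CherryPy config rather than called directly; a sketch with placeholder paths:

import cherrypy

class Root(object):
    @cherrypy.expose
    def index(self):
        return 'see /logo.png'

config = {'/logo.png': {
    'tools.staticfile.on': True,
    # absolute path required unless root is also configured
    'tools.staticfile.filename': '/var/www/images/logo.png',
}}
cherrypy.quickstart(Root(), '/', config)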
1,558
@with_setup(prepare_stdout, registry.clear) def test_xunit_xml_output_with_no_errors(): called = [] def assert_correct_xml_output(filename, doc): called.append(True) expect(doc.toxml).when.called.doesnt.throw(UnicodeDecodeError) old = xunit_output.write_xml_doc xunit_output.write_xml_doc = assert_correct_xml_output runner = Runner(feature_name('xunit_unicode_and_bytestring_mixing'), enable_xunit=True) try: runner.run() finally: xunit_output.write_xml_doc = old
[ "@", "with_setup", "(", "prepare_stdout", ",", "registry", ".", "clear", ")", "def", "test_xunit_xml_output_with_no_errors", "(", ")", ":", "called", "=", "[", "]", "def", "assert_correct_xml_output", "(", "filename", ",", "doc", ")", ":", "called", ".", "append", "(", "True", ")", "expect", "(", "doc", ".", "toxml", ")", ".", "when", ".", "called", ".", "doesnt", ".", "throw", "(", "UnicodeDecodeError", ")", "old", "=", "xunit_output", ".", "write_xml_doc", "xunit_output", ".", "write_xml_doc", "=", "assert_correct_xml_output", "runner", "=", "Runner", "(", "feature_name", "(", "'xunit_unicode_and_bytestring_mixing'", ")", ",", "enable_xunit", "=", "True", ")", "try", ":", "runner", ".", "run", "(", ")", "finally", ":", "xunit_output", ".", "write_xml_doc", "=", "old" ]
test xunit doc xml output .
train
false
1,559
def showHttpErrorCodes(): if kb.httpErrorCodes: warnMsg = 'HTTP error codes detected during run:\n' warnMsg += ', '.join((('%d (%s) - %d times' % (code, (httplib.responses[code] if (code in httplib.responses) else '?'), count)) for (code, count) in kb.httpErrorCodes.items())) logger.warn(warnMsg) if any((((str(_).startswith('4') or str(_).startswith('5')) and (_ != httplib.INTERNAL_SERVER_ERROR) and (_ != kb.originalCode)) for _ in kb.httpErrorCodes.keys())): msg = 'too many 4xx and/or 5xx HTTP error codes ' msg += 'could mean that some kind of protection is involved (e.g. WAF)' logger.debug(msg)
[ "def", "showHttpErrorCodes", "(", ")", ":", "if", "kb", ".", "httpErrorCodes", ":", "warnMsg", "=", "'HTTP error codes detected during run:\\n'", "warnMsg", "+=", "', '", ".", "join", "(", "(", "(", "'%d (%s) - %d times'", "%", "(", "code", ",", "(", "httplib", ".", "responses", "[", "code", "]", "if", "(", "code", "in", "httplib", ".", "responses", ")", "else", "'?'", ")", ",", "count", ")", ")", "for", "(", "code", ",", "count", ")", "in", "kb", ".", "httpErrorCodes", ".", "items", "(", ")", ")", ")", "logger", ".", "warn", "(", "warnMsg", ")", "if", "any", "(", "(", "(", "(", "str", "(", "_", ")", ".", "startswith", "(", "'4'", ")", "or", "str", "(", "_", ")", ".", "startswith", "(", "'5'", ")", ")", "and", "(", "_", "!=", "httplib", ".", "INTERNAL_SERVER_ERROR", ")", "and", "(", "_", "!=", "kb", ".", "originalCode", ")", ")", "for", "_", "in", "kb", ".", "httpErrorCodes", ".", "keys", "(", ")", ")", ")", ":", "msg", "=", "'too many 4xx and/or 5xx HTTP error codes '", "msg", "+=", "'could mean that some kind of protection is involved (e.g. WAF)'", "logger", ".", "debug", "(", "msg", ")" ]
shows all http error codes raised so far .
train
false
1,560
def _imgLink(col, latex, model): txt = _latexFromHtml(col, latex) fname = ('latex-%s.png' % checksum(txt.encode('utf8'))) link = ('<img class=latex src="%s">' % fname) if os.path.exists(fname): return link elif (not build): return ('[latex]%s[/latex]' % latex) else: err = _buildImg(col, txt, fname, model) if err: return err else: return link
[ "def", "_imgLink", "(", "col", ",", "latex", ",", "model", ")", ":", "txt", "=", "_latexFromHtml", "(", "col", ",", "latex", ")", "fname", "=", "(", "'latex-%s.png'", "%", "checksum", "(", "txt", ".", "encode", "(", "'utf8'", ")", ")", ")", "link", "=", "(", "'<img class=latex src=\"%s\">'", "%", "fname", ")", "if", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "return", "link", "elif", "(", "not", "build", ")", ":", "return", "(", "'[latex]%s[/latex]'", "%", "latex", ")", "else", ":", "err", "=", "_buildImg", "(", "col", ",", "txt", ",", "fname", ",", "model", ")", "if", "err", ":", "return", "err", "else", ":", "return", "link" ]
return an img link for latex .
train
false
1,561
def get_concept_classified(): ct._write_head() df = _get_type_data((ct.SINA_CONCEPTS_INDEX_URL % (ct.P_TYPE['http'], ct.DOMAINS['sf'], ct.PAGES['cpt']))) data = [] for row in df.values: rowDf = _get_detail(row[0]) rowDf['c_name'] = row[1] data.append(rowDf) data = pd.concat(data, ignore_index=True) return data
[ "def", "get_concept_classified", "(", ")", ":", "ct", ".", "_write_head", "(", ")", "df", "=", "_get_type_data", "(", "(", "ct", ".", "SINA_CONCEPTS_INDEX_URL", "%", "(", "ct", ".", "P_TYPE", "[", "'http'", "]", ",", "ct", ".", "DOMAINS", "[", "'sf'", "]", ",", "ct", ".", "PAGES", "[", "'cpt'", "]", ")", ")", ")", "data", "=", "[", "]", "for", "row", "in", "df", ".", "values", ":", "rowDf", "=", "_get_detail", "(", "row", "[", "0", "]", ")", "rowDf", "[", "'c_name'", "]", "=", "row", "[", "1", "]", "data", ".", "append", "(", "rowDf", ")", "data", "=", "pd", ".", "concat", "(", "data", ",", "ignore_index", "=", "True", ")", "return", "data" ]
return dataframe with columns code (stock code) , name (stock name) and c_name (concept name) .
train
false
1,562
def assert_equal_in(logical_line): res = (asse_equal_in_start_with_true_or_false_re.search(logical_line) or asse_equal_in_end_with_true_or_false_re.search(logical_line)) if res: (yield (0, 'N338: Use assertIn/NotIn(A, B) rather than assertEqual(A in B, True/False) when checking collection contents.'))
[ "def", "assert_equal_in", "(", "logical_line", ")", ":", "res", "=", "(", "asse_equal_in_start_with_true_or_false_re", ".", "search", "(", "logical_line", ")", "or", "asse_equal_in_end_with_true_or_false_re", ".", "search", "(", "logical_line", ")", ")", "if", "res", ":", "(", "yield", "(", "0", ",", "'N338: Use assertIn/NotIn(A, B) rather than assertEqual(A in B, True/False) when checking collection contents.'", ")", ")" ]
check for assertEqual(A in B, True/False) usage that should be assertIn/assertNotIn .
train
false
1,565
def getUnpackedLoops(loops): if (len(loops) == 1): firstLoop = loops[0] if (firstLoop.__class__ == list): return firstLoop return loops
[ "def", "getUnpackedLoops", "(", "loops", ")", ":", "if", "(", "len", "(", "loops", ")", "==", "1", ")", ":", "firstLoop", "=", "loops", "[", "0", "]", "if", "(", "firstLoop", ".", "__class__", "==", "list", ")", ":", "return", "firstLoop", "return", "loops" ]
get unpacked loops .
train
false
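Two quick calls showing the unpacking rule (snippet assumed in scope):

loop = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0)]
print(getUnpackedLoops([loop]))        # single inner list: unpacked to the loop itself
print(getUnpackedLoops([loop, loop]))  # more than one loop: returned unchanged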
1,566
def p_assignment_operator(t): pass
[ "def", "p_assignment_operator", "(", "t", ")", ":", "pass" ]
assignment_operator : equals | timesequal | divequal | modequal | plusequal | minusequal | lshiftequal | rshiftequal | andequal | orequal | xorequal .
train
false
1,569
@verbose def tfr_stockwell(inst, fmin=None, fmax=None, n_fft=None, width=1.0, decim=1, return_itc=False, n_jobs=1, verbose=None): data = _get_data(inst, return_itc) picks = pick_types(inst.info, meg=True, eeg=True) info = pick_info(inst.info, picks) data = data[:, picks, :] n_jobs = check_n_jobs(n_jobs) (power, itc, freqs) = _induced_power_stockwell(data, sfreq=info['sfreq'], fmin=fmin, fmax=fmax, n_fft=n_fft, width=width, decim=decim, return_itc=return_itc, n_jobs=n_jobs) times = inst.times[::decim].copy() nave = len(data) out = AverageTFR(info, power, times, freqs, nave, method='stockwell-power') if return_itc: out = (out, AverageTFR(deepcopy(info), itc, times.copy(), freqs.copy(), nave, method='stockwell-itc')) return out
[ "@", "verbose", "def", "tfr_stockwell", "(", "inst", ",", "fmin", "=", "None", ",", "fmax", "=", "None", ",", "n_fft", "=", "None", ",", "width", "=", "1.0", ",", "decim", "=", "1", ",", "return_itc", "=", "False", ",", "n_jobs", "=", "1", ",", "verbose", "=", "None", ")", ":", "data", "=", "_get_data", "(", "inst", ",", "return_itc", ")", "picks", "=", "pick_types", "(", "inst", ".", "info", ",", "meg", "=", "True", ",", "eeg", "=", "True", ")", "info", "=", "pick_info", "(", "inst", ".", "info", ",", "picks", ")", "data", "=", "data", "[", ":", ",", "picks", ",", ":", "]", "n_jobs", "=", "check_n_jobs", "(", "n_jobs", ")", "(", "power", ",", "itc", ",", "freqs", ")", "=", "_induced_power_stockwell", "(", "data", ",", "sfreq", "=", "info", "[", "'sfreq'", "]", ",", "fmin", "=", "fmin", ",", "fmax", "=", "fmax", ",", "n_fft", "=", "n_fft", ",", "width", "=", "width", ",", "decim", "=", "decim", ",", "return_itc", "=", "return_itc", ",", "n_jobs", "=", "n_jobs", ")", "times", "=", "inst", ".", "times", "[", ":", ":", "decim", "]", ".", "copy", "(", ")", "nave", "=", "len", "(", "data", ")", "out", "=", "AverageTFR", "(", "info", ",", "power", ",", "times", ",", "freqs", ",", "nave", ",", "method", "=", "'stockwell-power'", ")", "if", "return_itc", ":", "out", "=", "(", "out", ",", "AverageTFR", "(", "deepcopy", "(", "info", ")", ",", "itc", ",", "times", ".", "copy", "(", ")", ",", "freqs", ".", "copy", "(", ")", ",", "nave", ",", "method", "=", "'stockwell-itc'", ")", ")", "return", "out" ]
time-frequency representation using stockwell transform .
train
false
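A typical call on an already-constructed mne.Epochs object; the frequency band is illustrative:

from mne.time_frequency import tfr_stockwell

# power only; pass return_itc=True to also get inter-trial coherence
power = tfr_stockwell(epochs, fmin=6.0, fmax=30.0, width=1.0, decim=3)
power.plot([0])  # plot the first channel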
1,570
def _prepare_report_dir(dir_name): dir_name.rmtree_p() dir_name.mkdir_p()
[ "def", "_prepare_report_dir", "(", "dir_name", ")", ":", "dir_name", ".", "rmtree_p", "(", ")", "dir_name", ".", "mkdir_p", "(", ")" ]
resets a given directory to an empty , freshly created state .
train
false
1,572
def build_user_vars(registry, xml_parent, data): XML.SubElement(xml_parent, 'org.jenkinsci.plugins.builduser.BuildUser')
[ "def", "build_user_vars", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "XML", ".", "SubElement", "(", "xml_parent", ",", "'org.jenkinsci.plugins.builduser.BuildUser'", ")" ]
yaml: build-user-vars . set environment variables describing the user that started the build .
train
false
1,573
def sum_expr(operators): return lo.LinOp(lo.SUM, operators[0].size, operators, None)
[ "def", "sum_expr", "(", "operators", ")", ":", "return", "lo", ".", "LinOp", "(", "lo", ".", "SUM", ",", "operators", "[", "0", "]", ".", "size", ",", "operators", ",", "None", ")" ]
add linear operators .
train
false
1,574
def _get_service_endpoint(context, svc, region=None, public=True): region = _safe_region(region) context = (context or identity) url_type = {True: 'public', False: 'private'}[public] svc_obj = context.services.get(svc) if (not svc_obj): return None ep = svc_obj.endpoints.get(region, {}).get(url_type) if (not ep): ep = svc_obj.endpoints.get('ALL', {}).get(url_type) return ep
[ "def", "_get_service_endpoint", "(", "context", ",", "svc", ",", "region", "=", "None", ",", "public", "=", "True", ")", ":", "region", "=", "_safe_region", "(", "region", ")", "context", "=", "(", "context", "or", "identity", ")", "url_type", "=", "{", "True", ":", "'public'", ",", "False", ":", "'private'", "}", "[", "public", "]", "svc_obj", "=", "context", ".", "services", ".", "get", "(", "svc", ")", "if", "(", "not", "svc_obj", ")", ":", "return", "None", "ep", "=", "svc_obj", ".", "endpoints", ".", "get", "(", "region", ",", "{", "}", ")", ".", "get", "(", "url_type", ")", "if", "(", "not", "ep", ")", ":", "ep", "=", "svc_obj", ".", "endpoints", ".", "get", "(", "'ALL'", ",", "{", "}", ")", ".", "get", "(", "url_type", ")", "return", "ep" ]
parses the services dict to get the proper endpoint for the given service .
train
true
1,575
def examplesfullTest(vm, prompt=Prompt): installPexpect(vm, prompt) vm.sendline('sudo -n python ~/mininet/examples/test/runner.py -v')
[ "def", "examplesfullTest", "(", "vm", ",", "prompt", "=", "Prompt", ")", ":", "installPexpect", "(", "vm", ",", "prompt", ")", "vm", ".", "sendline", "(", "'sudo -n python ~/mininet/examples/test/runner.py -v'", ")" ]
full test of mininet examples .
train
false
1,577
@testing.requires_testing_data def test_dipole_fitting_ctf(): raw_ctf = read_raw_ctf(fname_ctf).set_eeg_reference() events = make_fixed_length_events(raw_ctf, 1) evoked = Epochs(raw_ctf, events, 1, 0, 0, baseline=None).average() cov = make_ad_hoc_cov(evoked.info) sphere = make_sphere_model((0.0, 0.0, 0.0)) fit_dipole(evoked, cov, sphere)
[ "@", "testing", ".", "requires_testing_data", "def", "test_dipole_fitting_ctf", "(", ")", ":", "raw_ctf", "=", "read_raw_ctf", "(", "fname_ctf", ")", ".", "set_eeg_reference", "(", ")", "events", "=", "make_fixed_length_events", "(", "raw_ctf", ",", "1", ")", "evoked", "=", "Epochs", "(", "raw_ctf", ",", "events", ",", "1", ",", "0", ",", "0", ",", "baseline", "=", "None", ")", ".", "average", "(", ")", "cov", "=", "make_ad_hoc_cov", "(", "evoked", ".", "info", ")", "sphere", "=", "make_sphere_model", "(", "(", "0.0", ",", "0.0", ",", "0.0", ")", ")", "fit_dipole", "(", "evoked", ",", "cov", ",", "sphere", ")" ]
test dipole fitting with ctf data .
train
false
1,579
def release(): return uname()[2]
[ "def", "release", "(", ")", ":", "return", "uname", "(", ")", "[", "2", "]" ]
return the system's release string , e.g. '2.2.0' .
train
false
1,580
def net_if_addrs(): has_enums = (sys.version_info >= (3, 4)) if has_enums: import socket rawlist = _psplatform.net_if_addrs() rawlist.sort(key=(lambda x: x[1])) ret = collections.defaultdict(list) for (name, fam, addr, mask, broadcast, ptp) in rawlist: if has_enums: try: fam = socket.AddressFamily(fam) except ValueError: if (WINDOWS and (fam == (-1))): fam = _psplatform.AF_LINK elif (hasattr(_psplatform, 'AF_LINK') and (_psplatform.AF_LINK == fam)): fam = _psplatform.AF_LINK if (fam == _psplatform.AF_LINK): separator = (':' if POSIX else '-') while (addr.count(separator) < 5): addr += ('%s00' % separator) ret[name].append(_common.snic(fam, addr, mask, broadcast, ptp)) return dict(ret)
[ "def", "net_if_addrs", "(", ")", ":", "has_enums", "=", "(", "sys", ".", "version_info", ">=", "(", "3", ",", "4", ")", ")", "if", "has_enums", ":", "import", "socket", "rawlist", "=", "_psplatform", ".", "net_if_addrs", "(", ")", "rawlist", ".", "sort", "(", "key", "=", "(", "lambda", "x", ":", "x", "[", "1", "]", ")", ")", "ret", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "(", "name", ",", "fam", ",", "addr", ",", "mask", ",", "broadcast", ",", "ptp", ")", "in", "rawlist", ":", "if", "has_enums", ":", "try", ":", "fam", "=", "socket", ".", "AddressFamily", "(", "fam", ")", "except", "ValueError", ":", "if", "(", "WINDOWS", "and", "(", "fam", "==", "(", "-", "1", ")", ")", ")", ":", "fam", "=", "_psplatform", ".", "AF_LINK", "elif", "(", "hasattr", "(", "_psplatform", ",", "'AF_LINK'", ")", "and", "(", "_psplatform", ".", "AF_LINK", "==", "fam", ")", ")", ":", "fam", "=", "_psplatform", ".", "AF_LINK", "if", "(", "fam", "==", "_psplatform", ".", "AF_LINK", ")", ":", "separator", "=", "(", "':'", "if", "POSIX", "else", "'-'", ")", "while", "(", "addr", ".", "count", "(", "separator", ")", "<", "5", ")", ":", "addr", "+=", "(", "'%s00'", "%", "separator", ")", "ret", "[", "name", "]", ".", "append", "(", "_common", ".", "snic", "(", "fam", ",", "addr", ",", "mask", ",", "broadcast", ",", "ptp", ")", ")", "return", "dict", "(", "ret", ")" ]
return the addresses associated with each nic installed on the system as a dictionary whose keys are the nic names and whose values are lists of namedtuples , one per address assigned to the nic .
train
false
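The public psutil API this implements can be exercised directly:

import psutil

for nic, addrs in psutil.net_if_addrs().items():
    for addr in addrs:
        print(nic, addr.family, addr.address, addr.netmask)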
1,582
def testCount(): deleteMatches() deletePlayers() c = countPlayers() if (c == '0'): raise TypeError("countPlayers should return numeric zero, not string '0'.") if (c != 0): raise ValueError('After deletion, countPlayers should return zero.') print '1. countPlayers() returns 0 after initial deletePlayers() execution.' registerPlayer('Chandra Nalaar') c = countPlayers() if (c != 1): raise ValueError('After one player registers, countPlayers() should be 1. Got {c}'.format(c=c)) print '2. countPlayers() returns 1 after one player is registered.' registerPlayer('Jace Beleren') c = countPlayers() if (c != 2): raise ValueError('After two players register, countPlayers() should be 2. Got {c}'.format(c=c)) print '3. countPlayers() returns 2 after two players are registered.' deletePlayers() c = countPlayers() if (c != 0): raise ValueError('After deletion, countPlayers should return zero.') print '4. countPlayers() returns zero after registered players are deleted.\n5. Player records successfully deleted.'
[ "def", "testCount", "(", ")", ":", "deleteMatches", "(", ")", "deletePlayers", "(", ")", "c", "=", "countPlayers", "(", ")", "if", "(", "c", "==", "'0'", ")", ":", "raise", "TypeError", "(", "\"countPlayers should return numeric zero, not string '0'.\"", ")", "if", "(", "c", "!=", "0", ")", ":", "raise", "ValueError", "(", "'After deletion, countPlayers should return zero.'", ")", "print", "'1. countPlayers() returns 0 after initial deletePlayers() execution.'", "registerPlayer", "(", "'Chandra Nalaar'", ")", "c", "=", "countPlayers", "(", ")", "if", "(", "c", "!=", "1", ")", ":", "raise", "ValueError", "(", "'After one player registers, countPlayers() should be 1. Got {c}'", ".", "format", "(", "c", "=", "c", ")", ")", "print", "'2. countPlayers() returns 1 after one player is registered.'", "registerPlayer", "(", "'Jace Beleren'", ")", "c", "=", "countPlayers", "(", ")", "if", "(", "c", "!=", "2", ")", ":", "raise", "ValueError", "(", "'After two players register, countPlayers() should be 2. Got {c}'", ".", "format", "(", "c", "=", "c", ")", ")", "print", "'3. countPlayers() returns 2 after two players are registered.'", "deletePlayers", "(", ")", "c", "=", "countPlayers", "(", ")", "if", "(", "c", "!=", "0", ")", ":", "raise", "ValueError", "(", "'After deletion, countPlayers should return zero.'", ")", "print", "'4. countPlayers() returns zero after registered players are deleted.\\n5. Player records successfully deleted.'" ]
test for initial player count .
train
false
1,583
def get_collectors_from_module(mod): for attrname in dir(mod): attr = getattr(mod, attrname) if (inspect.isclass(attr) and issubclass(attr, Collector) and (attr != Collector)): if attrname.startswith('parent_'): continue fqcn = '.'.join([mod.__name__, attrname]) try: cls = load_dynamic_class(fqcn, Collector) (yield (cls.__name__, cls)) except Exception: logger.error('Failed to load Collector: %s. %s', fqcn, traceback.format_exc()) continue
[ "def", "get_collectors_from_module", "(", "mod", ")", ":", "for", "attrname", "in", "dir", "(", "mod", ")", ":", "attr", "=", "getattr", "(", "mod", ",", "attrname", ")", "if", "(", "inspect", ".", "isclass", "(", "attr", ")", "and", "issubclass", "(", "attr", ",", "Collector", ")", "and", "(", "attr", "!=", "Collector", ")", ")", ":", "if", "attrname", ".", "startswith", "(", "'parent_'", ")", ":", "continue", "fqcn", "=", "'.'", ".", "join", "(", "[", "mod", ".", "__name__", ",", "attrname", "]", ")", "try", ":", "cls", "=", "load_dynamic_class", "(", "fqcn", ",", "Collector", ")", "(", "yield", "(", "cls", ".", "__name__", ",", "cls", ")", ")", "except", "Exception", ":", "logger", ".", "error", "(", "'Failed to load Collector: %s. %s'", ",", "fqcn", ",", "traceback", ".", "format_exc", "(", ")", ")", "continue" ]
locate all of the collector classes within a given module .
train
true
1,584
def Instance2Str(o, d): if d.has_key(o.__class__): return d[o.__class__](o, d) cl = filter((lambda x, o=o: ((type(x) is types.ClassType) and isinstance(o, x))), d.keys()) if ((not cl) and hasattr(types, 'ObjectType')): cl = filter((lambda x, o=o: ((type(x) is types.TypeType) and isinstance(o, x) and (d[x] is not Instance2Str))), d.keys()) if (not cl): return d[types.StringType](o, d) d[o.__class__] = d[cl[0]] return d[cl[0]](o, d)
[ "def", "Instance2Str", "(", "o", ",", "d", ")", ":", "if", "d", ".", "has_key", "(", "o", ".", "__class__", ")", ":", "return", "d", "[", "o", ".", "__class__", "]", "(", "o", ",", "d", ")", "cl", "=", "filter", "(", "(", "lambda", "x", ",", "o", "=", "o", ":", "(", "(", "type", "(", "x", ")", "is", "types", ".", "ClassType", ")", "and", "isinstance", "(", "o", ",", "x", ")", ")", ")", ",", "d", ".", "keys", "(", ")", ")", "if", "(", "(", "not", "cl", ")", "and", "hasattr", "(", "types", ",", "'ObjectType'", ")", ")", ":", "cl", "=", "filter", "(", "(", "lambda", "x", ",", "o", "=", "o", ":", "(", "(", "type", "(", "x", ")", "is", "types", ".", "TypeType", ")", "and", "isinstance", "(", "o", ",", "x", ")", "and", "(", "d", "[", "x", "]", "is", "not", "Instance2Str", ")", ")", ")", ",", "d", ".", "keys", "(", ")", ")", "if", "(", "not", "cl", ")", ":", "return", "d", "[", "types", ".", "StringType", "]", "(", "o", ",", "d", ")", "d", "[", "o", ".", "__class__", "]", "=", "d", "[", "cl", "[", "0", "]", "]", "return", "d", "[", "cl", "[", "0", "]", "]", "(", "o", ",", "d", ")" ]
convert an instance to a string representation .
train
true
1,586
def search_by_build_id(hex_encoded_id): cache = ((cache_dir + '-libc.so.') + hex_encoded_id) if (os.path.exists(cache) and read(cache).startswith('\x7fELF')): log.info_once(('Using cached data from %r' % cache)) return cache log.info('Downloading data from GitHub') url_base = 'https://gitlab.com/libcdb/libcdb/raw/master/hashes/build_id/' url = urlparse.urljoin(url_base, hex_encoded_id) data = '' while (not data.startswith('\x7fELF')): data = wget(url) if (not data): return None if data.startswith('..'): url = (os.path.dirname(url) + '/') url = urlparse.urljoin(url, data) write(cache, data) return cache
[ "def", "search_by_build_id", "(", "hex_encoded_id", ")", ":", "cache", "=", "(", "(", "cache_dir", "+", "'-libc.so.'", ")", "+", "hex_encoded_id", ")", "if", "(", "os", ".", "path", ".", "exists", "(", "cache", ")", "and", "read", "(", "cache", ")", ".", "startswith", "(", "'\\x7fELF'", ")", ")", ":", "log", ".", "info_once", "(", "(", "'Using cached data from %r'", "%", "cache", ")", ")", "return", "cache", "log", ".", "info", "(", "'Downloading data from GitHub'", ")", "url_base", "=", "'https://gitlab.com/libcdb/libcdb/raw/master/hashes/build_id/'", "url", "=", "urlparse", ".", "urljoin", "(", "url_base", ",", "hex_encoded_id", ")", "data", "=", "''", "while", "(", "not", "data", ".", "startswith", "(", "'\\x7fELF'", ")", ")", ":", "data", "=", "wget", "(", "url", ")", "if", "(", "not", "data", ")", ":", "return", "None", "if", "data", ".", "startswith", "(", "'..'", ")", ":", "url", "=", "(", "os", ".", "path", ".", "dirname", "(", "url", ")", "+", "'/'", ")", "url", "=", "urlparse", ".", "urljoin", "(", "url", ",", "data", ")", "write", "(", "cache", ",", "data", ")", "return", "cache" ]
given a hex-encoded build id , download and cache the matching libc binary .
train
false
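A usage sketch, assuming the snippet is in scope; the build id below is a made-up placeholder:

# returns a path to the cached libc, or None if the id is unknown
path = search_by_build_id('0123456789abcdef0123456789abcdef01234567')
if path:
    print('libc cached at {0}'.format(path))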
1,587
def strtotuple(s): if (not match('^[,.0-9 ()\\[\\]]*$', s)): raise Exception('Invalid characters in string for tuple conversion') if (s.count('(') != s.count(')')): raise Exception('Invalid count of ( and )') if (s.count('[') != s.count(']')): raise Exception('Invalid count of [ and ]') r = eval(s) if (type(r) not in (list, tuple)): raise Exception('Conversion failed') return r
[ "def", "strtotuple", "(", "s", ")", ":", "if", "(", "not", "match", "(", "'^[,.0-9 ()\\\\[\\\\]]*$'", ",", "s", ")", ")", ":", "raise", "Exception", "(", "'Invalid characters in string for tuple conversion'", ")", "if", "(", "s", ".", "count", "(", "'('", ")", "!=", "s", ".", "count", "(", "')'", ")", ")", ":", "raise", "Exception", "(", "'Invalid count of ( and )'", ")", "if", "(", "s", ".", "count", "(", "'['", ")", "!=", "s", ".", "count", "(", "']'", ")", ")", ":", "raise", "Exception", "(", "'Invalid count of [ and ]'", ")", "r", "=", "eval", "(", "s", ")", "if", "(", "type", "(", "r", ")", "not", "in", "(", "list", ",", "tuple", ")", ")", ":", "raise", "Exception", "(", "'Conversion failed'", ")", "return", "r" ]
convert a tuple string into a tuple with some security checks .
train
false
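A few calls showing both the conversion and the whitelist check (snippet assumed in scope):

print(strtotuple('(1, 2, 3)'))   # (1, 2, 3)
print(strtotuple('[0.5, 0.5]'))  # [0.5, 0.5]
try:
    strtotuple('__import__("os")')
except Exception as exc:
    print(exc)  # rejected by the character whitelist before eval runs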
1,588
def decodeName(name): decodedName = name hexNumbers = re.findall('#([0-9a-f]{2})', name, (re.DOTALL | re.IGNORECASE)) for hexNumber in hexNumbers: try: decodedName = decodedName.replace(('#' + hexNumber), chr(int(hexNumber, 16))) except: return ((-1), 'Error decoding name') return (0, decodedName)
[ "def", "decodeName", "(", "name", ")", ":", "decodedName", "=", "name", "hexNumbers", "=", "re", ".", "findall", "(", "'#([0-9a-f]{2})'", ",", "name", ",", "(", "re", ".", "DOTALL", "|", "re", ".", "IGNORECASE", ")", ")", "for", "hexNumber", "in", "hexNumbers", ":", "try", ":", "decodedName", "=", "decodedName", ".", "replace", "(", "(", "'#'", "+", "hexNumber", ")", ",", "chr", "(", "int", "(", "hexNumber", ",", "16", ")", ")", ")", "except", ":", "return", "(", "(", "-", "1", ")", ",", "'Error decoding name'", ")", "return", "(", "0", ",", "decodedName", ")" ]
decode the given pdf name .
train
false
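Example calls (snippet assumed in scope); '#42' is the hex escape for 'B':

print(decodeName('A#42C'))  # (0, 'ABC')
print(decodeName('Title'))  # (0, 'Title') - no escapes, unchanged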
1,589
def RtlGetVersion(os_version_info_struct): rc = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version_info_struct)) if (rc != 0): raise exceptions.WindowsError('Getting Windows version failed.')
[ "def", "RtlGetVersion", "(", "os_version_info_struct", ")", ":", "rc", "=", "ctypes", ".", "windll", ".", "Ntdll", ".", "RtlGetVersion", "(", "ctypes", ".", "byref", "(", "os_version_info_struct", ")", ")", "if", "(", "rc", "!=", "0", ")", ":", "raise", "exceptions", ".", "WindowsError", "(", "'Getting Windows version failed.'", ")" ]
wraps the low-level RtlGetVersion routine .
train
false
1,590
def parameter_banks(device, device_dict=DEVICE_DICT): if (device != None): if (device.class_name in device_dict.keys()): def names_to_params(bank): return map(partial(get_parameter_by_name, device), bank) return map(names_to_params, device_dict[device.class_name]) else: if (device.class_name in MAX_DEVICES): try: banks = device.get_bank_count() except: banks = 0 if (banks != 0): def _bank_parameters(bank_index): try: parameter_indices = device.get_bank_parameters(bank_index) except: parameter_indices = [] if (len(parameter_indices) != 8): return [None for i in range(0, 8)] else: return [(device.parameters[i] if (i != (-1)) else None) for i in parameter_indices] return map(_bank_parameters, range(0, banks)) return group(device_parameters_to_map(device), 8) return []
[ "def", "parameter_banks", "(", "device", ",", "device_dict", "=", "DEVICE_DICT", ")", ":", "if", "(", "device", "!=", "None", ")", ":", "if", "(", "device", ".", "class_name", "in", "device_dict", ".", "keys", "(", ")", ")", ":", "def", "names_to_params", "(", "bank", ")", ":", "return", "map", "(", "partial", "(", "get_parameter_by_name", ",", "device", ")", ",", "bank", ")", "return", "map", "(", "names_to_params", ",", "device_dict", "[", "device", ".", "class_name", "]", ")", "else", ":", "if", "(", "device", ".", "class_name", "in", "MAX_DEVICES", ")", ":", "try", ":", "banks", "=", "device", ".", "get_bank_count", "(", ")", "except", ":", "banks", "=", "0", "if", "(", "banks", "!=", "0", ")", ":", "def", "_bank_parameters", "(", "bank_index", ")", ":", "try", ":", "parameter_indices", "=", "device", ".", "get_bank_parameters", "(", "bank_index", ")", "except", ":", "parameter_indices", "=", "[", "]", "if", "(", "len", "(", "parameter_indices", ")", "!=", "8", ")", ":", "return", "[", "None", "for", "i", "in", "range", "(", "0", ",", "8", ")", "]", "else", ":", "return", "[", "(", "device", ".", "parameters", "[", "i", "]", "if", "(", "i", "!=", "(", "-", "1", ")", ")", "else", "None", ")", "for", "i", "in", "parameter_indices", "]", "return", "map", "(", "_bank_parameters", ",", "range", "(", "0", ",", "banks", ")", ")", "return", "group", "(", "device_parameters_to_map", "(", "device", ")", ",", "8", ")", "return", "[", "]" ]
determine the parameters to use for a device .
train
false
1,591
def OpenQuickFixList(focus=False, autoclose=False): vim.command(u'botright copen') SetFittingHeightForCurrentWindow() if autoclose: vim.command(u'au WinLeave <buffer> q') if VariableExists(u'#User#YcmQuickFixOpened'): vim.command(u'doautocmd User YcmQuickFixOpened') if (not focus): JumpToPreviousWindow()
[ "def", "OpenQuickFixList", "(", "focus", "=", "False", ",", "autoclose", "=", "False", ")", ":", "vim", ".", "command", "(", "u'botright copen'", ")", "SetFittingHeightForCurrentWindow", "(", ")", "if", "autoclose", ":", "vim", ".", "command", "(", "u'au WinLeave <buffer> q'", ")", "if", "VariableExists", "(", "u'#User#YcmQuickFixOpened'", ")", ":", "vim", ".", "command", "(", "u'doautocmd User YcmQuickFixOpened'", ")", "if", "(", "not", "focus", ")", ":", "JumpToPreviousWindow", "(", ")" ]
open the quickfix list to full width at the bottom of the screen with its height automatically set to fit all entries .
train
false
1,592
def _extract_nested_case(case_node, stmts_list): if isinstance(case_node.stmts[0], (c_ast.Case, c_ast.Default)): stmts_list.append(case_node.stmts.pop()) _extract_nested_case(stmts_list[(-1)], stmts_list)
[ "def", "_extract_nested_case", "(", "case_node", ",", "stmts_list", ")", ":", "if", "isinstance", "(", "case_node", ".", "stmts", "[", "0", "]", ",", "(", "c_ast", ".", "Case", ",", "c_ast", ".", "Default", ")", ")", ":", "stmts_list", ".", "append", "(", "case_node", ".", "stmts", ".", "pop", "(", ")", ")", "_extract_nested_case", "(", "stmts_list", "[", "(", "-", "1", ")", "]", ",", "stmts_list", ")" ]
recursively extract consecutive case statements that are made nested by the parser and add them to the stmts_list .
train
false
1,595
def summarize_exit_codes(exit_codes): for ec in exit_codes: if (ec != 0): return ec return 0
[ "def", "summarize_exit_codes", "(", "exit_codes", ")", ":", "for", "ec", "in", "exit_codes", ":", "if", "(", "ec", "!=", "0", ")", ":", "return", "ec", "return", "0" ]
take a list of exit codes and return the first non-zero one , or zero if all succeeded .
train
false
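Two quick checks of the first-non-zero rule (snippet assumed in scope):

print(summarize_exit_codes([0, 0, 0]))  # 0
print(summarize_exit_codes([0, 2, 1]))  # 2, the first non-zero code wins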
1,596
def disable_key(key_id, region=None, key=None, keyid=None, profile=None): conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) r = {} try: key = conn.disable_key(key_id) r['result'] = True except boto.exception.BotoServerError as e: r['result'] = False r['error'] = __utils__['boto.get_error'](e) return r
[ "def", "disable_key", "(", "key_id", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "r", "=", "{", "}", "try", ":", "key", "=", "conn", ".", "disable_key", "(", "key_id", ")", "r", "[", "'result'", "]", "=", "True", "except", "boto", ".", "exception", ".", "BotoServerError", "as", "e", ":", "r", "[", "'result'", "]", "=", "False", "r", "[", "'error'", "]", "=", "__utils__", "[", "'boto.get_error'", "]", "(", "e", ")", "return", "r" ]
mark key as disabled .
train
true
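A direct-call sketch for this Salt execution module function (it relies on Salt's loader context, so it only runs on a minion); the key id and region are placeholders:

r = disable_key('1234abcd-12ab-34cd-56ef-1234567890ab', region='us-east-1')
if r['result']:
    print('key disabled')
else:
    print(r['error'])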
1,597
def get_drone(hostname): if (hostname == 'localhost'): return _LocalDrone() try: return _RemoteDrone(hostname) except DroneUnreachable: return None
[ "def", "get_drone", "(", "hostname", ")", ":", "if", "(", "hostname", "==", "'localhost'", ")", ":", "return", "_LocalDrone", "(", ")", "try", ":", "return", "_RemoteDrone", "(", "hostname", ")", "except", "DroneUnreachable", ":", "return", "None" ]
use this factory method to get drone objects .
train
false
1,599
def manage_accessed(wrapped): def accessed(session, *arg, **kw): session.accessed = now = int(time.time()) if (session._reissue_time is not None): if ((now - session.renewed) > session._reissue_time): session.changed() return wrapped(session, *arg, **kw) accessed.__doc__ = wrapped.__doc__ return accessed
[ "def", "manage_accessed", "(", "wrapped", ")", ":", "def", "accessed", "(", "session", ",", "*", "arg", ",", "**", "kw", ")", ":", "session", ".", "accessed", "=", "now", "=", "int", "(", "time", ".", "time", "(", ")", ")", "if", "(", "session", ".", "_reissue_time", "is", "not", "None", ")", ":", "if", "(", "(", "now", "-", "session", ".", "renewed", ")", ">", "session", ".", "_reissue_time", ")", ":", "session", ".", "changed", "(", ")", "return", "wrapped", "(", "session", ",", "*", "arg", ",", "**", "kw", ")", "accessed", ".", "__doc__", "=", "wrapped", ".", "__doc__", "return", "accessed" ]
decorator which causes a cookie to be renewed when an accessor method is called .
train
false
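A sketch of decorating a session accessor, assuming manage_accessed is in scope; Session here is a hypothetical stand-in:

import time

class Session(dict):
    _reissue_time = 300           # seconds before the cookie is reissued
    renewed = int(time.time())

    def changed(self):
        self.dirty = True         # would trigger a Set-Cookie on the response

    @manage_accessed
    def get(self, key, default=None):
        return dict.get(self, key, default)

s = Session()
s.get('user')  # stamps s.accessed and reissues once renewed is stale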
1,600
def get_legal(state): feature = np.zeros((1, state.size, state.size)) for (x, y) in state.get_legal_moves(): feature[(0, x, y)] = 1 return feature
[ "def", "get_legal", "(", "state", ")", ":", "feature", "=", "np", ".", "zeros", "(", "(", "1", ",", "state", ".", "size", ",", "state", ".", "size", ")", ")", "for", "(", "x", ",", "y", ")", "in", "state", ".", "get_legal_moves", "(", ")", ":", "feature", "[", "(", "0", ",", "x", ",", "y", ")", "]", "=", "1", "return", "feature" ]
a single feature plane that is one at legal moves and zero at all illegal moves .
train
false
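A usage sketch with a minimal stand-in state object (the real interface is assumed, not shown here):

import numpy as np

class FakeState(object):
    size = 3
    def get_legal_moves(self):
        return [(0, 0), (1, 2)]

feature = get_legal(FakeState())
print(feature.shape)                       # (1, 3, 3)
print(feature[0, 0, 0], feature[0, 1, 2])  # 1.0 1.0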