repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
numenta/htmresearch
htmresearch/support/expsuite.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/expsuite.py#L426-L490
def browse(self):
    """ go through all subfolders (starting at '.') and return information
    about the existing experiments. if the -B option is given, all parameters
    are shown, -b only displays the most important ones. this function
    does *not* execute any experiments.
    """
    for d in self.get_exps('.'):
        params = self.get_params(d)
        name = params['name']
        basename = name.split('/')[0]
        # if -e option is used, only show requested experiments
        if self.options.experiments and basename not in self.options.experiments:
            continue
        fullpath = os.path.join(params['path'], name)
        # calculate progress: average the per-repetition progress values
        prog = 0
        for i in range(params['repetitions']):
            prog += progress(params, i)
        prog /= params['repetitions']
        # if progress flag is set, only show the progress bars
        if self.options.progress:
            # 25-character bar, so each '=' represents 4 percentage points
            bar = "["
            bar += "="*int(prog/4)
            bar += " "*int(25-prog/4)
            bar += "]"
            print '%3i%% %27s %s'%(prog,bar,d)
            continue
        print '%16s %s'%('experiment', d)
        # oldest/newest .log/.cfg file under the experiment directory give
        # the start and (so far) end timestamps of the run.
        # NOTE(review): the directory tree is walked twice here; a single
        # walk collecting both min and max would halve the I/O.
        try:
            minfile = min(
                (os.path.join(dirname, filename)
                 for dirname, dirnames, filenames in os.walk(fullpath)
                 for filename in filenames
                 if filename.endswith(('.log', '.cfg'))),
                key=lambda fn: os.stat(fn).st_mtime)
            maxfile = max(
                (os.path.join(dirname, filename)
                 for dirname, dirnames, filenames in os.walk(fullpath)
                 for filename in filenames
                 if filename.endswith(('.log', '.cfg'))),
                key=lambda fn: os.stat(fn).st_mtime)
        except ValueError:
            # min()/max() raise ValueError on an empty sequence: the
            # experiment has produced no files yet.
            print ' started %s'%'not yet'
        else:
            print ' started %s'%time.strftime('%Y-%m-%d %H:%M:%S',
                time.localtime(os.stat(minfile).st_mtime))
            print ' ended %s'%time.strftime('%Y-%m-%d %H:%M:%S',
                time.localtime(os.stat(maxfile).st_mtime))
        for k in ['repetitions', 'iterations']:
            print '%16s %s'%(k, params[k])
        print '%16s %i%%'%('progress', prog)
        if self.options.browse_big:
            # more verbose output: every remaining parameter
            for p in [p for p in params
                      if p not in ('repetitions', 'iterations', 'path', 'name')]:
                print '%16s %s'%(p, params[p])
        print
[ "def", "browse", "(", "self", ")", ":", "for", "d", "in", "self", ".", "get_exps", "(", "'.'", ")", ":", "params", "=", "self", ".", "get_params", "(", "d", ")", "name", "=", "params", "[", "'name'", "]", "basename", "=", "name", ".", "split", "(...
go through all subfolders (starting at '.') and return information about the existing experiments. if the -B option is given, all parameters are shown, -b only displays the most important ones. this function does *not* execute any experiments.
[ "go", "through", "all", "subfolders", "(", "starting", "at", ".", ")", "and", "return", "information", "about", "the", "existing", "experiments", ".", "if", "the", "-", "B", "option", "is", "given", "all", "parameters", "are", "shown", "-", "b", "only", ...
python
train
mfcloud/python-zvm-sdk
smtLayer/powerVM.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/smtLayer/powerVM.py#L390-L484
def parseCmdline(rh):
    """
    Parse the request command input.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter powerVM.parseCmdline")

    if rh.totalParms >= 2:
        rh.userid = rh.request[1].upper()
    else:
        # Userid is missing.
        msg = msgs.msg['0010'][1] % modId
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0010'][0])
        # Bug fix: overallRC is an int and cannot be concatenated to a
        # string directly (the other exit path below already uses str()).
        rh.printSysLog("Exit powerVM.parseCmdLine, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    if rh.totalParms == 2:
        rh.subfunction = rh.userid
        rh.userid = ''

    if rh.totalParms >= 3:
        rh.subfunction = rh.request[2].upper()

    # Verify the subfunction is valid.
    if rh.subfunction not in subfuncHandler:
        # Subfunction is missing.
        subList = ', '.join(sorted(subfuncHandler.keys()))
        msg = msgs.msg['0011'][1] % (modId, subList)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0011'][0])

    # Parse the rest of the command line.
    if rh.results['overallRC'] == 0:
        rh.argPos = 3               # Begin Parsing at 4th operand
        generalUtils.parseCmdline(rh, posOpsList, keyOpsList)

    waiting = 0
    if rh.results['overallRC'] == 0:
        if rh.subfunction == 'WAIT':
            waiting = 1
            if rh.parms['desiredState'] not in vmOSUpDownStates:
                # Desired state is not: down, off, on or up.
                msg = msgs.msg['0013'][1] % (modId,
                    rh.parms['desiredState'],
                    ", ".join(vmOSUpDownStates))
                rh.printLn("ES", msg)
                rh.updateResults(msgs.msg['0013'][0])

    if (rh.results['overallRC'] == 0 and 'wait' in rh.parms):
        waiting = 1
        if 'desiredState' not in rh.parms:
            if rh.subfunction in ['ON', 'RESET', 'REBOOT']:
                rh.parms['desiredState'] = 'up'
            else:
                # OFF and SOFTOFF default to 'off'.
                rh.parms['desiredState'] = 'off'

    if rh.results['overallRC'] == 0 and waiting == 1:
        if rh.subfunction == 'ON' or rh.subfunction == 'RESET':
            if ('desiredState' not in rh.parms or
                    rh.parms['desiredState'] not in vmOSUpStates):
                # Desired state is not: on or up.
                msg = msgs.msg['0013'][1] % (modId,
                    rh.parms['desiredState'],
                    ", ".join(vmOSUpStates))
                rh.printLn("ES", msg)
                rh.updateResults(msgs.msg['0013'][0])

        if rh.results['overallRC'] == 0:
            if 'maxWait' not in rh.parms:
                rh.parms['maxWait'] = 300
            if 'poll' not in rh.parms:
                rh.parms['poll'] = 15
            # Bug fix: use floor division so maxQueries is a whole number
            # of polls (true division yields a float on Python 3).
            # Adding (poll - 1) first rounds the quotient up.
            rh.parms['maxQueries'] = ((rh.parms['maxWait'] +
                rh.parms['poll'] - 1) // rh.parms['poll'])
            # If we had to do some rounding, give a warning
            # out to the command line user that the wait
            # won't be what they expected.
            if rh.parms['maxWait'] % rh.parms['poll'] != 0:
                msg = msgs.msg['0017'][1] % (modId,
                    rh.parms['maxWait'],
                    rh.parms['poll'],
                    rh.parms['maxQueries'] * rh.parms['poll'],
                    rh.parms['maxQueries'])
                rh.printLn("W", msg)

    rh.printSysLog("Exit powerVM.parseCmdLine, rc: " +
        str(rh.results['overallRC']))

    return rh.results['overallRC']
[ "def", "parseCmdline", "(", "rh", ")", ":", "rh", ".", "printSysLog", "(", "\"Enter powerVM.parseCmdline\"", ")", "if", "rh", ".", "totalParms", ">=", "2", ":", "rh", ".", "userid", "=", "rh", ".", "request", "[", "1", "]", ".", "upper", "(", ")", "e...
Parse the request command input. Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error
[ "Parse", "the", "request", "command", "input", "." ]
python
train
ArchiveTeam/wpull
wpull/processor/rule.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/processor/rule.py#L426-L433
def consult_response_hook(self, item_session: ItemSession) -> Actions:
    '''Return scripting action when a response ends.'''
    # Ask the response plugin hook what to do; if no hook is connected,
    # fall back to the default action.
    try:
        action = self.hook_dispatcher.call(
            PluginFunctions.handle_response, item_session)
    except HookDisconnected:
        return Actions.NORMAL
    return action
[ "def", "consult_response_hook", "(", "self", ",", "item_session", ":", "ItemSession", ")", "->", "Actions", ":", "try", ":", "return", "self", ".", "hook_dispatcher", ".", "call", "(", "PluginFunctions", ".", "handle_response", ",", "item_session", ")", "except"...
Return scripting action when a response ends.
[ "Return", "scripting", "action", "when", "a", "response", "ends", "." ]
python
train
edeposit/marcxml2mods
src/marcxml2mods/mods_postprocessor/monograph.py
https://github.com/edeposit/marcxml2mods/blob/7b44157e859b4d2a372f79598ddbf77e43d39812/src/marcxml2mods/mods_postprocessor/monograph.py#L82-L94
def add_uuid(dom, uuid):
    """
    Add ``<mods:identifier>`` with `uuid`.
    """
    # Wrap the raw uuid string in a text node inside the identifier element.
    identifier = dhtmlparser.HTMLElement(
        "mods:identifier",
        {"type": "uuid"},
        [dhtmlparser.HTMLElement(uuid)],
    )

    # Place the new identifier next to any existing ones, under <mods>.
    insert_tag(identifier, dom.find("mods:identifier"), get_mods_tag(dom))
[ "def", "add_uuid", "(", "dom", ",", "uuid", ")", ":", "mods_tag", "=", "get_mods_tag", "(", "dom", ")", "uuid_tag", "=", "dhtmlparser", ".", "HTMLElement", "(", "\"mods:identifier\"", ",", "{", "\"type\"", ":", "\"uuid\"", "}", ",", "[", "dhtmlparser", "."...
Add ``<mods:identifier>`` with `uuid`.
[ "Add", "<mods", ":", "identifier", ">", "with", "uuid", "." ]
python
train
facebook/watchman
winbuild/copy-dyn-deps.py
https://github.com/facebook/watchman/blob/d416c249dd8f463dc69fc2691d0f890598c045a9/winbuild/copy-dyn-deps.py#L147-L166
def resolve_dep(self, depname):
    """Locate dep in the search path; if found, return its path.
    If not found in the search path, and the dep is not a
    system-provided dep, raise an error.

    :param depname: base file name of the dependency to locate
    :return: full path to the dep, or None when it is system-provided
    :raises RuntimeError: when the dep cannot be found anywhere
    """
    for d in self._search_path:
        name = os.path.join(d, depname)
        # In mock mode we pretend the first candidate exists.
        if self._mock:
            return name
        if os.path.exists(name):
            return name
    if self.resolve_dep_from_path(depname):
        # It's a system dep, so skip it
        return None
    # Cleanup: removed the always-true `if True:` wrapper and the
    # unreachable `return None` that followed the raise.
    message = "unable to find %s in %r" % (depname, self._search_path + self._path)
    print(message)
    raise RuntimeError(message)
[ "def", "resolve_dep", "(", "self", ",", "depname", ")", ":", "for", "d", "in", "self", ".", "_search_path", ":", "name", "=", "os", ".", "path", ".", "join", "(", "d", ",", "depname", ")", "if", "self", ".", "_mock", ":", "return", "name", "if", ...
Locate dep in the search path; if found, return its path. If not found in the search path, and the dep is not a system-provided dep, raise an error
[ "Locate", "dep", "in", "the", "search", "path", ";", "if", "found", "return", "its", "path", ".", "If", "not", "found", "in", "the", "search", "path", "and", "the", "dep", "is", "not", "a", "system", "-", "provided", "dep", "raise", "an", "error" ]
python
train
tgalal/python-axolotl
axolotl/util/keyhelper.py
https://github.com/tgalal/python-axolotl/blob/0c681af4b756f556e23a9bf961abfbc6f82800cc/axolotl/util/keyhelper.py#L21-L34
def generateIdentityKeyPair():
    """
    Generate an identity key pair.  Clients should only do this once,
    at install time.

    @return the generated IdentityKeyPair.
    """
    keyPair = Curve.generateKeyPair()
    publicKey = IdentityKey(keyPair.getPublicKey())
    # Cleanup: removed dead code that built (and unhexlified) a hard-coded
    # serialized key blob which was never used by this function.
    return IdentityKeyPair(publicKey, keyPair.getPrivateKey())
[ "def", "generateIdentityKeyPair", "(", ")", ":", "keyPair", "=", "Curve", ".", "generateKeyPair", "(", ")", "publicKey", "=", "IdentityKey", "(", "keyPair", ".", "getPublicKey", "(", ")", ")", "serialized", "=", "'0a21056e8936e8367f768a7bba008ade7cf58407bdc7a6aae293e2...
Generate an identity key pair. Clients should only do this once, at install time. @return the generated IdentityKeyPair.
[ "Generate", "an", "identity", "key", "pair", ".", "Clients", "should", "only", "do", "this", "once", "at", "install", "time", "." ]
python
train
square/connect-python-sdk
squareconnect/models/v1_page.py
https://github.com/square/connect-python-sdk/blob/adc1d09e817986cdc607391580f71d6b48ed4066/squareconnect/models/v1_page.py#L116-L132
def page_index(self, page_index):
    """
    Sets the page_index of this V1Page.
    The page's position in the merchant's list of pages. Always an integer between 0 and 6, inclusive.

    :param page_index: The page_index of this V1Page.
    :type: int
    """
    # None must be rejected before any numeric comparison is attempted.
    if page_index is None:
        raise ValueError("Invalid value for `page_index`, must not be `None`")
    # Enforce the documented inclusive range [0, 6].
    if page_index < 0:
        raise ValueError("Invalid value for `page_index`, must be a value greater than or equal to `0`")
    if page_index > 6:
        raise ValueError("Invalid value for `page_index`, must be a value less than or equal to `6`")

    self._page_index = page_index
[ "def", "page_index", "(", "self", ",", "page_index", ")", ":", "if", "page_index", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `page_index`, must not be `None`\"", ")", "if", "page_index", ">", "6", ":", "raise", "ValueError", "(", "\"Inval...
Sets the page_index of this V1Page. The page's position in the merchant's list of pages. Always an integer between 0 and 6, inclusive. :param page_index: The page_index of this V1Page. :type: int
[ "Sets", "the", "page_index", "of", "this", "V1Page", ".", "The", "page", "s", "position", "in", "the", "merchant", "s", "list", "of", "pages", ".", "Always", "an", "integer", "between", "0", "and", "6", "inclusive", "." ]
python
train
apache/airflow
airflow/contrib/hooks/gcp_pubsub_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_pubsub_hook.py#L196-L225
def delete_subscription(self, project, subscription, fail_if_not_exists=False):
    """Deletes a Pub/Sub subscription, if it exists.

    :param project: the GCP project ID where the subscription exists
    :type project: str
    :param subscription: the Pub/Sub subscription name to delete; do not
        include the ``projects/{project}/subscriptions/`` prefix.
    :type subscription: str
    :param fail_if_not_exists: if set, raise an exception if the
        subscription does not exist
    :type fail_if_not_exists: bool
    :raises PubSubException: on any delete error other than a 404, or on
        a 404 when ``fail_if_not_exists`` is set
    """
    # Doc fix: the fail_if_not_exists description previously said
    # "topic" where it meant "subscription".
    service = self.get_conn()
    full_subscription = _format_subscription(project, subscription)
    try:
        service.projects().subscriptions().delete(
            subscription=full_subscription).execute(num_retries=self.num_retries)
    except HttpError as e:
        # Status code 404 indicates that the subscription was not found
        if str(e.resp['status']) == '404':
            message = 'Subscription does not exist: {}'.format(
                full_subscription)
            self.log.warning(message)
            if fail_if_not_exists:
                raise PubSubException(message)
        else:
            raise PubSubException(
                'Error deleting subscription {}'.format(full_subscription),
                e)
[ "def", "delete_subscription", "(", "self", ",", "project", ",", "subscription", ",", "fail_if_not_exists", "=", "False", ")", ":", "service", "=", "self", ".", "get_conn", "(", ")", "full_subscription", "=", "_format_subscription", "(", "project", ",", "subscrip...
Deletes a Pub/Sub subscription, if it exists. :param project: the GCP project ID where the subscription exists :type project: str :param subscription: the Pub/Sub subscription name to delete; do not include the ``projects/{project}/subscriptions/`` prefix. :type subscription: str :param fail_if_not_exists: if set, raise an exception if the topic does not exist :type fail_if_not_exists: bool
[ "Deletes", "a", "Pub", "/", "Sub", "subscription", "if", "it", "exists", "." ]
python
test
guaix-ucm/pyemir
emirdrp/tools/display_slitlet_arrangement.py
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/tools/display_slitlet_arrangement.py#L42-L248
def display_slitlet_arrangement(fileobj, grism=None, spfilter=None,
                                bbox=None, adjust=None, geometry=None,
                                debugplot=0):
    """Display slitlet arrangement from CSUP keywords in FITS header.

    Parameters
    ----------
    fileobj : file object
        FITS or TXT file object.
    grism : str
        Grism.
    spfilter : str
        Filter.
    bbox : tuple of 4 floats
        If not None, values for xmin, xmax, ymin and ymax.
    adjust : bool
        Adjust X range according to minimum and maximum csu_bar_left
        and csu_bar_right (note that this option is overridden by
        'bbox').
    geometry : tuple (4 integers) or None
        x, y, dx, dy values employed to set the Qt backend geometry.
    debugplot : int
        Determines whether intermediate computations and/or plots
        are displayed. The valid codes are defined in
        numina.array.display.pause_debugplot

    Returns
    -------
    csu_bar_left : list of floats
        Location (mm) of the left bar for each slitlet.
    csu_bar_right : list of floats
        Location (mm) of the right bar for each slitlet, using the
        same origin employed for csu_bar_left (which is not the
        value stored in the FITS keywords).
    csu_bar_slit_center : list of floats
        Middle point (mm) in between the two bars defining a slitlet.
    csu_bar_slit_width : list of floats
        Slitlet width (mm), computed as the distance between the two
        bars defining the slitlet.
    """
    if fileobj.name[-4:] == ".txt":
        # TXT input carries no header, so grism/filter must come from
        # the caller.
        if grism is None:
            raise ValueError("Undefined grism!")
        if spfilter is None:
            raise ValueError("Undefined filter!")
        # define CsuConfiguration object
        csu_config = CsuConfiguration()
        csu_config._csu_bar_left = []
        csu_config._csu_bar_right = []
        csu_config._csu_bar_slit_center = []
        csu_config._csu_bar_slit_width = []
        # since the input filename has been opened with argparse in binary
        # mode, it is necessary to close it and open it in text mode
        fileobj.close()
        # read TXT file
        with open(fileobj.name, mode='rt') as f:
            file_content = f.read().splitlines()
        # Bars are expected in order: ids 1..EMIR_NBARS are left bars,
        # ids EMIR_NBARS+1..2*EMIR_NBARS are the matching right bars;
        # next_id_bar tracks the expected interleaving.
        next_id_bar = 1
        for line in file_content:
            if len(line) > 0:
                if line[0] not in ['#']:
                    line_contents = line.split()
                    id_bar = int(line_contents[0])
                    position = float(line_contents[1])
                    if id_bar == next_id_bar:
                        if id_bar <= EMIR_NBARS:
                            csu_config._csu_bar_left.append(position)
                            next_id_bar = id_bar + EMIR_NBARS
                        else:
                            # Right bars are measured from the opposite
                            # side; 341.5 mm appears to be the full CSU
                            # extent — TODO confirm against CSU docs.
                            csu_config._csu_bar_right.append(341.5 - position)
                            next_id_bar = id_bar - EMIR_NBARS + 1
                    else:
                        raise ValueError("Unexpected id_bar:" + str(id_bar))
        # compute slit width and center
        for i in range(EMIR_NBARS):
            csu_config._csu_bar_slit_center.append(
                (csu_config._csu_bar_left[i] +
                 csu_config._csu_bar_right[i])/2
            )
            csu_config._csu_bar_slit_width.append(
                csu_config._csu_bar_right[i] -
                csu_config._csu_bar_left[i]
            )
    else:
        # read input FITS file
        hdulist = fits.open(fileobj.name)
        image_header = hdulist[0].header
        hdulist.close()
        # additional info from header (overrides caller-supplied values)
        grism = image_header['grism']
        spfilter = image_header['filter']
        # define slitlet arrangement
        csu_config = CsuConfiguration.define_from_fits(fileobj)

    # determine calibration from the grism + filter combination
    if grism in ["J", "OPEN"] and spfilter == "J":
        wv_parameters = set_wv_parameters("J", "J")
    elif grism in ["H", "OPEN"] and spfilter == "H":
        wv_parameters = set_wv_parameters("H", "H")
    elif grism in ["K", "OPEN"] and spfilter == "Ksp":
        wv_parameters = set_wv_parameters("Ksp", "K")
    elif grism in ["LR", "OPEN"] and spfilter == "YJ":
        wv_parameters = set_wv_parameters("YJ", "LR")
    elif grism in ["LR", "OPEN"] and spfilter == "HK":
        wv_parameters = set_wv_parameters("HK", "LR")
    else:
        raise ValueError("Invalid grism + filter configuration")

    # linear wavelength calibration polynomials and useful wavelength range
    crval1 = wv_parameters['poly_crval1_linear']
    cdelt1 = wv_parameters['poly_cdelt1_linear']
    wvmin_useful = wv_parameters['wvmin_useful']
    wvmax_useful = wv_parameters['wvmax_useful']

    # display arrangement (text table)
    if abs(debugplot) >= 10:
        print("slit left right center width min.wave max.wave")
        print("==== ======= ======= ======= ===== ======== ========")
        for i in range(EMIR_NBARS):
            ibar = i + 1
            # wavelength at the first pixel and the dispersion, both
            # evaluated at this slitlet's center position
            csu_crval1 = crval1(csu_config.csu_bar_slit_center(ibar))
            csu_cdelt1 = cdelt1(csu_config.csu_bar_slit_center(ibar))
            csu_crvaln = csu_crval1 + (EMIR_NAXIS1 - 1) * csu_cdelt1
            # clip the displayed range to the useful wavelength interval
            if wvmin_useful is not None:
                csu_crval1 = np.amax([csu_crval1, wvmin_useful])
            if wvmax_useful is not None:
                csu_crvaln = np.amin([csu_crvaln, wvmax_useful])
            print("{0:4d} {1:8.3f} {2:8.3f} {3:8.3f} {4:7.3f} "
                  "{5:8.2f} {6:8.2f}".format(
                      ibar,
                      csu_config.csu_bar_left(ibar),
                      csu_config.csu_bar_right(ibar),
                      csu_config.csu_bar_slit_center(ibar),
                      csu_config.csu_bar_slit_width(ibar),
                      csu_crval1, csu_crvaln)
                  )
        # summary rows: means over all, odd-numbered and even-numbered slits
        print(
            "---> {0:8.3f} {1:8.3f} {2:8.3f} {3:7.3f} <- mean (all)".format(
                np.mean(csu_config._csu_bar_left),
                np.mean(csu_config._csu_bar_right),
                np.mean(csu_config._csu_bar_slit_center),
                np.mean(csu_config._csu_bar_slit_width)
            )
        )
        print(
            "---> {0:8.3f} {1:8.3f} {2:8.3f} {3:7.3f} <- mean (odd)".format(
                np.mean(csu_config._csu_bar_left[::2]),
                np.mean(csu_config._csu_bar_right[::2]),
                np.mean(csu_config._csu_bar_slit_center[::2]),
                np.mean(csu_config._csu_bar_slit_width[::2])
            )
        )
        print(
            "---> {0:8.3f} {1:8.3f} {2:8.3f} {3:7.3f} <- mean (even)".format(
                np.mean(csu_config._csu_bar_left[1::2]),
                np.mean(csu_config._csu_bar_right[1::2]),
                np.mean(csu_config._csu_bar_slit_center[1::2]),
                np.mean(csu_config._csu_bar_slit_width[1::2])
            )
        )

    # display slit arrangement (plot)
    if abs(debugplot) % 10 != 0:
        fig = plt.figure()
        set_window_geometry(geometry)
        ax = fig.add_subplot(111)
        if bbox is None:
            if adjust:
                # fit X range to the actual bar positions, with a 5% margin
                xmin = min(csu_config._csu_bar_left)
                xmax = max(csu_config._csu_bar_right)
                dx = xmax - xmin
                if dx == 0:
                    dx = 1
                xmin -= dx/20
                xmax += dx/20
                ax.set_xlim(xmin, xmax)
            else:
                ax.set_xlim(0., 341.5)
            ax.set_ylim(0, 56)
        else:
            ax.set_xlim(bbox[0], bbox[1])
            ax.set_ylim(bbox[2], bbox[3])
        ax.set_xlabel('csu_bar_position (mm)')
        ax.set_ylabel('slit number')
        for i in range(EMIR_NBARS):
            ibar = i + 1
            # filled rectangle for the open slit, gray lines for the bars
            ax.add_patch(patches.Rectangle(
                (csu_config.csu_bar_left(ibar), ibar-0.5),
                csu_config.csu_bar_slit_width(ibar), 1.0))
            ax.plot([0., csu_config.csu_bar_left(ibar)],
                    [ibar, ibar], '-', color='gray')
            ax.plot([csu_config.csu_bar_right(ibar), 341.5],
                    [ibar, ibar], '-', color='gray')
        plt.title("File: " + fileobj.name + "\ngrism=" + grism +
                  ", filter=" + spfilter)
        pause_debugplot(debugplot, pltshow=True)

    # return results
    return csu_config._csu_bar_left, csu_config._csu_bar_right, \
        csu_config._csu_bar_slit_center, csu_config._csu_bar_slit_width
[ "def", "display_slitlet_arrangement", "(", "fileobj", ",", "grism", "=", "None", ",", "spfilter", "=", "None", ",", "bbox", "=", "None", ",", "adjust", "=", "None", ",", "geometry", "=", "None", ",", "debugplot", "=", "0", ")", ":", "if", "fileobj", "....
Display slitlet arrangement from CSUP keywords in FITS header. Parameters ---------- fileobj : file object FITS or TXT file object. grism : str Grism. spfilter : str Filter. bbox : tuple of 4 floats If not None, values for xmin, xmax, ymin and ymax. adjust : bool Adjust X range according to minimum and maximum csu_bar_left and csu_bar_right (note that this option is overridden by 'bbox') geometry : tuple (4 integers) or None x, y, dx, dy values employed to set the Qt backend geometry. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot Returns ------- csu_bar_left : list of floats Location (mm) of the left bar for each slitlet. csu_bar_right : list of floats Location (mm) of the right bar for each slitlet, using the same origin employed for csu_bar_left (which is not the value stored in the FITS keywords). csu_bar_slit_center : list of floats Middle point (mm) in between the two bars defining a slitlet. csu_bar_slit_width : list of floats Slitlet width (mm), computed as the distance between the two bars defining the slitlet.
[ "Display", "slitlet", "arrangment", "from", "CSUP", "keywords", "in", "FITS", "header", "." ]
python
train
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L1201-L1204
def fix_w291(self, result):
    """Remove trailing whitespace."""
    # pycodestyle reports 1-based line numbers; self.source is 0-based.
    index = result['line'] - 1
    self.source[index] = self.source[index].rstrip() + '\n'
[ "def", "fix_w291", "(", "self", ",", "result", ")", ":", "fixed_line", "=", "self", ".", "source", "[", "result", "[", "'line'", "]", "-", "1", "]", ".", "rstrip", "(", ")", "self", ".", "source", "[", "result", "[", "'line'", "]", "-", "1", "]",...
Remove trailing whitespace.
[ "Remove", "trailing", "whitespace", "." ]
python
train
revelc/pyaccumulo
pyaccumulo/proxy/AccumuloProxy.py
https://github.com/revelc/pyaccumulo/blob/8adcf535bb82ba69c749efce785c9efc487e85de/pyaccumulo/proxy/AccumuloProxy.py#L2735-L2743
def revokeSystemPermission(self, login, user, perm):
    """
    Parameters:
     - login
     - user
     - perm
    """
    # Synchronous Thrift RPC: send the request, then block waiting for
    # the matching response.
    self.send_revokeSystemPermission(login, user, perm)
    self.recv_revokeSystemPermission()
[ "def", "revokeSystemPermission", "(", "self", ",", "login", ",", "user", ",", "perm", ")", ":", "self", ".", "send_revokeSystemPermission", "(", "login", ",", "user", ",", "perm", ")", "self", ".", "recv_revokeSystemPermission", "(", ")" ]
Parameters: - login - user - perm
[ "Parameters", ":", "-", "login", "-", "user", "-", "perm" ]
python
train
profitbricks/profitbricks-sdk-python
profitbricks/client.py
https://github.com/profitbricks/profitbricks-sdk-python/blob/2c804b141688eccb07d6ae56601d5c60a62abebd/profitbricks/client.py#L80-L96
def _read_config(self, filename=None): """ Read the user configuration """ if filename: self._config_filename = filename else: try: import appdirs except ImportError: raise Exception("Missing dependency for determining config path. Please install " "the 'appdirs' Python module.") self._config_filename = appdirs.user_config_dir(_LIBRARY_NAME, "ProfitBricks") + ".ini" if not self._config: self._config = configparser.ConfigParser() self._config.optionxform = str self._config.read(self._config_filename)
[ "def", "_read_config", "(", "self", ",", "filename", "=", "None", ")", ":", "if", "filename", ":", "self", ".", "_config_filename", "=", "filename", "else", ":", "try", ":", "import", "appdirs", "except", "ImportError", ":", "raise", "Exception", "(", "\"M...
Read the user configuration
[ "Read", "the", "user", "configuration" ]
python
valid
coleifer/irc
irc.py
https://github.com/coleifer/irc/blob/f9d2bd6369aafe6cb0916c9406270ca8ecea2080/irc.py#L87-L105
def connect(self):
    """\
    Connect to the IRC server using the nickname
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if self.use_ssl:
        # NOTE(review): ssl.wrap_socket is deprecated and removed in
        # Python 3.12 — consider ssl.SSLContext.wrap_socket when
        # modernizing.
        sock = ssl.wrap_socket(sock)
    self._sock = sock
    try:
        sock.connect((self.server, self.port))
    except socket.error:
        self.logger.error('Unable to connect to %s on port %d' % (self.server, self.port), exc_info=1)
        return False
    self._sock_file = sock.makefile()
    # Login handshake: optional password, then nick and user registration.
    if self.password:
        self.set_password()
    self.register_nick()
    self.register()
    return True
[ "def", "connect", "(", "self", ")", ":", "self", ".", "_sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "if", "self", ".", "use_ssl", ":", "self", ".", "_sock", "=", "ssl", ".", "wrap_socket...
\ Connect to the IRC server using the nickname
[ "\\", "Connect", "to", "the", "IRC", "server", "using", "the", "nickname" ]
python
test
tensorflow/probability
tensorflow_probability/python/mcmc/internal/util.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/internal/util.py#L221-L244
def maybe_call_fn_and_grads(fn,
                            fn_arg_list,
                            result=None,
                            grads=None,
                            check_non_none_grads=True,
                            name=None):
  """Calls `fn` and computes the gradient of the result wrt `args_list`."""
  with tf.compat.v1.name_scope(name, 'maybe_call_fn_and_grads',
                               [fn_arg_list, result, grads]):
    # Normalize the argument(s) to a list before evaluating.
    args = list(fn_arg_list) if is_list_like(fn_arg_list) else [fn_arg_list]
    result, grads = _value_and_gradients(fn, args, result, grads)
    # Every component of the result must be floating point for gradients
    # to make sense.
    results = result if is_list_like(result) else [result]
    if not all(r.dtype.is_floating for r in results):
      raise TypeError('Function result must be a `Tensor` with `float` '
                      '`dtype`.')
    if len(args) != len(grads):
      raise ValueError('Function args must be in one-to-one correspondence '
                       'with grads.')
    if check_non_none_grads and any(g is None for g in grads):
      raise ValueError('Encountered `None` gradient.\n'
                       ' fn_arg_list: {}\n'
                       ' grads: {}'.format(args, grads))
    return result, grads
[ "def", "maybe_call_fn_and_grads", "(", "fn", ",", "fn_arg_list", ",", "result", "=", "None", ",", "grads", "=", "None", ",", "check_non_none_grads", "=", "True", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope...
Calls `fn` and computes the gradient of the result wrt `args_list`.
[ "Calls", "fn", "and", "computes", "the", "gradient", "of", "the", "result", "wrt", "args_list", "." ]
python
test
neherlab/treetime
treetime/node_interpolator.py
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/node_interpolator.py#L165-L281
def convolve(cls, node_interp, branch_interp, max_or_integral='integral', n_grid_points = ttconf.NODE_GRID_SIZE, n_integral=ttconf.N_INTEGRAL, inverse_time=True, rel_tol=0.05, yc=10): ''' calculate H(t) = \int_tau f(t-tau)g(tau) if inverse_time=True H(t) = \int_tau f(t+tau)g(tau) if inverse_time=False This function determines the time points of the grid of the result to ensure an accurate approximation. ''' if max_or_integral not in ['max', 'integral']: raise Exception("Max_or_integral expected to be 'max' or 'integral', got " + str(max_or_integral) + " instead.") def conv_in_point(time_point): if max_or_integral == 'integral': # compute integral of the convolution return _evaluate_convolution(time_point, node_interp, branch_interp, n_integral=n_integral, return_log=True, inverse_time = inverse_time) else: # compute max of the convolution return _max_of_integrand(time_point, node_interp, branch_interp, return_log=True, inverse_time = inverse_time) # estimate peak and width joint_fwhm = (node_interp.fwhm + branch_interp.fwhm) min_fwhm = min(node_interp.fwhm, branch_interp.fwhm) # determine support of the resulting convolution # in order to be positive, the flipped support of f, shifted by t and g need to overlap if inverse_time: new_peak_pos = node_interp.peak_pos + branch_interp.peak_pos tmin = node_interp.xmin+branch_interp.xmin tmax = node_interp.xmax+branch_interp.xmax else: new_peak_pos = node_interp.peak_pos - branch_interp.peak_pos tmin = node_interp.xmin - branch_interp.xmax tmax = node_interp.xmax - branch_interp.xmin # make initial node grid consisting of linearly spaced points around # the center and quadratically spaced points at either end n = n_grid_points/3 center_width = 3*joint_fwhm grid_center = new_peak_pos + np.linspace(-1, 1, n)*center_width # add the right and left grid if it is needed right_range = (tmax - grid_center[-1]) if right_range>4*center_width: grid_right = grid_center[-1] + right_range*(np.linspace(0, 1, n)**2.0) elif right_range>0: 
# use linear grid the right_range is comparable to center_width grid_right = grid_center[-1] + right_range*np.linspace(0,1, int(min(n,1+0.5*n*right_range/center_width))) else: grid_right =[] left_range = grid_center[0]-tmin if left_range>4*center_width: grid_left = tmin + left_range*(np.linspace(0, 1, n)**2.0) elif left_range>0: grid_left = tmin + left_range*np.linspace(0,1, int(min(n,1+0.5*n*left_range/center_width))) else: grid_left =[] if tmin>-1: grid_zero_left = tmin + (tmax-tmin)*np.linspace(0,0.01,11)**2 else: grid_zero_left = [tmin] if tmax<1: grid_zero_right = tmax - (tmax-tmin)*np.linspace(0,0.01,11)**2 else: grid_zero_right = [tmax] # make grid and calculate convolution t_grid_0 = np.unique(np.concatenate([grid_zero_left, grid_left[:-1], grid_center, grid_right[1:], grid_zero_right])) t_grid_0 = t_grid_0[(t_grid_0 > tmin-ttconf.TINY_NUMBER) & (t_grid_0 < tmax+ttconf.TINY_NUMBER)] # res0 - the values of the convolution (integral or max) # t_0 - the value, at which the res0 achieves maximum # (when determining the maximum of the integrand, otherwise meaningless) res_0, t_0 = np.array([conv_in_point(t_val) for t_val in t_grid_0]).T # refine grid as necessary and add new points # calculate interpolation error at all internal points [2:-2] bc end points are sometime off scale interp_error = np.abs(res_0[3:-1]+res_0[1:-3]-2*res_0[2:-2]) # determine the number of extra points needed, criterion depends on distance from peak dy dy = (res_0[2:-2]-res_0.min()) dx = np.diff(t_grid_0) refine_factor = np.minimum(np.minimum(np.array(np.floor(np.sqrt(interp_error/(rel_tol*(1+(dy/yc)**4)))), dtype=int), np.array(100*(dx[1:-2]+dx[2:-1])/min_fwhm, dtype=int)), 10) insert_point_idx = np.zeros(interp_error.shape[0]+1, dtype=int) insert_point_idx[1:] = refine_factor insert_point_idx[:-1] += refine_factor # add additional points if there are any to add if np.sum(insert_point_idx): add_x = np.concatenate([np.linspace(t1,t2,n+2)[1:-1] for t1,t2,n in zip(t_grid_0[1:-2], 
t_grid_0[2:-1], insert_point_idx) if n>0]) # calculate convolution at these points add_y, add_t = np.array([conv_in_point(t_val) for t_val in add_x]).T t_grid_0 = np.concatenate((t_grid_0, add_x)) res_0 = np.concatenate ((res_0, add_y)) t_0 = np.concatenate ((t_0, add_t)) # instantiate the new interpolation object and return res_y = cls(t_grid_0, res_0, is_log=True, kind='linear') # the interpolation object, which is used to store the value of the # grid, which maximizes the convolution (for 'max' option), # or flat -1 distribution (for 'integral' option) # this grid is the optimal branch length res_t = Distribution(t_grid_0, t_0, is_log=True, min_width=node_interp.min_width, kind='linear') return res_y, res_t
[ "def", "convolve", "(", "cls", ",", "node_interp", ",", "branch_interp", ",", "max_or_integral", "=", "'integral'", ",", "n_grid_points", "=", "ttconf", ".", "NODE_GRID_SIZE", ",", "n_integral", "=", "ttconf", ".", "N_INTEGRAL", ",", "inverse_time", "=", "True",...
calculate H(t) = \int_tau f(t-tau)g(tau) if inverse_time=True H(t) = \int_tau f(t+tau)g(tau) if inverse_time=False This function determines the time points of the grid of the result to ensure an accurate approximation.
[ "calculate", "H", "(", "t", ")", "=", "\\", "int_tau", "f", "(", "t", "-", "tau", ")", "g", "(", "tau", ")", "if", "inverse_time", "=", "True", "H", "(", "t", ")", "=", "\\", "int_tau", "f", "(", "t", "+", "tau", ")", "g", "(", "tau", ")", ...
python
test
CityOfZion/neo-python
neo/Core/State/SpentCoinState.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/State/SpentCoinState.py#L172-L187
def Serialize(self, writer): """ Serialize full object. Args: writer (neo.IO.BinaryWriter): """ super(SpentCoinState, self).Serialize(writer) writer.WriteUInt256(self.TransactionHash) writer.WriteUInt32(self.TransactionHeight) writer.WriteVarInt(len(self.Items)) for item in self.Items: writer.WriteUInt16(item.index) writer.WriteUInt32(item.height)
[ "def", "Serialize", "(", "self", ",", "writer", ")", ":", "super", "(", "SpentCoinState", ",", "self", ")", ".", "Serialize", "(", "writer", ")", "writer", ".", "WriteUInt256", "(", "self", ".", "TransactionHash", ")", "writer", ".", "WriteUInt32", "(", ...
Serialize full object. Args: writer (neo.IO.BinaryWriter):
[ "Serialize", "full", "object", "." ]
python
train
infothrill/python-dyndnsc
dyndnsc/updater/afraid.py
https://github.com/infothrill/python-dyndnsc/blob/2196d48aa6098da9835a7611fbdb0b5f0fbf51e4/dyndnsc/updater/afraid.py#L60-L74
def compute_auth_key(userid, password): """ Compute the authentication key for freedns.afraid.org. This is the SHA1 hash of the string b'userid|password'. :param userid: ascii username :param password: ascii password :return: ascii authentication key (SHA1 at this point) """ import sys if sys.version_info >= (3, 0): return hashlib.sha1(b"|".join((userid.encode("ascii"), # noqa: S303 password.encode("ascii")))).hexdigest() return hashlib.sha1("|".join((userid, password))).hexdigest()
[ "def", "compute_auth_key", "(", "userid", ",", "password", ")", ":", "import", "sys", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "0", ")", ":", "return", "hashlib", ".", "sha1", "(", "b\"|\"", ".", "join", "(", "(", "userid", ".", "encode...
Compute the authentication key for freedns.afraid.org. This is the SHA1 hash of the string b'userid|password'. :param userid: ascii username :param password: ascii password :return: ascii authentication key (SHA1 at this point)
[ "Compute", "the", "authentication", "key", "for", "freedns", ".", "afraid", ".", "org", "." ]
python
train
20c/xbahn
xbahn/engineer.py
https://github.com/20c/xbahn/blob/afb27b0576841338a366d7cac0200a782bd84be6/xbahn/engineer.py#L214-L241
def make_command(self, ctx, name, info): """ make click sub-command from command info gotten from xbahn engineer """ @self.command() @click.option("--debug/--no-debug", default=False, help="Show debug information") @doc(info.get("description")) def func(*args, **kwargs): if "debug" in kwargs: del kwargs["debug"] fn = getattr(ctx.widget, name) result = fn(*args, **kwargs) click.echo("%s: %s> %s" % (ctx.params["host"],name,result)) ctx.conn.close() ctx.info_name = "%s %s" % (ctx.info_name , ctx.params["host"]) for a in info.get("arguments",[]): deco = click.argument(*a["args"], **a["kwargs"]) func = deco(func) for o in info.get("options",[]): deco = click.option(*o["args"], **o["kwargs"]) func = deco(func) return func
[ "def", "make_command", "(", "self", ",", "ctx", ",", "name", ",", "info", ")", ":", "@", "self", ".", "command", "(", ")", "@", "click", ".", "option", "(", "\"--debug/--no-debug\"", ",", "default", "=", "False", ",", "help", "=", "\"Show debug informati...
make click sub-command from command info gotten from xbahn engineer
[ "make", "click", "sub", "-", "command", "from", "command", "info", "gotten", "from", "xbahn", "engineer" ]
python
train
peterbrittain/asciimatics
asciimatics/screen.py
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/screen.py#L763-L793
def highlight(self, x, y, w, h, fg=None, bg=None, blend=100): """ Highlight a specified section of the screen. :param x: The column (x coord) for the start of the highlight. :param y: The line (y coord) for the start of the highlight. :param w: The width of the highlight (in characters). :param h: The height of the highlight (in characters). :param fg: The foreground colour of the highlight. :param bg: The background colour of the highlight. :param blend: How much (as a percentage) to take of the new colour when blending. The colours and attributes are the COLOUR_xxx and A_yyy constants defined in the Screen class. If fg or bg are None that means don't change the foreground/background as appropriate. """ # Convert to buffer coordinates y -= self._start_line for i in range(w): if x + i >= self.width or x + i < 0: continue for j in range(h): if y + j >= self._buffer_height or y + j < 0: continue old = self._buffer.get(x + i, y + j) new_bg = self._blend(bg, old[3], blend) new_fg = self._blend(fg, old[1], blend) self._buffer.set(x + i, y + j, (old[0], new_fg, old[2], new_bg, old[4]))
[ "def", "highlight", "(", "self", ",", "x", ",", "y", ",", "w", ",", "h", ",", "fg", "=", "None", ",", "bg", "=", "None", ",", "blend", "=", "100", ")", ":", "# Convert to buffer coordinates", "y", "-=", "self", ".", "_start_line", "for", "i", "in",...
Highlight a specified section of the screen. :param x: The column (x coord) for the start of the highlight. :param y: The line (y coord) for the start of the highlight. :param w: The width of the highlight (in characters). :param h: The height of the highlight (in characters). :param fg: The foreground colour of the highlight. :param bg: The background colour of the highlight. :param blend: How much (as a percentage) to take of the new colour when blending. The colours and attributes are the COLOUR_xxx and A_yyy constants defined in the Screen class. If fg or bg are None that means don't change the foreground/background as appropriate.
[ "Highlight", "a", "specified", "section", "of", "the", "screen", "." ]
python
train
python-wink/python-wink
src/pywink/devices/siren.py
https://github.com/python-wink/python-wink/blob/cf8bdce8c6518f30b91b23aa7aa32e89c2ce48da/src/pywink/devices/siren.py#L117-L130
def set_chime(self, sound, cycles=None): """ :param sound: a str, one of ["doorbell", "fur_elise", "doorbell_extended", "alert", "william_tell", "rondo_alla_turca", "police_siren", ""evacuation", "beep_beep", "beep", "inactive"] :param cycles: Undocumented seems to have no effect? :return: nothing """ desired_state = {"activate_chime": sound} if cycles is not None: desired_state.update({"chime_cycles": cycles}) response = self.api_interface.set_device_state(self, {"desired_state": desired_state}) self._update_state_from_response(response)
[ "def", "set_chime", "(", "self", ",", "sound", ",", "cycles", "=", "None", ")", ":", "desired_state", "=", "{", "\"activate_chime\"", ":", "sound", "}", "if", "cycles", "is", "not", "None", ":", "desired_state", ".", "update", "(", "{", "\"chime_cycles\"",...
:param sound: a str, one of ["doorbell", "fur_elise", "doorbell_extended", "alert", "william_tell", "rondo_alla_turca", "police_siren", ""evacuation", "beep_beep", "beep", "inactive"] :param cycles: Undocumented seems to have no effect? :return: nothing
[ ":", "param", "sound", ":", "a", "str", "one", "of", "[", "doorbell", "fur_elise", "doorbell_extended", "alert", "william_tell", "rondo_alla_turca", "police_siren", "evacuation", "beep_beep", "beep", "inactive", "]", ":", "param", "cycles", ":", "Undocumented", "s...
python
train
neurosnap/mudicom
mudicom/image.py
https://github.com/neurosnap/mudicom/blob/04011967007409f0c5253b4f308f53a7b0fc99c6/mudicom/image.py#L29-L37
def numpy(self): """ Grabs image data and converts it to a numpy array """ # load GDCM's image reading functionality image_reader = gdcm.ImageReader() image_reader.SetFileName(self.fname) if not image_reader.Read(): raise IOError("Could not read DICOM image") pixel_array = self._gdcm_to_numpy(image_reader.GetImage()) return pixel_array
[ "def", "numpy", "(", "self", ")", ":", "# load GDCM's image reading functionality", "image_reader", "=", "gdcm", ".", "ImageReader", "(", ")", "image_reader", ".", "SetFileName", "(", "self", ".", "fname", ")", "if", "not", "image_reader", ".", "Read", "(", ")...
Grabs image data and converts it to a numpy array
[ "Grabs", "image", "data", "and", "converts", "it", "to", "a", "numpy", "array" ]
python
train
Qiskit/qiskit-terra
qiskit/transpiler/coupling.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/transpiler/coupling.py#L109-L113
def physical_qubits(self): """Returns a sorted list of physical_qubits""" if self._qubit_list is None: self._qubit_list = sorted([pqubit for pqubit in self.graph.nodes]) return self._qubit_list
[ "def", "physical_qubits", "(", "self", ")", ":", "if", "self", ".", "_qubit_list", "is", "None", ":", "self", ".", "_qubit_list", "=", "sorted", "(", "[", "pqubit", "for", "pqubit", "in", "self", ".", "graph", ".", "nodes", "]", ")", "return", "self", ...
Returns a sorted list of physical_qubits
[ "Returns", "a", "sorted", "list", "of", "physical_qubits" ]
python
test
bcbio/bcbio-nextgen
bcbio/workflow/template.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L495-L504
def _convert_to_relpaths(data, work_dir): """Convert absolute paths in the input data to relative paths to the work directory. """ work_dir = os.path.abspath(work_dir) data["files"] = [os.path.relpath(f, work_dir) for f in data["files"]] for topk in ["metadata", "algorithm"]: for k, v in data[topk].items(): if isinstance(v, six.string_types) and os.path.isfile(v) and os.path.isabs(v): data[topk][k] = os.path.relpath(v, work_dir) return data
[ "def", "_convert_to_relpaths", "(", "data", ",", "work_dir", ")", ":", "work_dir", "=", "os", ".", "path", ".", "abspath", "(", "work_dir", ")", "data", "[", "\"files\"", "]", "=", "[", "os", ".", "path", ".", "relpath", "(", "f", ",", "work_dir", ")...
Convert absolute paths in the input data to relative paths to the work directory.
[ "Convert", "absolute", "paths", "in", "the", "input", "data", "to", "relative", "paths", "to", "the", "work", "directory", "." ]
python
train
Esri/ArcREST
src/arcrest/common/domain.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/common/domain.py#L71-L77
def removeCodedValue(self, name): """removes a codedValue by name""" for i in self._codedValues: if i['name'] == name: self._codedValues.remove(i) return True return False
[ "def", "removeCodedValue", "(", "self", ",", "name", ")", ":", "for", "i", "in", "self", ".", "_codedValues", ":", "if", "i", "[", "'name'", "]", "==", "name", ":", "self", ".", "_codedValues", ".", "remove", "(", "i", ")", "return", "True", "return"...
removes a codedValue by name
[ "removes", "a", "codedValue", "by", "name" ]
python
train
getsentry/rb
rb/clients.py
https://github.com/getsentry/rb/blob/569d1d13311f6c04bae537fc17e75da430e4ec45/rb/clients.py#L549-L565
def map(self, timeout=None, max_concurrency=64, auto_batch=None): """Returns a context manager for a map operation. This runs multiple queries in parallel and then joins in the end to collect all results. In the context manager the client available is a :class:`MappingClient`. Example usage:: results = {} with cluster.map() as client: for key in keys_to_fetch: results[key] = client.get(key) for key, promise in results.iteritems(): print '%s => %s' % (key, promise.value) """ return MapManager(self.get_mapping_client(max_concurrency, auto_batch), timeout=timeout)
[ "def", "map", "(", "self", ",", "timeout", "=", "None", ",", "max_concurrency", "=", "64", ",", "auto_batch", "=", "None", ")", ":", "return", "MapManager", "(", "self", ".", "get_mapping_client", "(", "max_concurrency", ",", "auto_batch", ")", ",", "timeo...
Returns a context manager for a map operation. This runs multiple queries in parallel and then joins in the end to collect all results. In the context manager the client available is a :class:`MappingClient`. Example usage:: results = {} with cluster.map() as client: for key in keys_to_fetch: results[key] = client.get(key) for key, promise in results.iteritems(): print '%s => %s' % (key, promise.value)
[ "Returns", "a", "context", "manager", "for", "a", "map", "operation", ".", "This", "runs", "multiple", "queries", "in", "parallel", "and", "then", "joins", "in", "the", "end", "to", "collect", "all", "results", "." ]
python
train
cisco-sas/kitty
kitty/model/low_level/encoder.py
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/model/low_level/encoder.py#L158-L163
def encode(self, value): ''' :param value: value to encode ''' encoded = strToBytes(value) + b'\x00' return Bits(bytes=encoded)
[ "def", "encode", "(", "self", ",", "value", ")", ":", "encoded", "=", "strToBytes", "(", "value", ")", "+", "b'\\x00'", "return", "Bits", "(", "bytes", "=", "encoded", ")" ]
:param value: value to encode
[ ":", "param", "value", ":", "value", "to", "encode" ]
python
train
DataONEorg/d1_python
gmn/src/d1_gmn/app/management/commands/import.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/management/commands/import.py#L291-L324
async def import_aggregated(self, async_client, pid): """Import the SciObj at {pid}. If the SciObj is a Resource Map, also recursively import the aggregated objects. """ self._logger.info('Importing: {}'.format(pid)) task_set = set() object_info_pyxb = d1_common.types.dataoneTypes.ObjectInfo() object_info_pyxb.identifier = pid task_set.add(self.import_object(async_client, object_info_pyxb)) result_set, task_set = await asyncio.wait(task_set) assert len(result_set) == 1 assert not task_set sysmeta_pyxb = result_set.pop().result() if not sysmeta_pyxb: # Import was skipped return assert d1_common.xml.get_req_val(sysmeta_pyxb.identifier) == pid if d1_gmn.app.did.is_resource_map_db(pid): for member_pid in d1_gmn.app.resource_map.get_resource_map_members_by_map( pid ): self.progress_logger.event("Importing aggregated SciObj") self._logger.info('Importing aggregated SciObj. pid="{}"'.format(pid)) await self.import_aggregated(async_client, member_pid)
[ "async", "def", "import_aggregated", "(", "self", ",", "async_client", ",", "pid", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Importing: {}'", ".", "format", "(", "pid", ")", ")", "task_set", "=", "set", "(", ")", "object_info_pyxb", "=", "d1_...
Import the SciObj at {pid}. If the SciObj is a Resource Map, also recursively import the aggregated objects.
[ "Import", "the", "SciObj", "at", "{", "pid", "}", "." ]
python
train
jepegit/cellpy
cellpy/readers/dbreader.py
https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/readers/dbreader.py#L182-L194
def select_serial_number_row(self, serial_number): """Select row for identification number serial_number Args: serial_number: serial number Returns: pandas.DataFrame """ sheet = self.table col = self.db_sheet_cols.id rows = sheet.loc[:, col] == serial_number return sheet.loc[rows, :]
[ "def", "select_serial_number_row", "(", "self", ",", "serial_number", ")", ":", "sheet", "=", "self", ".", "table", "col", "=", "self", ".", "db_sheet_cols", ".", "id", "rows", "=", "sheet", ".", "loc", "[", ":", ",", "col", "]", "==", "serial_number", ...
Select row for identification number serial_number Args: serial_number: serial number Returns: pandas.DataFrame
[ "Select", "row", "for", "identification", "number", "serial_number" ]
python
train
materialsproject/pymatgen
pymatgen/io/zeopp.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/zeopp.py#L456-L513
def get_void_volume_surfarea(structure, rad_dict=None, chan_rad=0.3, probe_rad=0.1): """ Computes the volume and surface area of isolated void using Zeo++. Useful to compute the volume and surface area of vacant site. Args: structure: pymatgen Structure containing vacancy rad_dict(optional): Dictionary with short name of elements and their radii. chan_rad(optional): Minimum channel Radius. probe_rad(optional): Probe radius for Monte Carlo sampling. Returns: volume: floating number representing the volume of void """ with ScratchDir('.'): name = "temp_zeo" zeo_inp_filename = name + ".cssr" ZeoCssr(structure).write_file(zeo_inp_filename) rad_file = None if rad_dict: rad_file = name + ".rad" with open(rad_file, 'w') as fp: for el in rad_dict.keys(): fp.write("{0} {1}".format(el, rad_dict[el])) atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, True, rad_file) vol_str = volume(atmnet, 0.3, probe_rad, 10000) sa_str = surface_area(atmnet, 0.3, probe_rad, 10000) vol = None sa = None for line in vol_str.split("\n"): if "Number_of_pockets" in line: fields = line.split() if float(fields[1]) > 1: vol = -1.0 break if float(fields[1]) == 0: vol = -1.0 break vol = float(fields[3]) for line in sa_str.split("\n"): if "Number_of_pockets" in line: fields = line.split() if float(fields[1]) > 1: # raise ValueError("Too many voids") sa = -1.0 break if float(fields[1]) == 0: sa = -1.0 break sa = float(fields[3]) if not vol or not sa: raise ValueError("Error in zeo++ output stream") return vol, sa
[ "def", "get_void_volume_surfarea", "(", "structure", ",", "rad_dict", "=", "None", ",", "chan_rad", "=", "0.3", ",", "probe_rad", "=", "0.1", ")", ":", "with", "ScratchDir", "(", "'.'", ")", ":", "name", "=", "\"temp_zeo\"", "zeo_inp_filename", "=", "name", ...
Computes the volume and surface area of isolated void using Zeo++. Useful to compute the volume and surface area of vacant site. Args: structure: pymatgen Structure containing vacancy rad_dict(optional): Dictionary with short name of elements and their radii. chan_rad(optional): Minimum channel Radius. probe_rad(optional): Probe radius for Monte Carlo sampling. Returns: volume: floating number representing the volume of void
[ "Computes", "the", "volume", "and", "surface", "area", "of", "isolated", "void", "using", "Zeo", "++", ".", "Useful", "to", "compute", "the", "volume", "and", "surface", "area", "of", "vacant", "site", "." ]
python
train
knipknap/exscript
Exscript/util/ipv4.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/util/ipv4.py#L257-L273
def is_private(ip): """ Returns True if the given IP address is private, returns False otherwise. :type ip: string :param ip: An IP address. :rtype: bool :return: True if the IP is private, False otherwise. """ if matches_prefix(ip, '10.0.0.0/8'): return True if matches_prefix(ip, '172.16.0.0/12'): return True if matches_prefix(ip, '192.168.0.0/16'): return True return False
[ "def", "is_private", "(", "ip", ")", ":", "if", "matches_prefix", "(", "ip", ",", "'10.0.0.0/8'", ")", ":", "return", "True", "if", "matches_prefix", "(", "ip", ",", "'172.16.0.0/12'", ")", ":", "return", "True", "if", "matches_prefix", "(", "ip", ",", "...
Returns True if the given IP address is private, returns False otherwise. :type ip: string :param ip: An IP address. :rtype: bool :return: True if the IP is private, False otherwise.
[ "Returns", "True", "if", "the", "given", "IP", "address", "is", "private", "returns", "False", "otherwise", "." ]
python
train
CalebBell/thermo
thermo/dippr.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/dippr.py#L244-L315
def EQ104(T, A, B, C, D, E, order=0): r'''DIPPR Equation #104. Often used in calculating second virial coefficients of gases. All 5 parameters are required. C, D, and E are normally large values. .. math:: Y = A + \frac{B}{T} + \frac{C}{T^3} + \frac{D}{T^8} + \frac{E}{T^9} Parameters ---------- T : float Temperature, [K] A-E : float Parameter for the equation; chemical and property specific [-] order : int, optional Order of the calculation. 0 for the calculation of the result itself; for 1, the first derivative of the property is returned, for -1, the indefinite integral of the property with respect to temperature is returned; and for -1j, the indefinite integral of the property divided by temperature with respect to temperature is returned. No other integrals or derivatives are implemented, and an exception will be raised if any other order is given. Returns ------- Y : float Property [constant-specific; if order == 1, property/K; if order == -1, property*K; if order == -1j, unchanged from default] Notes ----- The derivative with respect to T, integral with respect to T, and integral over T with respect to T are computed as follows. All expressions can be obtained with SymPy readily. .. math:: \frac{d Y}{dT} = - \frac{B}{T^{2}} - \frac{3 C}{T^{4}} - \frac{8 D}{T^{9}} - \frac{9 E}{T^{10}} .. math:: \int Y dT = A T + B \log{\left (T \right )} - \frac{1}{56 T^{8}} \left(28 C T^{6} + 8 D T + 7 E\right) .. math:: \int \frac{Y}{T} dT = A \log{\left (T \right )} - \frac{1}{72 T^{9}} \left(72 B T^{8} + 24 C T^{6} + 9 D T + 8 E\right) Examples -------- Water second virial coefficient; DIPPR coefficients normally dimensionless. >>> EQ104(300, 0.02222, -26.38, -16750000, -3.894E19, 3.133E21) -1.1204179007265156 References ---------- .. [1] Design Institute for Physical Properties, 1996. 
DIPPR Project 801 DIPPR/AIChE ''' if order == 0: T2 = T*T return A + (B + (C + (D + E/T)/(T2*T2*T))/T2)/T elif order == 1: T2 = T*T T4 = T2*T2 return (-B + (-3*C + (-8*D - 9*E/T)/(T4*T))/T2)/T2 elif order == -1: return A*T + B*log(T) - (28*C*T**6 + 8*D*T + 7*E)/(56*T**8) elif order == -1j: return A*log(T) - (72*B*T**8 + 24*C*T**6 + 9*D*T + 8*E)/(72*T**9) else: raise Exception(order_not_found_msg)
[ "def", "EQ104", "(", "T", ",", "A", ",", "B", ",", "C", ",", "D", ",", "E", ",", "order", "=", "0", ")", ":", "if", "order", "==", "0", ":", "T2", "=", "T", "*", "T", "return", "A", "+", "(", "B", "+", "(", "C", "+", "(", "D", "+", ...
r'''DIPPR Equation #104. Often used in calculating second virial coefficients of gases. All 5 parameters are required. C, D, and E are normally large values. .. math:: Y = A + \frac{B}{T} + \frac{C}{T^3} + \frac{D}{T^8} + \frac{E}{T^9} Parameters ---------- T : float Temperature, [K] A-E : float Parameter for the equation; chemical and property specific [-] order : int, optional Order of the calculation. 0 for the calculation of the result itself; for 1, the first derivative of the property is returned, for -1, the indefinite integral of the property with respect to temperature is returned; and for -1j, the indefinite integral of the property divided by temperature with respect to temperature is returned. No other integrals or derivatives are implemented, and an exception will be raised if any other order is given. Returns ------- Y : float Property [constant-specific; if order == 1, property/K; if order == -1, property*K; if order == -1j, unchanged from default] Notes ----- The derivative with respect to T, integral with respect to T, and integral over T with respect to T are computed as follows. All expressions can be obtained with SymPy readily. .. math:: \frac{d Y}{dT} = - \frac{B}{T^{2}} - \frac{3 C}{T^{4}} - \frac{8 D}{T^{9}} - \frac{9 E}{T^{10}} .. math:: \int Y dT = A T + B \log{\left (T \right )} - \frac{1}{56 T^{8}} \left(28 C T^{6} + 8 D T + 7 E\right) .. math:: \int \frac{Y}{T} dT = A \log{\left (T \right )} - \frac{1}{72 T^{9}} \left(72 B T^{8} + 24 C T^{6} + 9 D T + 8 E\right) Examples -------- Water second virial coefficient; DIPPR coefficients normally dimensionless. >>> EQ104(300, 0.02222, -26.38, -16750000, -3.894E19, 3.133E21) -1.1204179007265156 References ---------- .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801 DIPPR/AIChE
[ "r", "DIPPR", "Equation", "#104", ".", "Often", "used", "in", "calculating", "second", "virial", "coefficients", "of", "gases", ".", "All", "5", "parameters", "are", "required", ".", "C", "D", "and", "E", "are", "normally", "large", "values", "." ]
python
valid
oseledets/ttpy
tt/core/matrix.py
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/matrix.py#L355-L374
def full(self): """ Transforms a TT-matrix into a full matrix""" N = self.n.prod() M = self.m.prod() a = self.tt.full() d = self.tt.d sz = _np.vstack((self.n, self.m)).flatten('F') a = a.reshape(sz, order='F') # Design a permutation prm = _np.arange(2 * d) prm = prm.reshape((d, 2), order='F') prm = prm.transpose() prm = prm.flatten('F') # Get the inverse permutation iprm = [0] * (2 * d) for i in xrange(2 * d): iprm[prm[i]] = i a = a.transpose(iprm).reshape(N, M, order='F') a = a.reshape(N, M) return a
[ "def", "full", "(", "self", ")", ":", "N", "=", "self", ".", "n", ".", "prod", "(", ")", "M", "=", "self", ".", "m", ".", "prod", "(", ")", "a", "=", "self", ".", "tt", ".", "full", "(", ")", "d", "=", "self", ".", "tt", ".", "d", "sz",...
Transforms a TT-matrix into a full matrix
[ "Transforms", "a", "TT", "-", "matrix", "into", "a", "full", "matrix" ]
python
train
twisted/mantissa
xmantissa/liveform.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/liveform.py#L1288-L1298
def forms(self, req, tag): """ Make and return some forms, using L{self.parameter.getInitialLiveForms}. @return: some subforms. @rtype: C{list} of L{LiveForm} """ liveForms = self.parameter.getInitialLiveForms() for liveForm in liveForms: liveForm.setFragmentParent(self) return liveForms
[ "def", "forms", "(", "self", ",", "req", ",", "tag", ")", ":", "liveForms", "=", "self", ".", "parameter", ".", "getInitialLiveForms", "(", ")", "for", "liveForm", "in", "liveForms", ":", "liveForm", ".", "setFragmentParent", "(", "self", ")", "return", ...
Make and return some forms, using L{self.parameter.getInitialLiveForms}. @return: some subforms. @rtype: C{list} of L{LiveForm}
[ "Make", "and", "return", "some", "forms", "using", "L", "{", "self", ".", "parameter", ".", "getInitialLiveForms", "}", "." ]
python
train
cdriehuys/django-rest-email-auth
rest_email_auth/serializers.py
https://github.com/cdriehuys/django-rest-email-auth/blob/7e752c4d77ae02d2d046f214f56e743aa12ab23f/rest_email_auth/serializers.py#L282-L295
def save(self):
    """
    Reset the user's password if the provided information is valid.
    """
    key = self.validated_data["key"]
    token = models.PasswordResetToken.objects.get(key=key)
    # The token knows which email (and hence user) it was issued for.
    user = token.email.user
    user.set_password(self.validated_data["password"])
    user.save()
    logger.info("Reset password for %s", user)
    # Tokens are single-use: destroy it once the password is changed.
    token.delete()
[ "def", "save", "(", "self", ")", ":", "token", "=", "models", ".", "PasswordResetToken", ".", "objects", ".", "get", "(", "key", "=", "self", ".", "validated_data", "[", "\"key\"", "]", ")", "token", ".", "email", ".", "user", ".", "set_password", "(",...
Reset the user's password if the provided information is valid.
[ "Reset", "the", "user", "s", "password", "if", "the", "provided", "information", "is", "valid", "." ]
python
valid
eng-tools/bwplot
bwplot/colors.py
https://github.com/eng-tools/bwplot/blob/448bc422ffa301988f40d459230f9a4f21e2f1c6/bwplot/colors.py#L83-L157
def spectra(i, **kwargs):
    """
    Define colours by number.

    Can be plotted either in order of gray scale or in the 'best' order
    for having a strong gray contrast for only three or four lines.

    :param i: index (int or float) of a colour, or a colour name (str)
    :param ordered: kwarg, if True colours are accessed darkest to lightest
    :param options: kwarg, one of 'best' (default), 'dots' or 'ordered'
    :param gray: kwarg, if True return the gray-scale equivalent colour
    :return: an (r, g, b) tuple
    """
    ordered = kwargs.get('ordered', False)
    options = kwargs.get('options', 'best')
    gray = kwargs.get('gray', False)
    CD = {}
    CD['dark blue'] = (1.0, 0.0, 0.55)  # 0
    CD['dark green'] = (0.15, 0.35, 0.0)  # 1
    CD['dark red'] = (0.73, 0.0, 0.0)  # 2
    CD['dark purple'] = (0.8, 0.0, 0.8)  # 3
    CD['light green'] = (0.49, 0.64, 0.0)  # 4
    CD['orange'] = (1.0, 0.5, 0.0)  # 5
    CD['light blue'] = (0.5, 0.85, 1.0)  # 6
    CD['pink'] = (1.0, 0.8, 0.8)  # 7
    CD['brown'] = (0.5, 0.3, 0.0)  # 8
    CD['red'] = (0.9, 0.0, 0.0)  # 9
    CD['greenish blue'] = (0.12, .8, .8)  # 10
    CD['bluey purple'] = (0.8, 0.85, 1.0)  # 12
    CD['yellow'] = (1.0, 1.0, 0.0)  # 6
    CD['dark gray'] = (0.25, 0.25, 0.25)  #
    CD['mid gray'] = (0.5, 0.5, 0.5)  #
    CD['light gray'] = (0.75, 0.75, 0.75)  #
    CD['black5'] = (0.05, 0.05, 0.05)  #
    CD['black'] = (0.0, 0.0, 0.0)  #
    CD['white'] = (1.0, 1.0, 1.0)  #
    if isinstance(i, str):
        # Direct lookup by colour name.
        return CD[i]
    if isinstance(i, float):
        # Truncate float indices to ints (the original also had a dead
        # `if isinstance(i, int): i = i` branch, removed here).
        i = int(i)
    DtoL = ['dark blue', 'dark green', 'dark red', 'brown', 'light green',
            'orange', 'light blue', 'pink', 'dark purple', 'red',
            'greenish blue', 'bluey purple', 'yellow', 'dark gray',
            'mid gray', 'light gray']
    Best = ['dark blue', 'orange', 'light blue', 'dark purple', 'dark green',
            'bluey purple', 'dark red', 'light green', 'pink', 'brown',
            'red', 'yellow', 'greenish blue', 'dark gray', 'mid gray',
            'light gray']
    Dots = ['dark blue', 'yellow', 'light blue', 'dark purple', 'dark green',
            'orange', 'bluey purple', 'dark red', 'light green', 'pink',
            'brown', 'red', 'greenish blue', 'dark gray', 'mid gray',
            'light gray']
    ind = i % len(Best)
    dat = CD[Best[ind]]
    if ordered:
        # if ordered is true then the colours are accessed from darkest to lightest
        ind = i % len(DtoL)
        dat = CD[DtoL[ind]]
    if options == "dots":
        ind = i % len(Dots)
        dat = CD[Dots[ind]]
    if options == "ordered":
        ind = i % len(DtoL)
        dat = CD[DtoL[ind]]
    # Standard luma weights for the gray-scale value.
    gray_value = 0.299 * dat[0] + 0.587 * dat[1] + 0.114 * dat[2]
    if gray:
        return gray_value, gray_value, gray_value
    return dat
[ "def", "spectra", "(", "i", ",", "*", "*", "kwargs", ")", ":", "ordered", "=", "kwargs", ".", "get", "(", "'ordered'", ",", "False", ")", "options", "=", "kwargs", ".", "get", "(", "'options'", ",", "'best'", ")", "gray", "=", "kwargs", ".", "get",...
Define colours by number. Can be plotted either in order of gray scale or in the 'best' order for having a strong gray contrast for only three or four lines :param i: the index to access a colour
[ "Define", "colours", "by", "number", ".", "Can", "be", "plotted", "either", "in", "order", "of", "gray", "scale", "or", "in", "the", "best", "order", "for", "having", "a", "strong", "gray", "contrast", "for", "only", "three", "or", "four", "lines", ":", ...
python
train
robotools/fontParts
Lib/fontParts/base/font.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/font.py#L863-L884
def insertLayer(self, layer, name=None):
    """
    Insert **layer** into the font. ::

        >>> layer = font.insertLayer(otherLayer, name="layer 2")

    The layer is not inserted directly: a new layer is created and the
    data from **layer** (as documented in :meth:`BaseLayer.copy`) is
    copied into it. **name** is the name assigned to the new layer;
    when omitted the original layer's name is used, which must then
    exist. Any existing layer with the same name is removed first.
    """
    layerName = layer.name if name is None else name
    layerName = normalizers.normalizeLayerName(layerName)
    # Replace, rather than duplicate, an identically named layer.
    if layerName in self:
        self.removeLayer(layerName)
    return self._insertLayer(layer, name=layerName)
[ "def", "insertLayer", "(", "self", ",", "layer", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "layer", ".", "name", "name", "=", "normalizers", ".", "normalizeLayerName", "(", "name", ")", "if", "name", "in", "sel...
Insert **layer** into the font. :: >>> layer = font.insertLayer(otherLayer, name="layer 2") This will not insert the layer directly. Rather, a new layer will be created and the data from **layer** will be copied to to the new layer. **name** indicates the name that should be assigned to the layer after insertion. If **name** is not given, the layer's original name must be used. If the layer does not have a name, an error must be raised. The data that will be inserted from **layer** is the same data as documented in :meth:`BaseLayer.copy`.
[ "Insert", "**", "layer", "**", "into", "the", "font", ".", "::" ]
python
train
HewlettPackard/python-hpOneView
hpOneView/resources/security/certificate_rabbitmq.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/security/certificate_rabbitmq.py#L44-L57
def generate(self, information, timeout=-1):
    """
    Generate a self-signed or internal-CA-signed certificate for
    RabbitMQ clients.

    Args:
        information (dict): Certificate generation parameters.
        timeout: Seconds to wait for task completion (-1 waits
            indefinitely). Timing out only stops waiting; it does not
            abort the operation in OneView.

    Returns:
        dict: The generated RabbitMQ certificate.
    """
    client = self._client
    return client.create(information, timeout=timeout)
[ "def", "generate", "(", "self", ",", "information", ",", "timeout", "=", "-", "1", ")", ":", "return", "self", ".", "_client", ".", "create", "(", "information", ",", "timeout", "=", "timeout", ")" ]
Generates a self signed certificate or an internal CA signed certificate for RabbitMQ clients. Args: information (dict): Information to generate the certificate for RabbitMQ clients. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: RabbitMQ certificate generated
[ "Generates", "a", "self", "signed", "certificate", "or", "an", "internal", "CA", "signed", "certificate", "for", "RabbitMQ", "clients", "." ]
python
train
michaelliao/sinaweibopy
snspy.py
https://github.com/michaelliao/sinaweibopy/blob/0f19dd71c1fbd16ee539620c7e9e986887f5c665/snspy.py#L231-L244
def _prepare_api(self, method, path, access_token, **kw): ''' Get api url. ''' headers = None if access_token: headers = {'Authorization': 'OAuth2 %s' % access_token} if '/remind/' in path: # sina remind api url is different: return method, 'https://rm.api.weibo.com/2/%s.json' % path, headers, kw if method == 'POST' and 'pic' in kw: # if 'pic' in parameter, set to UPLOAD mode: return 'UPLOAD', 'https://api.weibo.com/2/%s.json' % path, headers, kw return method, 'https://api.weibo.com/2/%s.json' % path, headers, kw
[ "def", "_prepare_api", "(", "self", ",", "method", ",", "path", ",", "access_token", ",", "*", "*", "kw", ")", ":", "headers", "=", "None", "if", "access_token", ":", "headers", "=", "{", "'Authorization'", ":", "'OAuth2 %s'", "%", "access_token", "}", "...
Get api url.
[ "Get", "api", "url", "." ]
python
train
contentful/contentful-management.py
contentful_management/webhook.py
https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/webhook.py#L51-L60
def create_attributes(klass, attributes, previous_object=None):
    """
    Attributes for webhook creation.

    Delegates to the parent class and enforces that the mandatory
    'topics' key is present in the resulting attributes.
    """
    result = super(Webhook, klass).create_attributes(attributes, previous_object)
    if 'topics' in result:
        return result
    raise Exception("Topics ('topics') must be provided for this operation.")
[ "def", "create_attributes", "(", "klass", ",", "attributes", ",", "previous_object", "=", "None", ")", ":", "result", "=", "super", "(", "Webhook", ",", "klass", ")", ".", "create_attributes", "(", "attributes", ",", "previous_object", ")", "if", "'topics'", ...
Attributes for webhook creation.
[ "Attributes", "for", "webhook", "creation", "." ]
python
train
OLC-Bioinformatics/sipprverse
cgecore/utility.py
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/utility.py#L80-L87
def graceful_exit(self, msg):
    """ This function Tries to update the MSQL database before exiting. """
    # Flush any previously captured errors to stderr first.
    pending = self.caught_error
    if pending:
        self.print2file(self.stderr, False, False, pending)
    # Log the fatal message, then terminate with a failure status.
    self.log(msg)
    sys.exit(1)
[ "def", "graceful_exit", "(", "self", ",", "msg", ")", ":", "# Print stored errors to stderr", "if", "self", ".", "caught_error", ":", "self", ".", "print2file", "(", "self", ".", "stderr", ",", "False", ",", "False", ",", "self", ".", "caught_error", ")", ...
This function Tries to update the MSQL database before exiting.
[ "This", "function", "Tries", "to", "update", "the", "MSQL", "database", "before", "exiting", "." ]
python
train
mitsei/dlkit
dlkit/json_/assessment/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/managers.py#L2834-L2857
def get_assessment_taken_admin_session_for_bank(self, bank_id, proxy):
    """Gets the ``OsidSession`` associated with the assessment taken
    admin service for the given bank.

    arg:    bank_id (osid.id.Id): the ``Id`` of the bank
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.assessment.AssessmentTakenAdminSession) - an
            ``AssessmentTakenSearchSession``
    raise:  NotFound - ``bank_id`` not found
    raise:  NullArgument - ``bank_id`` or ``proxy`` is ``null``
    raise:  OperationFailed - ``unable to complete request``
    raise:  Unimplemented - ``supports_assessment_taken_admin()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_assessment_taken_admin()`` and
    ``supports_visible_federation()`` are ``true``.*
    """
    if not self.supports_assessment_taken_admin():
        raise errors.Unimplemented()
    # TODO(review): also verify bank_id exists and raise errors.NotFound otherwise.
    # pylint: disable=no-member
    session = sessions.AssessmentTakenAdminSession(bank_id, proxy, self._runtime)
    return session
[ "def", "get_assessment_taken_admin_session_for_bank", "(", "self", ",", "bank_id", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_assessment_taken_admin", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "##", "# Also include check to s...
Gets the ``OsidSession`` associated with the assessment taken admin service for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of the bank arg: proxy (osid.proxy.Proxy): a proxy return: (osid.assessment.AssessmentTakenAdminSession) - an ``AssessmentTakenSearchSession`` raise: NotFound - ``bank_id`` not found raise: NullArgument - ``bank_id`` or ``proxy`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_assessment_taken_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_taken_admin()`` and ``supports_visible_federation()`` are ``true``.*
[ "Gets", "the", "OsidSession", "associated", "with", "the", "assessment", "taken", "admin", "service", "for", "the", "given", "bank", "." ]
python
train
quora/qcore
qcore/helpers.py
https://github.com/quora/qcore/blob/fa5cd438eea554db35fd29cbc8dfbde69f09961c/qcore/helpers.py#L316-L333
def catchable_exceptions(exceptions):
    """Returns True if exceptions can be caught in the except clause.

    The exception can be caught if it is an Exception type or a
    non-empty tuple of exception types.
    """
    if isinstance(exceptions, type):
        return issubclass(exceptions, BaseException)
    if isinstance(exceptions, tuple):
        return bool(exceptions) and all(
            issubclass(entry, BaseException) for entry in exceptions)
    return False
[ "def", "catchable_exceptions", "(", "exceptions", ")", ":", "if", "isinstance", "(", "exceptions", ",", "type", ")", "and", "issubclass", "(", "exceptions", ",", "BaseException", ")", ":", "return", "True", "if", "(", "isinstance", "(", "exceptions", ",", "t...
Returns True if exceptions can be caught in the except clause. The exception can be caught if it is an Exception type or a tuple of exception types.
[ "Returns", "True", "if", "exceptions", "can", "be", "caught", "in", "the", "except", "clause", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/mpls_config/router/mpls/mpls_cmds_holder/path/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/mpls_config/router/mpls/mpls_cmds_holder/path/__init__.py#L133-L154
def _set_path_hop(self, v, load=False):
  """
  Setter method for path_hop, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/path/path_hop (list)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_path_hop is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_path_hop() directly.
  """
  # Allow wrapped values to normalise themselves before validation.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Auto-generated pyangbind validation: coerce v into the YANG
    # ordered-list type for path-hop, keyed by path-hop-ip.
    t = YANGDynClass(v,base=YANGListType("path_hop_ip",path_hop.path_hop, yang_name="path-hop", rest_name="hop", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='path-hop-ip', extensions={u'tailf-common': {u'info': u'Configure path strict or loose hops', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-full-no': None, u'alt-name': u'hop', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'MplsPathHop'}}), is_container='list', yang_name="path-hop", rest_name="hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure path strict or loose hops', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-full-no': None, u'alt-name': u'hop', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'MplsPathHop'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
  except (TypeError, ValueError):
    # Surface the generated, human-readable type description on mismatch.
    raise ValueError({
        'error-string': """path_hop must be of a type compatible with list""",
        'defined-type': "list",
        'generated-type': """YANGDynClass(base=YANGListType("path_hop_ip",path_hop.path_hop, yang_name="path-hop", rest_name="hop", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='path-hop-ip', extensions={u'tailf-common': {u'info': u'Configure path strict or loose hops', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-full-no': None, u'alt-name': u'hop', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'MplsPathHop'}}), is_container='list', yang_name="path-hop", rest_name="hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure path strict or loose hops', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-full-no': None, u'alt-name': u'hop', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'MplsPathHop'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)""",
      })

  self.__path_hop = t
  # Notify the parent object, when present, that configuration changed.
  if hasattr(self, '_set'):
    self._set()
[ "def", "_set_path_hop", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base"...
Setter method for path_hop, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/path/path_hop (list) If this variable is read-only (config: false) in the source YANG file, then _set_path_hop is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_path_hop() directly.
[ "Setter", "method", "for", "path_hop", "mapped", "from", "YANG", "variable", "/", "mpls_config", "/", "router", "/", "mpls", "/", "mpls_cmds_holder", "/", "path", "/", "path_hop", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", ...
python
train
ajenhl/tacl
tacl/__main__.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/__main__.py#L116-L120
def generate_catalogue(args, parser):
    """Generates and saves a catalogue file."""
    cat = tacl.Catalogue()
    cat.generate(args.corpus, args.label)
    cat.save(args.catalogue)
[ "def", "generate_catalogue", "(", "args", ",", "parser", ")", ":", "catalogue", "=", "tacl", ".", "Catalogue", "(", ")", "catalogue", ".", "generate", "(", "args", ".", "corpus", ",", "args", ".", "label", ")", "catalogue", ".", "save", "(", "args", "....
Generates and saves a catalogue file.
[ "Generates", "and", "saves", "a", "catalogue", "file", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_lag.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_lag.py#L312-L325
def get_port_channel_detail_output_lacp_aggr_member_sync(self, **kwargs):
    """Auto Generated Code

    Builds the XML request tree
    <get_port_channel_detail><output><lacp><aggr-member><sync>
    and dispatches it through the callback.

    :param sync: required; text for the <sync> leaf.
    :param callback: optional override for ``self._callback``.
    :return: whatever the callback returns for the request tree.
    """
    # The original created a throwaway <config> element and immediately
    # rebound the name to the real root; the dead element is dropped.
    config = ET.Element("get_port_channel_detail")
    output = ET.SubElement(config, "output")
    lacp = ET.SubElement(output, "lacp")
    aggr_member = ET.SubElement(lacp, "aggr-member")
    sync = ET.SubElement(aggr_member, "sync")
    sync.text = kwargs.pop('sync')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "get_port_channel_detail_output_lacp_aggr_member_sync", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_port_channel_detail", "=", "ET", ".", "Element", "(", "\"get_port_channel_detail\"", ")",...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
gitpython-developers/GitPython
git/refs/symbolic.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/refs/symbolic.py#L517-L546
def create(cls, repo, path, reference='HEAD', force=False, logmsg=None):
    """Create a new symbolic reference: a reference pointing to another
    reference.

    :param repo: Repository to create the reference in
    :param path:
        full path at which the new symbolic reference is supposed to be
        created at, i.e. "NEW_HEAD" or "symrefs/my_new_symref"
    :param reference:
        The reference to which the new symbolic reference should point to.
        If it is a commit'ish, the symbolic ref will be detached.
    :param force:
        if True, force creation even if a symbolic reference with that
        name already exists. Raise OSError otherwise
    :param logmsg:
        If not None, the message to append to the reflog. Otherwise no
        reflog entry is written.
    :return: Newly created symbolic Reference
    :raise OSError:
        If a (Symbolic)Reference with the same name but different
        contents already exists.
    :note: This does not alter the current HEAD, index or Working Tree
    """
    args = (repo, path, cls._resolve_ref_on_create, reference, force, logmsg)
    return cls._create(*args)
[ "def", "create", "(", "cls", ",", "repo", ",", "path", ",", "reference", "=", "'HEAD'", ",", "force", "=", "False", ",", "logmsg", "=", "None", ")", ":", "return", "cls", ".", "_create", "(", "repo", ",", "path", ",", "cls", ".", "_resolve_ref_on_cre...
Create a new symbolic reference, hence a reference pointing to another reference. :param repo: Repository to create the reference in :param path: full path at which the new symbolic reference is supposed to be created at, i.e. "NEW_HEAD" or "symrefs/my_new_symref" :param reference: The reference to which the new symbolic reference should point to. If it is a commit'ish, the symbolic ref will be detached. :param force: if True, force creation even if a symbolic reference with that name already exists. Raise OSError otherwise :param logmsg: If not None, the message to append to the reflog. Otherwise no reflog entry is written. :return: Newly created symbolic Reference :raise OSError: If a (Symbolic)Reference with the same name but different contents already exists. :note: This does not alter the current HEAD, index or Working Tree
[ "Create", "a", "new", "symbolic", "reference", "hence", "a", "reference", "pointing", "to", "another", "reference", "." ]
python
train
openpaperwork/paperwork-backend
paperwork_backend/index.py
https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/index.py#L403-L415
def upd_doc(self, doc, index_update=True, label_guesser_update=True):
    """
    Update a document in the index
    """
    # Lazily create the index writer / label-guesser updater only when
    # the corresponding update is requested and none exists yet.
    if index_update and not self.index_writer:
        self.index_writer = self.index.writer()
    if label_guesser_update and not self.label_guesser_updater:
        self.label_guesser_updater = self.label_guesser.get_updater()
    logger.info("Updating modified doc: %s" % doc)
    if index_update:
        self._update_doc_in_index(self.index_writer, doc)
    if label_guesser_update:
        self.label_guesser_updater.upd_doc(doc)
[ "def", "upd_doc", "(", "self", ",", "doc", ",", "index_update", "=", "True", ",", "label_guesser_update", "=", "True", ")", ":", "if", "not", "self", ".", "index_writer", "and", "index_update", ":", "self", ".", "index_writer", "=", "self", ".", "index", ...
Update a document in the index
[ "Update", "a", "document", "in", "the", "index" ]
python
train
google/python-gflags
gflags2man.py
https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags2man.py#L228-L272
def ParseDesc(self, start_line=0):
    """Parse the initial description. This could be Python or C++.

    Returns:
      (start_line, lang_type)
        start_line  Line to start parsing flags on (int)
        lang_type   Either 'python' or 'c'
        (-1, '') if the flags start could not be found
    """
    swig_marker = self.executable + ':'
    prev_was_blank = False
    idx = 0  # the passed-in start_line arg is deliberately ignored (?)
    for idx in range(0, len(self.output)):
        line = self.output[idx].rstrip()
        # Python flags start with 'flags:' followed by a blank line.
        if (line == 'flags:' and len(self.output) > idx + 1
                and self.output[idx + 1].rstrip() == ''):
            logging.debug('Flags start (python): %s' % line)
            return (idx + 2, 'python')
        # SWIG output begins with just the module name and a colon.
        if line == swig_marker:
            logging.debug('Flags start (swig): %s' % line)
            return (idx, 'python')
        # C++ flags follow a blank line and a constant prefix.
        if prev_was_blank and line.startswith(' Flags from '):
            logging.debug('Flags start (c): %s' % line)
            return (idx, 'c')
        # Java flags follow a constant sentence; skip "Standard flags:".
        if line == 'where flags are':
            logging.debug('Flags start (java): %s' % line)
            return (idx + 2, 'java')
        logging.debug('Desc: %s' % line)
        self.desc.append(line)
        prev_was_blank = (line == '')
    logging.warn('Never found the start of the flags section for "%s"!'
                 % self.long_name)
    return (-1, '')
[ "def", "ParseDesc", "(", "self", ",", "start_line", "=", "0", ")", ":", "exec_mod_start", "=", "self", ".", "executable", "+", "':'", "after_blank", "=", "0", "start_line", "=", "0", "# ignore the passed-in arg for now (?)", "for", "start_line", "in", "range", ...
Parse the initial description. This could be Python or C++. Returns: (start_line, lang_type) start_line Line to start parsing flags on (int) lang_type Either 'python' or 'c' (-1, '') if the flags start could not be found
[ "Parse", "the", "initial", "description", "." ]
python
train
jsommers/switchyard
switchyard/lib/topo/topobuild.py
https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/lib/topo/topobuild.py#L231-L251
def addLink(self, node1, node2, capacity, delay):
    '''
    Add a bidirectional link between node1 and node2 with the given
    capacity and delay to the topology.
    '''
    # Both endpoints must already exist in the underlying graph.
    for endpoint in (node1, node2):
        if not self.__nxgraph.has_node(endpoint):
            raise Exception("No node {} exists for building a link".format(endpoint))
    macs = [None, None]
    if self.__auto_macs:
        # Derive two fresh MAC addresses from the running interface counter.
        for idx in range(len(macs)):
            raw = '{:012x}'.format(self.__ifnum)
            self.__ifnum += 1
            macs[idx] = ':'.join(raw[pos:(pos + 2)] for pos in range(0, len(raw), 2))
    node1if = self.__nxgraph.node[node1]['nodeobj'].addInterface(ethaddr=macs[0])
    node2if = self.__nxgraph.node[node2]['nodeobj'].addInterface(ethaddr=macs[1])
    self.__nxgraph.add_edge(node1, node2)
    # Remember which interface belongs to which endpoint on this edge.
    self.__nxgraph[node1][node2][node1] = node1if
    self.__nxgraph[node1][node2][node2] = node2if
    self.setLinkCharacteristics(node1, node2, capacity, delay)
[ "def", "addLink", "(", "self", ",", "node1", ",", "node2", ",", "capacity", ",", "delay", ")", ":", "for", "n", "in", "(", "node1", ",", "node2", ")", ":", "if", "not", "self", ".", "__nxgraph", ".", "has_node", "(", "n", ")", ":", "raise", "Exce...
Add a bidirectional link between node1 and node2 with the given capacity and delay to the topology.
[ "Add", "a", "bidirectional", "link", "between", "node1", "and", "node2", "with", "the", "given", "capacity", "and", "delay", "to", "the", "topology", "." ]
python
train
alex-kostirin/pyatomac
atomac/ldtpd/mouse.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ldtpd/mouse.py#L221-L244
def doubleclick(self, window_name, object_name):
    """
    Double click on the object

    @param window_name: Window name to look for, either full name,
    LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to look for, either full name,
    LDTP's name convention, or a Unix glob. Or menu heirarchy
    @type object_name: string

    @return: 1 on success.
    @rtype: integer
    """
    handle = self._get_object_handle(window_name, object_name)
    if not handle.AXEnabled:
        raise LdtpServerException(u"Object %s state disabled" % object_name)
    self._grabfocus(handle)
    x, y, width, height = self._getobjectsize(handle)
    front_window = self._get_front_most_window()
    # Double click at the object's centre via the window, not the
    # object handle itself.
    # handle.doubleClick()
    front_window.doubleClickMouse((x + width / 2, y + height / 2))
    return 1
[ "def", "doubleclick", "(", "self", ",", "window_name", ",", "object_name", ")", ":", "object_handle", "=", "self", ".", "_get_object_handle", "(", "window_name", ",", "object_name", ")", "if", "not", "object_handle", ".", "AXEnabled", ":", "raise", "LdtpServerEx...
Double click on the object @param window_name: Window name to look for, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to look for, either full name, LDTP's name convention, or a Unix glob. Or menu heirarchy @type object_name: string @return: 1 on success. @rtype: integer
[ "Double", "click", "on", "the", "object", "@param", "window_name", ":", "Window", "name", "to", "look", "for", "either", "full", "name", "LDTP", "s", "name", "convention", "or", "a", "Unix", "glob", ".", "@type", "window_name", ":", "string", "@param", "ob...
python
valid
ironfroggy/django-better-cache
bettercache/proxy.py
https://github.com/ironfroggy/django-better-cache/blob/5350e8c646cef1c1ca74eab176f856ddd9eaf5c3/bettercache/proxy.py#L65-L72
def header_name(name):
    """Convert header name like HTTP_XXXX_XXX to Xxxx-Xxx:"""
    # Strip the leading "HTTP_" and capitalize each underscore-separated
    # word (first letter upper, rest lower), joining with hyphens.
    parts = name[5:].split('_')
    capped = [part[0].upper() + part[1:].lower() for part in parts]
    return '-'.join(capped)
[ "def", "header_name", "(", "name", ")", ":", "words", "=", "name", "[", "5", ":", "]", ".", "split", "(", "'_'", ")", "for", "i", "in", "range", "(", "len", "(", "words", ")", ")", ":", "words", "[", "i", "]", "=", "words", "[", "i", "]", "...
Convert header name like HTTP_XXXX_XXX to Xxxx-Xxx:
[ "Convert", "header", "name", "like", "HTTP_XXXX_XXX", "to", "Xxxx", "-", "Xxx", ":" ]
python
train
razor-x/dichalcogenides
dichalcogenides/parameters/parameters.py
https://github.com/razor-x/dichalcogenides/blob/0fa1995a3a328b679c9926f73239d0ecdc6e5d3d/dichalcogenides/parameters/parameters.py#L69-L81
def get(self, name):
    """Get a parameter object by name.

    :param name: Name of the parameter object.
    :type name: str
    :return: The parameter.
    :rtype: Parameter
    :raises LookupError: if no parameter with that name exists.
    """
    for candidate in self.parameters:
        if candidate.name == name:
            return candidate
    raise LookupError("Cannot find parameter '" + name + "'.")
[ "def", "get", "(", "self", ",", "name", ")", ":", "parameter", "=", "next", "(", "(", "p", "for", "p", "in", "self", ".", "parameters", "if", "p", ".", "name", "==", "name", ")", ",", "None", ")", "if", "parameter", "is", "None", ":", "raise", ...
Get a parameter object by name. :param name: Name of the parameter object. :type name: str :return: The parameter. :rtype: Parameter
[ "Get", "a", "parameter", "object", "by", "name", "." ]
python
train
vinta/haul
haul/finders/pipeline/html.py
https://github.com/vinta/haul/blob/234024ab8452ea2f41b18561377295cf2879fb20/haul/finders/pipeline/html.py#L30-L52
def a_href_finder(pipeline_index,
                  soup,
                  finder_image_urls=[],
                  *args, **kwargs):
    """
    Find image URL in <a>'s href attribute

    :param pipeline_index: position of this finder in the pipeline (unused).
    :param soup: parsed document exposing ``find_all('a')``.
    :param finder_image_urls: URLs already collected by earlier finders.
    :return: dict with 'finder_image_urls' = previous URLs + new ones.
    """
    now_finder_image_urls = []

    for a in soup.find_all('a'):
        href = a.get('href', None)
        if not href:
            continue
        href = str(href)
        # BUG FIX: the original tested ``filter(href.lower().endswith, ...)``
        # for truthiness; under Python 3 ``filter`` returns an always-truthy
        # iterator, so every URL passed the extension check. ``str.endswith``
        # accepts a tuple of suffixes and is correct on both Python 2 and 3.
        if href.lower().endswith(('.jpg', '.jpeg', '.gif', '.png')):
            if (href not in finder_image_urls) and \
               (href not in now_finder_image_urls):
                now_finder_image_urls.append(href)

    output = {}
    output['finder_image_urls'] = finder_image_urls + now_finder_image_urls

    return output
[ "def", "a_href_finder", "(", "pipeline_index", ",", "soup", ",", "finder_image_urls", "=", "[", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "now_finder_image_urls", "=", "[", "]", "for", "a", "in", "soup", ".", "find_all", "(", "'a'", ")"...
Find image URL in <a>'s href attribute
[ "Find", "image", "URL", "in", "<a", ">", "s", "href", "attribute" ]
python
valid
django-leonardo/django-leonardo
leonardo/module/web/widget/application/reverse.py
https://github.com/django-leonardo/django-leonardo/blob/4b933e1792221a13b4028753d5f1d3499b0816d4/leonardo/module/web/widget/application/reverse.py#L31-L37
def cycle_app_reverse_cache(*args, **kwargs): """Does not really empty the cache; instead it adds a random element to the cache key generation which guarantees that the cache does not yet contain values for all newly generated keys""" value = '%07x' % (SystemRandom().randint(0, 0x10000000)) cache.set(APP_REVERSE_CACHE_GENERATION_KEY, value) return value
[ "def", "cycle_app_reverse_cache", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "value", "=", "'%07x'", "%", "(", "SystemRandom", "(", ")", ".", "randint", "(", "0", ",", "0x10000000", ")", ")", "cache", ".", "set", "(", "APP_REVERSE_CACHE_GENERAT...
Does not really empty the cache; instead it adds a random element to the cache key generation which guarantees that the cache does not yet contain values for all newly generated keys
[ "Does", "not", "really", "empty", "the", "cache", ";", "instead", "it", "adds", "a", "random", "element", "to", "the", "cache", "key", "generation", "which", "guarantees", "that", "the", "cache", "does", "not", "yet", "contain", "values", "for", "all", "ne...
python
train
cs50/check50
check50/api.py
https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/api.py#L107-L134
def import_checks(path): """ Import checks module given relative path. :param path: relative path from which to import checks module :type path: str :returns: the imported module :raises FileNotFoundError: if ``path / .check50.yaml`` does not exist :raises yaml.YAMLError: if ``path / .check50.yaml`` is not a valid YAML file This function is particularly useful when a set of checks logically extends another, as is often the case in CS50's own problems that have a "less comfy" and "more comfy" version. The "more comfy" version can include all of the "less comfy" checks like so:: less = check50.import_checks("../less") from less import * .. note:: the ``__name__`` of the imported module is given by the basename of the specified path (``less`` in the above example). """ dir = internal.check_dir / path file = internal.load_config(dir)["checks"] mod = internal.import_file(dir.name, (dir / file).resolve()) sys.modules[dir.name] = mod return mod
[ "def", "import_checks", "(", "path", ")", ":", "dir", "=", "internal", ".", "check_dir", "/", "path", "file", "=", "internal", ".", "load_config", "(", "dir", ")", "[", "\"checks\"", "]", "mod", "=", "internal", ".", "import_file", "(", "dir", ".", "na...
Import checks module given relative path. :param path: relative path from which to import checks module :type path: str :returns: the imported module :raises FileNotFoundError: if ``path / .check50.yaml`` does not exist :raises yaml.YAMLError: if ``path / .check50.yaml`` is not a valid YAML file This function is particularly useful when a set of checks logically extends another, as is often the case in CS50's own problems that have a "less comfy" and "more comfy" version. The "more comfy" version can include all of the "less comfy" checks like so:: less = check50.import_checks("../less") from less import * .. note:: the ``__name__`` of the imported module is given by the basename of the specified path (``less`` in the above example).
[ "Import", "checks", "module", "given", "relative", "path", "." ]
python
train
jim-easterbrook/pyctools
src/pyctools/components/deinterlace/intrafield.py
https://github.com/jim-easterbrook/pyctools/blob/2a958665326892f45f249bebe62c2c23f306732b/src/pyctools/components/deinterlace/intrafield.py#L28-L52
def IntraField(config={}): """Intra field interlace to sequential converter. This uses a vertical filter with an aperture of 8 lines, generated by :py:class:`~pyctools.components.interp.filtergenerator.FilterGenerator`. The aperture (and other parameters) can be adjusted after the :py:class:`IntraField` component is created. """ return Compound( config = config, deint = SimpleDeinterlace(), interp = Resize(), filgen = FilterGenerator(yaperture=8, ycut=50), gain = Arithmetic(func='data * pt_float(2)'), linkages = { ('self', 'input') : [('deint', 'input')], ('deint', 'output') : [('interp', 'input')], ('interp', 'output') : [('self', 'output')], ('filgen', 'output') : [('gain', 'input')], ('gain', 'output') : [('interp', 'filter')], } )
[ "def", "IntraField", "(", "config", "=", "{", "}", ")", ":", "return", "Compound", "(", "config", "=", "config", ",", "deint", "=", "SimpleDeinterlace", "(", ")", ",", "interp", "=", "Resize", "(", ")", ",", "filgen", "=", "FilterGenerator", "(", "yape...
Intra field interlace to sequential converter. This uses a vertical filter with an aperture of 8 lines, generated by :py:class:`~pyctools.components.interp.filtergenerator.FilterGenerator`. The aperture (and other parameters) can be adjusted after the :py:class:`IntraField` component is created.
[ "Intra", "field", "interlace", "to", "sequential", "converter", "." ]
python
train
contentful/contentful-management.py
contentful_management/client.py
https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/client.py#L571-L618
def _contentful_user_agent(self): """ Sets the X-Contentful-User-Agent header. """ header = {} from . import __version__ header['sdk'] = { 'name': 'contentful-management.py', 'version': __version__ } header['app'] = { 'name': self.application_name, 'version': self.application_version } header['integration'] = { 'name': self.integration_name, 'version': self.integration_version } header['platform'] = { 'name': 'python', 'version': platform.python_version() } os_name = platform.system() if os_name == 'Darwin': os_name = 'macOS' elif not os_name or os_name == 'Java': os_name = None elif os_name and os_name not in ['macOS', 'Windows']: os_name = 'Linux' header['os'] = { 'name': os_name, 'version': platform.release() } def format_header(key, values): header = "{0} {1}".format(key, values['name']) if values['version'] is not None: header = "{0}/{1}".format(header, values['version']) return "{0};".format(header) result = [] for k, values in header.items(): if not values['name']: continue result.append(format_header(k, values)) return ' '.join(result)
[ "def", "_contentful_user_agent", "(", "self", ")", ":", "header", "=", "{", "}", "from", ".", "import", "__version__", "header", "[", "'sdk'", "]", "=", "{", "'name'", ":", "'contentful-management.py'", ",", "'version'", ":", "__version__", "}", "header", "[...
Sets the X-Contentful-User-Agent header.
[ "Sets", "the", "X", "-", "Contentful", "-", "User", "-", "Agent", "header", "." ]
python
train
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/cluster.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/cluster.py#L139-L147
def _update_from_pb(self, cluster_pb): """Refresh self from the server-provided protobuf. Helper for :meth:`from_pb` and :meth:`reload`. """ self.location_id = cluster_pb.location.split("/")[-1] self.serve_nodes = cluster_pb.serve_nodes self.default_storage_type = cluster_pb.default_storage_type self._state = cluster_pb.state
[ "def", "_update_from_pb", "(", "self", ",", "cluster_pb", ")", ":", "self", ".", "location_id", "=", "cluster_pb", ".", "location", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "self", ".", "serve_nodes", "=", "cluster_pb", ".", "serve_nodes", "s...
Refresh self from the server-provided protobuf. Helper for :meth:`from_pb` and :meth:`reload`.
[ "Refresh", "self", "from", "the", "server", "-", "provided", "protobuf", ".", "Helper", "for", ":", "meth", ":", "from_pb", "and", ":", "meth", ":", "reload", "." ]
python
train
lyst/lightfm
lightfm/lightfm.py
https://github.com/lyst/lightfm/blob/87b942f87759b8336f9066a25e4762ae7d95455e/lightfm/lightfm.py#L947-L974
def get_item_representations(self, features=None): """ Get the latent representations for items given model and features. Arguments --------- features: np.float32 csr_matrix of shape [n_items, n_item_features], optional Each row contains that item's weights over features. An identity matrix will be used if not supplied. Returns ------- (item_biases, item_embeddings): (np.float32 array of shape n_items, np.float32 array of shape [n_items, num_components] Biases and latent representations for items. """ self._check_initialized() if features is None: return self.item_biases, self.item_embeddings features = sp.csr_matrix(features, dtype=CYTHON_DTYPE) return features * self.item_biases, features * self.item_embeddings
[ "def", "get_item_representations", "(", "self", ",", "features", "=", "None", ")", ":", "self", ".", "_check_initialized", "(", ")", "if", "features", "is", "None", ":", "return", "self", ".", "item_biases", ",", "self", ".", "item_embeddings", "features", "...
Get the latent representations for items given model and features. Arguments --------- features: np.float32 csr_matrix of shape [n_items, n_item_features], optional Each row contains that item's weights over features. An identity matrix will be used if not supplied. Returns ------- (item_biases, item_embeddings): (np.float32 array of shape n_items, np.float32 array of shape [n_items, num_components] Biases and latent representations for items.
[ "Get", "the", "latent", "representations", "for", "items", "given", "model", "and", "features", "." ]
python
train
zimeon/iiif
iiif/error.py
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/error.py#L50-L69
def image_server_response(self, api_version=None): """Response, code and headers for image server error response. api_version selects the format (XML of 1.0). The return value is a tuple of response - body of HTTP response status - the HTTP status code headers - a dict of HTTP headers which will include the Content-Type As a side effect the routine sets self.content_type to the correct media type for the response. """ headers = dict(self.headers) if (api_version < '1.1'): headers['Content-Type'] = 'text/xml' response = self.as_xml() else: headers['Content-Type'] = 'text/plain' response = self.as_txt() return(response, self.code, headers)
[ "def", "image_server_response", "(", "self", ",", "api_version", "=", "None", ")", ":", "headers", "=", "dict", "(", "self", ".", "headers", ")", "if", "(", "api_version", "<", "'1.1'", ")", ":", "headers", "[", "'Content-Type'", "]", "=", "'text/xml'", ...
Response, code and headers for image server error response. api_version selects the format (XML of 1.0). The return value is a tuple of response - body of HTTP response status - the HTTP status code headers - a dict of HTTP headers which will include the Content-Type As a side effect the routine sets self.content_type to the correct media type for the response.
[ "Response", "code", "and", "headers", "for", "image", "server", "error", "response", "." ]
python
train
wummel/linkchecker
third_party/dnspython/dns/ipv6.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/ipv6.py#L96-L163
def inet_aton(text): """Convert a text format IPv6 address into network format. @param text: the textual address @type text: string @rtype: string @raises dns.exception.SyntaxError: the text was not properly formatted """ # # Our aim here is not something fast; we just want something that works. # if text == '::': text = '0::' # # Get rid of the icky dot-quad syntax if we have it. # m = _v4_ending.match(text) if not m is None: text = "%s:%04x:%04x" % (m.group(1), int(m.group(2)) * 256 + int(m.group(3)), int(m.group(4)) * 256 + int(m.group(5))) # # Try to turn '::<whatever>' into ':<whatever>'; if no match try to # turn '<whatever>::' into '<whatever>:' # m = _colon_colon_start.match(text) if not m is None: text = text[1:] else: m = _colon_colon_end.match(text) if not m is None: text = text[:-1] # # Now canonicalize into 8 chunks of 4 hex digits each # chunks = text.split(':') l = len(chunks) if l > 8: raise dns.exception.SyntaxError seen_empty = False canonical = [] for c in chunks: if c == '': if seen_empty: raise dns.exception.SyntaxError seen_empty = True for i in xrange(0, 8 - l + 1): canonical.append('0000') else: lc = len(c) if lc > 4: raise dns.exception.SyntaxError if lc != 4: c = ('0' * (4 - lc)) + c canonical.append(c) if l < 8 and not seen_empty: raise dns.exception.SyntaxError text = ''.join(canonical) # # Finally we can go to binary. # try: return text.decode('hex_codec') except TypeError: raise dns.exception.SyntaxError
[ "def", "inet_aton", "(", "text", ")", ":", "#", "# Our aim here is not something fast; we just want something that works.", "#", "if", "text", "==", "'::'", ":", "text", "=", "'0::'", "#", "# Get rid of the icky dot-quad syntax if we have it.", "#", "m", "=", "_v4_ending"...
Convert a text format IPv6 address into network format. @param text: the textual address @type text: string @rtype: string @raises dns.exception.SyntaxError: the text was not properly formatted
[ "Convert", "a", "text", "format", "IPv6", "address", "into", "network", "format", "." ]
python
train
sepandhaghighi/pycm
pycm/pycm_overall_func.py
https://github.com/sepandhaghighi/pycm/blob/cb03258afd6a821d10acba73c965aaac174bedcd/pycm/pycm_overall_func.py#L472-L498
def lambda_A_calc(classes, table, P, POP): """ Calculate Goodman and Kruskal's lambda A. :param classes: confusion matrix classes :type classes : list :param table: confusion matrix table :type table : dict :param P: condition positive :type P : dict :param POP: population :type POP : int :return: Goodman and Kruskal's lambda A as float """ try: result = 0 maxreference = max(list(P.values())) length = POP for i in classes: col = [] for col_item in table.values(): col.append(col_item[i]) result += max(col) result = (result - maxreference) / (length - maxreference) return result except Exception: return "None"
[ "def", "lambda_A_calc", "(", "classes", ",", "table", ",", "P", ",", "POP", ")", ":", "try", ":", "result", "=", "0", "maxreference", "=", "max", "(", "list", "(", "P", ".", "values", "(", ")", ")", ")", "length", "=", "POP", "for", "i", "in", ...
Calculate Goodman and Kruskal's lambda A. :param classes: confusion matrix classes :type classes : list :param table: confusion matrix table :type table : dict :param P: condition positive :type P : dict :param POP: population :type POP : int :return: Goodman and Kruskal's lambda A as float
[ "Calculate", "Goodman", "and", "Kruskal", "s", "lambda", "A", "." ]
python
train
econ-ark/HARK
HARK/ConsumptionSaving/ConsRepAgentModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsRepAgentModel.py#L213-L235
def getStates(self): ''' Calculates updated values of normalized market resources and permanent income level. Uses pLvlNow, aNrmNow, PermShkNow, TranShkNow. Parameters ---------- None Returns ------- None ''' pLvlPrev = self.pLvlNow aNrmPrev = self.aNrmNow # Calculate new states: normalized market resources and permanent income level self.pLvlNow = pLvlPrev*self.PermShkNow # Same as in IndShockConsType self.kNrmNow = aNrmPrev/self.PermShkNow self.yNrmNow = self.kNrmNow**self.CapShare*self.TranShkNow**(1.-self.CapShare) self.Rfree = 1. + self.CapShare*self.kNrmNow**(self.CapShare-1.)*self.TranShkNow**(1.-self.CapShare) - self.DeprFac self.wRte = (1.-self.CapShare)*self.kNrmNow**self.CapShare*self.TranShkNow**(-self.CapShare) self.mNrmNow = self.Rfree*self.kNrmNow + self.wRte*self.TranShkNow
[ "def", "getStates", "(", "self", ")", ":", "pLvlPrev", "=", "self", ".", "pLvlNow", "aNrmPrev", "=", "self", ".", "aNrmNow", "# Calculate new states: normalized market resources and permanent income level", "self", ".", "pLvlNow", "=", "pLvlPrev", "*", "self", ".", ...
Calculates updated values of normalized market resources and permanent income level. Uses pLvlNow, aNrmNow, PermShkNow, TranShkNow. Parameters ---------- None Returns ------- None
[ "Calculates", "updated", "values", "of", "normalized", "market", "resources", "and", "permanent", "income", "level", ".", "Uses", "pLvlNow", "aNrmNow", "PermShkNow", "TranShkNow", "." ]
python
train
striglia/pyramid_swagger
pyramid_swagger/ingest.py
https://github.com/striglia/pyramid_swagger/blob/1dbc0b4f23e2e5f4ed575c116f3f7d0e83e30d45/pyramid_swagger/ingest.py#L168-L192
def get_swagger_spec(settings): """Return a :class:`bravado_core.spec.Spec` constructed from the swagger specs in `pyramid_swagger.schema_directory`. If `pyramid_swagger.enable_swagger_spec_validation` is enabled the schema will be validated before returning it. :param settings: a pyramid registry settings with configuration for building a swagger schema :type settings: dict :rtype: :class:`bravado_core.spec.Spec` """ schema_dir = settings.get('pyramid_swagger.schema_directory', 'api_docs/') schema_filename = settings.get('pyramid_swagger.schema_file', 'swagger.json') schema_path = os.path.join(schema_dir, schema_filename) schema_url = urlparse.urljoin('file:', pathname2url(os.path.abspath(schema_path))) handlers = build_http_handlers(None) # don't need http_client for file: file_handler = handlers['file'] spec_dict = file_handler(schema_url) return Spec.from_dict( spec_dict, config=create_bravado_core_config(settings), origin_url=schema_url)
[ "def", "get_swagger_spec", "(", "settings", ")", ":", "schema_dir", "=", "settings", ".", "get", "(", "'pyramid_swagger.schema_directory'", ",", "'api_docs/'", ")", "schema_filename", "=", "settings", ".", "get", "(", "'pyramid_swagger.schema_file'", ",", "'swagger.js...
Return a :class:`bravado_core.spec.Spec` constructed from the swagger specs in `pyramid_swagger.schema_directory`. If `pyramid_swagger.enable_swagger_spec_validation` is enabled the schema will be validated before returning it. :param settings: a pyramid registry settings with configuration for building a swagger schema :type settings: dict :rtype: :class:`bravado_core.spec.Spec`
[ "Return", "a", ":", "class", ":", "bravado_core", ".", "spec", ".", "Spec", "constructed", "from", "the", "swagger", "specs", "in", "pyramid_swagger", ".", "schema_directory", ".", "If", "pyramid_swagger", ".", "enable_swagger_spec_validation", "is", "enabled", "t...
python
train
timstaley/voevent-parse
src/voeventparse/misc.py
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/misc.py#L95-L112
def Group(params, name=None, type=None): """Groups together Params for adding under the 'What' section. Args: params(list of :func:`Param`): Parameter elements to go in this group. name(str): Group name. NB ``None`` is valid, since the group may be best identified by its type. type(str): Type of group, e.g. 'complex' (for real and imaginary). """ atts = {} if name: atts['name'] = name if type: atts['type'] = type g = objectify.Element('Group', attrib=atts) for p in params: g.append(p) return g
[ "def", "Group", "(", "params", ",", "name", "=", "None", ",", "type", "=", "None", ")", ":", "atts", "=", "{", "}", "if", "name", ":", "atts", "[", "'name'", "]", "=", "name", "if", "type", ":", "atts", "[", "'type'", "]", "=", "type", "g", "...
Groups together Params for adding under the 'What' section. Args: params(list of :func:`Param`): Parameter elements to go in this group. name(str): Group name. NB ``None`` is valid, since the group may be best identified by its type. type(str): Type of group, e.g. 'complex' (for real and imaginary).
[ "Groups", "together", "Params", "for", "adding", "under", "the", "What", "section", "." ]
python
train
hugapi/hug
hug/middleware.py
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/middleware.py#L122-L134
def match_route(self, reqpath): """Match a request with parameter to it's corresponding route""" route_dicts = [routes for _, routes in self.api.http.routes.items()][0] routes = [route for route, _ in route_dicts.items()] if reqpath not in routes: for route in routes: # replace params in route with regex reqpath = re.sub('^(/v\d*/?)', '/', reqpath) base_url = getattr(self.api.http, 'base_url', '') reqpath = reqpath.replace(base_url, '', 1) if base_url else reqpath if re.match(re.sub(r'/{[^{}]+}', r'/[\\w-]+', route) + '$', reqpath): return route return reqpath
[ "def", "match_route", "(", "self", ",", "reqpath", ")", ":", "route_dicts", "=", "[", "routes", "for", "_", ",", "routes", "in", "self", ".", "api", ".", "http", ".", "routes", ".", "items", "(", ")", "]", "[", "0", "]", "routes", "=", "[", "rout...
Match a request with parameter to it's corresponding route
[ "Match", "a", "request", "with", "parameter", "to", "it", "s", "corresponding", "route" ]
python
train
gwastro/pycbc
pycbc/events/stat.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/events/stat.py#L261-L297
def logsignalrate(self, s0, s1, slide, step): """Calculate the normalized log rate density of signals via lookup""" td = numpy.array(s0['end_time'] - s1['end_time'] - slide*step, ndmin=1) pd = numpy.array((s0['coa_phase'] - s1['coa_phase']) % \ (2. * numpy.pi), ndmin=1) rd = numpy.array((s0['sigmasq'] / s1['sigmasq']) ** 0.5, ndmin=1) sn0 = numpy.array(s0['snr'], ndmin=1) sn1 = numpy.array(s1['snr'], ndmin=1) snr0 = sn0 * 1 snr1 = sn1 * 1 snr0[rd > 1] = sn1[rd > 1] snr1[rd > 1] = sn0[rd > 1] rd[rd > 1] = 1. / rd[rd > 1] # Find which bin each coinc falls into tv = numpy.searchsorted(self.tbins, td) - 1 pv = numpy.searchsorted(self.pbins, pd) - 1 s0v = numpy.searchsorted(self.sbins, snr0) - 1 s1v = numpy.searchsorted(self.sbins, snr1) - 1 rv = numpy.searchsorted(self.rbins, rd) - 1 # Enforce that points fits into the bin boundaries: if a point lies # outside the boundaries it is pushed back to the nearest bin. tv[tv < 0] = 0 tv[tv >= len(self.tbins) - 1] = len(self.tbins) - 2 pv[pv < 0] = 0 pv[pv >= len(self.pbins) - 1] = len(self.pbins) - 2 s0v[s0v < 0] = 0 s0v[s0v >= len(self.sbins) - 1] = len(self.sbins) - 2 s1v[s1v < 0] = 0 s1v[s1v >= len(self.sbins) - 1] = len(self.sbins) - 2 rv[rv < 0] = 0 rv[rv >= len(self.rbins) - 1] = len(self.rbins) - 2 return self.hist[tv, pv, s0v, s1v, rv]
[ "def", "logsignalrate", "(", "self", ",", "s0", ",", "s1", ",", "slide", ",", "step", ")", ":", "td", "=", "numpy", ".", "array", "(", "s0", "[", "'end_time'", "]", "-", "s1", "[", "'end_time'", "]", "-", "slide", "*", "step", ",", "ndmin", "=", ...
Calculate the normalized log rate density of signals via lookup
[ "Calculate", "the", "normalized", "log", "rate", "density", "of", "signals", "via", "lookup" ]
python
train
tk0miya/tk.phpautodoc
src/phply/phpparse.py
https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L203-L208
def p_statement_foreach(p): 'statement : FOREACH LPAREN expr AS foreach_variable foreach_optional_arg RPAREN foreach_statement' if p[6] is None: p[0] = ast.Foreach(p[3], None, p[5], p[8], lineno=p.lineno(1)) else: p[0] = ast.Foreach(p[3], p[5], p[6], p[8], lineno=p.lineno(1))
[ "def", "p_statement_foreach", "(", "p", ")", ":", "if", "p", "[", "6", "]", "is", "None", ":", "p", "[", "0", "]", "=", "ast", ".", "Foreach", "(", "p", "[", "3", "]", ",", "None", ",", "p", "[", "5", "]", ",", "p", "[", "8", "]", ",", ...
statement : FOREACH LPAREN expr AS foreach_variable foreach_optional_arg RPAREN foreach_statement
[ "statement", ":", "FOREACH", "LPAREN", "expr", "AS", "foreach_variable", "foreach_optional_arg", "RPAREN", "foreach_statement" ]
python
train
senaite/senaite.core
bika/lims/browser/analysisrequest/add2.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analysisrequest/add2.py#L1522-L1673
def ajax_submit(self): """Submit & create the ARs """ # Get AR required fields (including extended fields) fields = self.get_ar_fields() # extract records from request records = self.get_records() fielderrors = {} errors = {"message": "", "fielderrors": {}} attachments = {} valid_records = [] # Validate required fields for n, record in enumerate(records): # Process UID fields first and set their values to the linked field uid_fields = filter(lambda f: f.endswith("_uid"), record) for field in uid_fields: name = field.replace("_uid", "") value = record.get(field) if "," in value: value = value.split(",") record[name] = value # Extract file uploads (fields ending with _file) # These files will be added later as attachments file_fields = filter(lambda f: f.endswith("_file"), record) attachments[n] = map(lambda f: record.pop(f), file_fields) # Process Specifications field (dictionary like records instance). # -> Convert to a standard Python dictionary. specifications = map( lambda x: dict(x), record.pop("Specifications", [])) record["Specifications"] = specifications # Required fields and their values required_keys = [field.getName() for field in fields if field.required] required_values = [record.get(key) for key in required_keys] required_fields = dict(zip(required_keys, required_values)) # Client field is required but hidden in the AR Add form. We remove # it therefore from the list of required fields to let empty # columns pass the required check below. if record.get("Client", False): required_fields.pop('Client', None) # Contacts get pre-filled out if only one contact exists. # We won't force those columns with only the Contact filled out to # be required. 
contact = required_fields.pop("Contact", None) # None of the required fields are filled, skip this record if not any(required_fields.values()): continue # Re-add the Contact required_fields["Contact"] = contact # Missing required fields missing = [f for f in required_fields if not record.get(f, None)] # If there are required fields missing, flag an error for field in missing: fieldname = "{}-{}".format(field, n) msg = _("Field '{}' is required".format(field)) fielderrors[fieldname] = msg # Process valid record valid_record = dict() for fieldname, fieldvalue in record.iteritems(): # clean empty if fieldvalue in ['', None]: continue valid_record[fieldname] = fieldvalue # append the valid record to the list of valid records valid_records.append(valid_record) # return immediately with an error response if some field checks failed if fielderrors: errors["fielderrors"] = fielderrors return {'errors': errors} # Process Form actions = ActionHandlerPool.get_instance() actions.queue_pool() ARs = OrderedDict() for n, record in enumerate(valid_records): client_uid = record.get("Client") client = self.get_object_by_uid(client_uid) if not client: actions.resume() raise RuntimeError("No client found") # get the specifications and pass them to the AR create function. 
specifications = record.pop("Specifications", {}) # Create the Analysis Request try: ar = crar( client, self.request, record, specifications=specifications ) except (KeyError, RuntimeError) as e: actions.resume() errors["message"] = e.message return {"errors": errors} # We keep the title to check if AR is newly created # and UID to print stickers ARs[ar.Title()] = ar.UID() for attachment in attachments.get(n, []): if not attachment.filename: continue att = _createObjectByType("Attachment", client, tmpID()) att.setAttachmentFile(attachment) att.processForm() ar.addAttachment(att) actions.resume() level = "info" if len(ARs) == 0: message = _('No Samples could be created.') level = "error" elif len(ARs) > 1: message = _('Samples ${ARs} were successfully created.', mapping={'ARs': safe_unicode(', '.join(ARs.keys()))}) else: message = _('Sample ${AR} was successfully created.', mapping={'AR': safe_unicode(ARs.keys()[0])}) # Display a portal message self.context.plone_utils.addPortalMessage(message, level) # Automatic label printing bika_setup = api.get_bika_setup() auto_print = bika_setup.getAutoPrintStickers() if 'register' in auto_print and ARs: return { 'success': message, 'stickers': ARs.values(), 'stickertemplate': bika_setup.getAutoStickerTemplate() } else: return {'success': message}
[ "def", "ajax_submit", "(", "self", ")", ":", "# Get AR required fields (including extended fields)", "fields", "=", "self", ".", "get_ar_fields", "(", ")", "# extract records from request", "records", "=", "self", ".", "get_records", "(", ")", "fielderrors", "=", "{",...
Submit & create the ARs
[ "Submit", "&", "create", "the", "ARs" ]
python
train
ggaughan/pipe2py
pipe2py/modules/pipestrreplace.py
https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipestrreplace.py#L38-L62
def asyncPipeStrreplace(context=None, _INPUT=None, conf=None, **kwargs): """A string module that asynchronously replaces text. Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : twisted Deferred iterable of items or strings conf : { 'RULE': [ { 'param': {'value': <match type: 1=first, 2=last, 3=every>}, 'find': {'value': <text to find>}, 'replace': {'value': <replacement>} } ] } Returns ------- _OUTPUT : twisted.internet.defer.Deferred generator of replaced strings """ splits = yield asyncGetSplits(_INPUT, conf['RULE'], **kwargs) parsed = yield asyncDispatch(splits, *get_async_dispatch_funcs()) _OUTPUT = yield asyncStarMap(asyncParseResult, parsed) returnValue(iter(_OUTPUT))
[ "def", "asyncPipeStrreplace", "(", "context", "=", "None", ",", "_INPUT", "=", "None", ",", "conf", "=", "None", ",", "*", "*", "kwargs", ")", ":", "splits", "=", "yield", "asyncGetSplits", "(", "_INPUT", ",", "conf", "[", "'RULE'", "]", ",", "*", "*...
A string module that asynchronously replaces text. Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : twisted Deferred iterable of items or strings conf : { 'RULE': [ { 'param': {'value': <match type: 1=first, 2=last, 3=every>}, 'find': {'value': <text to find>}, 'replace': {'value': <replacement>} } ] } Returns ------- _OUTPUT : twisted.internet.defer.Deferred generator of replaced strings
[ "A", "string", "module", "that", "asynchronously", "replaces", "text", ".", "Loopable", "." ]
python
train
allenai/allennlp
allennlp/nn/util.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L231-L269
def masked_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1, memory_efficient: bool = False, mask_fill_value: float = -1e32) -> torch.Tensor: """ ``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be masked. This performs a softmax on just the non-masked portions of ``vector``. Passing ``None`` in for the mask is also acceptable; you'll just get a regular softmax. ``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask, do it yourself before passing the mask into this function. If ``memory_efficient`` is set to true, we will simply use a very large negative number for those masked positions so that the probabilities of those positions would be approximately 0. This is not accurate in math, but works for most cases and consumes less memory. In the case that the input vector is completely masked and ``memory_efficient`` is false, this function returns an array of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model that uses categorical cross-entropy loss. Instead, if ``memory_efficient`` is true, this function will treat every element as equal, and do softmax over equal numbers. """ if mask is None: result = torch.nn.functional.softmax(vector, dim=dim) else: mask = mask.float() while mask.dim() < vector.dim(): mask = mask.unsqueeze(1) if not memory_efficient: # To limit numerical errors from large vector elements outside the mask, we zero these out. result = torch.nn.functional.softmax(vector * mask, dim=dim) result = result * mask result = result / (result.sum(dim=dim, keepdim=True) + 1e-13) else: masked_vector = vector.masked_fill((1 - mask).byte(), mask_fill_value) result = torch.nn.functional.softmax(masked_vector, dim=dim) return result
[ "def", "masked_softmax", "(", "vector", ":", "torch", ".", "Tensor", ",", "mask", ":", "torch", ".", "Tensor", ",", "dim", ":", "int", "=", "-", "1", ",", "memory_efficient", ":", "bool", "=", "False", ",", "mask_fill_value", ":", "float", "=", "-", ...
``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be masked. This performs a softmax on just the non-masked portions of ``vector``. Passing ``None`` in for the mask is also acceptable; you'll just get a regular softmax. ``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask, do it yourself before passing the mask into this function. If ``memory_efficient`` is set to true, we will simply use a very large negative number for those masked positions so that the probabilities of those positions would be approximately 0. This is not accurate in math, but works for most cases and consumes less memory. In the case that the input vector is completely masked and ``memory_efficient`` is false, this function returns an array of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model that uses categorical cross-entropy loss. Instead, if ``memory_efficient`` is true, this function will treat every element as equal, and do softmax over equal numbers.
[ "torch", ".", "nn", ".", "functional", ".", "softmax", "(", "vector", ")", "does", "not", "work", "if", "some", "elements", "of", "vector", "should", "be", "masked", ".", "This", "performs", "a", "softmax", "on", "just", "the", "non", "-", "masked", "p...
python
train
dmlc/gluon-nlp
scripts/machine_translation/dataprocessor.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/machine_translation/dataprocessor.py#L206-L265
def make_dataloader(data_train, data_val, data_test, args, use_average_length=False, num_shards=0, num_workers=8): """Create data loaders for training/validation/test.""" data_train_lengths = get_data_lengths(data_train) data_val_lengths = get_data_lengths(data_val) data_test_lengths = get_data_lengths(data_test) train_batchify_fn = btf.Tuple(btf.Pad(), btf.Pad(), btf.Stack(dtype='float32'), btf.Stack(dtype='float32')) test_batchify_fn = btf.Tuple(btf.Pad(), btf.Pad(), btf.Stack(dtype='float32'), btf.Stack(dtype='float32'), btf.Stack()) target_val_lengths = list(map(lambda x: x[-1], data_val_lengths)) target_test_lengths = list(map(lambda x: x[-1], data_test_lengths)) if args.bucket_scheme == 'constant': bucket_scheme = nlp.data.ConstWidthBucket() elif args.bucket_scheme == 'linear': bucket_scheme = nlp.data.LinearWidthBucket() elif args.bucket_scheme == 'exp': bucket_scheme = nlp.data.ExpWidthBucket(bucket_len_step=1.2) else: raise NotImplementedError train_batch_sampler = nlp.data.FixedBucketSampler(lengths=data_train_lengths, batch_size=args.batch_size, num_buckets=args.num_buckets, ratio=args.bucket_ratio, shuffle=True, use_average_length=use_average_length, num_shards=num_shards, bucket_scheme=bucket_scheme) logging.info('Train Batch Sampler:\n%s', train_batch_sampler.stats()) train_data_loader = nlp.data.ShardedDataLoader(data_train, batch_sampler=train_batch_sampler, batchify_fn=train_batchify_fn, num_workers=num_workers) val_batch_sampler = nlp.data.FixedBucketSampler(lengths=target_val_lengths, batch_size=args.test_batch_size, num_buckets=args.num_buckets, ratio=args.bucket_ratio, shuffle=False, use_average_length=use_average_length, bucket_scheme=bucket_scheme) logging.info('Valid Batch Sampler:\n%s', val_batch_sampler.stats()) val_data_loader = gluon.data.DataLoader(data_val, batch_sampler=val_batch_sampler, batchify_fn=test_batchify_fn, num_workers=num_workers) test_batch_sampler = nlp.data.FixedBucketSampler(lengths=target_test_lengths, 
batch_size=args.test_batch_size, num_buckets=args.num_buckets, ratio=args.bucket_ratio, shuffle=False, use_average_length=use_average_length, bucket_scheme=bucket_scheme) logging.info('Test Batch Sampler:\n%s', test_batch_sampler.stats()) test_data_loader = gluon.data.DataLoader(data_test, batch_sampler=test_batch_sampler, batchify_fn=test_batchify_fn, num_workers=num_workers) return train_data_loader, val_data_loader, test_data_loader
[ "def", "make_dataloader", "(", "data_train", ",", "data_val", ",", "data_test", ",", "args", ",", "use_average_length", "=", "False", ",", "num_shards", "=", "0", ",", "num_workers", "=", "8", ")", ":", "data_train_lengths", "=", "get_data_lengths", "(", "data...
Create data loaders for training/validation/test.
[ "Create", "data", "loaders", "for", "training", "/", "validation", "/", "test", "." ]
python
train
bokeh/bokeh
bokeh/embed/server.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/embed/server.py#L271-L287
def _process_arguments(arguments): ''' Return user-supplied HTML arguments to add to a Bokeh server URL. Args: arguments (dict[str, object]) : Key/value pairs to add to the URL Returns: str ''' if arguments is None: return "" result = "" for key, value in arguments.items(): if not key.startswith("bokeh-"): result += "&{}={}".format(quote_plus(str(key)), quote_plus(str(value))) return result
[ "def", "_process_arguments", "(", "arguments", ")", ":", "if", "arguments", "is", "None", ":", "return", "\"\"", "result", "=", "\"\"", "for", "key", ",", "value", "in", "arguments", ".", "items", "(", ")", ":", "if", "not", "key", ".", "startswith", "...
Return user-supplied HTML arguments to add to a Bokeh server URL. Args: arguments (dict[str, object]) : Key/value pairs to add to the URL Returns: str
[ "Return", "user", "-", "supplied", "HTML", "arguments", "to", "add", "to", "a", "Bokeh", "server", "URL", "." ]
python
train
yunojuno-archive/django-package-monitor
package_monitor/management/commands/refresh_packages.py
https://github.com/yunojuno-archive/django-package-monitor/blob/534aa35ccfe187d2c55aeca0cb52b8278254e437/package_monitor/management/commands/refresh_packages.py#L28-L35
def local(): """Load local requirements file.""" logger.info("Loading requirements from local file.") with open(REQUIREMENTS_FILE, 'r') as f: requirements = parse(f) for r in requirements: logger.debug("Creating new package: %r", r) create_package_version(r)
[ "def", "local", "(", ")", ":", "logger", ".", "info", "(", "\"Loading requirements from local file.\"", ")", "with", "open", "(", "REQUIREMENTS_FILE", ",", "'r'", ")", "as", "f", ":", "requirements", "=", "parse", "(", "f", ")", "for", "r", "in", "requirem...
Load local requirements file.
[ "Load", "local", "requirements", "file", "." ]
python
train
xtuml/pyxtuml
xtuml/load.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/xtuml/load.py#L285-L296
def _populate_matching_class(metamodel, kind, names, values): ''' Populate a *metamodel* with a class that matches the given *insert statement*. ''' attributes = list() for name, value in zip(names, values): ty = guess_type_name(value) attr = (name, ty) attributes.append(attr) return metamodel.define_class(kind, attributes)
[ "def", "_populate_matching_class", "(", "metamodel", ",", "kind", ",", "names", ",", "values", ")", ":", "attributes", "=", "list", "(", ")", "for", "name", ",", "value", "in", "zip", "(", "names", ",", "values", ")", ":", "ty", "=", "guess_type_name", ...
Populate a *metamodel* with a class that matches the given *insert statement*.
[ "Populate", "a", "*", "metamodel", "*", "with", "a", "class", "that", "matches", "the", "given", "*", "insert", "statement", "*", "." ]
python
test
Varkal/chuda
chuda/shell.py
https://github.com/Varkal/chuda/blob/0d93b716dede35231c21be97bcc19a656655983f/chuda/shell.py#L159-L172
def kill(self): """ Kill the current non blocking command Raises: TypeError: If command is blocking """ if self.block: raise TypeError(NON_BLOCKING_ERROR_MESSAGE) try: self.process.kill() except ProcessLookupError as exc: self.logger.debug(exc)
[ "def", "kill", "(", "self", ")", ":", "if", "self", ".", "block", ":", "raise", "TypeError", "(", "NON_BLOCKING_ERROR_MESSAGE", ")", "try", ":", "self", ".", "process", ".", "kill", "(", ")", "except", "ProcessLookupError", "as", "exc", ":", "self", ".",...
Kill the current non blocking command Raises: TypeError: If command is blocking
[ "Kill", "the", "current", "non", "blocking", "command" ]
python
train
tkf/rash
rash/interactive_search.py
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/interactive_search.py#L31-L49
def strip_glob(string, split_str=' '): """ Strip glob portion in `string`. >>> strip_glob('*glob*like') 'glob like' >>> strip_glob('glob?') 'glo' >>> strip_glob('glob[seq]') 'glob' >>> strip_glob('glob[!seq]') 'glob' :type string: str :rtype: str """ string = _GLOB_PORTION_RE.sub(split_str, string) return string.strip()
[ "def", "strip_glob", "(", "string", ",", "split_str", "=", "' '", ")", ":", "string", "=", "_GLOB_PORTION_RE", ".", "sub", "(", "split_str", ",", "string", ")", "return", "string", ".", "strip", "(", ")" ]
Strip glob portion in `string`. >>> strip_glob('*glob*like') 'glob like' >>> strip_glob('glob?') 'glo' >>> strip_glob('glob[seq]') 'glob' >>> strip_glob('glob[!seq]') 'glob' :type string: str :rtype: str
[ "Strip", "glob", "portion", "in", "string", "." ]
python
train
pipermerriam/flex
flex/validation/request.py
https://github.com/pipermerriam/flex/blob/233f8149fb851a6255753bcec948cb6fefb2723b/flex/validation/request.py#L13-L65
def validate_request(request, schema): """ Request validation does the following steps. 1. validate that the path matches one of the defined paths in the schema. 2. validate that the request method conforms to a supported methods for the given path. 3. validate that the request parameters conform to the parameter definitions for the operation definition. """ with ErrorDict() as errors: # 1 try: api_path = validate_path_to_api_path( path=request.path, context=schema, **schema ) except ValidationError as err: errors['path'].add_error(err.detail) return # this causes an exception to be raised since errors is no longer falsy. path_definition = schema['paths'][api_path] or {} if not path_definition: # TODO: is it valid to not have a definition for a path? return # 2 try: operation_definition = validate_request_method_to_operation( request_method=request.method, path_definition=path_definition, ) except ValidationError as err: errors['method'].add_error(err.detail) return if operation_definition is None: # TODO: is this compliant with swagger, can path operations have a null # definition? return # 3 operation_validators = construct_operation_validators( api_path=api_path, path_definition=path_definition, operation_definition=operation_definition, context=schema, ) try: validate_operation(request, operation_validators, context=schema) except ValidationError as err: errors['method'].add_error(err.detail)
[ "def", "validate_request", "(", "request", ",", "schema", ")", ":", "with", "ErrorDict", "(", ")", "as", "errors", ":", "# 1", "try", ":", "api_path", "=", "validate_path_to_api_path", "(", "path", "=", "request", ".", "path", ",", "context", "=", "schema"...
Request validation does the following steps. 1. validate that the path matches one of the defined paths in the schema. 2. validate that the request method conforms to a supported methods for the given path. 3. validate that the request parameters conform to the parameter definitions for the operation definition.
[ "Request", "validation", "does", "the", "following", "steps", "." ]
python
train
senaite/senaite.core
bika/lims/workflow/__init__.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/workflow/__init__.py#L228-L239
def isTransitionAllowed(instance, transition_id): """Checks if the object can perform the transition passed in. :returns: True if transition can be performed :rtype: bool """ wf_tool = getToolByName(instance, "portal_workflow") for wf_id in wf_tool.getChainFor(instance): wf = wf_tool.getWorkflowById(wf_id) if wf and wf.isActionSupported(instance, transition_id): return True return False
[ "def", "isTransitionAllowed", "(", "instance", ",", "transition_id", ")", ":", "wf_tool", "=", "getToolByName", "(", "instance", ",", "\"portal_workflow\"", ")", "for", "wf_id", "in", "wf_tool", ".", "getChainFor", "(", "instance", ")", ":", "wf", "=", "wf_too...
Checks if the object can perform the transition passed in. :returns: True if transition can be performed :rtype: bool
[ "Checks", "if", "the", "object", "can", "perform", "the", "transition", "passed", "in", ".", ":", "returns", ":", "True", "if", "transition", "can", "be", "performed", ":", "rtype", ":", "bool" ]
python
train
fermiPy/fermipy
fermipy/version.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/version.py#L62-L74
def render_pep440(vcs): """Convert git release tag into a form that is PEP440 compliant.""" if vcs is None: return None tags = vcs.split('-') # Bare version number if len(tags) == 1: return tags[0] else: return tags[0] + '+' + '.'.join(tags[1:])
[ "def", "render_pep440", "(", "vcs", ")", ":", "if", "vcs", "is", "None", ":", "return", "None", "tags", "=", "vcs", ".", "split", "(", "'-'", ")", "# Bare version number", "if", "len", "(", "tags", ")", "==", "1", ":", "return", "tags", "[", "0", "...
Convert git release tag into a form that is PEP440 compliant.
[ "Convert", "git", "release", "tag", "into", "a", "form", "that", "is", "PEP440", "compliant", "." ]
python
train
shendo/websnort
websnort/web.py
https://github.com/shendo/websnort/blob/19495e8834a111e889ba28efad8cd90cf55eb661/websnort/web.py#L126-L138
def main(): """ Main entrypoint for command-line webserver. """ parser = argparse.ArgumentParser() parser.add_argument("-H", "--host", help="Web server Host address to bind to", default="0.0.0.0", action="store", required=False) parser.add_argument("-p", "--port", help="Web server Port to bind to", default=8080, action="store", required=False) args = parser.parse_args() logging.basicConfig() run(host=args.host, port=args.port, reloader=True, server=SERVER)
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "\"-H\"", ",", "\"--host\"", ",", "help", "=", "\"Web server Host address to bind to\"", ",", "default", "=", "\"0.0.0.0\"", ",", "act...
Main entrypoint for command-line webserver.
[ "Main", "entrypoint", "for", "command", "-", "line", "webserver", "." ]
python
train
KrishnaswamyLab/graphtools
graphtools/graphs.py
https://github.com/KrishnaswamyLab/graphtools/blob/44685352be7df2005d44722903092207967457f2/graphtools/graphs.py#L673-L700
def interpolate(self, transform, transitions=None, Y=None): """Interpolate new data onto a transformation of the graph data One of either transitions or Y should be provided Parameters ---------- transform : array-like, shape=[n_samples, n_transform_features] transitions : array-like, optional, shape=[n_samples_y, n_samples] Transition matrix from `Y` (not provided) to `self.data` Y: array-like, optional, shape=[n_samples_y, n_features] new data for which an affinity matrix is calculated to the existing data. `n_features` must match either the ambient or PCA dimensions Returns ------- Y_transform : array-like, [n_samples_y, n_features or n_pca] Transition matrix from `Y` to `self.data` """ if transitions is None and Y is None: # assume Y is self.data and use standard landmark transitions transitions = self.transitions return super().interpolate(transform, transitions=transitions, Y=Y)
[ "def", "interpolate", "(", "self", ",", "transform", ",", "transitions", "=", "None", ",", "Y", "=", "None", ")", ":", "if", "transitions", "is", "None", "and", "Y", "is", "None", ":", "# assume Y is self.data and use standard landmark transitions", "transitions",...
Interpolate new data onto a transformation of the graph data One of either transitions or Y should be provided Parameters ---------- transform : array-like, shape=[n_samples, n_transform_features] transitions : array-like, optional, shape=[n_samples_y, n_samples] Transition matrix from `Y` (not provided) to `self.data` Y: array-like, optional, shape=[n_samples_y, n_features] new data for which an affinity matrix is calculated to the existing data. `n_features` must match either the ambient or PCA dimensions Returns ------- Y_transform : array-like, [n_samples_y, n_features or n_pca] Transition matrix from `Y` to `self.data`
[ "Interpolate", "new", "data", "onto", "a", "transformation", "of", "the", "graph", "data" ]
python
train
AguaClara/aguaclara
aguaclara/design/sed_tank.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/design/sed_tank.py#L341-L358
def w_diffuser_inner(sed_inputs=sed_dict): """Return the inner width of each diffuser in the sedimentation tank. Parameters ---------- sed_inputs : dict A dictionary of all of the constant inputs needed for sedimentation tank calculations can be found in sed.yaml Returns ------- float Inner width of each diffuser in the sedimentation tank Examples -------- >>> from aide_design.play import* >>> """ return ut.ceil_nearest(w_diffuser_inner_min(sed_inputs).magnitude, (np.arange(1/16,1/4,1/16)*u.inch).magnitude)
[ "def", "w_diffuser_inner", "(", "sed_inputs", "=", "sed_dict", ")", ":", "return", "ut", ".", "ceil_nearest", "(", "w_diffuser_inner_min", "(", "sed_inputs", ")", ".", "magnitude", ",", "(", "np", ".", "arange", "(", "1", "/", "16", ",", "1", "/", "4", ...
Return the inner width of each diffuser in the sedimentation tank. Parameters ---------- sed_inputs : dict A dictionary of all of the constant inputs needed for sedimentation tank calculations can be found in sed.yaml Returns ------- float Inner width of each diffuser in the sedimentation tank Examples -------- >>> from aide_design.play import* >>>
[ "Return", "the", "inner", "width", "of", "each", "diffuser", "in", "the", "sedimentation", "tank", ".", "Parameters", "----------", "sed_inputs", ":", "dict", "A", "dictionary", "of", "all", "of", "the", "constant", "inputs", "needed", "for", "sedimentation", ...
python
train
recurly/recurly-client-python
recurly/resource.py
https://github.com/recurly/recurly-client-python/blob/682217c4e85ec5c8d4e41519ee0620d2dc4d84d7/recurly/resource.py#L687-L692
def delete(self): """Submits a deletion request for this `Resource` instance as a ``DELETE`` request to its URL.""" response = self.http_request(self._url, 'DELETE') if response.status != 204: self.raise_http_error(response)
[ "def", "delete", "(", "self", ")", ":", "response", "=", "self", ".", "http_request", "(", "self", ".", "_url", ",", "'DELETE'", ")", "if", "response", ".", "status", "!=", "204", ":", "self", ".", "raise_http_error", "(", "response", ")" ]
Submits a deletion request for this `Resource` instance as a ``DELETE`` request to its URL.
[ "Submits", "a", "deletion", "request", "for", "this", "Resource", "instance", "as", "a", "DELETE", "request", "to", "its", "URL", "." ]
python
train
tanghaibao/goatools
goatools/gosubdag/go_edges.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/go_edges.py#L69-L76
def _rm_gos_edges_rel(self, rm_goids, edges_rel): """Remove any relationship that contain user-specified edges.""" edges_ret = {} for rname, edges_cur in edges_rel.items(): edges_new = self._rm_gos_edges(rm_goids, edges_cur) if edges_new: edges_ret[rname] = edges_new return edges_ret
[ "def", "_rm_gos_edges_rel", "(", "self", ",", "rm_goids", ",", "edges_rel", ")", ":", "edges_ret", "=", "{", "}", "for", "rname", ",", "edges_cur", "in", "edges_rel", ".", "items", "(", ")", ":", "edges_new", "=", "self", ".", "_rm_gos_edges", "(", "rm_g...
Remove any relationship that contain user-specified edges.
[ "Remove", "any", "relationship", "that", "contain", "user", "-", "specified", "edges", "." ]
python
train
saltstack/salt
salt/modules/boto_iam.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_iam.py#L870-L891
def delete_virtual_mfa_device(serial, region=None, key=None, keyid=None, profile=None): ''' Deletes the specified virtual MFA device. CLI Example: .. code-block:: bash salt myminion boto_iam.delete_virtual_mfa_device serial_num ''' conn = __utils__['boto3.get_connection_func']('iam')() try: conn.delete_virtual_mfa_device(SerialNumber=serial) log.info('Deleted virtual MFA device %s.', serial) return True except botocore.exceptions.ClientError as e: log.debug(e) if 'NoSuchEntity' in six.text_type(e): log.info('Virtual MFA device %s not found.', serial) return True log.error('Failed to delete virtual MFA device %s.', serial) return False
[ "def", "delete_virtual_mfa_device", "(", "serial", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "__utils__", "[", "'boto3.get_connection_func'", "]", "(", "'iam'", ")", ...
Deletes the specified virtual MFA device. CLI Example: .. code-block:: bash salt myminion boto_iam.delete_virtual_mfa_device serial_num
[ "Deletes", "the", "specified", "virtual", "MFA", "device", "." ]
python
train
cirruscluster/cirruscluster
cirruscluster/ext/ansible/callbacks.py
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/callbacks.py#L81-L99
def compute(self, runner_results, setup=False, poll=False, ignore_errors=False): ''' walk through all results and increment stats ''' for (host, value) in runner_results.get('contacted', {}).iteritems(): if not ignore_errors and (('failed' in value and bool(value['failed'])) or ('rc' in value and value['rc'] != 0)): self._increment('failures', host) elif 'skipped' in value and bool(value['skipped']): self._increment('skipped', host) elif 'changed' in value and bool(value['changed']): if not setup and not poll: self._increment('changed', host) self._increment('ok', host) else: if not poll or ('finished' in value and bool(value['finished'])): self._increment('ok', host) for (host, value) in runner_results.get('dark', {}).iteritems(): self._increment('dark', host)
[ "def", "compute", "(", "self", ",", "runner_results", ",", "setup", "=", "False", ",", "poll", "=", "False", ",", "ignore_errors", "=", "False", ")", ":", "for", "(", "host", ",", "value", ")", "in", "runner_results", ".", "get", "(", "'contacted'", ",...
walk through all results and increment stats
[ "walk", "through", "all", "results", "and", "increment", "stats" ]
python
train
mlperf/training
reinforcement/tensorflow/minigo/oneoffs/eval_sgf_to_cbt.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/oneoffs/eval_sgf_to_cbt.py#L179-L233
def write_eval_records(bt_table, game_data, last_game): """Write all eval_records to eval_table In addition to writing new rows table_state must be updated in row `table_state` columns `metadata:eval_game_counter` Args: bt_table: bigtable table to add rows to. game_data: metadata pairs (column name, value) for each eval record. last_game: last_game in metadata:table_state """ eval_num = last_game # Each column counts as a mutation so max rows is ~10000 GAMES_PER_COMMIT = 2000 for games in grouper(tqdm(game_data), GAMES_PER_COMMIT): assert bt_table.read_row(EVAL_PREFIX.format(eval_num)), "Prev row doesn't exists" assert bt_table.read_row(EVAL_PREFIX.format(eval_num+1)) is None, "Row already exists" rows = [] for i, metadata in enumerate(games): eval_num += 1 row_name = EVAL_PREFIX.format(eval_num) row = bt_table.row(row_name) for column, value in metadata: row.set_cell(METADATA, column, value) rows.append(row) # For each batch of games print a couple of the rows being added. if i < 5 or i + 5 > len(games): print("\t", i, row_name, metadata[6][1]) if eval_num == last_game + len(games): test = input("Commit ('y'/'yes' required): ") if test.lower() not in ('y', 'yes'): break # TODO(derek): Figure out how to condition on atomic counter update. # Condition all updates on the current value of last_game game_num_update = bt_table.row(TABLE_STATE) game_num_update.set_cell(METADATA, EVAL_GAME_COUNTER, eval_num) print(TABLE_STATE, eval_num) response = bt_table.mutate_rows(rows) # validate that all rows written successfully any_bad = False for i, status in enumerate(response): if status.code is not 0: print("Row number {} failed to write {}".format(i, status)) any_bad = True if any_bad: break game_num_update.commit()
[ "def", "write_eval_records", "(", "bt_table", ",", "game_data", ",", "last_game", ")", ":", "eval_num", "=", "last_game", "# Each column counts as a mutation so max rows is ~10000", "GAMES_PER_COMMIT", "=", "2000", "for", "games", "in", "grouper", "(", "tqdm", "(", "g...
Write all eval_records to eval_table In addition to writing new rows table_state must be updated in row `table_state` columns `metadata:eval_game_counter` Args: bt_table: bigtable table to add rows to. game_data: metadata pairs (column name, value) for each eval record. last_game: last_game in metadata:table_state
[ "Write", "all", "eval_records", "to", "eval_table" ]
python
train
phaethon/kamene
kamene/contrib/gsm_um.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L1664-L1686
def connectNetToMs(Facility_presence=0, ProgressIndicator_presence=0, ConnectedNumber_presence=0, ConnectedSubaddress_presence=0, UserUser_presence=0): """CONNECT Section 9.3.5.1""" a = TpPd(pd=0x3) b = MessageType(mesType=0x7) # 00000111 packet = a / b if Facility_presence is 1: c = FacilityHdr(ieiF=0x1C, eightBitF=0x0) packet = packet / c if ProgressIndicator_presence is 1: d = ProgressIndicatorHdr(ieiPI=0x1E, eightBitPI=0x0) packet = packet / d if ConnectedNumber_presence is 1: e = ConnectedNumberHdr(ieiCN=0x4C, eightBitCN=0x0) packet = packet / e if ConnectedSubaddress_presence is 1: f = ConnectedSubaddressHdr(ieiCS=0x4D, eightBitCS=0x0) packet = packet / f if UserUser_presence is 1: g = UserUserHdr(ieiUU=0x7F, eightBitUU=0x0) packet = packet / g return packet
[ "def", "connectNetToMs", "(", "Facility_presence", "=", "0", ",", "ProgressIndicator_presence", "=", "0", ",", "ConnectedNumber_presence", "=", "0", ",", "ConnectedSubaddress_presence", "=", "0", ",", "UserUser_presence", "=", "0", ")", ":", "a", "=", "TpPd", "(...
CONNECT Section 9.3.5.1
[ "CONNECT", "Section", "9", ".", "3", ".", "5", ".", "1" ]
python
train
rdussurget/py-altimetry
altimetry/data/alti_data.py
https://github.com/rdussurget/py-altimetry/blob/57ce7f2d63c6bbc4993821af0bbe46929e3a2d98/altimetry/data/alti_data.py#L198-L441
def read_sla(self,filename,params=None,force=False,timerange=None,datatype=None,**kwargs): """ Read AVISO Along-Track products :return outStr: Output data structure containing all recorded parameters as specificied by NetCDF file PARAMETER list. :author: Renaud Dussurget """ from time import time from datetime import timedelta # a = time() self.message(2,'Reading AVISO DT data ({0})'.format(datatype)) #Open file self._filename = filename try: self._ncfile = ncfile(self._filename, "r") except Exception,e: self.warning(1, repr(e)) return {} #Get delimiter if os.path.basename(filename).count('.') > os.path.basename(filename).count('_'): delim='.' else : delim = '_' #Gat sat name splitted=os.path.basename(filename).split(delim) if len(splitted) > 3 : if (datatype == 'DT') | (datatype == 'NRT') : sat_name = splitted[2] if splitted[0] == 'nrt' else splitted[3] elif datatype == 'PISTACH' : sat_name = 'J2' else : sat_name = 'J2' else : sat_name="N/A" #Get list of recorded parameters: par_list=[i.encode() for i in self._ncfile.variables.keys()] for i in ['BeginDates','Longitudes','Latitudes'] : par_list.pop(par_list.index(i)) nparam=len(par_list) self.message(2,'Recorded parameters : '+str(nparam)+' -> '+str(par_list)) lon = self.load_ncVar('Longitudes',**kwargs) lon['data'] = recale(lon['data'], degrees=True, zero_2pi=True) #shift longitudes lat = self.load_ncVar('Latitudes',**kwargs) # lon = self.load_ncVar('Longitudes',Longitudes=(self.limit[1],self.limit[3]),**kwargs) # lon['data'] = recale(lon['data'], degrees=True, zero_2pi=True) #shift longitudes # lat = self.load_ncVar('Latitudes',Latitudes=(self.limit[0],self.limit[1]),**kwargs) #Extract within limits ind, flag = in_limits(lon['data'],lat['data'],limit=self.limit) dim_lon = lon['_dimensions'] lat = lat['data'].compress(flag) lon = lon['data'].compress(flag) # dist=cumulative_distance(lat, lon) sz=np.shape(lon) ndims=np.size(sz) #Get dates stDate = self.load_ncVar('BeginDates',**kwargs)['data'] dumVar = 
self.load_ncVar('Cycles',**kwargs) nbCyc = dumVar['data'] Ncycs = dumVar['_dimensions']['Cycles'] Ntra = dumVar['_dimensions']['Tracks'] nbTra = self.load_ncVar('Tracks',**kwargs)['data'] # if np.size(stDate) == 1 : stDate. DeltaT = self._ncfile.variables['DeltaT'][:] / 86400. #* self._ncfile.variables['DeltaT'].scale_factor npts = self.load_ncVar('NbPoints',**kwargs)['data'] dumind=np.cumsum(npts) #Loop 1 # date=np.ma.array([],mask=[]) # cycles=np.ma.array([],mask=[]) # tracks=np.ma.array([],mask=[]) # for i in xrange(Ncycs) : # np.ma.concatenate((nbTra,nbTra)) # # for i,nc in enumerate(nbCyc.data.flatten()): # N=npts[i] # curInd=np.array(list(set(xrange(dumind[i]-N,dumind[i]) if N > 0 else []).intersection(ind))) # ncur=len(curInd) # date=np.ma.concatenate((date,(curInd - dumind[0])*DeltaT+stDate.flatten()[i])) # cycles=np.ma.concatenate((cycles,np.ma.array((nbCyc.data.flatten()[i],)*ncur)))#,mask=np.repeat(nbCyc.mask[i][j],ncur)))) # tracks=np.ma.concatenate((tracks,np.ma.array((nbTra.data.flatten()[i],)*ncur))) #Loop 2 date = () cycles = () tracks = () # rowind = (0,)*Ntra # nind=0 # for i in xrange(Ncycs): nind+=(npts*(~nbCyc.mask.T[i])).sum() indcopy=ind.copy() # npts_copy=npts.copy() npts[npts.mask]=0 dumind[dumind.mask]=0 nbTra_copy=nbTra.copy() toto=npts.copy() concat_npts = not( nbCyc.shape[-1] > 1) #loop over cycles for i in np.arange(1,Ncycs,1.0,dtype=int) : nbTra=np.ma.concatenate((nbTra,nbTra_copy)) if concat_npts : npts=np.ma.concatenate((npts,tuple((~nbCyc.T[i].mask)*1*npts))) # rowind+=(i,)*Ntra if concat_npts: npts=npts.reshape(nbCyc.shape[::-1]).T else : npts=nbCyc nbTra=nbTra.reshape(nbCyc.shape[::-1]).T # rowind=np.reshape(rowind,nbCyc.shape[::-1]).T # npts.mask=nbCyc.mask nbTra.mask=nbCyc.mask npts=npts.flatten() nbTra=nbTra.flatten() # rowind=rowind.flatten() nbCyc_flatten=nbCyc.flatten() nbTra_flatten=nbTra.flatten() stDate_flatten=stDate.flatten() # nind=0 outInd=[] for i,nc in enumerate(nbCyc.data.flatten()): N=npts[i] Nprev=npts[i-Ncycs] 
if i >= (Ncycs) and np.remainder(float(i),Ncycs) == 0 else 0 indcopy-=Nprev #if rowind[i] == 0 else 0 curInd=tuple(sorted(set(xrange(N) if N > 0 else []).intersection(indcopy))) ncur=len(curInd) # nind+=ncur outInd+=map(operator.sub, curInd,(( (curInd[0] if len(curInd) > 0 else 0) - (outInd[-1] +1 if len(outInd) > 0 else 0) - len(ind)*(np.remainder(float(i),Ncycs)),)*ncur)) curInd=tuple(map(operator.mul, curInd, (DeltaT,)*ncur)) date+=tuple(map(operator.add, curInd, (stDate_flatten[i],)*ncur)) cycles+=(nbCyc_flatten[i],)*ncur tracks+=(nbTra_flatten[i],)*ncur date=np.ma.masked_array(date,mask=False) cycles=np.ma.masked_array(cycles,mask=False) tracks=np.ma.masked_array(tracks,mask=False) #Loop 3 # date=np.ma.array([],mask=[]) # cycles=np.ma.array([],mask=[]) # tracks=np.ma.array([],mask=[]) # for j in xrange(Ncycs) : # for i,N in enumerate(npts.data) : ## curFg=(ind >= dumind[i]-N) & (ind <= dumind[i]) # curInd=np.array(list(set(xrange(dumind[i]-N,dumind[i]) if N > 0 else []).intersection(ind))) # ncur=len(curInd) # date=np.ma.concatenate((date,(curInd - dumind[0])*DeltaT+stDate[i][j])) # cycles=np.ma.concatenate((cycles,np.ma.array((nbCyc.data[i][j],)*ncur)))#,mask=np.repeat(nbCyc.mask[i][j],ncur)))) # tracks=np.ma.concatenate((tracks,np.ma.array((nbTra.data[i],)*ncur)))#,mask=np.repeat(nbCyc.mask[i][j],ncur)))) outInd=np.array(outInd,dtype=int) #Check output index # print outInd.shape[0],npts.cumsum().max() nt=len(date) date.mask=(False,)*nt cycles.mask=date.mask tracks.mask=date.mask # date=date.reshape((Ncycs,)+(npts.sum(),)).T # mask=date.mask # date=date.compressed() # cycles=cycles.reshape((Ncycs,)+(npts.sum(),)).T.compressed() # tracks=tracks.reshape((Ncycs,)+(npts.sum(),)).T.compressed() # lon=np.repeat(lon,Ncycs) # lat=np.repeat(lat,Ncycs) # mask=~lon.mask dimStr = dim_lon dimStr.pop('Data') nrec=len(date) dimStr.update({'time':nrec}) for i in ['DeltaT','NbPoints','Cycles','Tracks','DataIndexes'] : par_list.pop(par_list.index(i)) 
outStr={'_dimensions':dimStr, 'lon':lon, 'lat':lat, 'date':date, 'cycle':cycles, 'track':tracks} for param in par_list : a = time() dumVar = self.load_ncVar(param,Data=ind,**kwargs) #Load variables runtime = time() - a # print 'runtime:', timedelta(seconds=runtime) dimStr=dumVar['_dimensions'] dimStr.pop('Cycles') dimStr.pop('Data') dimStr['time']=nrec dimStr['_ndims']=len(dimStr.keys())-1 #update dimensions curDim = [str(dimname) for dimname in dimStr.keys()[1:]] #[str(dimname) for dimname in self._ncfile.variables['LONGITUDE'].dimensions] curDimval = [dimStr[dim] for dim in curDim] #[len(self._ncfile.dimensions[dimname]) for dimname in curDim] flag = [(np.array(dimname) == outStr['_dimensions'].keys()).sum() == 0 for dimname in curDim] #find dimensions to update dimUpdate = np.array(curDim).compress(flag) for enum in enumerate(dimUpdate) : self.message(3, 'Appending dimensions {0}:{1} to dataStructure'.format(enum[1],np.array(curDimval).compress(flag)[enum[0]])) outStr['_dimensions'].update({enum[1]:np.array(curDimval).compress(flag)[enum[0]]}) #Append new dimension outStr['_dimensions']['_ndims']+=1 #update dimension counts # dumStr = {param.lower() : dumVar['data']} dumStr = {param.lower() : dumVar['data'].flatten()[outInd]} # cmd = 'dumStr = {\''+param.lower()+'\':dumVar[\'data\']}' # self.message(4, 'exec : '+cmd) # exec(cmd) outStr.update(dumStr) id=np.repeat(sat_name,outStr['_dimensions']['time']) outStr.update({'id':id}) self._ncfile.close() #Checkit [len(outStr[k]) for k in outStr.keys()] return outStr
[ "def", "read_sla", "(", "self", ",", "filename", ",", "params", "=", "None", ",", "force", "=", "False", ",", "timerange", "=", "None", ",", "datatype", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "time", "import", "time", "from", "dateti...
Read AVISO Along-Track products :return outStr: Output data structure containing all recorded parameters as specificied by NetCDF file PARAMETER list. :author: Renaud Dussurget
[ "Read", "AVISO", "Along", "-", "Track", "products", ":", "return", "outStr", ":", "Output", "data", "structure", "containing", "all", "recorded", "parameters", "as", "specificied", "by", "NetCDF", "file", "PARAMETER", "list", ".", ":", "author", ":", "Renaud",...
python
train
tuxpiper/cloudcast
cloudcast/_utils.py
https://github.com/tuxpiper/cloudcast/blob/06ca62045c483e9c3e7ee960ba70d90ea6a13776/cloudcast/_utils.py#L3-L12
def caller_folder(): """ Returns the folder where the code of the caller's caller lives """ import inspect caller_file = inspect.stack()[2][1] if os.path.exists(caller_file): return os.path.abspath(os.path.dirname(caller_file)) else: return os.path.abspath(os.getcwd())
[ "def", "caller_folder", "(", ")", ":", "import", "inspect", "caller_file", "=", "inspect", ".", "stack", "(", ")", "[", "2", "]", "[", "1", "]", "if", "os", ".", "path", ".", "exists", "(", "caller_file", ")", ":", "return", "os", ".", "path", ".",...
Returns the folder where the code of the caller's caller lives
[ "Returns", "the", "folder", "where", "the", "code", "of", "the", "caller", "s", "caller", "lives" ]
python
train
fhcrc/taxtastic
taxtastic/refpkg.py
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/refpkg.py#L247-L254
def resource_name(self, resource): """ Return the name of the file within the reference package for a particular named resource. """ if not(resource in self.contents['files']): raise ValueError("No such resource %r in refpkg" % (resource,)) return self.contents['files'][resource]
[ "def", "resource_name", "(", "self", ",", "resource", ")", ":", "if", "not", "(", "resource", "in", "self", ".", "contents", "[", "'files'", "]", ")", ":", "raise", "ValueError", "(", "\"No such resource %r in refpkg\"", "%", "(", "resource", ",", ")", ")"...
Return the name of the file within the reference package for a particular named resource.
[ "Return", "the", "name", "of", "the", "file", "within", "the", "reference", "package", "for", "a", "particular", "named", "resource", "." ]
python
train
buildbot/buildbot
master/buildbot/db/pool.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/db/pool.py#L42-L87
def timed_do_fn(f): """Decorate a do function to log before, after, and elapsed time, with the name of the calling function. This is not speedy!""" def wrap(callable, *args, **kwargs): global _debug_id # get a description of the function that called us st = traceback.extract_stack(limit=2) file, line, name, _ = st[0] # and its locals frame = inspect.currentframe() locals = frame.f_locals # invent a unique ID for the description id, _debug_id = _debug_id, _debug_id + 1 descr = "%s-%08x" % (name, id) start_time = time.time() log.msg("%s - before ('%s' line %d)" % (descr, file, line)) for name in locals: if name in ('self', 'thd'): continue log.msg("%s - %s = %r" % (descr, name, locals[name])) # wrap the callable to log the begin and end of the actual thread # function def callable_wrap(*args, **kargs): log.msg("%s - thd start" % (descr,)) try: return callable(*args, **kwargs) finally: log.msg("%s - thd end" % (descr,)) d = f(callable_wrap, *args, **kwargs) @d.addBoth def after(x): end_time = time.time() elapsed = (end_time - start_time) * 1000 log.msg("%s - after (%0.2f ms elapsed)" % (descr, elapsed)) return x return d wrap.__name__ = f.__name__ wrap.__doc__ = f.__doc__ return wrap
[ "def", "timed_do_fn", "(", "f", ")", ":", "def", "wrap", "(", "callable", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "global", "_debug_id", "# get a description of the function that called us", "st", "=", "traceback", ".", "extract_stack", "(", "limi...
Decorate a do function to log before, after, and elapsed time, with the name of the calling function. This is not speedy!
[ "Decorate", "a", "do", "function", "to", "log", "before", "after", "and", "elapsed", "time", "with", "the", "name", "of", "the", "calling", "function", ".", "This", "is", "not", "speedy!" ]
python
train
kyuupichan/aiorpcX
aiorpcx/curio.py
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/curio.py#L143-L149
async def spawn(self, coro, *args): '''Create a new task that’s part of the group. Returns a Task instance. ''' task = await spawn(coro, *args, report_crash=False) self._add_task(task) return task
[ "async", "def", "spawn", "(", "self", ",", "coro", ",", "*", "args", ")", ":", "task", "=", "await", "spawn", "(", "coro", ",", "*", "args", ",", "report_crash", "=", "False", ")", "self", ".", "_add_task", "(", "task", ")", "return", "task" ]
Create a new task that’s part of the group. Returns a Task instance.
[ "Create", "a", "new", "task", "that’s", "part", "of", "the", "group", ".", "Returns", "a", "Task", "instance", "." ]
python
train
inasafe/inasafe
safe/report/expressions/infographic.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/report/expressions/infographic.py#L228-L232
def age_gender_section_header_element(feature, parent): """Retrieve age gender section header string from definitions.""" _ = feature, parent # NOQA header = age_gender_section_header['string_format'] return header.capitalize()
[ "def", "age_gender_section_header_element", "(", "feature", ",", "parent", ")", ":", "_", "=", "feature", ",", "parent", "# NOQA", "header", "=", "age_gender_section_header", "[", "'string_format'", "]", "return", "header", ".", "capitalize", "(", ")" ]
Retrieve age gender section header string from definitions.
[ "Retrieve", "age", "gender", "section", "header", "string", "from", "definitions", "." ]
python
train
SheffieldML/GPy
GPy/util/linalg.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/linalg.py#L356-L368
def symmetrify(A, upper=False): """ Take the square matrix A and make it symmetrical by copting elements from the lower half to the upper works IN PLACE. note: tries to use cython, falls back to a slower numpy version """ if use_linalg_cython: _symmetrify_cython(A, upper) else: _symmetrify_numpy(A, upper)
[ "def", "symmetrify", "(", "A", ",", "upper", "=", "False", ")", ":", "if", "use_linalg_cython", ":", "_symmetrify_cython", "(", "A", ",", "upper", ")", "else", ":", "_symmetrify_numpy", "(", "A", ",", "upper", ")" ]
Take the square matrix A and make it symmetrical by copting elements from the lower half to the upper works IN PLACE. note: tries to use cython, falls back to a slower numpy version
[ "Take", "the", "square", "matrix", "A", "and", "make", "it", "symmetrical", "by", "copting", "elements", "from", "the", "lower", "half", "to", "the", "upper" ]
python
train
kiwiz/gkeepapi
gkeepapi/__init__.py
https://github.com/kiwiz/gkeepapi/blob/78aaae8b988b1cf616e3973f7f15d4c6d5e996cc/gkeepapi/__init__.py#L344-L357
def get(self, blob): """Get the canonical link to a media blob. Args: blob (gkeepapi.node.Blob): The blob. Returns: str: A link to the media. """ return self._send( url=self._base_url + blob.parent.server_id + '/' + blob.server_id + '?s=0', method='GET', allow_redirects=False ).headers.get('Location')
[ "def", "get", "(", "self", ",", "blob", ")", ":", "return", "self", ".", "_send", "(", "url", "=", "self", ".", "_base_url", "+", "blob", ".", "parent", ".", "server_id", "+", "'/'", "+", "blob", ".", "server_id", "+", "'?s=0'", ",", "method", "=",...
Get the canonical link to a media blob. Args: blob (gkeepapi.node.Blob): The blob. Returns: str: A link to the media.
[ "Get", "the", "canonical", "link", "to", "a", "media", "blob", "." ]
python
train
cdriehuys/django-rest-email-auth
rest_email_auth/serializers.py
https://github.com/cdriehuys/django-rest-email-auth/blob/7e752c4d77ae02d2d046f214f56e743aa12ab23f/rest_email_auth/serializers.py#L89-L115
def validate_email(self, email): """ Validate the provided email address. The email address is first modified to match the RFC spec. Namely, the domain portion of the email is lowercased. Returns: The validated email address. Raises: serializers.ValidationError: If the serializer is bound and the provided email doesn't match the existing address. """ user, domain = email.rsplit("@", 1) email = "@".join([user, domain.lower()]) if self.instance and email and self.instance.email != email: raise serializers.ValidationError( _( "Existing emails may not be edited. Create a new one " "instead." ) ) return email
[ "def", "validate_email", "(", "self", ",", "email", ")", ":", "user", ",", "domain", "=", "email", ".", "rsplit", "(", "\"@\"", ",", "1", ")", "email", "=", "\"@\"", ".", "join", "(", "[", "user", ",", "domain", ".", "lower", "(", ")", "]", ")", ...
Validate the provided email address. The email address is first modified to match the RFC spec. Namely, the domain portion of the email is lowercased. Returns: The validated email address. Raises: serializers.ValidationError: If the serializer is bound and the provided email doesn't match the existing address.
[ "Validate", "the", "provided", "email", "address", "." ]
python
valid
iskandr/serializable
serializable/primitive_types.py
https://github.com/iskandr/serializable/blob/6807dfd582567b3bda609910806b7429d8d53b44/serializable/primitive_types.py#L26-L36
def return_primitive(fn): """ Decorator which wraps a single argument function to ignore any arguments of primitive type (simply returning them unmodified). """ @wraps(fn) def wrapped_fn(x): if isinstance(x, PRIMITIVE_TYPES): return x return fn(x) return wrapped_fn
[ "def", "return_primitive", "(", "fn", ")", ":", "@", "wraps", "(", "fn", ")", "def", "wrapped_fn", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "PRIMITIVE_TYPES", ")", ":", "return", "x", "return", "fn", "(", "x", ")", "return", "wrapped_fn...
Decorator which wraps a single argument function to ignore any arguments of primitive type (simply returning them unmodified).
[ "Decorator", "which", "wraps", "a", "single", "argument", "function", "to", "ignore", "any", "arguments", "of", "primitive", "type", "(", "simply", "returning", "them", "unmodified", ")", "." ]
python
train
angr/angr
angr/analyses/cfg/cfg_emulated.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_emulated.py#L1635-L1821
def _handle_successor(self, job, successor, successors): """ Returns a new CFGJob instance for further analysis, or None if there is no immediate state to perform the analysis on. :param CFGJob job: The current job. """ state = successor all_successor_states = successors addr = job.addr # The PathWrapper instance to return pw = None job.successor_status[state] = "" new_state = state.copy() suc_jumpkind = state.history.jumpkind suc_exit_stmt_idx = state.scratch.exit_stmt_idx suc_exit_ins_addr = state.scratch.exit_ins_addr if suc_jumpkind in {'Ijk_EmWarn', 'Ijk_NoDecode', 'Ijk_MapFail', 'Ijk_NoRedir', 'Ijk_SigTRAP', 'Ijk_SigSEGV', 'Ijk_ClientReq'}: # Ignore SimExits that are of these jumpkinds job.successor_status[state] = "Skipped" return [ ] call_target = job.extra_info['call_target'] if suc_jumpkind == "Ijk_FakeRet" and call_target is not None: # if the call points to a SimProcedure that doesn't return, we don't follow the fakeret anymore if self.project.is_hooked(call_target): sim_proc = self.project._sim_procedures[call_target] if sim_proc.NO_RET: return [ ] # Get target address try: target_addr = state.solver.eval_one(state.ip) except (SimValueError, SimSolverModeError): # It cannot be concretized currently. Maybe we can handle it later, maybe it just cannot be concretized target_addr = None if suc_jumpkind == "Ijk_Ret": target_addr = job.call_stack.current_return_target if target_addr is not None: new_state.ip = new_state.solver.BVV(target_addr, new_state.arch.bits) if target_addr is None: # Unlucky... return [ ] if state.thumb: # Make sure addresses are always odd. It is important to encode this information in the address for the # time being. target_addr |= 1 # see if the target successor is in our whitelist if self._address_whitelist is not None: if target_addr not in self._address_whitelist: l.debug("Successor %#x is not in the address whitelist. 
Skip.", target_addr) return [ ] # see if this edge is in the base graph if self._base_graph is not None: # TODO: make it more efficient. the current implementation is half-assed and extremely slow for src_, dst_ in self._base_graph.edges(): if src_.addr == addr and dst_.addr == target_addr: break else: # not found l.debug("Edge (%#x -> %#x) is not found in the base graph. Skip.", addr, target_addr) return [ ] # Fix target_addr for syscalls if suc_jumpkind.startswith("Ijk_Sys"): syscall_proc = self.project.simos.syscall(new_state) if syscall_proc is not None: target_addr = syscall_proc.addr self._pre_handle_successor_state(job.extra_info, suc_jumpkind, target_addr) if suc_jumpkind == "Ijk_FakeRet": if target_addr == job.extra_info['last_call_exit_target']: l.debug("... skipping a fake return exit that has the same target with its call exit.") job.successor_status[state] = "Skipped" return [ ] if job.extra_info['skip_fakeret']: l.debug('... skipping a fake return exit since the function it\'s calling doesn\'t return') job.successor_status[state] = "Skipped - non-returning function 0x%x" % job.extra_info['call_target'] return [ ] # TODO: Make it optional if (suc_jumpkind == 'Ijk_Ret' and self._call_depth is not None and len(job.call_stack) <= 1 ): # We cannot continue anymore since this is the end of the function where we started tracing l.debug('... reaching the end of the starting function, skip.') job.successor_status[state] = "Skipped - reaching the end of the starting function" return [ ] # Create the new call stack of target block new_call_stack = self._create_new_call_stack(addr, all_successor_states, job, target_addr, suc_jumpkind) # Create the callstack suffix new_call_stack_suffix = new_call_stack.stack_suffix(self._context_sensitivity_level) # Tuple that will be used to index this exit new_tpl = self._generate_block_id(new_call_stack_suffix, target_addr, suc_jumpkind.startswith('Ijk_Sys')) # We might have changed the mode for this basic block # before. 
Make sure it is still running in 'fastpath' mode self._reset_state_mode(new_state, 'fastpath') pw = CFGJob(target_addr, new_state, self._context_sensitivity_level, src_block_id=job.block_id, src_exit_stmt_idx=suc_exit_stmt_idx, src_ins_addr=suc_exit_ins_addr, call_stack=new_call_stack, jumpkind=suc_jumpkind, ) # Special case: If the binary has symbols and the target address is a function, but for some reason (e.g., # a tail-call optimization) the CallStack's function address is still the old function address, we will have to # overwrite it here. if not self._is_call_jumpkind(pw.jumpkind): target_symbol = self.project.loader.find_symbol(target_addr) if target_symbol and target_symbol.is_function: # Force update the function address pw.func_addr = target_addr # Generate new exits if suc_jumpkind == "Ijk_Ret": # This is the real return exit job.successor_status[state] = "Appended" elif suc_jumpkind == "Ijk_FakeRet": # This is the default "fake" retn that generated at each # call. Save them first, but don't process them right # away # st = self.project._simos.prepare_call_state(new_state, initial_state=saved_state) st = new_state self._reset_state_mode(st, 'fastpath') pw = None # clear the job pe = PendingJob(job.func_addr, job.extra_info['call_target'], st, job.block_id, suc_exit_stmt_idx, suc_exit_ins_addr, new_call_stack ) self._pending_jobs[new_tpl] = pe self._register_analysis_job(pe.caller_func_addr, pe) job.successor_status[state] = "Pended" elif self._traced_addrs[new_call_stack_suffix][target_addr] >= 1 and suc_jumpkind == "Ijk_Ret": # This is a corner case for the f****** ARM instruction # like # BLEQ <address> # If we have analyzed the boring exit before returning from that called address, we will lose the link # between the last block of the function being called and the basic block it returns to. We cannot # reanalyze the basic block as we are not flow-sensitive, but we can still record the connection and make # for it afterwards. 
pass else: job.successor_status[state] = "Appended" if job.extra_info['is_call_jump'] and job.extra_info['call_target'] in self._non_returning_functions: job.extra_info['skip_fakeret'] = True if not pw: return [ ] if self._base_graph is not None: # remove all existing jobs that has the same block ID if next((en for en in self.jobs if en.block_id == pw.block_id), None): # TODO: this is very hackish. Reimplement this logic later self._job_info_queue = [entry for entry in self._job_info_queue if entry.job.block_id != pw.block_id] # register the job self._register_analysis_job(pw.func_addr, pw) return [ pw ]
[ "def", "_handle_successor", "(", "self", ",", "job", ",", "successor", ",", "successors", ")", ":", "state", "=", "successor", "all_successor_states", "=", "successors", "addr", "=", "job", ".", "addr", "# The PathWrapper instance to return", "pw", "=", "None", ...
Returns a new CFGJob instance for further analysis, or None if there is no immediate state to perform the analysis on. :param CFGJob job: The current job.
[ "Returns", "a", "new", "CFGJob", "instance", "for", "further", "analysis", "or", "None", "if", "there", "is", "no", "immediate", "state", "to", "perform", "the", "analysis", "on", "." ]
python
train