repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/Utility.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/Utility.py#L251-L260
def GetSOAPEnvUri(self, version):
    """Return the SOAP envelope namespace URI for a human-friendly
    SOAP version string (e.g. '1.1').

    :param version: dotted version string such as '1.1' or '1.2'
    :raises ValueError: if no matching ``NS_SOAP_ENV_*`` attribute exists
    """
    # '1.1' -> 'NS_SOAP_ENV_1_1'.  str.replace is equivalent to the old
    # string-module join(split(version, '.'), '_') idiom, which relied on
    # free functions removed in Python 3.
    attrname = 'NS_SOAP_ENV_%s' % version.replace('.', '_')
    value = getattr(self, attrname, None)
    if value is not None:
        return value
    raise ValueError(
        'Unsupported SOAP version: %s' % version
    )
[ "def", "GetSOAPEnvUri", "(", "self", ",", "version", ")", ":", "attrname", "=", "'NS_SOAP_ENV_%s'", "%", "join", "(", "split", "(", "version", ",", "'.'", ")", ",", "'_'", ")", "value", "=", "getattr", "(", "self", ",", "attrname", ",", "None", ")", ...
Return the appropriate SOAP envelope uri for a given human-friendly SOAP version string (e.g. '1.1').
[ "Return", "the", "appropriate", "SOAP", "envelope", "uri", "for", "a", "given", "human", "-", "friendly", "SOAP", "version", "string", "(", "e", ".", "g", ".", "1", ".", "1", ")", "." ]
python
train
crazy-canux/arguspy
arguspy/http_requests.py
https://github.com/crazy-canux/arguspy/blob/e9486b5df61978a990d56bf43de35f3a4cdefcc3/arguspy/http_requests.py#L61-L67
def close(self):
    """Close the http/https connection, reporting failure as UNKNOWN state."""
    try:
        # Both the close and the success log are guarded: any failure is
        # funneled into the plugin's unknown-state handler.
        self.response.close()
        self.logger.debug("close connect succeed.")
    except Exception as err:
        self.unknown("close connect error: %s" % err)
[ "def", "close", "(", "self", ")", ":", "try", ":", "self", ".", "response", ".", "close", "(", ")", "self", ".", "logger", ".", "debug", "(", "\"close connect succeed.\"", ")", "except", "Exception", "as", "e", ":", "self", ".", "unknown", "(", "\"clos...
Close the http/https connect.
[ "Close", "the", "http", "/", "https", "connect", "." ]
python
valid
logston/py3s3
py3s3/storage.py
https://github.com/logston/py3s3/blob/1910ca60c53a53d839d6f7b09c05b555f3bfccf4/py3s3/storage.py#L261-L274
def _get_content_type(self, file): """ Return content type of file. If file does not have a content type, make a guess. """ if file.mimetype: return file.mimetype # get file extension _, extension = os.path.splitext(file.name) extension = extension.strip('.') # Make an educated guess about what the Content-Type should be. return media_types[extension] if extension in media_types else 'binary/octet-stream'
[ "def", "_get_content_type", "(", "self", ",", "file", ")", ":", "if", "file", ".", "mimetype", ":", "return", "file", ".", "mimetype", "# get file extension", "_", ",", "extension", "=", "os", ".", "path", ".", "splitext", "(", "file", ".", "name", ")", ...
Return content type of file. If file does not have a content type, make a guess.
[ "Return", "content", "type", "of", "file", ".", "If", "file", "does", "not", "have", "a", "content", "type", "make", "a", "guess", "." ]
python
train
biocore/burrito
burrito/util.py
https://github.com/biocore/burrito/blob/3b1dcc560431cc2b7a4856b99aafe36d32082356/burrito/util.py#L399-L421
def _get_base_command(self): """ Returns the full command string input_arg: the argument to the command which represents the input to the program, this will be a string, either representing input or a filename to get input from tI""" command_parts = [] # Append a change directory to the beginning of the command to change # to self.WorkingDir before running the command # WorkingDir should be in quotes -- filenames might contain spaces cd_command = ''.join(['cd ', str(self.WorkingDir), ';']) if self._command is None: raise ApplicationError('_command has not been set.') command = self._command parameters = self.Parameters command_parts.append(cd_command) command_parts.append(command) command_parts.append(self._command_delimiter.join(filter( None, (map(str, parameters.values()))))) return self._command_delimiter.join(command_parts).strip()
[ "def", "_get_base_command", "(", "self", ")", ":", "command_parts", "=", "[", "]", "# Append a change directory to the beginning of the command to change", "# to self.WorkingDir before running the command", "# WorkingDir should be in quotes -- filenames might contain spaces", "cd_command",...
Returns the full command string input_arg: the argument to the command which represents the input to the program, this will be a string, either representing input or a filename to get input from tI
[ "Returns", "the", "full", "command", "string" ]
python
train
cthoyt/ols-client
src/ols_client/client.py
https://github.com/cthoyt/ols-client/blob/8c6bb54888675652d25324184967392d00d128fc/src/ols_client/client.py#L93-L107
def suggest(self, name, ontology=None):
    """Suggest terms, optionally restricted to a list of ontologies.

    :param str name: the query term
    :param list[str] ontology: optional ontology names to search within
    :rtype: dict

    .. seealso:: https://www.ebi.ac.uk/ols/docs/api#_suggest_term
    """
    params = {'q': name}
    if ontology:
        # The OLS API takes a comma-separated ontology list.
        params['ontology'] = ','.join(ontology)
    return requests.get(self.ontology_suggest, params=params).json()
[ "def", "suggest", "(", "self", ",", "name", ",", "ontology", "=", "None", ")", ":", "params", "=", "{", "'q'", ":", "name", "}", "if", "ontology", ":", "params", "[", "'ontology'", "]", "=", "','", ".", "join", "(", "ontology", ")", "response", "="...
Suggest terms from an optional list of ontologies :param str name: :param list[str] ontology: :rtype: dict .. seealso:: https://www.ebi.ac.uk/ols/docs/api#_suggest_term
[ "Suggest", "terms", "from", "an", "optional", "list", "of", "ontologies" ]
python
test
googleapis/oauth2client
oauth2client/_helpers.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/_helpers.py#L182-L202
def parse_unique_urlencoded(content):
    """Parses unique key-value parameters from urlencoded content.

    Args:
        content: string, URL-encoded key-value pairs.

    Returns:
        dict, The key-value pairs from ``content``.

    Raises:
        ValueError: if one of the keys is repeated.
    """
    urlencoded_params = urllib.parse.parse_qs(content)
    params = {}
    # parse_qs maps each key to a *list* of values; a key is "unique"
    # only when exactly one value was supplied for it.  Plain dict.items()
    # replaces the unnecessary six.iteritems indirection -- this code is
    # already Python-3-only (urllib.parse).
    for key, value in urlencoded_params.items():
        if len(value) != 1:
            msg = ('URL-encoded content contains a repeated value:'
                   '%s -> %s' % (key, ', '.join(value)))
            raise ValueError(msg)
        params[key] = value[0]
    return params
[ "def", "parse_unique_urlencoded", "(", "content", ")", ":", "urlencoded_params", "=", "urllib", ".", "parse", ".", "parse_qs", "(", "content", ")", "params", "=", "{", "}", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "urlencoded_params", ...
Parses unique key-value parameters from urlencoded content. Args: content: string, URL-encoded key-value pairs. Returns: dict, The key-value pairs from ``content``. Raises: ValueError: if one of the keys is repeated.
[ "Parses", "unique", "key", "-", "value", "parameters", "from", "urlencoded", "content", "." ]
python
valid
nschloe/matplotlib2tikz
matplotlib2tikz/line2d.py
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/line2d.py#L147-L182
def _mpl_marker2pgfp_marker(data, mpl_marker, marker_face_color):
    """Translate a matplotlib marker style into its PGFPlots counterpart.

    Returns a ``(data, marker, marker_options)`` triple; ``marker`` is
    ``None`` when the matplotlib marker has no PGFPlots equivalent.
    """
    # First consult the default marker table.
    if mpl_marker in _MP_MARKER2PGF_MARKER:
        marker = _MP_MARKER2PGF_MARKER[mpl_marker]
        if marker == "o" and marker_face_color is not None:
            # Filled circle needs the plotmarks library.
            marker = "*"
            data["tikz libs"].add("plotmarks")
        return (data, marker, None)
    # Fall back to the plotmarks table; the library is required either way.
    data["tikz libs"].add("plotmarks")
    if mpl_marker in _MP_MARKER2PLOTMARKS:
        marker, options = _MP_MARKER2PLOTMARKS[mpl_marker]
        face_is_visible = marker_face_color is not None and (
            not isinstance(marker_face_color, str)
            or marker_face_color.lower() != "none"
        )
        # Line-like markers have no filled variant to switch to.
        if face_is_visible and marker not in ("|", "-", "asterisk", "star"):
            marker += "*"
        return (data, marker, options)
    # There's no equivalent for the pixel marker (,) in Pgfplots.
    return data, None, None
[ "def", "_mpl_marker2pgfp_marker", "(", "data", ",", "mpl_marker", ",", "marker_face_color", ")", ":", "# try default list", "try", ":", "pgfplots_marker", "=", "_MP_MARKER2PGF_MARKER", "[", "mpl_marker", "]", "except", "KeyError", ":", "pass", "else", ":", "if", "...
Translates a marker style of matplotlib to the corresponding style in PGFPlots.
[ "Translates", "a", "marker", "style", "of", "matplotlib", "to", "the", "corresponding", "style", "in", "PGFPlots", "." ]
python
train
madsbk/lrcloud
lrcloud/__main__.py
https://github.com/madsbk/lrcloud/blob/8d99be3e1abdf941642e9a1c86b7d775dc373c0b/lrcloud/__main__.py#L271-L369
def cmd_normal(args):
    """Normal procedure:
    * Pull from cloud (if necessary)
    * Run Lightroom
    * Push to cloud
    """
    logging.info("cmd_normal")
    (lcat, ccat) = (args.local_catalog, args.cloud_catalog)
    # Meta-data files sit next to their catalogs with a .lrcloud suffix.
    (lmeta, cmeta) = ("%s.lrcloud"%lcat, "%s.lrcloud"%ccat)

    if not isfile(lcat):
        args.error("The local catalog does not exist: %s"%lcat)
    if not isfile(ccat):
        args.error("The cloud catalog does not exist: %s"%ccat)

    #Let's "lock" the local catalog
    logging.info("Locking local catalog: %s"%(lcat))
    if not lock_file(lcat):
        raise RuntimeError("The catalog %s is locked!"%lcat)

    #Backup the local catalog (overwriting old backup)
    logging.info("Removed old backup: %s.backup"%lcat)
    util.remove("%s.backup"%lcat)
    util.copy(lcat, "%s.backup"%lcat)

    lmfile = MetaFile(lmeta)
    # NOTE(review): cmfile is created but never used below -- presumably
    # constructing MetaFile has no needed side effect here; confirm.
    cmfile = MetaFile(cmeta)

    #Apply changesets: walk the cloud DAG from our last-push node to the
    #current leaf and replay each changeset onto the local catalog.
    cloudDAG = ChangesetDAG(ccat)
    path = cloudDAG.path(lmfile['last_push']['hash'], cloudDAG.leafs[0].hash)
    util.apply_changesets(args, path, lcat)

    #Let's copy Smart Previews (cloud -> local)
    if not args.no_smart_previews:
        copy_smart_previews(lcat, ccat, local2cloud=False)

    #Backup the local catalog (overwriting old backup)
    # NOTE(review): this repeats the backup done above -- the second copy
    # captures the catalog *after* changesets were applied, so the later
    # diff is taken against the pulled state.
    logging.info("Removed old backup: %s.backup"%lcat)
    util.remove("%s.backup"%lcat)
    util.copy(lcat, "%s.backup"%lcat)

    #Let's unlock the local catalog so that Lightroom can read it
    logging.info("Unlocking local catalog: %s"%(lcat))
    unlock_file(lcat)

    #Now we can start Lightroom
    if args.lightroom_exec_debug:
        # Debug mode: just append a marker line instead of launching the app.
        logging.info("Debug Lightroom appending '%s' to %s"%(args.lightroom_exec_debug, lcat))
        with open(lcat, "a") as f:
            f.write("%s\n"%args.lightroom_exec_debug)
    elif args.lightroom_exec:
        logging.info("Starting Lightroom: %s %s"%(args.lightroom_exec, lcat))
        subprocess.call([args.lightroom_exec, lcat])

    # Diff the pre-Lightroom backup against the edited catalog into a patch.
    tmpdir = tempfile.mkdtemp()
    tmp_patch = join(tmpdir, "tmp.patch")
    diff_cmd = args.diff_cmd.replace("$in1", "%s.backup"%lcat)\
                            .replace("$in2", lcat)\
                            .replace("$out", tmp_patch)
    logging.info("Diff: %s"%diff_cmd)
    subprocess.call(diff_cmd, shell=True)

    # The patch is stored in the cloud, named by its own content hash.
    patch = "%s_%s.zip"%(ccat, hashsum(tmp_patch))
    util.copy(tmp_patch, patch)

    # Write cloud meta-data
    mfile = MetaFile("%s.lrcloud"%patch)
    utcnow = datetime.utcnow().strftime(DATETIME_FORMAT)[:-4]
    mfile['changeset']['is_base'] = False
    mfile['changeset']['hash'] = hashsum(tmp_patch)
    mfile['changeset']['modification_utc'] = utcnow
    mfile['changeset']['filename'] = basename(patch)
    # Link the new changeset to the previous DAG leaf as its parent.
    mfile['parent']['is_base'] = cloudDAG.leafs[0].mfile['changeset']['is_base']
    mfile['parent']['hash'] = cloudDAG.leafs[0].mfile['changeset']['hash']
    mfile['parent']['modification_utc'] = cloudDAG.leafs[0].mfile['changeset']['modification_utc']
    mfile['parent']['filename'] = basename(cloudDAG.leafs[0].mfile['changeset']['filename'])
    mfile.flush()

    # Write local meta-data
    mfile = MetaFile(lmeta)
    mfile['catalog']['hash'] = hashsum(lcat)
    mfile['catalog']['modification_utc'] = utcnow
    mfile['last_push']['filename'] = patch
    mfile['last_push']['hash'] = hashsum(tmp_patch)
    mfile['last_push']['modification_utc'] = utcnow
    mfile.flush()

    shutil.rmtree(tmpdir, ignore_errors=True)

    #Let's copy Smart Previews (local -> cloud)
    if not args.no_smart_previews:
        copy_smart_previews(lcat, ccat, local2cloud=True)

    #Finally, let's unlock the catalog files
    logging.info("Unlocking local catalog: %s"%(lcat))
    unlock_file(lcat)
[ "def", "cmd_normal", "(", "args", ")", ":", "logging", ".", "info", "(", "\"cmd_normal\"", ")", "(", "lcat", ",", "ccat", ")", "=", "(", "args", ".", "local_catalog", ",", "args", ".", "cloud_catalog", ")", "(", "lmeta", ",", "cmeta", ")", "=", "(", ...
Normal procedure: * Pull from cloud (if necessary) * Run Lightroom * Push to cloud
[ "Normal", "procedure", ":", "*", "Pull", "from", "cloud", "(", "if", "necessary", ")", "*", "Run", "Lightroom", "*", "Push", "to", "cloud" ]
python
valid
ucsb-cs-education/hairball
hairball/plugins/blocks.py
https://github.com/ucsb-cs-education/hairball/blob/c6da8971f8a34e88ce401d36b51431715e1dff5b/hairball/plugins/blocks.py#L67-L71
def finalize(self):
    """Output the number of instances that contained dead code."""
    # Nothing worth reporting for zero or one analyzed instance.
    if self.total_instances <= 1:
        return
    print('{} of {} instances contained dead code.'
          .format(self.dead_code_instances, self.total_instances))
[ "def", "finalize", "(", "self", ")", ":", "if", "self", ".", "total_instances", ">", "1", ":", "print", "(", "'{} of {} instances contained dead code.'", ".", "format", "(", "self", ".", "dead_code_instances", ",", "self", ".", "total_instances", ")", ")" ]
Output the number of instances that contained dead code.
[ "Output", "the", "number", "of", "instances", "that", "contained", "dead", "code", "." ]
python
train
Becksteinlab/GromacsWrapper
gromacs/core.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/core.py#L301-L322
def transform_args(self, *args, **kwargs):
    """Transform arguments and return them as a list suitable for Popen.

    Keyword arguments become command-line options: single-letter keys get a
    POSIX ``-x val`` pair, longer keys a GNU ``--key=val``.  A ``True``
    value produces a bare flag; ``False`` is rejected as ambiguous.
    Positional args are appended unchanged.
    """
    options = []
    for option, value in kwargs.items():
        if not option.startswith('-'):
            # Heuristic for turning key=val pairs into options
            # (fails for commands such as 'find' -- then just use args).
            prefix = '-' if len(option) == 1 else '--'
            option = prefix + option
        if value is True:
            # Boolean flag: emit the bare option, no argument.
            options.append(option)
            continue
        if value is False:
            raise ValueError('A False value is ambiguous for option {0!r}'.format(option))
        if option.startswith('--'):
            options.append(option + '=' + str(value))   # GNU style
        else:
            options.extend((option, str(value)))        # POSIX style
    return options + list(args)
[ "def", "transform_args", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "options", "=", "[", "]", "for", "option", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "if", "not", "option", ".", "startswith", "(", "'-'", ...
Transform arguments and return them as a list suitable for Popen.
[ "Transform", "arguments", "and", "return", "them", "as", "a", "list", "suitable", "for", "Popen", "." ]
python
valid
Dentosal/python-sc2
sc2/client.py
https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/client.py#L197-L230
async def query_pathings(self, zipped_list: List[List[Union[Unit, Point2, Point3]]]) -> List[Union[float, int]]:
    """Query pathing distances for several (start, goal) pairs at once.

    Usage: await self.query_pathings([[unit1, target2], [unit2, target2]])
    -> returns [distance1, distance2]
    Caution: returns 0 when path not found
    Might merge this function with the function above
    """
    # Validate the shape of the input on its first element only.
    assert zipped_list, "No zipped_list"
    assert isinstance(zipped_list, list), f"{type(zipped_list)}"
    assert isinstance(zipped_list[0], list), f"{type(zipped_list[0])}"
    assert len(zipped_list[0]) == 2, f"{len(zipped_list[0])}"
    assert isinstance(zipped_list[0][0], (Point2, Unit)), f"{type(zipped_list[0][0])}"
    assert isinstance(zipped_list[0][1], Point2), f"{type(zipped_list[0][1])}"
    # Starts may be raw points or units; the request differs only in how the
    # start of each path is identified (coordinates vs unit tag).
    if isinstance(zipped_list[0][0], Point2):
        pathing_requests = [
            query_pb.RequestQueryPathing(
                start_pos=common_pb.Point2D(x=start.x, y=start.y),
                end_pos=common_pb.Point2D(x=goal.x, y=goal.y),
            )
            for start, goal in zipped_list
        ]
    else:
        pathing_requests = [
            query_pb.RequestQueryPathing(
                unit_tag=start.tag,
                end_pos=common_pb.Point2D(x=goal.x, y=goal.y),
            )
            for start, goal in zipped_list
        ]
    response = await self._execute(
        query=query_pb.RequestQuery(pathing=pathing_requests)
    )
    return [float(d.distance) for d in response.query.pathing]
[ "async", "def", "query_pathings", "(", "self", ",", "zipped_list", ":", "List", "[", "List", "[", "Union", "[", "Unit", ",", "Point2", ",", "Point3", "]", "]", "]", ")", "->", "List", "[", "Union", "[", "float", ",", "int", "]", "]", ":", "assert",...
Usage: await self.query_pathings([[unit1, target2], [unit2, target2]]) -> returns [distance1, distance2] Caution: returns 0 when path not found Might merge this function with the function above
[ "Usage", ":", "await", "self", ".", "query_pathings", "(", "[[", "unit1", "target2", "]", "[", "unit2", "target2", "]]", ")", "-", ">", "returns", "[", "distance1", "distance2", "]", "Caution", ":", "returns", "0", "when", "path", "not", "found", "Might"...
python
train
kronok/django-google-analytics-reporter
google_analytics_reporter/tracking.py
https://github.com/kronok/django-google-analytics-reporter/blob/cca5fb0920ec68cfe03069cedf53fb4c6440cc11/google_analytics_reporter/tracking.py#L53-L64
def get_payload(self, *args, **kwargs):
    """Combine keyword args, positional dict args, and the instance's
    required default parameters into a single payload dict.

    Merge order (later wins): caller kwargs, ``self.default_params``,
    each dict passed positionally, then ``self.type_params(...)`` if the
    instance defines it.

    Returns:
        dict: the combined payload.
    """
    # Work on a fresh dict.  The original code aliased self.default_params
    # when called with no kwargs and then mutated it via update(),
    # corrupting shared instance state across calls.
    payload = dict(kwargs)
    payload.update(self.default_params)
    for item in args:
        if isinstance(item, dict):
            payload.update(item)
    if hasattr(self, 'type_params'):
        # Pass the merged payload through, matching the original behavior
        # of calling type_params with the already-combined kwargs.
        payload.update(self.type_params(*args, **payload))
    return payload
[ "def", "get_payload", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "kwargs", ":", "kwargs", "=", "self", ".", "default_params", "else", ":", "kwargs", ".", "update", "(", "self", ".", "default_params", ")", "for", "i...
Receive all passed in args, kwargs, and combine them together with any required params
[ "Receive", "all", "passed", "in", "args", "kwargs", "and", "combine", "them", "together", "with", "any", "required", "params" ]
python
train
mrcagney/gtfstk
gtfstk/shapes.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/shapes.py#L213-L247
def geometrize_shapes(
    shapes: DataFrame, *, use_utm: bool = False
) -> DataFrame:
    """
    Given a GTFS shapes DataFrame, convert it to a GeoPandas GeoDataFrame
    and return the result.
    The result has a ``'geometry'`` column of WGS84 LineStrings
    instead of the columns ``'shape_pt_sequence'``, ``'shape_pt_lon'``,
    ``'shape_pt_lat'``, and ``'shape_dist_traveled'``.
    If ``use_utm``, then use local UTM coordinates for the geometries.

    Notes
    ------
    Requires GeoPandas.
    """
    import geopandas as gpd

    # Order the points within each shape so the LineString vertices come
    # out in travel order.
    f = shapes.copy().sort_values(["shape_id", "shape_pt_sequence"])

    def my_agg(group):
        # Collapse one shape's rows into a single LineString of
        # (lon, lat) vertices.
        d = {}
        d["geometry"] = sg.LineString(
            group[["shape_pt_lon", "shape_pt_lat"]].values
        )
        return pd.Series(d)

    g = f.groupby("shape_id").apply(my_agg).reset_index()
    g = gpd.GeoDataFrame(g, crs=cs.WGS84)

    if use_utm:
        # Pick the UTM zone from a single sample point.
        # NOTE(review): f.loc[0, ...] assumes label 0 survives sort_values
        # (i.e. shapes came in with a default RangeIndex) -- confirm.
        lat, lon = f.loc[0, ["shape_pt_lat", "shape_pt_lon"]].values
        crs = hp.get_utm_crs(lat, lon)
        g = g.to_crs(crs)

    return g
[ "def", "geometrize_shapes", "(", "shapes", ":", "DataFrame", ",", "*", ",", "use_utm", ":", "bool", "=", "False", ")", "->", "DataFrame", ":", "import", "geopandas", "as", "gpd", "f", "=", "shapes", ".", "copy", "(", ")", ".", "sort_values", "(", "[", ...
Given a GTFS shapes DataFrame, convert it to a GeoPandas GeoDataFrame and return the result. The result has a ``'geometry'`` column of WGS84 LineStrings instead of the columns ``'shape_pt_sequence'``, ``'shape_pt_lon'``, ``'shape_pt_lat'``, and ``'shape_dist_traveled'``. If ``use_utm``, then use local UTM coordinates for the geometries. Notes ------ Requires GeoPandas.
[ "Given", "a", "GTFS", "shapes", "DataFrame", "convert", "it", "to", "a", "GeoPandas", "GeoDataFrame", "and", "return", "the", "result", ".", "The", "result", "has", "a", "geometry", "column", "of", "WGS84", "LineStrings", "instead", "of", "the", "columns", "...
python
train
Jarn/jarn.viewdoc
jarn/viewdoc/viewdoc.py
https://github.com/Jarn/jarn.viewdoc/blob/59ae82fd1658889c41096c1d8c08dcb1047dc349/jarn/viewdoc/viewdoc.py#L463-L507
def parse_options(self, args, depth=0):
    """Parse command line options.

    ``depth`` guards the single level of re-parsing performed after the
    defaults are reset, written, or upgraded; returns the remaining
    positional arguments.
    """
    # Every known style name is also accepted as a long option (--<style>).
    style_names = tuple(self.defaults.known_styles)
    style_opts = tuple('--'+x for x in style_names)
    try:
        options, remaining_args = getopt.gnu_getopt(
            args, 'b:c:hls:v',
            ('help', 'style=', 'version', 'list-styles', 'browser=',
             'config-file=') + style_names)
    except getopt.GetoptError as e:
        err_exit('viewdoc: %s\n%s' % (e.msg, USAGE))

    for name, value in options:
        if name in ('-s', '--style'):
            self.styles = self.defaults.known_styles.get(value, '')
        elif name in style_opts:
            # --<style>: strip the leading dashes to get the style name.
            self.styles = self.defaults.known_styles.get(name[2:], '')
        elif name in ('-b', '--browser'):
            self.browser = value
        elif name in ('-l', '--list-styles'):
            self.list = True
        elif name in ('-h', '--help'):
            msg_exit(HELP)
        elif name in ('-v', '--version'):
            msg_exit(VERSION)
        elif name in ('-c', '--config-file') and depth == 0:
            # Load the alternate config, then re-parse once on top of it.
            self.reset_defaults(expanduser(value))
            return self.parse_options(args, depth+1)

    if len(remaining_args) > 1:
        err_exit('viewdoc: too many arguments\n%s' % USAGE)

    # Create or upgrade the config file on first parse, then re-parse once
    # so the new defaults take effect.
    if not isfile(self.defaults.filename) and depth == 0:
        self.write_defaults()
        return self.parse_options(args, depth+1)
    if self.defaults.version < CONFIG_VERSION and depth == 0:
        self.upgrade_defaults()
        return self.parse_options(args, depth+1)

    if self.list:
        self.list_styles()

    return remaining_args
[ "def", "parse_options", "(", "self", ",", "args", ",", "depth", "=", "0", ")", ":", "style_names", "=", "tuple", "(", "self", ".", "defaults", ".", "known_styles", ")", "style_opts", "=", "tuple", "(", "'--'", "+", "x", "for", "x", "in", "style_names",...
Parse command line options.
[ "Parse", "command", "line", "options", "." ]
python
train
knagra/farnsworth
base/redirects.py
https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/base/redirects.py#L23-L32
def red_home(request, message=None):
    '''Redirect a user who lacks access to a page back to the home page.

    Parameters:
        request - the request in the calling function
        message - a message from the caller function
    '''
    # Surface the optional explanation as a Django error-level flash message.
    if message:
        messages.add_message(request, messages.ERROR, message)
    return HttpResponseRedirect(reverse('homepage'))
[ "def", "red_home", "(", "request", ",", "message", "=", "None", ")", ":", "if", "message", ":", "messages", ".", "add_message", "(", "request", ",", "messages", ".", "ERROR", ",", "message", ")", "return", "HttpResponseRedirect", "(", "reverse", "(", "'hom...
Convenience function for redirecting users who don't have access to a page to the home page. Parameters: request - the request in the calling function message - a message from the caller function
[ "Convenience", "function", "for", "redirecting", "users", "who", "don", "t", "have", "access", "to", "a", "page", "to", "the", "home", "page", ".", "Parameters", ":", "request", "-", "the", "request", "in", "the", "calling", "function", "message", "-", "a"...
python
train
saltstack/salt
salt/utils/master.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/master.py#L625-L635
def secure(self):
    '''
    secure the sockets for root-only access
    '''
    log.debug('ConCache securing sockets')
    # chmod 0600 each socket that actually exists on disk.
    for sock_path in (self.cache_sock, self.update_sock, self.upd_t_sock):
        if os.path.exists(sock_path):
            os.chmod(sock_path, 0o600)
[ "def", "secure", "(", "self", ")", ":", "log", ".", "debug", "(", "'ConCache securing sockets'", ")", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "cache_sock", ")", ":", "os", ".", "chmod", "(", "self", ".", "cache_sock", ",", "0o600", ...
secure the sockets for root-only access
[ "secure", "the", "sockets", "for", "root", "-", "only", "access" ]
python
train
googledatalab/pydatalab
google/datalab/storage/_bucket.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_bucket.py#L215-L233
def contains(self, name):
    """Checks if the specified bucket exists.

    Args:
        name: the name of the bucket to lookup.
    Returns:
        True if the bucket exists; False otherwise.
    Raises:
        Exception if there was an error requesting information about the bucket.
    """
    try:
        self._api.buckets_get(name)
    except google.datalab.utils.RequestException as e:
        if e.status == 404:
            return False
        # Bare raise keeps the original traceback; the old `raise e` (and the
        # redundant `except Exception as e: raise e` clause) truncated it.
        raise
    return True
[ "def", "contains", "(", "self", ",", "name", ")", ":", "try", ":", "self", ".", "_api", ".", "buckets_get", "(", "name", ")", "except", "google", ".", "datalab", ".", "utils", ".", "RequestException", "as", "e", ":", "if", "e", ".", "status", "==", ...
Checks if the specified bucket exists. Args: name: the name of the bucket to lookup. Returns: True if the bucket exists; False otherwise. Raises: Exception if there was an error requesting information about the bucket.
[ "Checks", "if", "the", "specified", "bucket", "exists", "." ]
python
train
onicagroup/runway
runway/util.py
https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/util.py#L165-L179
def fix_windows_command_list(commands):
    # type: (List[str]) -> List[str]
    """Return command list with working Windows commands.

    npm on windows is npm.cmd, which will blow up
    subprocess.check_call(['npm', '...'])

    Similar issues arise when calling python apps like pipenv that will have
    a windows-only suffix applied to them
    """
    resolved = which(commands[0])
    # Only rewrite when the bare name resolves solely via a Windows suffix.
    if resolved and not which(commands[0], add_win_suffixes=False):
        commands[0] = os.path.basename(resolved)
    return commands
[ "def", "fix_windows_command_list", "(", "commands", ")", ":", "# type: (List[str]) -> List[str]", "fully_qualified_cmd_path", "=", "which", "(", "commands", "[", "0", "]", ")", "if", "fully_qualified_cmd_path", "and", "(", "not", "which", "(", "commands", "[", "0", ...
Return command list with working Windows commands. npm on windows is npm.cmd, which will blow up subprocess.check_call(['npm', '...']) Similar issues arise when calling python apps like pipenv that will have a windows-only suffix applied to them
[ "Return", "command", "list", "with", "working", "Windows", "commands", "." ]
python
train
gitpython-developers/GitPython
git/refs/log.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/refs/log.py#L239-L252
def to_file(self, filepath):
    """Write the contents of the reflog instance to a file at the given filepath.

    :param filepath: path to file, parent directories are assumed to exist"""
    lfd = LockedFD(filepath)
    assure_directory_exists(filepath, is_file=True)

    stream = lfd.open(write=True, stream=True)
    try:
        self._serialize(stream)
        lfd.commit()
    except Exception:
        # The lock would roll back automatically on failure, but being
        # explicit makes the cleanup obvious.
        lfd.rollback()
        raise
[ "def", "to_file", "(", "self", ",", "filepath", ")", ":", "lfd", "=", "LockedFD", "(", "filepath", ")", "assure_directory_exists", "(", "filepath", ",", "is_file", "=", "True", ")", "fp", "=", "lfd", ".", "open", "(", "write", "=", "True", ",", "stream...
Write the contents of the reflog instance to a file at the given filepath. :param filepath: path to file, parent directories are assumed to exist
[ "Write", "the", "contents", "of", "the", "reflog", "instance", "to", "a", "file", "at", "the", "given", "filepath", ".", ":", "param", "filepath", ":", "path", "to", "file", "parent", "directories", "are", "assumed", "to", "exist" ]
python
train
gwastro/pycbc
pycbc/io/record.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/record.py#L1822-L1825
def spin_sx(self):
    """Returns the x-component of the spin of the secondary mass."""
    # Delegates to the conversions helper -- presumably it selects the
    # spin component belonging to the secondary (smaller) mass; see
    # conversions.secondary_spin.
    return conversions.secondary_spin(
        self.mass1, self.mass2, self.spin1x, self.spin2x
    )
[ "def", "spin_sx", "(", "self", ")", ":", "return", "conversions", ".", "secondary_spin", "(", "self", ".", "mass1", ",", "self", ".", "mass2", ",", "self", ".", "spin1x", ",", "self", ".", "spin2x", ")" ]
Returns the x-component of the spin of the secondary mass.
[ "Returns", "the", "x", "-", "component", "of", "the", "spin", "of", "the", "secondary", "mass", "." ]
python
train
twoolie/NBT
nbt/region.py
https://github.com/twoolie/NBT/blob/b06dd6cc8117d2788da1d8416e642d58bad45762/nbt/region.py#L618-L707
def write_blockdata(self, x, z, data, compression=COMPRESSION_ZLIB):
    """
    Compress the data, write it to file, and add pointers in the header
    so it can be found as chunk(x,z).
    """
    # --- Compress the payload according to the requested scheme ---
    if compression == COMPRESSION_GZIP:
        # Python 3.1 and earlier do not yet support `data = gzip.compress(data)`.
        compressed_file = BytesIO()
        f = gzip.GzipFile(fileobj=compressed_file)
        f.write(data)
        f.close()
        compressed_file.seek(0)
        data = compressed_file.read()
        del compressed_file
    elif compression == COMPRESSION_ZLIB:
        data = zlib.compress(data)  # use zlib compression, rather than Gzip
    elif compression != COMPRESSION_NONE:
        raise ValueError("Unknown compression type %d" % compression)
    length = len(data)

    # 5 extra bytes are required for the chunk block header
    nsectors = self._bytes_to_sector(length + 5)

    # The sector count is stored in a single byte in the header.
    if nsectors >= 256:
        raise ChunkDataError("Chunk is too large (%d sectors exceeds 255 maximum)" % (nsectors))

    # Ensure file has a header (two sectors: offsets + timestamps)
    if self.size < 2*SECTOR_LENGTH:
        self._init_file()

    # search for a place where to write the chunk:
    current = self.metadata[x, z]
    free_sectors = self._locate_free_sectors(ignore_chunk=current)
    sector = self._find_free_location(free_sectors, nsectors, preferred=current.blockstart)

    # If file is smaller than sector*SECTOR_LENGTH (it was truncated), pad it with zeroes.
    if self.size < sector*SECTOR_LENGTH:
        # jump to end of file
        self.file.seek(0, SEEK_END)
        self.file.write((sector*SECTOR_LENGTH - self.size) * b"\x00")
        assert self.file.tell() == sector*SECTOR_LENGTH

    # write out chunk to region
    self.file.seek(sector*SECTOR_LENGTH)
    self.file.write(pack(">I", length + 1)) #length field
    self.file.write(pack(">B", compression)) #compression field
    self.file.write(data) #compressed data
    # Write zeros up to the end of the chunk
    remaining_length = SECTOR_LENGTH * nsectors - length - 5
    self.file.write(remaining_length * b"\x00")

    #seek to header record and write offset and length records
    # ([1:] drops the high byte of the packed offset: 3-byte offset + 1-byte count)
    self.file.seek(4 * (x + 32*z))
    self.file.write(pack(">IB", sector, nsectors)[1:])

    #write timestamp
    self.file.seek(SECTOR_LENGTH + 4 * (x + 32*z))
    timestamp = int(time.time())
    self.file.write(pack(">I", timestamp))

    # Update free_sectors with newly written block
    # This is required for calculating file truncation and zeroing freed blocks.
    free_sectors.extend((sector + nsectors - len(free_sectors)) * [True])
    for s in range(sector, sector + nsectors):
        free_sectors[s] = False

    # Check if file should be truncated:
    truncate_count = list(reversed(free_sectors)).index(False)
    if truncate_count > 0:
        self.size = SECTOR_LENGTH * (len(free_sectors) - truncate_count)
        self.file.truncate(self.size)
        free_sectors = free_sectors[:-truncate_count]

    # Calculate freed sectors (the chunk's previous location) and zero them
    for s in range(current.blockstart, min(current.blockstart + current.blocklength, len(free_sectors))):
        if free_sectors[s]:
            # zero sector s
            self.file.seek(SECTOR_LENGTH*s)
            self.file.write(SECTOR_LENGTH*b'\x00')

    # update file size and header information
    self.size = max((sector + nsectors)*SECTOR_LENGTH, self.size)
    assert self.get_size() == self.size
    current.blockstart = sector
    current.blocklength = nsectors
    current.status = STATUS_CHUNK_OK
    current.timestamp = timestamp
    current.length = length + 1
    # NOTE(review): this records COMPRESSION_ZLIB regardless of the
    # `compression` actually written to the file above -- looks
    # inconsistent with the pack(">B", compression) byte; confirm.
    current.compression = COMPRESSION_ZLIB
[ "def", "write_blockdata", "(", "self", ",", "x", ",", "z", ",", "data", ",", "compression", "=", "COMPRESSION_ZLIB", ")", ":", "if", "compression", "==", "COMPRESSION_GZIP", ":", "# Python 3.1 and earlier do not yet support `data = gzip.compress(data)`.", "compressed_file...
Compress the data, write it to file, and add pointers in the header so it can be found as chunk(x,z).
[ "Compress", "the", "data", "write", "it", "to", "file", "and", "add", "pointers", "in", "the", "header", "so", "it", "can", "be", "found", "as", "chunk", "(", "x", "z", ")", "." ]
python
train
rfarley3/Kibana
kibana/mapping.py
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/mapping.py#L181-L192
def get_index_mappings(self, index): """Converts all index's doc_types to .kibana""" fields_arr = [] for (key, val) in iteritems(index): # self.pr_dbg("\tdoc_type: %s" % key) doc_mapping = self.get_doc_type_mappings(index[key]) # self.pr_dbg("\tdoc_mapping: %s" % doc_mapping) if doc_mapping is None: return None # keep adding to the fields array fields_arr.extend(doc_mapping) return fields_arr
[ "def", "get_index_mappings", "(", "self", ",", "index", ")", ":", "fields_arr", "=", "[", "]", "for", "(", "key", ",", "val", ")", "in", "iteritems", "(", "index", ")", ":", "# self.pr_dbg(\"\\tdoc_type: %s\" % key)", "doc_mapping", "=", "self", ".", "get_do...
Converts all index's doc_types to .kibana
[ "Converts", "all", "index", "s", "doc_types", "to", ".", "kibana" ]
python
train
gccxml/pygccxml
pygccxml/declarations/pattern_parser.py
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/pattern_parser.py#L203-L210
def normalize(self, decl_string, arg_separator=None): """implementation details""" if not self.has_pattern(decl_string): return decl_string name, args = self.split(decl_string) for i, arg in enumerate(args): args[i] = self.normalize(arg) return self.join(name, args, arg_separator)
[ "def", "normalize", "(", "self", ",", "decl_string", ",", "arg_separator", "=", "None", ")", ":", "if", "not", "self", ".", "has_pattern", "(", "decl_string", ")", ":", "return", "decl_string", "name", ",", "args", "=", "self", ".", "split", "(", "decl_s...
implementation details
[ "implementation", "details" ]
python
train
Shizmob/pydle
pydle/client.py
https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/client.py#L365-L373
async def handle_forever(self): """ Handle data forever. """ while self.connected: data = await self.connection.recv() if not data: if self.connected: await self.disconnect(expected=False) break await self.on_data(data)
[ "async", "def", "handle_forever", "(", "self", ")", ":", "while", "self", ".", "connected", ":", "data", "=", "await", "self", ".", "connection", ".", "recv", "(", ")", "if", "not", "data", ":", "if", "self", ".", "connected", ":", "await", "self", "...
Handle data forever.
[ "Handle", "data", "forever", "." ]
python
train
wkentaro/pytorch-fcn
torchfcn/ext/fcn.berkeleyvision.org/siftflow_layers.py
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/siftflow_layers.py#L107-L122
def load_label(self, idx, label_type=None): """ Load label image as 1 x height x width integer array of label indices. The leading singleton dimension is required by the loss. """ if label_type == 'semantic': label = scipy.io.loadmat('{}/SemanticLabels/spatial_envelope_256x256_static_8outdoorcategories/{}.mat'.format(self.siftflow_dir, idx))['S'] elif label_type == 'geometric': label = scipy.io.loadmat('{}/GeoLabels/spatial_envelope_256x256_static_8outdoorcategories/{}.mat'.format(self.siftflow_dir, idx))['S'] label[label == -1] = 0 else: raise Exception("Unknown label type: {}. Pick semantic or geometric.".format(label_type)) label = label.astype(np.uint8) label -= 1 # rotate labels so classes start at 0, void is 255 label = label[np.newaxis, ...] return label.copy()
[ "def", "load_label", "(", "self", ",", "idx", ",", "label_type", "=", "None", ")", ":", "if", "label_type", "==", "'semantic'", ":", "label", "=", "scipy", ".", "io", ".", "loadmat", "(", "'{}/SemanticLabels/spatial_envelope_256x256_static_8outdoorcategories/{}.mat'...
Load label image as 1 x height x width integer array of label indices. The leading singleton dimension is required by the loss.
[ "Load", "label", "image", "as", "1", "x", "height", "x", "width", "integer", "array", "of", "label", "indices", ".", "The", "leading", "singleton", "dimension", "is", "required", "by", "the", "loss", "." ]
python
train
JarryShaw/PyPCAPKit
src/utilities/validations.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/utilities/validations.py#L66-L73
def number_check(*args, func=None): """Check if arguments are numbers.""" func = func or inspect.stack()[2][3] for var in args: if not isinstance(var, numbers.Number): name = type(var).__name__ raise DigitError( f'Function {func} expected number, {name} got instead.')
[ "def", "number_check", "(", "*", "args", ",", "func", "=", "None", ")", ":", "func", "=", "func", "or", "inspect", ".", "stack", "(", ")", "[", "2", "]", "[", "3", "]", "for", "var", "in", "args", ":", "if", "not", "isinstance", "(", "var", ","...
Check if arguments are numbers.
[ "Check", "if", "arguments", "are", "numbers", "." ]
python
train
fossasia/knittingpattern
knittingpattern/Instruction.py
https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/Instruction.py#L260-L270
def transfer_to_row(self, new_row): """Transfer this instruction to a new row. :param knittingpattern.Row.Row new_row: the new row the instruction is in. """ if new_row != self._row: index = self.get_index_in_row() if index is not None: self._row.instructions.pop(index) self._row = new_row
[ "def", "transfer_to_row", "(", "self", ",", "new_row", ")", ":", "if", "new_row", "!=", "self", ".", "_row", ":", "index", "=", "self", ".", "get_index_in_row", "(", ")", "if", "index", "is", "not", "None", ":", "self", ".", "_row", ".", "instructions"...
Transfer this instruction to a new row. :param knittingpattern.Row.Row new_row: the new row the instruction is in.
[ "Transfer", "this", "instruction", "to", "a", "new", "row", "." ]
python
valid
roclark/sportsreference
sportsreference/nba/teams.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nba/teams.py#L114-L177
def dataframe(self): """ Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the string abbreviation of the team, such as 'DET'. """ fields_to_include = { 'abbreviation': self.abbreviation, 'assists': self.assists, 'blocks': self.blocks, 'defensive_rebounds': self.defensive_rebounds, 'field_goal_attempts': self.field_goal_attempts, 'field_goal_percentage': self.field_goal_percentage, 'field_goals': self.field_goals, 'free_throw_attempts': self.free_throw_attempts, 'free_throw_percentage': self.free_throw_percentage, 'free_throws': self.free_throws, 'games_played': self.games_played, 'minutes_played': self.minutes_played, 'name': self.name, 'offensive_rebounds': self.offensive_rebounds, 'opp_assists': self.opp_assists, 'opp_blocks': self.opp_blocks, 'opp_defensive_rebounds': self.opp_defensive_rebounds, 'opp_field_goal_attempts': self.opp_field_goal_attempts, 'opp_field_goal_percentage': self.opp_field_goal_percentage, 'opp_field_goals': self.opp_field_goals, 'opp_free_throw_attempts': self.opp_free_throw_attempts, 'opp_free_throw_percentage': self.opp_free_throw_percentage, 'opp_free_throws': self.opp_free_throws, 'opp_offensive_rebounds': self.opp_offensive_rebounds, 'opp_personal_fouls': self.opp_personal_fouls, 'opp_points': self.opp_points, 'opp_steals': self.opp_steals, 'opp_three_point_field_goal_attempts': self.opp_three_point_field_goal_attempts, 'opp_three_point_field_goal_percentage': self.opp_three_point_field_goal_percentage, 'opp_three_point_field_goals': self.opp_three_point_field_goals, 'opp_total_rebounds': self.opp_total_rebounds, 'opp_turnovers': self.opp_turnovers, 'opp_two_point_field_goal_attempts': self.opp_two_point_field_goal_attempts, 'opp_two_point_field_goal_percentage': self.opp_two_point_field_goal_percentage, 'opp_two_point_field_goals': self.opp_two_point_field_goals, 'personal_fouls': self.personal_fouls, 'points': self.points, 'rank': self.rank, 'steals': self.steals, 
'three_point_field_goal_attempts': self.three_point_field_goal_attempts, 'three_point_field_goal_percentage': self.three_point_field_goal_percentage, 'three_point_field_goals': self.three_point_field_goals, 'total_rebounds': self.total_rebounds, 'turnovers': self.turnovers, 'two_point_field_goal_attempts': self.two_point_field_goal_attempts, 'two_point_field_goal_percentage': self.two_point_field_goal_percentage, 'two_point_field_goals': self.two_point_field_goals } return pd.DataFrame([fields_to_include], index=[self._abbreviation])
[ "def", "dataframe", "(", "self", ")", ":", "fields_to_include", "=", "{", "'abbreviation'", ":", "self", ".", "abbreviation", ",", "'assists'", ":", "self", ".", "assists", ",", "'blocks'", ":", "self", ".", "blocks", ",", "'defensive_rebounds'", ":", "self"...
Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the string abbreviation of the team, such as 'DET'.
[ "Returns", "a", "pandas", "DataFrame", "containing", "all", "other", "class", "properties", "and", "values", ".", "The", "index", "for", "the", "DataFrame", "is", "the", "string", "abbreviation", "of", "the", "team", "such", "as", "DET", "." ]
python
train
pjuren/pyokit
src/pyokit/io/genomeAlignment.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/genomeAlignment.py#L297-L306
def _build_index(maf_strm, ref_spec): """Build an index for a MAF genome alig file and return StringIO of it.""" idx_strm = StringIO.StringIO() bound_iter = functools.partial(genome_alignment_iterator, reference_species=ref_spec) hash_func = JustInTimeGenomeAlignmentBlock.build_hash idx = IndexedFile(maf_strm, bound_iter, hash_func) idx.write_index(idx_strm) idx_strm.seek(0) # seek to the start return idx_strm
[ "def", "_build_index", "(", "maf_strm", ",", "ref_spec", ")", ":", "idx_strm", "=", "StringIO", ".", "StringIO", "(", ")", "bound_iter", "=", "functools", ".", "partial", "(", "genome_alignment_iterator", ",", "reference_species", "=", "ref_spec", ")", "hash_fun...
Build an index for a MAF genome alig file and return StringIO of it.
[ "Build", "an", "index", "for", "a", "MAF", "genome", "alig", "file", "and", "return", "StringIO", "of", "it", "." ]
python
train
geelweb/geelweb-django-contactform
src/geelweb/django/contactform/views.py
https://github.com/geelweb/geelweb-django-contactform/blob/9c5934e0877f61c3ddeca48569836703e1d6344a/src/geelweb/django/contactform/views.py#L12-L29
def contact(request): """Displays the contact form and sends the email""" form = ContactForm(request.POST or None) if form.is_valid(): subject = form.cleaned_data['subject'] message = form.cleaned_data['message'] sender = form.cleaned_data['sender'] cc_myself = form.cleaned_data['cc_myself'] recipients = settings.CONTACTFORM_RECIPIENTS if cc_myself: recipients.append(sender) send_mail(getattr(settings, "CONTACTFORM_SUBJECT_PREFIX", '') + subject, message, sender, recipients) return render(request, 'contactform/thanks.html') return render( request, 'contactform/contact.html', {'form': form})
[ "def", "contact", "(", "request", ")", ":", "form", "=", "ContactForm", "(", "request", ".", "POST", "or", "None", ")", "if", "form", ".", "is_valid", "(", ")", ":", "subject", "=", "form", ".", "cleaned_data", "[", "'subject'", "]", "message", "=", ...
Displays the contact form and sends the email
[ "Displays", "the", "contact", "form", "and", "sends", "the", "email" ]
python
valid
mrahnis/drapery
drapery/cli/drape.py
https://github.com/mrahnis/drapery/blob/c0c0906fb5ff846cf591cb9fe8a9eaee68e8820c/drapery/cli/drape.py#L21-L67
def cli(source_f, raster_f, output, verbose): """ Converts 2D geometries to 3D using GEOS sample through fiona. \b Example: drape point.shp elevation.tif -o point_z.shp """ with fiona.open(source_f, 'r') as source: source_driver = source.driver source_crs = source.crs sink_schema = source.schema.copy() source_geom = source.schema['geometry'] if source_geom == 'Point': sink_schema['geometry'] = '3D Point' elif source_geom == 'LineString': sink_schema['geometry'] = '3D LineString' elif source_geom == '3D Point' or source_geom == '3D LineString': pass else: click.BadParameter("Source geometry type {} not implemented".format(source_geom)) with rasterio.open(raster_f) as raster: if source_crs != raster.crs: click.BadParameter("Features and raster have different CRS.") if raster.count > 1: warnings.warn("Found {0} bands in {1}, expected a single band raster".format(raster.bands, raster_f)) supported = ['int16', 'int32', 'float32', 'float64'] if raster.dtypes[0] not in supported: warnings.warn("Found {0} type in {1}, expected one of {2}".format(raster.dtypes[0]), raster_f, supported) with fiona.open( output, 'w', driver=source_driver, crs=source_crs, schema=sink_schema) as sink: for feature in source: try: feature_z = drapery.drape(raster, feature) sink.write({ 'geometry': mapping(feature_z), 'properties': feature['properties'], }) except Exception: logging.exception("Error processing feature %s:", feature['id'])
[ "def", "cli", "(", "source_f", ",", "raster_f", ",", "output", ",", "verbose", ")", ":", "with", "fiona", ".", "open", "(", "source_f", ",", "'r'", ")", "as", "source", ":", "source_driver", "=", "source", ".", "driver", "source_crs", "=", "source", "....
Converts 2D geometries to 3D using GEOS sample through fiona. \b Example: drape point.shp elevation.tif -o point_z.shp
[ "Converts", "2D", "geometries", "to", "3D", "using", "GEOS", "sample", "through", "fiona", "." ]
python
train
zomux/deepy
deepy/networks/network.py
https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/networks/network.py#L166-L179
def setup_variables(self): """ Set up variables. """ if self.input_tensor: if type(self.input_tensor) == int: x = dim_to_var(self.input_tensor, name="x") else: x = self.input_tensor else: x = T.matrix('x') self.input_variables.append(x) self._output = x self._test_output = x
[ "def", "setup_variables", "(", "self", ")", ":", "if", "self", ".", "input_tensor", ":", "if", "type", "(", "self", ".", "input_tensor", ")", "==", "int", ":", "x", "=", "dim_to_var", "(", "self", ".", "input_tensor", ",", "name", "=", "\"x\"", ")", ...
Set up variables.
[ "Set", "up", "variables", "." ]
python
test
Fizzadar/pydocs
pydocs/__init__.py
https://github.com/Fizzadar/pydocs/blob/72713201dc4cf40335f9c3d380c9111b23c2c38b/pydocs/__init__.py#L16-L26
def _parse_module_list(module_list): '''Loop through all the modules and parse them.''' for module_meta in module_list: name = module_meta['module'] # Import & parse module module = import_module(name) output = parse_module(module) # Assign to meta.content module_meta['content'] = output
[ "def", "_parse_module_list", "(", "module_list", ")", ":", "for", "module_meta", "in", "module_list", ":", "name", "=", "module_meta", "[", "'module'", "]", "# Import & parse module", "module", "=", "import_module", "(", "name", ")", "output", "=", "parse_module",...
Loop through all the modules and parse them.
[ "Loop", "through", "all", "the", "modules", "and", "parse", "them", "." ]
python
train
openstack/quark
quark/drivers/ironic_driver.py
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/ironic_driver.py#L290-L367
def create_port(self, context, network_id, port_id, **kwargs): """Create a port. :param context: neutron api request context. :param network_id: neutron network id. :param port_id: neutron port id. :param kwargs: required keys - device_id: neutron port device_id (instance_id) instance_node_id: nova hypervisor host id mac_address: neutron port mac address base_net_driver: the base network driver optional keys - addresses: list of allocated IPAddress models security_groups: list of associated security groups :raises IronicException: If the client is unable to create the downstream port for any reason, the exception will be logged and IronicException raised. """ LOG.info("create_port %s %s %s" % (context.tenant_id, network_id, port_id)) # sanity check if not kwargs.get('base_net_driver'): raise IronicException(msg='base_net_driver required.') base_net_driver = kwargs['base_net_driver'] if not kwargs.get('device_id'): raise IronicException(msg='device_id required.') device_id = kwargs['device_id'] if not kwargs.get('instance_node_id'): raise IronicException(msg='instance_node_id required.') instance_node_id = kwargs['instance_node_id'] if not kwargs.get('mac_address'): raise IronicException(msg='mac_address is required.') mac_address = str(netaddr.EUI(kwargs["mac_address"]["address"])) mac_address = mac_address.replace('-', ':') # TODO(morgabra): Change this when we enable security groups. if kwargs.get('security_groups'): msg = 'ironic driver does not support security group operations.' 
raise IronicException(msg=msg) # unroll the given address models into a fixed_ips list we can # pass downstream fixed_ips = [] addresses = kwargs.get('addresses') if not isinstance(addresses, list): addresses = [addresses] for address in addresses: fixed_ips.append(self._make_fixed_ip_dict(context, address)) body = { "id": port_id, "network_id": network_id, "device_id": device_id, "device_owner": kwargs.get('device_owner', ''), "tenant_id": context.tenant_id or "quark", "roles": context.roles, "mac_address": mac_address, "fixed_ips": fixed_ips, "switch:hardware_id": instance_node_id, "dynamic_network": not STRATEGY.is_provider_network(network_id) } net_info = self._get_base_network_info( context, network_id, base_net_driver) body.update(net_info) try: LOG.info("creating downstream port: %s" % (body)) port = self._create_port(context, body) LOG.info("created downstream port: %s" % (port)) return {"uuid": port['port']['id'], "vlan_id": port['port']['vlan_id']} except Exception as e: msg = "failed to create downstream port. Exception: %s" % (e) raise IronicException(msg=msg)
[ "def", "create_port", "(", "self", ",", "context", ",", "network_id", ",", "port_id", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "info", "(", "\"create_port %s %s %s\"", "%", "(", "context", ".", "tenant_id", ",", "network_id", ",", "port_id", ")", "...
Create a port. :param context: neutron api request context. :param network_id: neutron network id. :param port_id: neutron port id. :param kwargs: required keys - device_id: neutron port device_id (instance_id) instance_node_id: nova hypervisor host id mac_address: neutron port mac address base_net_driver: the base network driver optional keys - addresses: list of allocated IPAddress models security_groups: list of associated security groups :raises IronicException: If the client is unable to create the downstream port for any reason, the exception will be logged and IronicException raised.
[ "Create", "a", "port", "." ]
python
valid
secdev/scapy
scapy/automaton.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/automaton.py#L129-L134
def _timeout_thread(self, remain): """Timeout before releasing every thing, if nothing was returned""" time.sleep(remain) if not self._ended: self._ended = True self._release_all()
[ "def", "_timeout_thread", "(", "self", ",", "remain", ")", ":", "time", ".", "sleep", "(", "remain", ")", "if", "not", "self", ".", "_ended", ":", "self", ".", "_ended", "=", "True", "self", ".", "_release_all", "(", ")" ]
Timeout before releasing every thing, if nothing was returned
[ "Timeout", "before", "releasing", "every", "thing", "if", "nothing", "was", "returned" ]
python
train
PyCQA/astroid
astroid/brain/brain_builtin_inference.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/brain/brain_builtin_inference.py#L121-L147
def register_builtin_transform(transform, builtin_name): """Register a new transform function for the given *builtin_name*. The transform function must accept two parameters, a node and an optional context. """ def _transform_wrapper(node, context=None): result = transform(node, context=context) if result: if not result.parent: # Let the transformation function determine # the parent for its result. Otherwise, # we set it to be the node we transformed from. result.parent = node if result.lineno is None: result.lineno = node.lineno if result.col_offset is None: result.col_offset = node.col_offset return iter([result]) MANAGER.register_transform( nodes.Call, inference_tip(_transform_wrapper), partial(_builtin_filter_predicate, builtin_name=builtin_name), )
[ "def", "register_builtin_transform", "(", "transform", ",", "builtin_name", ")", ":", "def", "_transform_wrapper", "(", "node", ",", "context", "=", "None", ")", ":", "result", "=", "transform", "(", "node", ",", "context", "=", "context", ")", "if", "result...
Register a new transform function for the given *builtin_name*. The transform function must accept two parameters, a node and an optional context.
[ "Register", "a", "new", "transform", "function", "for", "the", "given", "*", "builtin_name", "*", "." ]
python
train
Phyks/libbmc
libbmc/doi.py
https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/doi.py#L188-L209
def get_bibtex(doi): """ Get a BibTeX entry for a given DOI. .. note:: Adapted from https://gist.github.com/jrsmith3/5513926. :param doi: The canonical DOI to get BibTeX from. :returns: A BibTeX string or ``None``. >>> get_bibtex('10.1209/0295-5075/111/40005') '@article{Verney_2015,\\n\\tdoi = {10.1209/0295-5075/111/40005},\\n\\turl = {http://dx.doi.org/10.1209/0295-5075/111/40005},\\n\\tyear = 2015,\\n\\tmonth = {aug},\\n\\tpublisher = {{IOP} Publishing},\\n\\tvolume = {111},\\n\\tnumber = {4},\\n\\tpages = {40005},\\n\\tauthor = {Lucas Verney and Lev Pitaevskii and Sandro Stringari},\\n\\ttitle = {Hybridization of first and second sound in a weakly interacting Bose gas},\\n\\tjournal = {{EPL}}\\n}' """ try: request = requests.get(to_url(doi), headers={"accept": "application/x-bibtex"}) request.raise_for_status() assert request.headers.get("content-type") == "application/x-bibtex" return request.text except (RequestException, AssertionError): return None
[ "def", "get_bibtex", "(", "doi", ")", ":", "try", ":", "request", "=", "requests", ".", "get", "(", "to_url", "(", "doi", ")", ",", "headers", "=", "{", "\"accept\"", ":", "\"application/x-bibtex\"", "}", ")", "request", ".", "raise_for_status", "(", ")"...
Get a BibTeX entry for a given DOI. .. note:: Adapted from https://gist.github.com/jrsmith3/5513926. :param doi: The canonical DOI to get BibTeX from. :returns: A BibTeX string or ``None``. >>> get_bibtex('10.1209/0295-5075/111/40005') '@article{Verney_2015,\\n\\tdoi = {10.1209/0295-5075/111/40005},\\n\\turl = {http://dx.doi.org/10.1209/0295-5075/111/40005},\\n\\tyear = 2015,\\n\\tmonth = {aug},\\n\\tpublisher = {{IOP} Publishing},\\n\\tvolume = {111},\\n\\tnumber = {4},\\n\\tpages = {40005},\\n\\tauthor = {Lucas Verney and Lev Pitaevskii and Sandro Stringari},\\n\\ttitle = {Hybridization of first and second sound in a weakly interacting Bose gas},\\n\\tjournal = {{EPL}}\\n}'
[ "Get", "a", "BibTeX", "entry", "for", "a", "given", "DOI", "." ]
python
train
decryptus/sonicprobe
sonicprobe/libs/pworkerpool.py
https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/libs/pworkerpool.py#L170-L178
def set_max_workers(self, nb): """ Set the maximum workers to create. """ self.count_lock.acquire() self.shared['max_workers'] = nb if self.shared['workers'] > self.shared['max_workers']: self.kill(self.shared['workers'] - self.shared['max_workers']) self.count_lock.release()
[ "def", "set_max_workers", "(", "self", ",", "nb", ")", ":", "self", ".", "count_lock", ".", "acquire", "(", ")", "self", ".", "shared", "[", "'max_workers'", "]", "=", "nb", "if", "self", ".", "shared", "[", "'workers'", "]", ">", "self", ".", "share...
Set the maximum workers to create.
[ "Set", "the", "maximum", "workers", "to", "create", "." ]
python
train
geomet/geomet
geomet/wkt.py
https://github.com/geomet/geomet/blob/b82d7118113ab723751eba3de5df98c368423c2b/geomet/wkt.py#L57-L100
def dumps(obj, decimals=16): """ Dump a GeoJSON-like `dict` to a WKT string. """ try: geom_type = obj['type'] exporter = _dumps_registry.get(geom_type) if exporter is None: _unsupported_geom_type(geom_type) # Check for empty cases if geom_type == 'GeometryCollection': if len(obj['geometries']) == 0: return 'GEOMETRYCOLLECTION EMPTY' else: # Geom has no coordinate values at all, and must be empty. if len(list(util.flatten_multi_dim(obj['coordinates']))) == 0: return '%s EMPTY' % geom_type.upper() except KeyError: raise geomet.InvalidGeoJSONException('Invalid GeoJSON: %s' % obj) result = exporter(obj, decimals) # Try to get the SRID from `meta.srid` meta_srid = obj.get('meta', {}).get('srid') # Also try to get it from `crs.properties.name`: crs_srid = obj.get('crs', {}).get('properties', {}).get('name') if crs_srid is not None: # Shave off the EPSG prefix to give us the SRID: crs_srid = crs_srid.replace('EPSG', '') if (meta_srid is not None and crs_srid is not None and str(meta_srid) != str(crs_srid)): raise ValueError( 'Ambiguous CRS/SRID values: %s and %s' % (meta_srid, crs_srid) ) srid = meta_srid or crs_srid # TODO: add tests for CRS input if srid is not None: # Prepend the SRID result = 'SRID=%s;%s' % (srid, result) return result
[ "def", "dumps", "(", "obj", ",", "decimals", "=", "16", ")", ":", "try", ":", "geom_type", "=", "obj", "[", "'type'", "]", "exporter", "=", "_dumps_registry", ".", "get", "(", "geom_type", ")", "if", "exporter", "is", "None", ":", "_unsupported_geom_type...
Dump a GeoJSON-like `dict` to a WKT string.
[ "Dump", "a", "GeoJSON", "-", "like", "dict", "to", "a", "WKT", "string", "." ]
python
train
quantopian/pyfolio
pyfolio/perf_attrib.py
https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/perf_attrib.py#L419-L468
def plot_factor_contribution_to_perf( perf_attrib_data, ax=None, title='Cumulative common returns attribution', ): """ Plot each factor's contribution to performance. Parameters ---------- perf_attrib_data : pd.DataFrame df with factors, common returns, and specific returns as columns, and datetimes as index - Example: momentum reversal common_returns specific_returns dt 2017-01-01 0.249087 0.935925 1.185012 1.185012 2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980 ax : matplotlib.axes.Axes axes on which plots are made. if None, current axes will be used title : str, optional title of plot Returns ------- ax : matplotlib.axes.Axes """ if ax is None: ax = plt.gca() factors_to_plot = perf_attrib_data.drop( ['total_returns', 'common_returns'], axis='columns', errors='ignore' ) factors_cumulative = pd.DataFrame() for factor in factors_to_plot: factors_cumulative[factor] = ep.cum_returns(factors_to_plot[factor]) for col in factors_cumulative: ax.plot(factors_cumulative[col]) ax.axhline(0, color='k') configure_legend(ax, change_colors=True) ax.set_ylabel('Cumulative returns by factor') ax.set_title(title) return ax
[ "def", "plot_factor_contribution_to_perf", "(", "perf_attrib_data", ",", "ax", "=", "None", ",", "title", "=", "'Cumulative common returns attribution'", ",", ")", ":", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", ")", "factors_to_plot", "...
Plot each factor's contribution to performance. Parameters ---------- perf_attrib_data : pd.DataFrame df with factors, common returns, and specific returns as columns, and datetimes as index - Example: momentum reversal common_returns specific_returns dt 2017-01-01 0.249087 0.935925 1.185012 1.185012 2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980 ax : matplotlib.axes.Axes axes on which plots are made. if None, current axes will be used title : str, optional title of plot Returns ------- ax : matplotlib.axes.Axes
[ "Plot", "each", "factor", "s", "contribution", "to", "performance", "." ]
python
valid
Stewori/pytypes
pytypes/typechecker.py
https://github.com/Stewori/pytypes/blob/b814d38709e84c0e0825caf8b721c20eb5a8ab3b/pytypes/typechecker.py#L1167-L1172
def check_argument_types(cllable = None, call_args = None, clss = None, caller_level = 0): """Can be called from within a function or method to apply typechecking to the arguments that were passed in by the caller. Checking is applied w.r.t. type hints of the function or method hosting the call to check_argument_types. """ return _check_caller_type(False, cllable, call_args, clss, caller_level+1)
[ "def", "check_argument_types", "(", "cllable", "=", "None", ",", "call_args", "=", "None", ",", "clss", "=", "None", ",", "caller_level", "=", "0", ")", ":", "return", "_check_caller_type", "(", "False", ",", "cllable", ",", "call_args", ",", "clss", ",", ...
Can be called from within a function or method to apply typechecking to the arguments that were passed in by the caller. Checking is applied w.r.t. type hints of the function or method hosting the call to check_argument_types.
[ "Can", "be", "called", "from", "within", "a", "function", "or", "method", "to", "apply", "typechecking", "to", "the", "arguments", "that", "were", "passed", "in", "by", "the", "caller", ".", "Checking", "is", "applied", "w", ".", "r", ".", "t", ".", "t...
python
train
gabrielelanaro/chemview
chemview/viewer.py
https://github.com/gabrielelanaro/chemview/blob/2c9768dd23db99e59e27adff2a953bb8ee795fa3/chemview/viewer.py#L312-L335
def cartoon(self, cmap=None): '''Display a protein secondary structure as a pymol-like cartoon representation. :param cmap: is a dictionary that maps the secondary type (H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white) ''' # Parse secondary structure top = self.topology geom = gg.GeomProteinCartoon(gg.Aes(xyz=self.coordinates, types=top['atom_names'], secondary_type=top['secondary_structure']), cmap=cmap) primitives = geom.produce(gg.Aes()) ids = [self.add_representation(r['rep_type'], r['options']) for r in primitives] def update(self=self, geom=geom, ids=ids): primitives = geom.produce(gg.Aes(xyz=self.coordinates)) [self.update_representation(id_, rep_options) for id_, rep_options in zip(ids, primitives)] self.update_callbacks.append(update) self.autozoom(self.coordinates)
[ "def", "cartoon", "(", "self", ",", "cmap", "=", "None", ")", ":", "# Parse secondary structure", "top", "=", "self", ".", "topology", "geom", "=", "gg", ".", "GeomProteinCartoon", "(", "gg", ".", "Aes", "(", "xyz", "=", "self", ".", "coordinates", ",", ...
Display a protein secondary structure as a pymol-like cartoon representation. :param cmap: is a dictionary that maps the secondary type (H=helix, E=sheet, C=coil) to a hexadecimal color (0xffffff for white)
[ "Display", "a", "protein", "secondary", "structure", "as", "a", "pymol", "-", "like", "cartoon", "representation", "." ]
python
train
mitsei/dlkit
dlkit/json_/grading/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/managers.py#L1718-L1735
def get_gradebook_hierarchy_design_session(self, proxy): """Gets the session designing gradebook hierarchies. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.grading.GradebookHierarchyDesignSession) - a ``GradebookHierarchyDesignSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_gradebook_hierarchy_design() is false`` *compliance: optional -- This method must be implemented if ``supports_gradebook_hierarchy_design()`` is true.* """ if not self.supports_gradebook_hierarchy_design(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.GradebookHierarchyDesignSession(proxy=proxy, runtime=self._runtime)
[ "def", "get_gradebook_hierarchy_design_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_gradebook_hierarchy_design", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions"...
Gets the session designing gradebook hierarchies. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.grading.GradebookHierarchyDesignSession) - a ``GradebookHierarchyDesignSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_gradebook_hierarchy_design() is false`` *compliance: optional -- This method must be implemented if ``supports_gradebook_hierarchy_design()`` is true.*
[ "Gets", "the", "session", "designing", "gradebook", "hierarchies", "." ]
python
train
rigetti/grove
grove/measurements/estimation.py
https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/measurements/estimation.py#L37-L55
def get_rotation_program(pauli_term: PauliTerm) -> Program: """ Generate a rotation program so that the pauli term is diagonal. :param pauli_term: The Pauli term used to generate diagonalizing one-qubit rotations. :return: The rotation program. """ meas_basis_change = Program() for index, gate in pauli_term: if gate == 'X': meas_basis_change.inst(RY(-np.pi / 2, index)) elif gate == 'Y': meas_basis_change.inst(RX(np.pi / 2, index)) elif gate == 'Z': pass else: raise ValueError() return meas_basis_change
[ "def", "get_rotation_program", "(", "pauli_term", ":", "PauliTerm", ")", "->", "Program", ":", "meas_basis_change", "=", "Program", "(", ")", "for", "index", ",", "gate", "in", "pauli_term", ":", "if", "gate", "==", "'X'", ":", "meas_basis_change", ".", "ins...
Generate a rotation program so that the pauli term is diagonal. :param pauli_term: The Pauli term used to generate diagonalizing one-qubit rotations. :return: The rotation program.
[ "Generate", "a", "rotation", "program", "so", "that", "the", "pauli", "term", "is", "diagonal", "." ]
python
train
ioos/compliance-checker
compliance_checker/acdd.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/acdd.py#L566-L582
def verify_convention_version(self, ds): """ Verify that the version in the Conventions field is correct """ try: for convention in getattr(ds, "Conventions", '').replace(' ', '').split(','): if convention == 'ACDD-' + self._cc_spec_version: return ratable_result((2, 2), None, []) # name=None so grouped with Globals # if no/wrong ACDD convention, return appropriate result # Result will have name "Global Attributes" to group with globals m = ["Conventions does not contain 'ACDD-{}'".format(self._cc_spec_version)] return ratable_result((1, 2), "Global Attributes", m) except AttributeError: # NetCDF attribute not found m = ["No Conventions attribute present; must contain ACDD-{}".format(self._cc_spec_version)] # Result will have name "Global Attributes" to group with globals return ratable_result((0, 2), "Global Attributes", m)
[ "def", "verify_convention_version", "(", "self", ",", "ds", ")", ":", "try", ":", "for", "convention", "in", "getattr", "(", "ds", ",", "\"Conventions\"", ",", "''", ")", ".", "replace", "(", "' '", ",", "''", ")", ".", "split", "(", "','", ")", ":",...
Verify that the version in the Conventions field is correct
[ "Verify", "that", "the", "version", "in", "the", "Conventions", "field", "is", "correct" ]
python
train
mitsei/dlkit
dlkit/json_/commenting/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/commenting/sessions.py#L2654-L2681
def get_book_nodes(self, book_id, ancestor_levels, descendant_levels, include_siblings): """Gets a portion of the hierarchy for the given book. arg: book_id (osid.id.Id): the ``Id`` to query arg: ancestor_levels (cardinal): the maximum number of ancestor levels to include. A value of 0 returns no parents in the node. arg: descendant_levels (cardinal): the maximum number of descendant levels to include. A value of 0 returns no children in the node. arg: include_siblings (boolean): ``true`` to include the siblings of the given node, ``false`` to omit the siblings return: (osid.commenting.BookNode) - a book node raise: NotFound - ``book_id`` is not found raise: NullArgument - ``book_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.get_bin_nodes return objects.BookNode(self.get_book_node_ids( book_id=book_id, ancestor_levels=ancestor_levels, descendant_levels=descendant_levels, include_siblings=include_siblings)._my_map, runtime=self._runtime, proxy=self._proxy)
[ "def", "get_book_nodes", "(", "self", ",", "book_id", ",", "ancestor_levels", ",", "descendant_levels", ",", "include_siblings", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.get_bin_nodes", "return", "objects", ".", "BookNode", "(", "sel...
Gets a portion of the hierarchy for the given book. arg: book_id (osid.id.Id): the ``Id`` to query arg: ancestor_levels (cardinal): the maximum number of ancestor levels to include. A value of 0 returns no parents in the node. arg: descendant_levels (cardinal): the maximum number of descendant levels to include. A value of 0 returns no children in the node. arg: include_siblings (boolean): ``true`` to include the siblings of the given node, ``false`` to omit the siblings return: (osid.commenting.BookNode) - a book node raise: NotFound - ``book_id`` is not found raise: NullArgument - ``book_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "a", "portion", "of", "the", "hierarchy", "for", "the", "given", "book", "." ]
python
train
joshspeagle/dynesty
dynesty/nestedsamplers.py
https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/nestedsamplers.py#L346-L362
def update(self, pointvol): """Update the bounding ellipsoid using the current set of live points.""" # Check if we should use the provided pool for updating. if self.use_pool_update: pool = self.pool else: pool = None # Update the ellipsoid. self.ell.update(self.live_u, pointvol=pointvol, rstate=self.rstate, bootstrap=self.bootstrap, pool=pool) if self.enlarge != 1.: self.ell.scale_to_vol(self.ell.vol * self.enlarge) return copy.deepcopy(self.ell)
[ "def", "update", "(", "self", ",", "pointvol", ")", ":", "# Check if we should use the provided pool for updating.", "if", "self", ".", "use_pool_update", ":", "pool", "=", "self", ".", "pool", "else", ":", "pool", "=", "None", "# Update the ellipsoid.", "self", "...
Update the bounding ellipsoid using the current set of live points.
[ "Update", "the", "bounding", "ellipsoid", "using", "the", "current", "set", "of", "live", "points", "." ]
python
train
Cadene/pretrained-models.pytorch
pretrainedmodels/models/polynet.py
https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/polynet.py#L461-L480
def polynet(num_classes=1000, pretrained='imagenet'): """PolyNet architecture from the paper 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks' https://arxiv.org/abs/1611.05725 """ if pretrained: settings = pretrained_settings['polynet'][pretrained] assert num_classes == settings['num_classes'], \ 'num_classes should be {}, but is {}'.format( settings['num_classes'], num_classes) model = PolyNet(num_classes=num_classes) model.load_state_dict(model_zoo.load_url(settings['url'])) model.input_space = settings['input_space'] model.input_size = settings['input_size'] model.input_range = settings['input_range'] model.mean = settings['mean'] model.std = settings['std'] else: model = PolyNet(num_classes=num_classes) return model
[ "def", "polynet", "(", "num_classes", "=", "1000", ",", "pretrained", "=", "'imagenet'", ")", ":", "if", "pretrained", ":", "settings", "=", "pretrained_settings", "[", "'polynet'", "]", "[", "pretrained", "]", "assert", "num_classes", "==", "settings", "[", ...
PolyNet architecture from the paper 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks' https://arxiv.org/abs/1611.05725
[ "PolyNet", "architecture", "from", "the", "paper", "PolyNet", ":", "A", "Pursuit", "of", "Structural", "Diversity", "in", "Very", "Deep", "Networks", "https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1611", ".", "05725" ]
python
train
inasafe/inasafe
safe/common/parameters/default_value_parameter_widget.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/common/parameters/default_value_parameter_widget.py#L77-L97
def get_parameter(self): """Obtain list parameter object from the current widget state. :returns: A DefaultValueParameter from the current state of widget :rtype: DefaultValueParameter """ radio_button_checked_id = self.input_button_group.checkedId() # No radio button checked, then default value = None if radio_button_checked_id == -1: self._parameter.value = None # The last radio button (custom) is checked, get the value from the # line edit elif radio_button_checked_id == len(self._parameter.options) - 1: self._parameter.options[radio_button_checked_id] = \ self.custom_value.value() self._parameter.value = self.custom_value.value() else: self._parameter.value = self._parameter.options[ radio_button_checked_id] return self._parameter
[ "def", "get_parameter", "(", "self", ")", ":", "radio_button_checked_id", "=", "self", ".", "input_button_group", ".", "checkedId", "(", ")", "# No radio button checked, then default value = None", "if", "radio_button_checked_id", "==", "-", "1", ":", "self", ".", "_p...
Obtain list parameter object from the current widget state. :returns: A DefaultValueParameter from the current state of widget :rtype: DefaultValueParameter
[ "Obtain", "list", "parameter", "object", "from", "the", "current", "widget", "state", "." ]
python
train
tomislater/RandomWords
random_words/random_words.py
https://github.com/tomislater/RandomWords/blob/601aa48732d3c389f4c17ba0ed98ffe0e4821d78/random_words/random_words.py#L48-L55
def load_nicknames(self, file): """ Load dict from file for random nicknames. :param str file: filename """ with open(os.path.join(main_dir, file + '.dat'), 'r') as f: self.nicknames = json.load(f)
[ "def", "load_nicknames", "(", "self", ",", "file", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "main_dir", ",", "file", "+", "'.dat'", ")", ",", "'r'", ")", "as", "f", ":", "self", ".", "nicknames", "=", "json", ".", "load...
Load dict from file for random nicknames. :param str file: filename
[ "Load", "dict", "from", "file", "for", "random", "nicknames", "." ]
python
train
tensorflow/lucid
lucid/optvis/objectives.py
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L165-L170
def channel(layer, n_channel, batch=None): """Visualize a single channel""" if batch is None: return lambda T: tf.reduce_mean(T(layer)[..., n_channel]) else: return lambda T: tf.reduce_mean(T(layer)[batch, ..., n_channel])
[ "def", "channel", "(", "layer", ",", "n_channel", ",", "batch", "=", "None", ")", ":", "if", "batch", "is", "None", ":", "return", "lambda", "T", ":", "tf", ".", "reduce_mean", "(", "T", "(", "layer", ")", "[", "...", ",", "n_channel", "]", ")", ...
Visualize a single channel
[ "Visualize", "a", "single", "channel" ]
python
train
kentwait/nxsim
nxsim/simulation.py
https://github.com/kentwait/nxsim/blob/88090d8099e574bc6fd1d24734cfa205ecce4c1d/nxsim/simulation.py#L57-L85
def run_trial(self, trial_id=0): """Run a single trial of the simulation Parameters ---------- trial_id : int """ # Set-up trial environment and graph self.env = NetworkEnvironment(self.G.copy(), initial_time=0, **self.environment_params) # self.G = self.initial_topology.copy() # self.trial_params = deepcopy(self.global_params) # Set up agents on nodes print('Setting up agents...') self.setup_network_agents() # Set up environmental agent if self.environment_agent_type: env_agent = self.environment_agent_type(environment=self.env) # Set up logging logging_interval = self.logging_interval logger = BaseLoggingAgent(environment=self.env, dir_path=self.dir_path, logging_interval=logging_interval) # Run trial self.env.run(until=self.until) # Save output as pickled objects logger.save_trial_state_history(trial_id=trial_id)
[ "def", "run_trial", "(", "self", ",", "trial_id", "=", "0", ")", ":", "# Set-up trial environment and graph", "self", ".", "env", "=", "NetworkEnvironment", "(", "self", ".", "G", ".", "copy", "(", ")", ",", "initial_time", "=", "0", ",", "*", "*", "self...
Run a single trial of the simulation Parameters ---------- trial_id : int
[ "Run", "a", "single", "trial", "of", "the", "simulation" ]
python
train
bitesofcode/projexui
projexui/widgets/xviewwidget/xviewprofiletoolbar.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewprofiletoolbar.py#L329-L339
def profiles(self): """ Returns a list of profiles for this toolbar. :return <projexui.widgets.xviewwidget.XViewProfile> """ output = [] for act in self.actions(): if ( isinstance(act, XViewProfileAction) ): output.append(act.profile()) return output
[ "def", "profiles", "(", "self", ")", ":", "output", "=", "[", "]", "for", "act", "in", "self", ".", "actions", "(", ")", ":", "if", "(", "isinstance", "(", "act", ",", "XViewProfileAction", ")", ")", ":", "output", ".", "append", "(", "act", ".", ...
Returns a list of profiles for this toolbar. :return <projexui.widgets.xviewwidget.XViewProfile>
[ "Returns", "a", "list", "of", "profiles", "for", "this", "toolbar", ".", ":", "return", "<projexui", ".", "widgets", ".", "xviewwidget", ".", "XViewProfile", ">" ]
python
train
jazzband/django-model-utils
model_utils/tracker.py
https://github.com/jazzband/django-model-utils/blob/d557c4253312774a7c2f14bcd02675e9ac2ea05f/model_utils/tracker.py#L270-L277
def has_changed(self, field): """Returns ``True`` if field has changed from currently saved value""" if not self.instance.pk: return True elif field in self.saved_data: return self.previous(field) != self.get_field_value(field) else: raise FieldError('field "%s" not tracked' % field)
[ "def", "has_changed", "(", "self", ",", "field", ")", ":", "if", "not", "self", ".", "instance", ".", "pk", ":", "return", "True", "elif", "field", "in", "self", ".", "saved_data", ":", "return", "self", ".", "previous", "(", "field", ")", "!=", "sel...
Returns ``True`` if field has changed from currently saved value
[ "Returns", "True", "if", "field", "has", "changed", "from", "currently", "saved", "value" ]
python
train
mlperf/training
image_classification/tensorflow/official/resnet/resnet_run_loop.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/image_classification/tensorflow/official/resnet/resnet_run_loop.py#L130-L218
def learning_rate_with_decay( batch_size, batch_denom, num_images, boundary_epochs, decay_rates, base_lr=0.1, enable_lars=False): """Get a learning rate that decays step-wise as training progresses. Args: batch_size: the number of examples processed in each training batch. batch_denom: this value will be used to scale the base learning rate. `0.1 * batch size` is divided by this number, such that when batch_denom == batch_size, the initial learning rate will be 0.1. num_images: total number of images that will be used for training. boundary_epochs: list of ints representing the epochs at which we decay the learning rate. decay_rates: list of floats representing the decay rates to be used for scaling the learning rate. It should have one more element than `boundary_epochs`, and all elements should have the same type. base_lr: Initial learning rate scaled based on batch_denom. Returns: Returns a function that takes a single argument - the number of batches trained so far (global_step)- and returns the learning rate to be used for training the next batch. """ initial_learning_rate = base_lr * batch_size / batch_denom batches_per_epoch = num_images / batch_size # Multiply the learning rate by 0.1 at 100, 150, and 200 epochs. boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs] vals = [initial_learning_rate * decay for decay in decay_rates] def learning_rate_fn(global_step): lr = tf.train.piecewise_constant(global_step, boundaries, vals) warmup_steps = int(batches_per_epoch * 5) warmup_lr = ( initial_learning_rate * tf.cast(global_step, tf.float32) / tf.cast( warmup_steps, tf.float32)) return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr) def poly_rate_fn(global_step): """Handles linear scaling rule, gradual warmup, and LR decay. The learning rate starts at 0, then it increases linearly per step. After flags.poly_warmup_epochs, we reach the base learning rate (scaled to account for batch size). 
The learning rate is then decayed using a polynomial rate decay schedule with power 2.0. Args: global_step: the current global_step Returns: returns the current learning rate """ # Learning rate schedule for LARS polynomial schedule if batch_size < 8192: plr = 5.0 w_epochs = 5 elif batch_size < 16384: plr = 10.0 w_epochs = 5 elif batch_size < 32768: plr = 25.0 w_epochs = 5 else: plr = 32.0 w_epochs = 14 w_steps = int(w_epochs * batches_per_epoch) wrate = (plr * tf.cast(global_step, tf.float32) / tf.cast( w_steps, tf.float32)) # TODO(pkanwar): use a flag to help calc num_epochs. num_epochs = 90 train_steps = batches_per_epoch * num_epochs min_step = tf.constant(1, dtype=tf.int64) decay_steps = tf.maximum(min_step, tf.subtract(global_step, w_steps)) poly_rate = tf.train.polynomial_decay( plr, decay_steps, train_steps - w_steps + 1, power=2.0) return tf.where(global_step <= w_steps, wrate, poly_rate) # For LARS we have a new learning rate schedule if enable_lars: return poly_rate_fn return learning_rate_fn
[ "def", "learning_rate_with_decay", "(", "batch_size", ",", "batch_denom", ",", "num_images", ",", "boundary_epochs", ",", "decay_rates", ",", "base_lr", "=", "0.1", ",", "enable_lars", "=", "False", ")", ":", "initial_learning_rate", "=", "base_lr", "*", "batch_si...
Get a learning rate that decays step-wise as training progresses. Args: batch_size: the number of examples processed in each training batch. batch_denom: this value will be used to scale the base learning rate. `0.1 * batch size` is divided by this number, such that when batch_denom == batch_size, the initial learning rate will be 0.1. num_images: total number of images that will be used for training. boundary_epochs: list of ints representing the epochs at which we decay the learning rate. decay_rates: list of floats representing the decay rates to be used for scaling the learning rate. It should have one more element than `boundary_epochs`, and all elements should have the same type. base_lr: Initial learning rate scaled based on batch_denom. Returns: Returns a function that takes a single argument - the number of batches trained so far (global_step)- and returns the learning rate to be used for training the next batch.
[ "Get", "a", "learning", "rate", "that", "decays", "step", "-", "wise", "as", "training", "progresses", "." ]
python
train
matthiask/django-cte-forest
cte_forest/query.py
https://github.com/matthiask/django-cte-forest/blob/7bff29d69eddfcf214e9cf61647c91d28655619c/cte_forest/query.py#L227-L246
def get_compiler(self, using=None, connection=None): """ Overrides the Query method get_compiler in order to return an instance of the above custom compiler. """ # Copy the body of this method from Django except the final # return statement. We will ignore code coverage for this. if using is None and connection is None: # pragma: no cover raise ValueError("Need either using or connection") if using: connection = connections[using] # Check that the compiler will be able to execute the query for alias, aggregate in self.annotation_select.items(): connection.ops.check_expression_support(aggregate) # Instantiate the custom compiler. return { CTEUpdateQuery: CTEUpdateQueryCompiler, CTEInsertQuery: CTEInsertQueryCompiler, CTEDeleteQuery: CTEDeleteQueryCompiler, CTEAggregateQuery: CTEAggregateQueryCompiler, }.get(self.__class__, CTEQueryCompiler)(self, connection, using)
[ "def", "get_compiler", "(", "self", ",", "using", "=", "None", ",", "connection", "=", "None", ")", ":", "# Copy the body of this method from Django except the final", "# return statement. We will ignore code coverage for this.", "if", "using", "is", "None", "and", "connect...
Overrides the Query method get_compiler in order to return an instance of the above custom compiler.
[ "Overrides", "the", "Query", "method", "get_compiler", "in", "order", "to", "return", "an", "instance", "of", "the", "above", "custom", "compiler", "." ]
python
train
pyGrowler/Growler
growler/http/response.py
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/response.py#L377-L396
def stringify(self, use_bytes=False): """ Returns representation of headers as a valid HTTP header string. This is called by __str__. Args: use_bytes (bool): Returns a bytes object instead of a str. """ def _str_value(value): if isinstance(value, (list, tuple)): value = (self.EOL + '\t').join(map(_str_value, value)) elif callable(value): value = _str_value(value()) return value s = self.EOL.join(("{key}: {value}".format(key=key, value=_str_value(value)) for key, value in self._header_data.values() if value is not None)) return s + (self.EOL * 2)
[ "def", "stringify", "(", "self", ",", "use_bytes", "=", "False", ")", ":", "def", "_str_value", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "value", "=", "(", "self", ".", "EOL", "+", "...
Returns representation of headers as a valid HTTP header string. This is called by __str__. Args: use_bytes (bool): Returns a bytes object instead of a str.
[ "Returns", "representation", "of", "headers", "as", "a", "valid", "HTTP", "header", "string", ".", "This", "is", "called", "by", "__str__", "." ]
python
train
wglass/lighthouse
lighthouse/log/cli.py
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/log/cli.py#L39-L57
def create_thread_color_cycle(): """ Generates a never-ending cycle of colors to choose from for individual threads. If color is not available, a cycle that repeats None every time is returned instead. """ if not color_available: return itertools.cycle([None]) return itertools.cycle( ( colorama.Fore.CYAN, colorama.Fore.BLUE, colorama.Fore.MAGENTA, colorama.Fore.GREEN, ) )
[ "def", "create_thread_color_cycle", "(", ")", ":", "if", "not", "color_available", ":", "return", "itertools", ".", "cycle", "(", "[", "None", "]", ")", "return", "itertools", ".", "cycle", "(", "(", "colorama", ".", "Fore", ".", "CYAN", ",", "colorama", ...
Generates a never-ending cycle of colors to choose from for individual threads. If color is not available, a cycle that repeats None every time is returned instead.
[ "Generates", "a", "never", "-", "ending", "cycle", "of", "colors", "to", "choose", "from", "for", "individual", "threads", "." ]
python
train
requests/requests-oauthlib
requests_oauthlib/oauth2_session.py
https://github.com/requests/requests-oauthlib/blob/800976faab3b827a42fa1cb80f13fcc03961d2c9/requests_oauthlib/oauth2_session.py#L175-L363
def fetch_token( self, token_url, code=None, authorization_response=None, body="", auth=None, username=None, password=None, method="POST", force_querystring=False, timeout=None, headers=None, verify=True, proxies=None, include_client_id=None, client_secret=None, **kwargs ): """Generic method for fetching an access token from the token endpoint. If you are using the MobileApplicationClient you will want to use `token_from_fragment` instead of `fetch_token`. The current implementation enforces the RFC guidelines. :param token_url: Token endpoint URL, must use HTTPS. :param code: Authorization code (used by WebApplicationClients). :param authorization_response: Authorization response URL, the callback URL of the request back to you. Used by WebApplicationClients instead of code. :param body: Optional application/x-www-form-urlencoded body to add the include in the token request. Prefer kwargs over body. :param auth: An auth tuple or method as accepted by `requests`. :param username: Username required by LegacyApplicationClients to appear in the request body. :param password: Password required by LegacyApplicationClients to appear in the request body. :param method: The HTTP method used to make the request. Defaults to POST, but may also be GET. Other methods should be added as needed. :param force_querystring: If True, force the request body to be sent in the querystring instead. :param timeout: Timeout of the request in seconds. :param headers: Dict to default request headers with. :param verify: Verify SSL certificate. :param proxies: The `proxies` argument is passed onto `requests`. :param include_client_id: Should the request body include the `client_id` parameter. Default is `None`, which will attempt to autodetect. This can be forced to always include (True) or never include (False). :param client_secret: The `client_secret` paired to the `client_id`. This is generally required unless provided in the `auth` tuple. 
If the value is `None`, it will be omitted from the request, however if the value is an empty string, an empty string will be sent. :param kwargs: Extra parameters to include in the token request. :return: A token dict """ if not is_secure_transport(token_url): raise InsecureTransportError() if not code and authorization_response: self._client.parse_request_uri_response( authorization_response, state=self._state ) code = self._client.code elif not code and isinstance(self._client, WebApplicationClient): code = self._client.code if not code: raise ValueError( "Please supply either code or " "authorization_response parameters." ) # Earlier versions of this library build an HTTPBasicAuth header out of # `username` and `password`. The RFC states, however these attributes # must be in the request body and not the header. # If an upstream server is not spec compliant and requires them to # appear as an Authorization header, supply an explicit `auth` header # to this function. # This check will allow for empty strings, but not `None`. # # Refernences # 4.3.2 - Resource Owner Password Credentials Grant # https://tools.ietf.org/html/rfc6749#section-4.3.2 if isinstance(self._client, LegacyApplicationClient): if username is None: raise ValueError( "`LegacyApplicationClient` requires both the " "`username` and `password` parameters." ) if password is None: raise ValueError( "The required paramter `username` was supplied, " "but `password` was not." ) # merge username and password into kwargs for `prepare_request_body` if username is not None: kwargs["username"] = username if password is not None: kwargs["password"] = password # is an auth explicitly supplied? if auth is not None: # if we're dealing with the default of `include_client_id` (None): # we will assume the `auth` argument is for an RFC compliant server # and we should not send the `client_id` in the body. 
# This approach allows us to still force the client_id by submitting # `include_client_id=True` along with an `auth` object. if include_client_id is None: include_client_id = False # otherwise we may need to create an auth header else: # since we don't have an auth header, we MAY need to create one # it is possible that we want to send the `client_id` in the body # if so, `include_client_id` should be set to True # otherwise, we will generate an auth header if include_client_id is not True: client_id = self.client_id if client_id: log.debug( 'Encoding `client_id` "%s" with `client_secret` ' "as Basic auth credentials.", client_id, ) client_secret = client_secret if client_secret is not None else "" auth = requests.auth.HTTPBasicAuth(client_id, client_secret) if include_client_id: # this was pulled out of the params # it needs to be passed into prepare_request_body if client_secret is not None: kwargs["client_secret"] = client_secret body = self._client.prepare_request_body( code=code, body=body, redirect_uri=self.redirect_uri, include_client_id=include_client_id, **kwargs ) headers = headers or { "Accept": "application/json", "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8", } self.token = {} request_kwargs = {} if method.upper() == "POST": request_kwargs["params" if force_querystring else "data"] = dict( urldecode(body) ) elif method.upper() == "GET": request_kwargs["params"] = dict(urldecode(body)) else: raise ValueError("The method kwarg must be POST or GET.") r = self.request( method=method, url=token_url, timeout=timeout, headers=headers, auth=auth, verify=verify, proxies=proxies, **request_kwargs ) log.debug("Request to fetch token completed with status %s.", r.status_code) log.debug("Request url was %s", r.request.url) log.debug("Request headers were %s", r.request.headers) log.debug("Request body was %s", r.request.body) log.debug("Response headers were %s and content %s.", r.headers, r.text) log.debug( "Invoking %d token response hooks.", 
len(self.compliance_hook["access_token_response"]), ) for hook in self.compliance_hook["access_token_response"]: log.debug("Invoking hook %s.", hook) r = hook(r) self._client.parse_request_body_response(r.text, scope=self.scope) self.token = self._client.token log.debug("Obtained token %s.", self.token) return self.token
[ "def", "fetch_token", "(", "self", ",", "token_url", ",", "code", "=", "None", ",", "authorization_response", "=", "None", ",", "body", "=", "\"\"", ",", "auth", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "method", "=...
Generic method for fetching an access token from the token endpoint. If you are using the MobileApplicationClient you will want to use `token_from_fragment` instead of `fetch_token`. The current implementation enforces the RFC guidelines. :param token_url: Token endpoint URL, must use HTTPS. :param code: Authorization code (used by WebApplicationClients). :param authorization_response: Authorization response URL, the callback URL of the request back to you. Used by WebApplicationClients instead of code. :param body: Optional application/x-www-form-urlencoded body to add the include in the token request. Prefer kwargs over body. :param auth: An auth tuple or method as accepted by `requests`. :param username: Username required by LegacyApplicationClients to appear in the request body. :param password: Password required by LegacyApplicationClients to appear in the request body. :param method: The HTTP method used to make the request. Defaults to POST, but may also be GET. Other methods should be added as needed. :param force_querystring: If True, force the request body to be sent in the querystring instead. :param timeout: Timeout of the request in seconds. :param headers: Dict to default request headers with. :param verify: Verify SSL certificate. :param proxies: The `proxies` argument is passed onto `requests`. :param include_client_id: Should the request body include the `client_id` parameter. Default is `None`, which will attempt to autodetect. This can be forced to always include (True) or never include (False). :param client_secret: The `client_secret` paired to the `client_id`. This is generally required unless provided in the `auth` tuple. If the value is `None`, it will be omitted from the request, however if the value is an empty string, an empty string will be sent. :param kwargs: Extra parameters to include in the token request. :return: A token dict
[ "Generic", "method", "for", "fetching", "an", "access", "token", "from", "the", "token", "endpoint", "." ]
python
valid
inveniosoftware/invenio-migrator
invenio_migrator/legacy/utils.py
https://github.com/inveniosoftware/invenio-migrator/blob/6902c6968a39b747d15e32363f43b7dffe2622c2/invenio_migrator/legacy/utils.py#L60-L68
def init_app_context(): """Initialize app context for Invenio 2.x.""" try: from invenio.base.factory import create_app app = create_app() app.test_request_context('/').push() app.preprocess_request() except ImportError: pass
[ "def", "init_app_context", "(", ")", ":", "try", ":", "from", "invenio", ".", "base", ".", "factory", "import", "create_app", "app", "=", "create_app", "(", ")", "app", ".", "test_request_context", "(", "'/'", ")", ".", "push", "(", ")", "app", ".", "p...
Initialize app context for Invenio 2.x.
[ "Initialize", "app", "context", "for", "Invenio", "2", ".", "x", "." ]
python
test
standage/tag
tag/feature.py
https://github.com/standage/tag/blob/94686adf57115cea1c5235e99299e691f80ba10b/tag/feature.py#L468-L489
def get_attribute(self, attrkey, as_string=False, as_list=False): """ Get the value of an attribute. By default, returns a string for ID and attributes with a single value, and a list of strings for attributes with multiple values. The `as_string` and `as_list` options can be used to force the function to return values as a string (comma-separated in case of multiple values) or a list. """ assert not as_string or not as_list if attrkey not in self._attrs: return None if attrkey == 'ID': return self._attrs[attrkey] attrvalues = list(self._attrs[attrkey]) attrvalues.sort() if len(attrvalues) == 1 and not as_list: return attrvalues[0] elif as_string: return ','.join(attrvalues) return attrvalues
[ "def", "get_attribute", "(", "self", ",", "attrkey", ",", "as_string", "=", "False", ",", "as_list", "=", "False", ")", ":", "assert", "not", "as_string", "or", "not", "as_list", "if", "attrkey", "not", "in", "self", ".", "_attrs", ":", "return", "None",...
Get the value of an attribute. By default, returns a string for ID and attributes with a single value, and a list of strings for attributes with multiple values. The `as_string` and `as_list` options can be used to force the function to return values as a string (comma-separated in case of multiple values) or a list.
[ "Get", "the", "value", "of", "an", "attribute", "." ]
python
train
thomasdelaet/python-velbus
velbus/controller.py
https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/controller.py#L167-L173
def sync_clock(self): """ This will send all the needed messages to sync the cloc """ self.send(velbus.SetRealtimeClock()) self.send(velbus.SetDate()) self.send(velbus.SetDaylightSaving())
[ "def", "sync_clock", "(", "self", ")", ":", "self", ".", "send", "(", "velbus", ".", "SetRealtimeClock", "(", ")", ")", "self", ".", "send", "(", "velbus", ".", "SetDate", "(", ")", ")", "self", ".", "send", "(", "velbus", ".", "SetDaylightSaving", "...
This will send all the needed messages to sync the cloc
[ "This", "will", "send", "all", "the", "needed", "messages", "to", "sync", "the", "cloc" ]
python
train
sorgerlab/indra
indra/sources/biopax/processor.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/biopax/processor.py#L58-L69
def save_model(self, file_name=None): """Save the BioPAX model object in an OWL file. Parameters ---------- file_name : Optional[str] The name of the OWL file to save the model in. """ if file_name is None: logger.error('Missing file name') return pcc.model_to_owl(self.model, file_name)
[ "def", "save_model", "(", "self", ",", "file_name", "=", "None", ")", ":", "if", "file_name", "is", "None", ":", "logger", ".", "error", "(", "'Missing file name'", ")", "return", "pcc", ".", "model_to_owl", "(", "self", ".", "model", ",", "file_name", "...
Save the BioPAX model object in an OWL file. Parameters ---------- file_name : Optional[str] The name of the OWL file to save the model in.
[ "Save", "the", "BioPAX", "model", "object", "in", "an", "OWL", "file", "." ]
python
train
tgbugs/pyontutils
pyontutils/hierarchies.py
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/hierarchies.py#L43-L45
def tcsort(item): # FIXME SUCH WOW SO INEFFICIENT O_O """ get len of transitive closure assume type items is tree... """ return len(item[1]) + sum(tcsort(kv) for kv in item[1].items())
[ "def", "tcsort", "(", "item", ")", ":", "# FIXME SUCH WOW SO INEFFICIENT O_O", "return", "len", "(", "item", "[", "1", "]", ")", "+", "sum", "(", "tcsort", "(", "kv", ")", "for", "kv", "in", "item", "[", "1", "]", ".", "items", "(", ")", ")" ]
get len of transitive closure assume type items is tree...
[ "get", "len", "of", "transitive", "closure", "assume", "type", "items", "is", "tree", "..." ]
python
train
manjitkumar/drf-url-filters
filters/validations.py
https://github.com/manjitkumar/drf-url-filters/blob/ebac358729bcd9aa70537247b2ccd6005f5678c1/filters/validations.py#L13-L32
def IntegerLike(msg=None): ''' Checks whether a value is: - int, or - long, or - float without a fractional part, or - str or unicode composed only of digits ''' def fn(value): if not any([ isinstance(value, numbers.Integral), (isinstance(value, float) and value.is_integer()), (isinstance(value, basestring) and value.isdigit()) ]): raise Invalid(msg or ( 'Invalid input <{0}>; expected an integer'.format(value)) ) else: return value return fn
[ "def", "IntegerLike", "(", "msg", "=", "None", ")", ":", "def", "fn", "(", "value", ")", ":", "if", "not", "any", "(", "[", "isinstance", "(", "value", ",", "numbers", ".", "Integral", ")", ",", "(", "isinstance", "(", "value", ",", "float", ")", ...
Checks whether a value is: - int, or - long, or - float without a fractional part, or - str or unicode composed only of digits
[ "Checks", "whether", "a", "value", "is", ":", "-", "int", "or", "-", "long", "or", "-", "float", "without", "a", "fractional", "part", "or", "-", "str", "or", "unicode", "composed", "only", "of", "digits" ]
python
train
varikin/Tigre
tigre/tigre.py
https://github.com/varikin/Tigre/blob/6ffac1de52f087cf92cbf368997b336c35a0e3c0/tigre/tigre.py#L87-L106
def sync_folder(self, path, bucket): """Syncs a local directory with an S3 bucket. Currently does not delete files from S3 that are not in the local directory. path: The path to the directory to sync to S3 bucket: The name of the bucket on S3 """ bucket = self.conn.get_bucket(bucket) local_files = self._get_local_files(path) s3_files = self._get_s3_files(bucket) for filename, hash in local_files.iteritems(): s3_key = s3_files[filename] if s3_key is None: s3_key = Key(bucket) s3_key.key = filename s3_key.etag = '"!"' if s3_key.etag[1:-1] != hash[0]: s3_key.set_contents_from_filename(join(path, filename), md5=hash)
[ "def", "sync_folder", "(", "self", ",", "path", ",", "bucket", ")", ":", "bucket", "=", "self", ".", "conn", ".", "get_bucket", "(", "bucket", ")", "local_files", "=", "self", ".", "_get_local_files", "(", "path", ")", "s3_files", "=", "self", ".", "_g...
Syncs a local directory with an S3 bucket. Currently does not delete files from S3 that are not in the local directory. path: The path to the directory to sync to S3 bucket: The name of the bucket on S3
[ "Syncs", "a", "local", "directory", "with", "an", "S3", "bucket", ".", "Currently", "does", "not", "delete", "files", "from", "S3", "that", "are", "not", "in", "the", "local", "directory", "." ]
python
test
The-Politico/politico-civic-geography
geography/models/division.py
https://github.com/The-Politico/politico-civic-geography/blob/032b3ee773b50b65cfe672f230dda772df0f89e0/geography/models/division.py#L70-L82
def save(self, *args, **kwargs): """ **uid**: :code:`division:{parentuid}_{levelcode}-{code}` """ slug = "{}:{}".format(self.level.uid, self.code) if self.parent: self.uid = "{}_{}".format(self.parent.uid, slug) else: self.uid = slug self.slug = uuslug( self.name, instance=self, max_length=100, separator="-", start_no=2 ) super(Division, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "slug", "=", "\"{}:{}\"", ".", "format", "(", "self", ".", "level", ".", "uid", ",", "self", ".", "code", ")", "if", "self", ".", "parent", ":", "self", ".", "uid"...
**uid**: :code:`division:{parentuid}_{levelcode}-{code}`
[ "**", "uid", "**", ":", ":", "code", ":", "division", ":", "{", "parentuid", "}", "_", "{", "levelcode", "}", "-", "{", "code", "}" ]
python
train
dade-ai/snipy
snipy/io/fileutil.py
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/io/fileutil.py#L261-L268
def listfolder(p): """ generator of list folder in the path. folders only """ for entry in scandir.scandir(p): if entry.is_dir(): yield entry.name
[ "def", "listfolder", "(", "p", ")", ":", "for", "entry", "in", "scandir", ".", "scandir", "(", "p", ")", ":", "if", "entry", ".", "is_dir", "(", ")", ":", "yield", "entry", ".", "name" ]
generator of list folder in the path. folders only
[ "generator", "of", "list", "folder", "in", "the", "path", ".", "folders", "only" ]
python
valid
gplepage/gvar
examples/pendulum-clock.py
https://github.com/gplepage/gvar/blob/d6671697319eb6280de3793c9a1c2b616c6f2ae0/examples/pendulum-clock.py#L46-L56
def find_period(y, Tapprox): """ Find oscillation period of y(t). Parameter Tapprox is the approximate period. The code finds the time between 0.7 * Tapprox and 1.3 * Tapprox where y(t)[1] = d/dt theta(t) vanishes. This is the period. """ def dtheta_dt(t): """ vanishes when dtheta/dt = 0 """ return y(t)[1] return gv.root.refine(dtheta_dt, (0.7 * Tapprox, 1.3 * Tapprox))
[ "def", "find_period", "(", "y", ",", "Tapprox", ")", ":", "def", "dtheta_dt", "(", "t", ")", ":", "\"\"\" vanishes when dtheta/dt = 0 \"\"\"", "return", "y", "(", "t", ")", "[", "1", "]", "return", "gv", ".", "root", ".", "refine", "(", "dtheta_dt", ",",...
Find oscillation period of y(t). Parameter Tapprox is the approximate period. The code finds the time between 0.7 * Tapprox and 1.3 * Tapprox where y(t)[1] = d/dt theta(t) vanishes. This is the period.
[ "Find", "oscillation", "period", "of", "y", "(", "t", ")", "." ]
python
train
materialsproject/pymatgen
pymatgen/io/abinit/flows.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/flows.py#L633-L646
def iflat_tasks_wti(self, status=None, op="==", nids=None): """ Generator to iterate over all the tasks of the `Flow`. Yields: (task, work_index, task_index) If status is not None, only the tasks whose status satisfies the condition (task.status op status) are selected status can be either one of the flags defined in the :class:`Task` class (e.g Task.S_OK) or a string e.g "S_OK" nids is an optional list of node identifiers used to filter the tasks. """ return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=True)
[ "def", "iflat_tasks_wti", "(", "self", ",", "status", "=", "None", ",", "op", "=", "\"==\"", ",", "nids", "=", "None", ")", ":", "return", "self", ".", "_iflat_tasks_wti", "(", "status", "=", "status", ",", "op", "=", "op", ",", "nids", "=", "nids", ...
Generator to iterate over all the tasks of the `Flow`. Yields: (task, work_index, task_index) If status is not None, only the tasks whose status satisfies the condition (task.status op status) are selected status can be either one of the flags defined in the :class:`Task` class (e.g Task.S_OK) or a string e.g "S_OK" nids is an optional list of node identifiers used to filter the tasks.
[ "Generator", "to", "iterate", "over", "all", "the", "tasks", "of", "the", "Flow", ".", "Yields", ":" ]
python
train
T-002/pycast
pycast/common/matrix.py
https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/common/matrix.py#L236-L251
def to_multi_dim_timeseries(self): """Return a TimeSeries with the values of :py:obj:`self` The index of the row is used for the timestamp :return: Return a new MultiDimensionalTimeSeries with the values of the Matrix :rtype: MultiDimensionalTimeSeries """ ts = MultiDimensionalTimeSeries(dimensions=self.get_width()) for row in xrange(self.get_height()): newEntry = [] for col in xrange(self.get_width()): newEntry.append(self.get_value(col, row)) ts.add_entry(row, newEntry) return ts
[ "def", "to_multi_dim_timeseries", "(", "self", ")", ":", "ts", "=", "MultiDimensionalTimeSeries", "(", "dimensions", "=", "self", ".", "get_width", "(", ")", ")", "for", "row", "in", "xrange", "(", "self", ".", "get_height", "(", ")", ")", ":", "newEntry",...
Return a TimeSeries with the values of :py:obj:`self` The index of the row is used for the timestamp :return: Return a new MultiDimensionalTimeSeries with the values of the Matrix :rtype: MultiDimensionalTimeSeries
[ "Return", "a", "TimeSeries", "with", "the", "values", "of", ":", "py", ":", "obj", ":", "self" ]
python
train
romanz/trezor-agent
libagent/gpg/keyring.py
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L243-L249
def export_public_keys(env=None, sp=subprocess): """Export all GPG public keys.""" args = gpg_command(['--export']) result = check_output(args=args, env=env, sp=sp) if not result: raise KeyError('No GPG public keys found at env: {!r}'.format(env)) return result
[ "def", "export_public_keys", "(", "env", "=", "None", ",", "sp", "=", "subprocess", ")", ":", "args", "=", "gpg_command", "(", "[", "'--export'", "]", ")", "result", "=", "check_output", "(", "args", "=", "args", ",", "env", "=", "env", ",", "sp", "=...
Export all GPG public keys.
[ "Export", "all", "GPG", "public", "keys", "." ]
python
train
PiotrDabkowski/Js2Py
js2py/base.py
https://github.com/PiotrDabkowski/Js2Py/blob/c0fa43f5679cf91ca8986c5747fcb07a433dc584/js2py/base.py#L357-L427
def put(self, prop, val, op=None): #external use! '''Just like in js: self.prop op= val for example when op is '+' it will be self.prop+=val op can be either None for simple assignment or one of: * / % + - << >> & ^ |''' if self.Class == 'Undefined' or self.Class == 'Null': raise MakeError('TypeError', 'Undefined and null dont have properties!') if not isinstance(prop, basestring): prop = prop.to_string().value if NUMPY_AVAILABLE and prop.isdigit(): if self.Class == 'Int8Array': val = Js(numpy.int8(val.to_number().value)) elif self.Class == 'Uint8Array': val = Js(numpy.uint8(val.to_number().value)) elif self.Class == 'Uint8ClampedArray': if val < Js(numpy.uint8(0)): val = Js(numpy.uint8(0)) elif val > Js(numpy.uint8(255)): val = Js(numpy.uint8(255)) else: val = Js(numpy.uint8(val.to_number().value)) elif self.Class == 'Int16Array': val = Js(numpy.int16(val.to_number().value)) elif self.Class == 'Uint16Array': val = Js(numpy.uint16(val.to_number().value)) elif self.Class == 'Int32Array': val = Js(numpy.int32(val.to_number().value)) elif self.Class == 'Uint32Array': val = Js(numpy.uint32(val.to_number().value)) elif self.Class == 'Float32Array': val = Js(numpy.float32(val.to_number().value)) elif self.Class == 'Float64Array': val = Js(numpy.float64(val.to_number().value)) if isinstance(self.buff, numpy.ndarray): self.buff[int(prop)] = int(val.to_number().value) #we need to set the value to the incremented one if op is not None: val = getattr(self.get(prop), OP_METHODS[op])(val) if not self.can_put(prop): return val own_desc = self.get_own_property(prop) if is_data_descriptor(own_desc): if self.Class in [ 'Array', 'Int8Array', 'Uint8Array', 'Uint8ClampedArray', 'Int16Array', 'Uint16Array', 'Int32Array', 'Uint32Array', 'Float32Array', 'Float64Array' ]: self.define_own_property(prop, {'value': val}) else: self.own[prop]['value'] = val return val desc = self.get_property(prop) if is_accessor_descriptor(desc): desc['set'].call(self, (val, )) else: new = { 'value': val, 
'writable': True, 'configurable': True, 'enumerable': True } if self.Class in [ 'Array', 'Int8Array', 'Uint8Array', 'Uint8ClampedArray', 'Int16Array', 'Uint16Array', 'Int32Array', 'Uint32Array', 'Float32Array', 'Float64Array' ]: self.define_own_property(prop, new) else: self.own[prop] = new return val
[ "def", "put", "(", "self", ",", "prop", ",", "val", ",", "op", "=", "None", ")", ":", "#external use!", "if", "self", ".", "Class", "==", "'Undefined'", "or", "self", ".", "Class", "==", "'Null'", ":", "raise", "MakeError", "(", "'TypeError'", ",", "...
Just like in js: self.prop op= val for example when op is '+' it will be self.prop+=val op can be either None for simple assignment or one of: * / % + - << >> & ^ |
[ "Just", "like", "in", "js", ":", "self", ".", "prop", "op", "=", "val", "for", "example", "when", "op", "is", "+", "it", "will", "be", "self", ".", "prop", "+", "=", "val", "op", "can", "be", "either", "None", "for", "simple", "assignment", "or", ...
python
valid
apple/turicreate
src/unity/python/turicreate/toolkits/regression/linear_regression.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/regression/linear_regression.py#L519-L564
def predict(self, dataset, missing_value_action='auto'): """ Return target value predictions for ``dataset``, using the trained linear regression model. This method can be used to get fitted values for the model by inputting the training dataset. Parameters ---------- dataset : SFrame | pandas.Dataframe Dataset of new observations. Must include columns with the same names as the features used for model training, but does not require a target column. Additional columns are ignored. missing_value_action : str, optional Action to perform when missing values are encountered. This can be one of: - 'auto': Default to 'impute' - 'impute': Proceed with evaluation by filling in the missing values with the mean of the training data. Missing values are also imputed if an entire column of data is missing during evaluation. - 'error': Do not proceed with prediction and terminate with an error message. Returns ------- out : SArray Predicted target value for each example (i.e. row) in the dataset. See Also ---------- create, evaluate Examples ---------- >>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv') >>> model = turicreate.linear_regression.create(data, target='price', features=['bath', 'bedroom', 'size']) >>> results = model.predict(data) """ return super(LinearRegression, self).predict(dataset, missing_value_action=missing_value_action)
[ "def", "predict", "(", "self", ",", "dataset", ",", "missing_value_action", "=", "'auto'", ")", ":", "return", "super", "(", "LinearRegression", ",", "self", ")", ".", "predict", "(", "dataset", ",", "missing_value_action", "=", "missing_value_action", ")" ]
Return target value predictions for ``dataset``, using the trained linear regression model. This method can be used to get fitted values for the model by inputting the training dataset. Parameters ---------- dataset : SFrame | pandas.Dataframe Dataset of new observations. Must include columns with the same names as the features used for model training, but does not require a target column. Additional columns are ignored. missing_value_action : str, optional Action to perform when missing values are encountered. This can be one of: - 'auto': Default to 'impute' - 'impute': Proceed with evaluation by filling in the missing values with the mean of the training data. Missing values are also imputed if an entire column of data is missing during evaluation. - 'error': Do not proceed with prediction and terminate with an error message. Returns ------- out : SArray Predicted target value for each example (i.e. row) in the dataset. See Also ---------- create, evaluate Examples ---------- >>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv') >>> model = turicreate.linear_regression.create(data, target='price', features=['bath', 'bedroom', 'size']) >>> results = model.predict(data)
[ "Return", "target", "value", "predictions", "for", "dataset", "using", "the", "trained", "linear", "regression", "model", ".", "This", "method", "can", "be", "used", "to", "get", "fitted", "values", "for", "the", "model", "by", "inputting", "the", "training", ...
python
train
inasafe/inasafe
safe/utilities/metadata.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/metadata.py#L183-L247
def read_iso19115_metadata(layer_uri, keyword=None, version_35=False): """Retrieve keywords from a metadata object :param layer_uri: Uri to layer. :type layer_uri: basestring :param keyword: The key of keyword that want to be read. If None, return all keywords in dictionary. :type keyword: basestring :returns: Dictionary of keywords or value of key as string. :rtype: dict, basestring """ xml_uri = os.path.splitext(layer_uri)[0] + '.xml' # Remove the prefix for local file. For example csv. file_prefix = 'file:' if xml_uri.startswith(file_prefix): xml_uri = xml_uri[len(file_prefix):] if not os.path.exists(xml_uri): xml_uri = None if not xml_uri and os.path.exists(layer_uri): message = 'Layer based file but no xml file.\n' message += 'Layer path: %s.' % layer_uri raise NoKeywordsFoundError(message) if version_35: metadata = GenericLayerMetadata35(layer_uri, xml_uri) else: metadata = GenericLayerMetadata(layer_uri, xml_uri) active_metadata_classes = METADATA_CLASSES if version_35: active_metadata_classes = METADATA_CLASSES35 if metadata.layer_purpose in active_metadata_classes: metadata = active_metadata_classes[ metadata.layer_purpose](layer_uri, xml_uri) # dictionary comprehension keywords = { x[0]: x[1]['value'] for x in list(metadata.dict['properties'].items()) if x[1]['value'] is not None} if 'keyword_version' not in list(keywords.keys()) and xml_uri: message = 'No keyword version found. Metadata xml file is invalid.\n' message += 'Layer uri: %s\n' % layer_uri message += 'Keywords file: %s\n' % os.path.exists( os.path.splitext(layer_uri)[0] + '.xml') message += 'keywords:\n' for k, v in list(keywords.items()): message += '%s: %s\n' % (k, v) raise MetadataReadError(message) # Get dictionary keywords that has value != None keywords = { x[0]: x[1]['value'] for x in list(metadata.dict['properties'].items()) if x[1]['value'] is not None} if keyword: try: return keywords[keyword] except KeyError: message = 'Keyword with key %s is not found. 
' % keyword message += 'Layer path: %s' % layer_uri raise KeywordNotFoundError(message) return keywords
[ "def", "read_iso19115_metadata", "(", "layer_uri", ",", "keyword", "=", "None", ",", "version_35", "=", "False", ")", ":", "xml_uri", "=", "os", ".", "path", ".", "splitext", "(", "layer_uri", ")", "[", "0", "]", "+", "'.xml'", "# Remove the prefix for local...
Retrieve keywords from a metadata object :param layer_uri: Uri to layer. :type layer_uri: basestring :param keyword: The key of keyword that want to be read. If None, return all keywords in dictionary. :type keyword: basestring :returns: Dictionary of keywords or value of key as string. :rtype: dict, basestring
[ "Retrieve", "keywords", "from", "a", "metadata", "object" ]
python
train
open-mmlab/mmcv
mmcv/image/transforms/geometry.py
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/image/transforms/geometry.py#L112-L163
def imcrop(img, bboxes, scale=1.0, pad_fill=None): """Crop image patches. 3 steps: scale the bboxes -> clip bboxes -> crop and pad. Args: img (ndarray): Image to be cropped. bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes. scale (float, optional): Scale ratio of bboxes, the default value 1.0 means no padding. pad_fill (number or list): Value to be filled for padding, None for no padding. Returns: list or ndarray: The cropped image patches. """ chn = 1 if img.ndim == 2 else img.shape[2] if pad_fill is not None: if isinstance(pad_fill, (int, float)): pad_fill = [pad_fill for _ in range(chn)] assert len(pad_fill) == chn _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32) clipped_bbox = bbox_clip(scaled_bboxes, img.shape) patches = [] for i in range(clipped_bbox.shape[0]): x1, y1, x2, y2 = tuple(clipped_bbox[i, :]) if pad_fill is None: patch = img[y1:y2 + 1, x1:x2 + 1, ...] else: _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :]) if chn == 2: patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1) else: patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn) patch = np.array( pad_fill, dtype=img.dtype) * np.ones( patch_shape, dtype=img.dtype) x_start = 0 if _x1 >= 0 else -_x1 y_start = 0 if _y1 >= 0 else -_y1 w = x2 - x1 + 1 h = y2 - y1 + 1 patch[y_start:y_start + h, x_start:x_start + w, ...] = img[y1:y1 + h, x1:x1 + w, ...] patches.append(patch) if bboxes.ndim == 1: return patches[0] else: return patches
[ "def", "imcrop", "(", "img", ",", "bboxes", ",", "scale", "=", "1.0", ",", "pad_fill", "=", "None", ")", ":", "chn", "=", "1", "if", "img", ".", "ndim", "==", "2", "else", "img", ".", "shape", "[", "2", "]", "if", "pad_fill", "is", "not", "None...
Crop image patches. 3 steps: scale the bboxes -> clip bboxes -> crop and pad. Args: img (ndarray): Image to be cropped. bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes. scale (float, optional): Scale ratio of bboxes, the default value 1.0 means no padding. pad_fill (number or list): Value to be filled for padding, None for no padding. Returns: list or ndarray: The cropped image patches.
[ "Crop", "image", "patches", "." ]
python
test
iterative/dvc
dvc/utils/__init__.py
https://github.com/iterative/dvc/blob/8bb21261e34c9632453e09090de7ebe50e38d341/dvc/utils/__init__.py#L83-L105
def dict_filter(d, exclude=[]): """ Exclude specified keys from a nested dict """ if isinstance(d, list): ret = [] for e in d: ret.append(dict_filter(e, exclude)) return ret elif isinstance(d, dict): ret = {} for k, v in d.items(): if isinstance(k, builtin_str): k = str(k) assert isinstance(k, str) if k in exclude: continue ret[k] = dict_filter(v, exclude) return ret return d
[ "def", "dict_filter", "(", "d", ",", "exclude", "=", "[", "]", ")", ":", "if", "isinstance", "(", "d", ",", "list", ")", ":", "ret", "=", "[", "]", "for", "e", "in", "d", ":", "ret", ".", "append", "(", "dict_filter", "(", "e", ",", "exclude", ...
Exclude specified keys from a nested dict
[ "Exclude", "specified", "keys", "from", "a", "nested", "dict" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/inputsplitter.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/inputsplitter.py#L338-L391
def push(self, lines): """Push one or more lines of input. This stores the given lines and returns a status code indicating whether the code forms a complete Python block or not. Any exceptions generated in compilation are swallowed, but if an exception was produced, the method returns True. Parameters ---------- lines : string One or more lines of Python input. Returns ------- is_complete : boolean True if the current input source (the result of the current input plus prior inputs) forms a complete Python execution block. Note that this value is also stored as a private attribute (``_is_complete``), so it can be queried at any time. """ if self.input_mode == 'cell': self.reset() self._store(lines) source = self.source # Before calling _compile(), reset the code object to None so that if an # exception is raised in compilation, we don't mislead by having # inconsistent code/source attributes. self.code, self._is_complete = None, None # Honor termination lines properly if source.rstrip().endswith('\\'): return False self._update_indent(lines) try: self.code = self._compile(source, symbol="exec") # Invalid syntax can produce any of a number of different errors from # inside the compiler, so we have to catch them all. Syntax errors # immediately produce a 'ready' block, so the invalid Python can be # sent to the kernel for evaluation with possible ipython # special-syntax conversion. except (SyntaxError, OverflowError, ValueError, TypeError, MemoryError): self._is_complete = True else: # Compilation didn't produce any exceptions (though it may not have # given a complete code object) self._is_complete = self.code is not None return self._is_complete
[ "def", "push", "(", "self", ",", "lines", ")", ":", "if", "self", ".", "input_mode", "==", "'cell'", ":", "self", ".", "reset", "(", ")", "self", ".", "_store", "(", "lines", ")", "source", "=", "self", ".", "source", "# Before calling _compile(), reset ...
Push one or more lines of input. This stores the given lines and returns a status code indicating whether the code forms a complete Python block or not. Any exceptions generated in compilation are swallowed, but if an exception was produced, the method returns True. Parameters ---------- lines : string One or more lines of Python input. Returns ------- is_complete : boolean True if the current input source (the result of the current input plus prior inputs) forms a complete Python execution block. Note that this value is also stored as a private attribute (``_is_complete``), so it can be queried at any time.
[ "Push", "one", "or", "more", "lines", "of", "input", "." ]
python
test
dhylands/rshell
rshell/main.py
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1080-L1109
def send_file_to_host(src_filename, dst_file, filesize): """Function which runs on the pyboard. Matches up with recv_file_from_remote.""" import sys import ubinascii try: with open(src_filename, 'rb') as src_file: bytes_remaining = filesize if HAS_BUFFER: buf_size = BUFFER_SIZE else: buf_size = BUFFER_SIZE // 2 while bytes_remaining > 0: read_size = min(bytes_remaining, buf_size) buf = src_file.read(read_size) if HAS_BUFFER: sys.stdout.buffer.write(buf) else: sys.stdout.write(ubinascii.hexlify(buf)) bytes_remaining -= read_size # Wait for an ack so we don't get ahead of the remote while True: char = sys.stdin.read(1) if char: if char == '\x06': break # This should only happen if an error occurs sys.stdout.write(char) return True except: return False
[ "def", "send_file_to_host", "(", "src_filename", ",", "dst_file", ",", "filesize", ")", ":", "import", "sys", "import", "ubinascii", "try", ":", "with", "open", "(", "src_filename", ",", "'rb'", ")", "as", "src_file", ":", "bytes_remaining", "=", "filesize", ...
Function which runs on the pyboard. Matches up with recv_file_from_remote.
[ "Function", "which", "runs", "on", "the", "pyboard", ".", "Matches", "up", "with", "recv_file_from_remote", "." ]
python
train
un33k/django-toolware
toolware/templatetags/rounder.py
https://github.com/un33k/django-toolware/blob/973f3e003dc38b812897dab88455bee37dcaf931/toolware/templatetags/rounder.py#L10-L34
def roundplus(number): """ given an number, this fuction rounds the number as the following examples: 87 -> 87, 100 -> 100+, 188 -> 100+, 999 -> 900+, 1001 -> 1000+, ...etc """ num = str(number) if not num.isdigit(): return num num = str(number) digits = len(num) rounded = '100+' if digits < 3: rounded = num elif digits == 3: rounded = num[0] + '00+' elif digits == 4: rounded = num[0] + 'K+' elif digits == 5: rounded = num[:1] + 'K+' else: rounded = '100K+' return rounded
[ "def", "roundplus", "(", "number", ")", ":", "num", "=", "str", "(", "number", ")", "if", "not", "num", ".", "isdigit", "(", ")", ":", "return", "num", "num", "=", "str", "(", "number", ")", "digits", "=", "len", "(", "num", ")", "rounded", "=", ...
given an number, this fuction rounds the number as the following examples: 87 -> 87, 100 -> 100+, 188 -> 100+, 999 -> 900+, 1001 -> 1000+, ...etc
[ "given", "an", "number", "this", "fuction", "rounds", "the", "number", "as", "the", "following", "examples", ":", "87", "-", ">", "87", "100", "-", ">", "100", "+", "188", "-", ">", "100", "+", "999", "-", ">", "900", "+", "1001", "-", ">", "1000...
python
test
sirfoga/pyhal
hal/cvs/gits.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/cvs/gits.py#L135-L147
def get_version(self, diff_to_increase_ratio): """Gets version :param diff_to_increase_ratio: Ratio to convert number of changes into :return: Version of this code, based on commits diffs """ diffs = self.get_diff_amounts() version = Version() for diff in diffs: version.increase_by_changes(diff, diff_to_increase_ratio) return version
[ "def", "get_version", "(", "self", ",", "diff_to_increase_ratio", ")", ":", "diffs", "=", "self", ".", "get_diff_amounts", "(", ")", "version", "=", "Version", "(", ")", "for", "diff", "in", "diffs", ":", "version", ".", "increase_by_changes", "(", "diff", ...
Gets version :param diff_to_increase_ratio: Ratio to convert number of changes into :return: Version of this code, based on commits diffs
[ "Gets", "version" ]
python
train
llllllllll/codetransformer
codetransformer/decompiler/_343.py
https://github.com/llllllllll/codetransformer/blob/c5f551e915df45adc7da7e0b1b635f0cc6a1bb27/codetransformer/decompiler/_343.py#L1344-L1352
def _binop_handler(nodetype): """ Factory function for binary operator handlers. """ def _handler(toplevel, stack_builders): right = make_expr(stack_builders) left = make_expr(stack_builders) return ast.BinOp(left=left, op=nodetype(), right=right) return _handler
[ "def", "_binop_handler", "(", "nodetype", ")", ":", "def", "_handler", "(", "toplevel", ",", "stack_builders", ")", ":", "right", "=", "make_expr", "(", "stack_builders", ")", "left", "=", "make_expr", "(", "stack_builders", ")", "return", "ast", ".", "BinOp...
Factory function for binary operator handlers.
[ "Factory", "function", "for", "binary", "operator", "handlers", "." ]
python
train
spacetelescope/stsci.tools
lib/stsci/tools/convertlog.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/convertlog.py#L53-L112
def convert(input, width=132, output=None, keep=False): """Input ASCII trailer file "input" will be read. The contents will then be written out to a FITS file in the same format as used by 'stwfits' from IRAF. Parameters =========== input : str Filename of input ASCII trailer file width : int Number of characters wide to use for defining output FITS column [Default: 132] output : str Filename to use for writing out converted FITS trailer file If None, input filename will be converted from *.tra -> *_trl.fits [Default: None] keep : bool Specifies whether or not to keep any previously written FITS files [Default: False] """ # open input trailer file trl = open(input) # process all lines lines = np.array([i for text in trl.readlines() for i in textwrap.wrap(text,width=width)]) # close ASCII trailer file now that we have processed all the lines trl.close() if output is None: # create fits file rootname,suffix = os.path.splitext(input) s = suffix[1:].replace('ra','rl') fitsname = "{}_{}{}fits".format(rootname,s,os.path.extsep) else: fitsname = output full_name = os.path.abspath(os.path.join(os.path.curdir,fitsname)) old_file = os.path.exists(full_name) if old_file: if keep: print("ERROR: Trailer file already written out as: {}".format(full_name)) raise IOError else: os.remove(full_name) # Build FITS table and write it out line_fmt = "{}A".format(width) tbhdu = fits.BinTableHDU.from_columns([fits.Column(name='TEXT_FILE',format=line_fmt,array=lines)]) tbhdu.writeto(fitsname) print("Created output FITS filename for trailer:{} {}".format(os.linesep,full_name)) os.remove(input)
[ "def", "convert", "(", "input", ",", "width", "=", "132", ",", "output", "=", "None", ",", "keep", "=", "False", ")", ":", "# open input trailer file", "trl", "=", "open", "(", "input", ")", "# process all lines", "lines", "=", "np", ".", "array", "(", ...
Input ASCII trailer file "input" will be read. The contents will then be written out to a FITS file in the same format as used by 'stwfits' from IRAF. Parameters =========== input : str Filename of input ASCII trailer file width : int Number of characters wide to use for defining output FITS column [Default: 132] output : str Filename to use for writing out converted FITS trailer file If None, input filename will be converted from *.tra -> *_trl.fits [Default: None] keep : bool Specifies whether or not to keep any previously written FITS files [Default: False]
[ "Input", "ASCII", "trailer", "file", "input", "will", "be", "read", "." ]
python
train
crodjer/paster
paster/services.py
https://github.com/crodjer/paster/blob/0cd7230074850ba74e80c740a8bc2502645dd743/paster/services.py#L63-L74
def list_syntax(self): ''' Prints a list of available syntax for the current paste service ''' syntax_list = ['Available syntax for %s:' %(self)] logging.info(syntax_list[0]) for key in self.SYNTAX_DICT.keys(): syntax = '\t%-20s%-30s' %(key, self.SYNTAX_DICT[key]) logging.info(syntax) syntax_list.append(syntax) return syntax_list
[ "def", "list_syntax", "(", "self", ")", ":", "syntax_list", "=", "[", "'Available syntax for %s:'", "%", "(", "self", ")", "]", "logging", ".", "info", "(", "syntax_list", "[", "0", "]", ")", "for", "key", "in", "self", ".", "SYNTAX_DICT", ".", "keys", ...
Prints a list of available syntax for the current paste service
[ "Prints", "a", "list", "of", "available", "syntax", "for", "the", "current", "paste", "service" ]
python
train
OSSOS/MOP
src/ossos/core/ossos/parsers.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/parsers.py#L23-L41
def ossos_release_parser(table=False, data_release=parameters.RELEASE_VERSION): """ extra fun as this is space-separated so using CSV parsers is not an option """ names = ['cl', 'p', 'j', 'k', 'sh', 'object', 'mag', 'e_mag', 'Filt', 'Hsur', 'dist', 'e_dist', 'Nobs', 'time', 'av_xres', 'av_yres', 'max_x', 'max_y', 'a', 'e_a', 'e', 'e_e', 'i', 'e_i', 'Omega', 'e_Omega', 'omega', 'e_omega', 'tperi', 'e_tperi', 'RAdeg', 'DEdeg', 'JD', 'rate']#, 'eff', 'm_lim'] if table: retval = Table.read(parameters.RELEASE_DETECTIONS[data_release], format='ascii', guess=False, delimiter=' ', data_start=0, comment='#', names=names, header_start=None) else: retval = [] with open(data_release, 'r') as detectionsfile: for line in detectionsfile.readlines()[1:]: # first line is column definitions obj = TNO.from_string(line, version=parameters.RELEASE_DETECTIONS[data_release]) retval.append(obj) return retval
[ "def", "ossos_release_parser", "(", "table", "=", "False", ",", "data_release", "=", "parameters", ".", "RELEASE_VERSION", ")", ":", "names", "=", "[", "'cl'", ",", "'p'", ",", "'j'", ",", "'k'", ",", "'sh'", ",", "'object'", ",", "'mag'", ",", "'e_mag'"...
extra fun as this is space-separated so using CSV parsers is not an option
[ "extra", "fun", "as", "this", "is", "space", "-", "separated", "so", "using", "CSV", "parsers", "is", "not", "an", "option" ]
python
train
rbuffat/pyepw
pyepw/epw.py
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L6011-L6039
def relative_humidity(self, value=999): """Corresponds to IDD Field `relative_humidity` Args: value (int): value for IDD Field `relative_humidity` value >= 0 value <= 110 Missing value: 999 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = int(value) except ValueError: raise ValueError('value {} need to be of type int ' 'for field `relative_humidity`'.format(value)) if value < 0: raise ValueError('value need to be greater or equal 0 ' 'for field `relative_humidity`') if value > 110: raise ValueError('value need to be smaller 110 ' 'for field `relative_humidity`') self._relative_humidity = value
[ "def", "relative_humidity", "(", "self", ",", "value", "=", "999", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "int", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type...
Corresponds to IDD Field `relative_humidity` Args: value (int): value for IDD Field `relative_humidity` value >= 0 value <= 110 Missing value: 999 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "relative_humidity" ]
python
train
PythonSanSebastian/docstamp
docstamp/qrcode.py
https://github.com/PythonSanSebastian/docstamp/blob/b43808f2e15351b0b2f0b7eade9c7ef319c9e646/docstamp/qrcode.py#L10-L41
def save_into_qrcode(text, out_filepath, color='', box_size=10, pixel_size=1850): """ Save `text` in a qrcode svg image file. Parameters ---------- text: str The string to be codified in the QR image. out_filepath: str Path to the output file color: str A RGB color expressed in 6 hexadecimal values. box_size: scalar Size of the QR code boxes. """ try: qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=box_size, border=0, ) qr.add_data(text) qr.make(fit=True) except Exception as exc: raise Exception('Error trying to generate QR code ' ' from `vcard_string`: {}'.format(text)) from exc else: img = qr.make_image(image_factory=qrcode.image.svg.SvgPathImage) _ = _qrcode_to_file(img, out_filepath) if color: replace_file_content(out_filepath, 'fill:#000000', 'fill:#{}'.format(color))
[ "def", "save_into_qrcode", "(", "text", ",", "out_filepath", ",", "color", "=", "''", ",", "box_size", "=", "10", ",", "pixel_size", "=", "1850", ")", ":", "try", ":", "qr", "=", "qrcode", ".", "QRCode", "(", "version", "=", "1", ",", "error_correction...
Save `text` in a qrcode svg image file. Parameters ---------- text: str The string to be codified in the QR image. out_filepath: str Path to the output file color: str A RGB color expressed in 6 hexadecimal values. box_size: scalar Size of the QR code boxes.
[ "Save", "text", "in", "a", "qrcode", "svg", "image", "file", "." ]
python
test
renatopp/liac-arff
arff.py
https://github.com/renatopp/liac-arff/blob/6771f4cdd13d0eca74d3ebbaa6290297dd0a381d/arff.py#L968-L976
def encode(self, obj): '''Encodes a given object to an ARFF file. :param obj: the object containing the ARFF information. :return: the ARFF file as an unicode string. ''' data = [row for row in self.iter_encode(obj)] return u'\n'.join(data)
[ "def", "encode", "(", "self", ",", "obj", ")", ":", "data", "=", "[", "row", "for", "row", "in", "self", ".", "iter_encode", "(", "obj", ")", "]", "return", "u'\\n'", ".", "join", "(", "data", ")" ]
Encodes a given object to an ARFF file. :param obj: the object containing the ARFF information. :return: the ARFF file as an unicode string.
[ "Encodes", "a", "given", "object", "to", "an", "ARFF", "file", "." ]
python
train
rkhleics/wagtailmenus
wagtailmenus/models/mixins.py
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/mixins.py#L21-L56
def _get_specified_sub_menu_template_name(self, level): """ Called by get_sub_menu_template(). Iterates through the various ways in which developers can specify potential sub menu templates for a menu, and returns the name of the most suitable template for the ``current_level``. Values are checked in the following order: 1. The ``sub_menu_template`` value passed to the template tag (if provided) 2. The most suitable template from the ``sub_menu_templates`` list passed to the template tag (if provided) 3. The ``sub_menu_template_name`` attribute set on the menu class (if set) 4. The most suitable template from a list of templates set as the ``sub_menu_template_names`` attribute on the menu class (if set) Parameters ---------- level : int The 'current_level' value from the context, indicating the depth of sub menu being rendered as part of a multi-level menu. For sub menus, the value will always be greater than or equal to 2. Returns ------- str or None A template name string (the path to a template in the file system), or None if no template has been 'specified' """ ideal_index = level - 2 return self._option_vals.sub_menu_template_name or \ get_item_by_index_or_last_item( self._option_vals.sub_menu_template_names, ideal_index) or \ self.sub_menu_template_name or \ get_item_by_index_or_last_item( self.sub_menu_template_names, ideal_index)
[ "def", "_get_specified_sub_menu_template_name", "(", "self", ",", "level", ")", ":", "ideal_index", "=", "level", "-", "2", "return", "self", ".", "_option_vals", ".", "sub_menu_template_name", "or", "get_item_by_index_or_last_item", "(", "self", ".", "_option_vals", ...
Called by get_sub_menu_template(). Iterates through the various ways in which developers can specify potential sub menu templates for a menu, and returns the name of the most suitable template for the ``current_level``. Values are checked in the following order: 1. The ``sub_menu_template`` value passed to the template tag (if provided) 2. The most suitable template from the ``sub_menu_templates`` list passed to the template tag (if provided) 3. The ``sub_menu_template_name`` attribute set on the menu class (if set) 4. The most suitable template from a list of templates set as the ``sub_menu_template_names`` attribute on the menu class (if set) Parameters ---------- level : int The 'current_level' value from the context, indicating the depth of sub menu being rendered as part of a multi-level menu. For sub menus, the value will always be greater than or equal to 2. Returns ------- str or None A template name string (the path to a template in the file system), or None if no template has been 'specified'
[ "Called", "by", "get_sub_menu_template", "()", ".", "Iterates", "through", "the", "various", "ways", "in", "which", "developers", "can", "specify", "potential", "sub", "menu", "templates", "for", "a", "menu", "and", "returns", "the", "name", "of", "the", "most...
python
train
gwastro/pycbc
pycbc/workflow/segment.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/segment.py#L1325-L1393
def file_needs_generating(file_path, cp, tags=None): """ This job tests the file location and determines if the file should be generated now or if an error should be raised. This uses the generate_segment_files variable, global to this module, which is described above and in the documentation. Parameters ----------- file_path : path Location of file to check cp : ConfigParser The associated ConfigParser from which the segments-generate-segment-files variable is returned. It is recommended for most applications to use the default option by leaving segments-generate-segment-files blank, which will regenerate all segment files at runtime. Only use this facility if you need it. Choices are * 'always' : DEFAULT: All files will be generated even if they already exist. * 'if_not_present': Files will be generated if they do not already exist. Pre-existing files will be read in and used. * 'error_on_duplicate': Files will be generated if they do not already exist. Pre-existing files will raise a failure. * 'never': Pre-existing files will be read in and used. If no file exists the code will fail. Returns -------- int 1 = Generate the file. 0 = File already exists, use it. Other cases will raise an error. """ if tags is None: tags = [] if cp.has_option_tags("workflow-segments", "segments-generate-segment-files", tags): value = cp.get_opt_tags("workflow-segments", "segments-generate-segment-files", tags) generate_segment_files = value else: generate_segment_files = 'always' # Does the file exist if os.path.isfile(file_path): if generate_segment_files in ['if_not_present', 'never']: return 0 elif generate_segment_files == 'always': err_msg = "File %s already exists. " %(file_path,) err_msg += "Regenerating and overwriting." logging.warn(err_msg) return 1 elif generate_segment_files == 'error_on_duplicate': err_msg = "File %s already exists. " %(file_path,) err_msg += "Refusing to overwrite file and exiting." 
raise ValueError(err_msg) else: err_msg = 'Global variable generate_segment_files must be one of ' err_msg += '"always", "if_not_present", "error_on_duplicate", ' err_msg += '"never". Got %s.' %(generate_segment_files,) raise ValueError(err_msg) else: if generate_segment_files in ['always', 'if_not_present', 'error_on_duplicate']: return 1 elif generate_segment_files == 'never': err_msg = 'File %s does not exist. ' %(file_path,) raise ValueError(err_msg) else: err_msg = 'Global variable generate_segment_files must be one of ' err_msg += '"always", "if_not_present", "error_on_duplicate", ' err_msg += '"never". Got %s.' %(generate_segment_files,) raise ValueError(err_msg)
[ "def", "file_needs_generating", "(", "file_path", ",", "cp", ",", "tags", "=", "None", ")", ":", "if", "tags", "is", "None", ":", "tags", "=", "[", "]", "if", "cp", ".", "has_option_tags", "(", "\"workflow-segments\"", ",", "\"segments-generate-segment-files\"...
This job tests the file location and determines if the file should be generated now or if an error should be raised. This uses the generate_segment_files variable, global to this module, which is described above and in the documentation. Parameters ----------- file_path : path Location of file to check cp : ConfigParser The associated ConfigParser from which the segments-generate-segment-files variable is returned. It is recommended for most applications to use the default option by leaving segments-generate-segment-files blank, which will regenerate all segment files at runtime. Only use this facility if you need it. Choices are * 'always' : DEFAULT: All files will be generated even if they already exist. * 'if_not_present': Files will be generated if they do not already exist. Pre-existing files will be read in and used. * 'error_on_duplicate': Files will be generated if they do not already exist. Pre-existing files will raise a failure. * 'never': Pre-existing files will be read in and used. If no file exists the code will fail. Returns -------- int 1 = Generate the file. 0 = File already exists, use it. Other cases will raise an error.
[ "This", "job", "tests", "the", "file", "location", "and", "determines", "if", "the", "file", "should", "be", "generated", "now", "or", "if", "an", "error", "should", "be", "raised", ".", "This", "uses", "the", "generate_segment_files", "variable", "global", ...
python
train
mushkevych/scheduler
synergy/scheduler/tree.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/tree.py#L78-L106
def _get_next_child_node(self, parent): """ Iterates among children of the given parent and looks for a suitable node to process In case given parent has no suitable nodes, a younger parent will be found and the logic will be repeated for him """ children_keys = list(parent.children) sorted_keys = sorted(children_keys) for key in sorted_keys: node = parent.children[key] if node.job_record is None: self.timetable.assign_job_record(node) return node elif self.should_skip_tree_node(node): continue elif node.job_record.is_active: return node # special case, when all children of the parent node are not suitable for processing new_parent = self._get_next_parent_node(parent) if new_parent is not None: # in case all nodes are processed or blocked - look for next valid parent node return self._get_next_child_node(new_parent) else: # if all valid parents are exploited - return current node process_name = parent.children[sorted_keys[0]].process_name time_qualifier = parent.children[sorted_keys[0]].time_qualifier actual_timeperiod = time_helper.actual_timeperiod(time_qualifier) return self.get_node(process_name, actual_timeperiod)
[ "def", "_get_next_child_node", "(", "self", ",", "parent", ")", ":", "children_keys", "=", "list", "(", "parent", ".", "children", ")", "sorted_keys", "=", "sorted", "(", "children_keys", ")", "for", "key", "in", "sorted_keys", ":", "node", "=", "parent", ...
Iterates among children of the given parent and looks for a suitable node to process In case given parent has no suitable nodes, a younger parent will be found and the logic will be repeated for him
[ "Iterates", "among", "children", "of", "the", "given", "parent", "and", "looks", "for", "a", "suitable", "node", "to", "process", "In", "case", "given", "parent", "has", "no", "suitable", "nodes", "a", "younger", "parent", "will", "be", "found", "and", "th...
python
train
adafruit/Adafruit_CircuitPython_OneWire
adafruit_onewire/bus.py
https://github.com/adafruit/Adafruit_CircuitPython_OneWire/blob/113ca99b9087f7031f0b46a963472ad106520f9b/adafruit_onewire/bus.py#L126-L141
def write(self, buf, *, start=0, end=None): """ Write the bytes from ``buf`` to the device. If ``start`` or ``end`` is provided, then the buffer will be sliced as if ``buffer[start:end]``. This will not cause an allocation like ``buffer[start:end]`` will so it saves memory. :param bytearray buf: buffer containing the bytes to write :param int start: Index to start writing from :param int end: Index to read up to but not include """ if end is None: end = len(buf) for i in range(start, end): self._writebyte(buf[i])
[ "def", "write", "(", "self", ",", "buf", ",", "*", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "if", "end", "is", "None", ":", "end", "=", "len", "(", "buf", ")", "for", "i", "in", "range", "(", "start", ",", "end", ")", ":",...
Write the bytes from ``buf`` to the device. If ``start`` or ``end`` is provided, then the buffer will be sliced as if ``buffer[start:end]``. This will not cause an allocation like ``buffer[start:end]`` will so it saves memory. :param bytearray buf: buffer containing the bytes to write :param int start: Index to start writing from :param int end: Index to read up to but not include
[ "Write", "the", "bytes", "from", "buf", "to", "the", "device", "." ]
python
train
dopefishh/pympi
pympi/Elan.py
https://github.com/dopefishh/pympi/blob/79c747cde45b5ba203ed93154d8c123ac9c3ef56/pympi/Elan.py#L436-L464
def create_gaps_and_overlaps_tier(self, tier1, tier2, tier_name=None, maxlen=-1, fast=False): """Create a tier with the gaps and overlaps of the annotations. For types see :func:`get_gaps_and_overlaps` :param str tier1: Name of the first tier. :param str tier2: Name of the second tier. :param str tier_name: Name of the new tier, if ``None`` the name will be generated. :param int maxlen: Maximum length of gaps (skip longer ones), if ``-1`` no maximum will be used. :param bool fast: Flag for using the fast method. :returns: List of gaps and overlaps of the form: ``[(type, start, end)]``. :raises KeyError: If a tier is non existent. :raises IndexError: If no annotations are available in the tiers. """ if tier_name is None: tier_name = '{}_{}_ftos'.format(tier1, tier2) self.add_tier(tier_name) ftos = [] ftogen = self.get_gaps_and_overlaps2(tier1, tier2, maxlen) if fast\ else self.get_gaps_and_overlaps(tier1, tier2, maxlen) for fto in ftogen: ftos.append(fto) if fto[1]-fto[0] >= 1: self.add_annotation(tier_name, fto[0], fto[1], fto[2]) self.clean_time_slots() return ftos
[ "def", "create_gaps_and_overlaps_tier", "(", "self", ",", "tier1", ",", "tier2", ",", "tier_name", "=", "None", ",", "maxlen", "=", "-", "1", ",", "fast", "=", "False", ")", ":", "if", "tier_name", "is", "None", ":", "tier_name", "=", "'{}_{}_ftos'", "."...
Create a tier with the gaps and overlaps of the annotations. For types see :func:`get_gaps_and_overlaps` :param str tier1: Name of the first tier. :param str tier2: Name of the second tier. :param str tier_name: Name of the new tier, if ``None`` the name will be generated. :param int maxlen: Maximum length of gaps (skip longer ones), if ``-1`` no maximum will be used. :param bool fast: Flag for using the fast method. :returns: List of gaps and overlaps of the form: ``[(type, start, end)]``. :raises KeyError: If a tier is non existent. :raises IndexError: If no annotations are available in the tiers.
[ "Create", "a", "tier", "with", "the", "gaps", "and", "overlaps", "of", "the", "annotations", ".", "For", "types", "see", ":", "func", ":", "get_gaps_and_overlaps" ]
python
test
cisco-sas/kitty
kitty/data/data_manager.py
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/data/data_manager.py#L290-L300
def _create_table(self): ''' create the current table if not exists ''' self._cursor.execute(''' CREATE TABLE IF NOT EXISTS %(name)s ( %(fields)s ) ''' % { 'name': self._name, 'fields': ','.join('%s %s' % (k, v) for (k, v) in self._fields) }) self._connection.commit()
[ "def", "_create_table", "(", "self", ")", ":", "self", ".", "_cursor", ".", "execute", "(", "'''\n CREATE TABLE IF NOT EXISTS %(name)s ( %(fields)s )\n '''", "%", "{", "'name'", ":", "self", ".", "_name", ",", "'fields'", ":", "','", ".", "join", ...
create the current table if not exists
[ "create", "the", "current", "table", "if", "not", "exists" ]
python
train
GNS3/gns3-server
gns3server/controller/compute.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/compute.py#L376-L388
def http_query(self, method, path, data=None, dont_connect=False, **kwargs): """ :param dont_connect: If true do not reconnect if not connected """ if not self._connected and not dont_connect: if self._id == "vm" and not self._controller.gns3vm.running: yield from self._controller.gns3vm.start() yield from self.connect() if not self._connected and not dont_connect: raise ComputeError("Cannot connect to compute '{}' with request {} {}".format(self._name, method, path)) response = yield from self._run_http_query(method, path, data=data, **kwargs) return response
[ "def", "http_query", "(", "self", ",", "method", ",", "path", ",", "data", "=", "None", ",", "dont_connect", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "_connected", "and", "not", "dont_connect", ":", "if", "self", "."...
:param dont_connect: If true do not reconnect if not connected
[ ":", "param", "dont_connect", ":", "If", "true", "do", "not", "reconnect", "if", "not", "connected" ]
python
train
theelous3/asks
asks/request_object.py
https://github.com/theelous3/asks/blob/ea522ea971ecb031d488a6301dc2718516cadcd6/asks/request_object.py#L427-L460
def _dict_to_query(data, params=True, base_query=False): ''' Turns python dicts in to valid body-queries or queries for use directly in the request url. Unlike the stdlib quote() and it's variations, this also works on iterables like lists which are normally not valid. The use of lists in this manner is not a great idea unless the server supports it. Caveat emptor. Returns: Query part of url (or body). ''' query = [] for k, v in data.items(): if v is None: continue if isinstance(v, (str, Number)): query.append('='.join(quote_plus(x) for x in (k, str(v)))) elif isinstance(v, dict): for key in v: query.append('='.join(quote_plus(x) for x in (k, key))) elif hasattr(v, '__iter__'): for elm in v: query.append('='.join(quote_plus(x) for x in (k, quote_plus('+'.join(str(elm).split()))))) if params and query: if not base_query: return requote_uri('?' + '&'.join(query)) else: return requote_uri('&' + '&'.join(query)) return requote_uri('&'.join(query))
[ "def", "_dict_to_query", "(", "data", ",", "params", "=", "True", ",", "base_query", "=", "False", ")", ":", "query", "=", "[", "]", "for", "k", ",", "v", "in", "data", ".", "items", "(", ")", ":", "if", "v", "is", "None", ":", "continue", "if", ...
Turns python dicts in to valid body-queries or queries for use directly in the request url. Unlike the stdlib quote() and it's variations, this also works on iterables like lists which are normally not valid. The use of lists in this manner is not a great idea unless the server supports it. Caveat emptor. Returns: Query part of url (or body).
[ "Turns", "python", "dicts", "in", "to", "valid", "body", "-", "queries", "or", "queries", "for", "use", "directly", "in", "the", "request", "url", ".", "Unlike", "the", "stdlib", "quote", "()", "and", "it", "s", "variations", "this", "also", "works", "on...
python
train
inasafe/inasafe
safe/definitions/utilities.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/definitions/utilities.py#L380-L393
def all_default_fields(): """Helper to retrieve all fields which has default value. :returns: List of default fields. :rtype: list """ default_fields = [] for item in dir(fields): if not item.startswith("__"): var = getattr(definitions, item) if isinstance(var, dict): if var.get('replace_null', False): default_fields.append(var) return default_fields
[ "def", "all_default_fields", "(", ")", ":", "default_fields", "=", "[", "]", "for", "item", "in", "dir", "(", "fields", ")", ":", "if", "not", "item", ".", "startswith", "(", "\"__\"", ")", ":", "var", "=", "getattr", "(", "definitions", ",", "item", ...
Helper to retrieve all fields which has default value. :returns: List of default fields. :rtype: list
[ "Helper", "to", "retrieve", "all", "fields", "which", "has", "default", "value", "." ]
python
train
berkerpeksag/astor
astor/node_util.py
https://github.com/berkerpeksag/astor/blob/d9e893eb49d9eb2e30779680f90cd632c30e0ba1/astor/node_util.py#L61-L92
def dump_tree(node, name=None, initial_indent='', indentation=' ', maxline=120, maxmerged=80, # Runtime optimization iter_node=iter_node, special=ast.AST, list=list, isinstance=isinstance, type=type, len=len): """Dumps an AST or similar structure: - Pretty-prints with indentation - Doesn't print line/column/ctx info """ def dump(node, name=None, indent=''): level = indent + indentation name = name and name + '=' or '' values = list(iter_node(node)) if isinstance(node, list): prefix, suffix = '%s[' % name, ']' elif values: prefix, suffix = '%s%s(' % (name, type(node).__name__), ')' elif isinstance(node, special): prefix, suffix = name + type(node).__name__, '' else: return '%s%s' % (name, repr(node)) node = [dump(a, b, level) for a, b in values if b != 'ctx'] oneline = '%s%s%s' % (prefix, ', '.join(node), suffix) if len(oneline) + len(indent) < maxline: return '%s' % oneline if node and len(prefix) + len(node[0]) < maxmerged: prefix = '%s%s,' % (prefix, node.pop(0)) node = (',\n%s' % level).join(node).lstrip() return '%s\n%s%s%s' % (prefix, level, node, suffix) return dump(node, name, initial_indent)
[ "def", "dump_tree", "(", "node", ",", "name", "=", "None", ",", "initial_indent", "=", "''", ",", "indentation", "=", "' '", ",", "maxline", "=", "120", ",", "maxmerged", "=", "80", ",", "# Runtime optimization", "iter_node", "=", "iter_node", ",", "spe...
Dumps an AST or similar structure: - Pretty-prints with indentation - Doesn't print line/column/ctx info
[ "Dumps", "an", "AST", "or", "similar", "structure", ":" ]
python
train
robdmc/behold
behold/logger.py
https://github.com/robdmc/behold/blob/ac1b7707e2d7472a50d837dda78be1e23af8fce5/behold/logger.py#L519-L528
def is_true(self, item=None): """ If you are filtering on object values, you need to pass that object here. """ if item: values = [item] else: values = [] self._get_item_and_att_names(*values) return self._passes_all
[ "def", "is_true", "(", "self", ",", "item", "=", "None", ")", ":", "if", "item", ":", "values", "=", "[", "item", "]", "else", ":", "values", "=", "[", "]", "self", ".", "_get_item_and_att_names", "(", "*", "values", ")", "return", "self", ".", "_p...
If you are filtering on object values, you need to pass that object here.
[ "If", "you", "are", "filtering", "on", "object", "values", "you", "need", "to", "pass", "that", "object", "here", "." ]
python
train
ansible/tower-cli
tower_cli/resources/role.py
https://github.com/ansible/tower-cli/blob/a2b151fed93c47725018d3034848cb3a1814bed7/tower_cli/resources/role.py#L203-L209
def set_display_columns(self, set_true=[], set_false=[]): """Add or remove columns from the output.""" for i in range(len(self.fields)): if self.fields[i].name in set_true: self.fields[i].display = True elif self.fields[i].name in set_false: self.fields[i].display = False
[ "def", "set_display_columns", "(", "self", ",", "set_true", "=", "[", "]", ",", "set_false", "=", "[", "]", ")", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "fields", ")", ")", ":", "if", "self", ".", "fields", "[", "i", "]", ...
Add or remove columns from the output.
[ "Add", "or", "remove", "columns", "from", "the", "output", "." ]
python
valid