repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
eReuse/utils
ereuse_utils/__init__.py
https://github.com/eReuse/utils/blob/989062e85095ea4e1204523fe0e298cf1046a01c/ereuse_utils/__init__.py#L22-L34
def ensure_utf8(app_name_to_show_on_error: str):
    """Raise OSError unless the system's preferred encoding is UTF-8.

    Python 3 uses the system locale by default but expects it to be
    'utf-8' to work correctly; anything else can break reading and
    writing files and the ``.decode()`` method. An example how to
    'fix' it: ``nano .bash_profile`` and add the following::

        export LC_CTYPE=en_US.UTF-8
        export LC_ALL=en_US.UTF-8
    """
    preferred = locale.getpreferredencoding()
    if preferred.lower() == 'utf-8':
        return
    raise OSError('{} works only in UTF-8, but yours is set at {}'.format(
        app_name_to_show_on_error, preferred))
[ "def", "ensure_utf8", "(", "app_name_to_show_on_error", ":", "str", ")", ":", "encoding", "=", "locale", ".", "getpreferredencoding", "(", ")", "if", "encoding", ".", "lower", "(", ")", "!=", "'utf-8'", ":", "raise", "OSError", "(", "'{} works only in UTF-8, but...
Python3 uses by default the system set, but it expects it to be ‘utf-8’ to work correctly. This can generate problems in reading and writing files and in ``.decode()`` method. An example how to 'fix' it: nano .bash_profile and add the following: export LC_CTYPE=en_US.UTF-8 export LC_ALL=en_US.UTF-8
[ "Python3", "uses", "by", "default", "the", "system", "set", "but", "it", "expects", "it", "to", "be", "‘utf", "-", "8’", "to", "work", "correctly", ".", "This", "can", "generate", "problems", "in", "reading", "and", "writing", "files", "and", "in", ".", ...
python
train
codelv/enaml-native
src/enamlnative/core/block.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/core/block.py#L34-L55
def initialize(self):
    """ A reimplemented initializer.

    This method will add the include objects to the parent of the
    include and ensure that they are initialized.

    """
    super(Block, self).initialize()

    block = self.block
    if block:
        #: This block is setting the content of another block
        #: Remove the existing blocks children
        if self.mode == 'replace':
            #: Clear the blocks children
            #: Bug fix: destroy() unparents each child, mutating
            #: block.children while it is being iterated (which skips
            #: every other child) -- iterate over a snapshot instead.
            for c in list(block.children):
                c.destroy()
        #: Add this blocks children to the other block
        block.insert_children(None, self.children)
    else:
        #: This block is inserting it's children into it's parent
        self.parent.insert_children(self, self.children)
[ "def", "initialize", "(", "self", ")", ":", "super", "(", "Block", ",", "self", ")", ".", "initialize", "(", ")", "block", "=", "self", ".", "block", "if", "block", ":", "#: This block is setting the content of another block", "#: Remove the existing blocks children...
A reimplemented initializer. This method will add the include objects to the parent of the include and ensure that they are initialized.
[ "A", "reimplemented", "initializer", "." ]
python
train
muckamuck/stackility
stackility/command.py
https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/command.py#L108-L127
def list(region, profile):
    """
    List all the CloudFormation stacks in the given region.
    """
    # Resolve the region: prefer the explicit argument, otherwise discover it.
    environment = {'region': region if region else find_myself()}
    if profile:
        environment['profile'] = profile
    ini_data = {'environment': environment}

    # Exit code 0 on success, 1 on failure.
    sys.exit(0 if start_list(ini_data) else 1)
[ "def", "list", "(", "region", ",", "profile", ")", ":", "ini_data", "=", "{", "}", "environment", "=", "{", "}", "if", "region", ":", "environment", "[", "'region'", "]", "=", "region", "else", ":", "environment", "[", "'region'", "]", "=", "find_mysel...
List all the CloudFormation stacks in the given region.
[ "List", "all", "the", "CloudFormation", "stacks", "in", "the", "given", "region", "." ]
python
train
mitsei/dlkit
dlkit/services/repository.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/repository.py#L1862-L1870
def use_comparative_composition_view(self):
    """Pass through to provider CompositionLookupSession.use_comparative_composition_view"""
    # Record the comparative view preference for composition objects.
    self._object_views['composition'] = COMPARATIVE
    # self._get_provider_session('composition_lookup_session') # To make sure the session is tracked
    for provider_session in self._get_provider_sessions():
        # Not every provider session supports composition views; the
        # try/except deliberately tolerates sessions lacking the method.
        try:
            provider_session.use_comparative_composition_view()
        except AttributeError:
            pass
[ "def", "use_comparative_composition_view", "(", "self", ")", ":", "self", ".", "_object_views", "[", "'composition'", "]", "=", "COMPARATIVE", "# self._get_provider_session('composition_lookup_session') # To make sure the session is tracked", "for", "session", "in", "self", "."...
Pass through to provider CompositionLookupSession.use_comparative_composition_view
[ "Pass", "through", "to", "provider", "CompositionLookupSession", ".", "use_comparative_composition_view" ]
python
train
DistrictDataLabs/yellowbrick
yellowbrick/classifier/threshold.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/classifier/threshold.py#L403-L412
def _check_quantiles(self, val):
    """
    Validate the quantiles passed in. Returns the np array if valid.
    """
    # Valid input: exactly three monotonically increasing values, all < 1.
    valid = (
        len(val) == 3
        and is_monotonic(val)
        and np.all(val < 1)
    )
    if not valid:
        raise YellowbrickValueError(
            "quantiles must be a sequence of three "
            "monotonically increasing values less than 1"
        )
    return np.asarray(val)
[ "def", "_check_quantiles", "(", "self", ",", "val", ")", ":", "if", "len", "(", "val", ")", "!=", "3", "or", "not", "is_monotonic", "(", "val", ")", "or", "not", "np", ".", "all", "(", "val", "<", "1", ")", ":", "raise", "YellowbrickValueError", "(...
Validate the quantiles passed in. Returns the np array if valid.
[ "Validate", "the", "quantiles", "passed", "in", ".", "Returns", "the", "np", "array", "if", "valid", "." ]
python
train
kivy/python-for-android
pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/lexer.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/lexer.py#L186-L206
def compile_rules(environment):
    """Compiles all the rules from the environment into a list of rules."""
    escape = re.escape

    # Each rule is (length, token-name, escaped-pattern); length is used
    # only for sorting so longer delimiters win.
    rules = []
    for token_name, delimiter in (('comment', environment.comment_start_string),
                                  ('block', environment.block_start_string),
                                  ('variable', environment.variable_start_string)):
        rules.append((len(delimiter), token_name, escape(delimiter)))

    if environment.line_statement_prefix is not None:
        prefix = environment.line_statement_prefix
        rules.append((len(prefix), 'linestatement',
                      r'^\s*' + escape(prefix)))
    if environment.line_comment_prefix is not None:
        prefix = environment.line_comment_prefix
        rules.append((len(prefix), 'linecomment',
                      r'(?:^|(?<=\S))[^\S\r\n]*' + escape(prefix)))

    # Longest delimiters first, then drop the length used for ordering.
    rules.sort(reverse=True)
    return [rule[1:] for rule in rules]
[ "def", "compile_rules", "(", "environment", ")", ":", "e", "=", "re", ".", "escape", "rules", "=", "[", "(", "len", "(", "environment", ".", "comment_start_string", ")", ",", "'comment'", ",", "e", "(", "environment", ".", "comment_start_string", ")", ")",...
Compiles all the rules from the environment into a list of rules.
[ "Compiles", "all", "the", "rules", "from", "the", "environment", "into", "a", "list", "of", "rules", "." ]
python
train
googleapis/oauth2client
oauth2client/contrib/devshell.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/contrib/devshell.py#L72-L94
def _SendRecv():
    """Communicate with the Developer Shell server socket."""
    port = int(os.getenv(DEVSHELL_ENV, 0))
    if port == 0:
        raise NoDevshellServer()

    conn = socket.socket()
    conn.connect(('localhost', port))

    # Request framing: "<payload-length>\n<payload>".
    payload = CREDENTIAL_INFO_REQUEST_JSON
    request = '{0}\n{1}'.format(len(payload), payload)
    conn.sendall(_helpers._to_bytes(request, encoding='utf-8'))

    # The response header (first 6 bytes) must contain the length line.
    header = conn.recv(6).decode()
    if '\n' not in header:
        raise CommunicationError('saw no newline in the first 6 bytes')

    len_str, json_str = header.split('\n', 1)
    remaining = int(len_str) - len(json_str)
    if remaining > 0:
        # MSG_WAITALL blocks until the rest of the body arrives.
        json_str += conn.recv(remaining, socket.MSG_WAITALL).decode()

    return CredentialInfoResponse(json_str)
[ "def", "_SendRecv", "(", ")", ":", "port", "=", "int", "(", "os", ".", "getenv", "(", "DEVSHELL_ENV", ",", "0", ")", ")", "if", "port", "==", "0", ":", "raise", "NoDevshellServer", "(", ")", "sock", "=", "socket", ".", "socket", "(", ")", "sock", ...
Communicate with the Developer Shell server socket.
[ "Communicate", "with", "the", "Developer", "Shell", "server", "socket", "." ]
python
valid
CiscoUcs/UcsPythonSDK
src/UcsSdk/utils/helper.py
https://github.com/CiscoUcs/UcsPythonSDK/blob/bf6b07d6abeacb922c92b198352eda4eb9e4629b/src/UcsSdk/utils/helper.py#L83-L109
def config_managed_object(p_dn, p_class_id, class_id, mo_config, mo_dn,
                          handle=None, delete=True):
    """Configure the specified MO in UCS Manager.

    :param p_dn: parent MO DN (currently unused)
    :param p_class_id: parent MO class ID (currently unused)
    :param class_id: MO class ID
    :param mo_config: MO configuration parameters
    :param mo_dn: MO DN value (currently unused)
    :param handle: UCS Manager handle object (required)
    :param delete: unused flag, kept for interface compatibility
    :returns: Managed Object
    :raises: UcsOperationError in case of failure.
    """
    if handle is None:
        # Bug fix: this is a module-level function, so the original
        # ``handle = self.handle`` raised NameError. A handle is required;
        # fail with the documented exception type instead.
        raise exception.UcsOperationError(
            'config_managed_object', error='no UCS Manager handle provided')
    try:
        result = handle.AddManagedObject(None, classId=class_id,
                                         params=mo_config,
                                         modifyPresent=True,
                                         dumpXml=YesOrNo.FALSE)
        return result
    except UcsException as ex:
        print(_("Cisco client exception: %(msg)s"), {'msg': ex})
        raise exception.UcsOperationError('config_managed_object', error=ex)
[ "def", "config_managed_object", "(", "p_dn", ",", "p_class_id", ",", "class_id", ",", "mo_config", ",", "mo_dn", ",", "handle", "=", "None", ",", "delete", "=", "True", ")", ":", "if", "handle", "is", "None", ":", "handle", "=", "self", ".", "handle", ...
Configure the specified MO in UCS Manager. :param uuid: MO config :param p_dn: parent MO DN :param p_class_id: parent MO class ID :param class_id: MO class ID :param MO configuration: MO config :param mo_dn: MO DN value :param handle: optional UCS Manager handle object :returns: Managed Object :raises: UcsOperationError in case of failure.
[ "Configure", "the", "specified", "MO", "in", "UCS", "Manager", "." ]
python
train
fusionbox/django-backupdb
backupdb/utils/commands.py
https://github.com/fusionbox/django-backupdb/blob/db4aa73049303245ef0182cda5c76b1dd194cd00/backupdb/utils/commands.py#L59-L74
def get_mysql_args(db_config):
    """
    Returns an array of argument values that will be passed to a `mysql`
    or `mysqldump` process when it is started based on the given database
    configuration.
    """
    # Only options with a truthy configured value end up in the arg list;
    # apply_arg_values handles that filtering.
    option_map = [
        ('--user={0}', db_config.get('USER')),
        ('--password={0}', db_config.get('PASSWORD')),
        ('--host={0}', db_config.get('HOST')),
        ('--port={0}', db_config.get('PORT')),
    ]
    args = apply_arg_values(option_map)
    # The database name is always the final positional argument.
    args.append(db_config['NAME'])
    return args
[ "def", "get_mysql_args", "(", "db_config", ")", ":", "db", "=", "db_config", "[", "'NAME'", "]", "mapping", "=", "[", "(", "'--user={0}'", ",", "db_config", ".", "get", "(", "'USER'", ")", ")", ",", "(", "'--password={0}'", ",", "db_config", ".", "get", ...
Returns an array of argument values that will be passed to a `mysql` or `mysqldump` process when it is started based on the given database configuration.
[ "Returns", "an", "array", "of", "argument", "values", "that", "will", "be", "passed", "to", "a", "mysql", "or", "mysqldump", "process", "when", "it", "is", "started", "based", "on", "the", "given", "database", "configuration", "." ]
python
train
saltstack/salt
salt/cli/support/collector.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L180-L192
def link(self, title, path):
    '''
    Add a static file on the file system.

    :param title: display title for the file in the current section
    :param path: an already-open file object, or a filesystem path to open
    :return:
    '''
    import io

    # Bug fix: the Python 2 ``file`` builtin does not exist on Python 3,
    # so ``isinstance(path, file)`` raised NameError. io.IOBase is the
    # base class of everything returned by open()/fopen() on Python 3.
    # The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
    # pylint: disable=W8470
    if not isinstance(path, io.IOBase):
        path = salt.utils.files.fopen(path)
    self.__current_section.append({title: path})
[ "def", "link", "(", "self", ",", "title", ",", "path", ")", ":", "# The filehandler needs to be explicitly passed here, so PyLint needs to accept that.", "# pylint: disable=W8470", "if", "not", "isinstance", "(", "path", ",", "file", ")", ":", "path", "=", "salt", "."...
Add a static file on the file system. :param title: :param path: :return:
[ "Add", "a", "static", "file", "on", "the", "file", "system", "." ]
python
train
pyviz/holoviews
holoviews/plotting/bokeh/tabular.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/tabular.py#L116-L138
def update_frame(self, key, ranges=None, plot=None):
    """
    Updates an existing plot with data corresponding to the key.
    """
    element = self._get_frame(key)
    # Side effect only: refreshes the cached title div for this key.
    self._get_title_div(key, '12pt')

    # Cache frame object id to skip updating data if unchanged
    previous_id = self.handles.get('previous_id', None)
    current_id = element._plot_id
    self.handles['previous_id'] = current_id
    # static_source means a dynamic plot produced the exact same frame
    # object again, so the data source need not be pushed to.
    self.static_source = (self.dynamic and (current_id == previous_id))
    # Skip the update when nothing can have changed: no element, a fully
    # static plot, an un-triggered stream whose data object is unchanged,
    # or an identical (cached) frame.
    # NOTE(review): element._plot_id is read above, before this
    # element-is-None guard -- confirm _get_frame can actually return None.
    if (element is None or (not self.dynamic and self.static) or
        (self.streaming and self.streaming[0].data is self.current_frame.data
         and not self.streaming[0]._triggering) or self.static_source):
        return

    source = self.handles['source']
    style = self.lookup_options(element, 'style')[self.cyclic_index]
    data, _, style = self.get_data(element, ranges, style)
    # Rebuild the table columns for the new element, then push the data.
    columns = self._get_columns(element, data)
    self.handles['table'].columns = columns
    self._update_datasource(source, data)
[ "def", "update_frame", "(", "self", ",", "key", ",", "ranges", "=", "None", ",", "plot", "=", "None", ")", ":", "element", "=", "self", ".", "_get_frame", "(", "key", ")", "self", ".", "_get_title_div", "(", "key", ",", "'12pt'", ")", "# Cache frame ob...
Updates an existing plot with data corresponding to the key.
[ "Updates", "an", "existing", "plot", "with", "data", "corresponding", "to", "the", "key", "." ]
python
train
Metatab/metapack
metapack/doc.py
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/doc.py#L130-L139
def set_wrappable_term(self, v, term):
    """Set the Root.Description, possibly splitting long descriptions
    across multiple terms. """
    import textwrap

    # Remove every existing instance of the term first.
    for existing in self['Root'].find(term):
        self.remove_term(existing)

    # Re-add the value as one term per wrapped 80-column line.
    for wrapped_line in textwrap.wrap(v, 80):
        self['Root'].new_term(term, wrapped_line)
[ "def", "set_wrappable_term", "(", "self", ",", "v", ",", "term", ")", ":", "import", "textwrap", "for", "t", "in", "self", "[", "'Root'", "]", ".", "find", "(", "term", ")", ":", "self", ".", "remove_term", "(", "t", ")", "for", "l", "in", "textwra...
Set the Root.Description, possibly splitting long descriptions across multiple terms.
[ "Set", "the", "Root", ".", "Description", "possibly", "splitting", "long", "descriptions", "across", "multiple", "terms", "." ]
python
train
xflr6/gsheets
gsheets/api.py
https://github.com/xflr6/gsheets/blob/ca4f1273044704e529c1138e3f942836fc496e1b/gsheets/api.py#L17-L29
def from_files(cls, secrets=None, storage=None, scopes=None, no_webserver=False):
    """Return a spreadsheet collection making OAauth 2.0 credentials.

    Args:
        secrets (str): location of secrets file (default: ``%r``)
        storage (str): location of storage file (default: ``%r``)
        scopes: scope URL(s) or ``'read'`` or ``'write'`` (default: ``%r``)
        no_webserver (bool): URL/code prompt instead of webbrowser auth
    Returns:
        Sheets: new Sheets instance with OAauth 2.0 credentials
    """
    # Obtain credentials from the secrets/storage files, then build the
    # collection around them.
    credentials = oauth2.get_credentials(scopes, secrets, storage, no_webserver)
    return cls(credentials)
[ "def", "from_files", "(", "cls", ",", "secrets", "=", "None", ",", "storage", "=", "None", ",", "scopes", "=", "None", ",", "no_webserver", "=", "False", ")", ":", "creds", "=", "oauth2", ".", "get_credentials", "(", "scopes", ",", "secrets", ",", "sto...
Return a spreadsheet collection making OAauth 2.0 credentials. Args: secrets (str): location of secrets file (default: ``%r``) storage (str): location of storage file (default: ``%r``) scopes: scope URL(s) or ``'read'`` or ``'write'`` (default: ``%r``) no_webserver (bool): URL/code prompt instead of webbrowser auth Returns: Sheets: new Sheets instance with OAauth 2.0 credentials
[ "Return", "a", "spreadsheet", "collection", "making", "OAauth", "2", ".", "0", "credentials", "." ]
python
train
tango-controls/pytango
tango/asyncio_executor.py
https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/asyncio_executor.py#L83-L86
def submit(self, fn, *args, **kwargs):
    """Submit an operation.

    Wraps ``fn(*args, **kwargs)`` in a coroutine and schedules it on this
    executor's event loop, returning a concurrent.futures.Future.
    """
    # Bug fix: asyncio.coroutine was deprecated in Python 3.8 and removed
    # in 3.11; use a native coroutine wrapper with identical semantics.
    async def corofn():
        return fn(*args, **kwargs)
    return run_coroutine_threadsafe(corofn(), self.loop)
[ "def", "submit", "(", "self", ",", "fn", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "corofn", "=", "asyncio", ".", "coroutine", "(", "lambda", ":", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "return", "run_coroutine_thread...
Submit an operation
[ "Submit", "an", "operation" ]
python
train
Becksteinlab/GromacsWrapper
gromacs/fileformats/xvg.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/fileformats/xvg.py#L691-L834
def errorbar(self, **kwargs):
    """errorbar plot for a single time series with errors.

    Set *columns* keyword to select [x, y, dy] or [x, y, dx, dy],
    e.g. ``columns=[0,1,2]``. See :meth:`XVG.plot` for details. Only
    a single timeseries can be plotted and the user needs to select
    the appropriate columns with the *columns* keyword.

    By default, the data are decimated (see :meth:`XVG.plot`) for the
    default of *maxpoints* = 10000 by averaging data in *maxpoints* bins.

    x,y,dx,dy data can plotted with error bars in the x- and y-dimension
    (use *filled* = ``False``).

    For x,y,dy use *filled* = ``True`` to fill the region between y±dy.
    *fill_alpha* determines the transparency of the fill color. *filled* =
    ``False`` will draw lines for the error bars. Additional keywords are
    passed to :func:`pylab.errorbar`.

    By default, the errors are decimated by plotting the 5% and 95%
    percentile of the data in each bin. The percentile can be changed with
    the *percentile* keyword; e.g. *percentile* = 1 will plot the 1% and
    99% perentile (as will *percentile* = 99).

    The *error_method* keyword can be used to compute errors as the root
    mean square sum (*error_method* = "rms") across each bin instead of
    percentiles ("percentile"). The value of the keyword *demean* is
    applied to the decimation of error data alone.

    .. SeeAlso:: :meth:`XVG.plot` lists keywords common to both methods.
    """
    ax = kwargs.pop('ax', None)
    color = kwargs.pop('color', 'black')
    filled = kwargs.pop('filled', True)
    fill_alpha = kwargs.pop('fill_alpha', 0.2)

    kwargs.setdefault('capsize', 0)
    kwargs.setdefault('elinewidth', 1)
    kwargs.setdefault('ecolor', color)
    kwargs.setdefault('alpha', 0.3)
    kwargs.setdefault('fmt', None)

    columns = kwargs.pop('columns', Ellipsis)         # slice for everything
    maxpoints = kwargs.pop('maxpoints', self.maxpoints_default)
    transform = kwargs.pop('transform', lambda x: x)  # default is identity transformation
    method = kwargs.pop('method', "mean")
    if method != "mean":
        raise NotImplementedError("For errors only method == 'mean' is supported.")
    error_method = kwargs.pop('error_method', "percentile")  # can also use 'rms' and 'error'
    percentile = numpy.abs(kwargs.pop('percentile', 95.))
    demean = kwargs.pop('demean', False)

    # order: (decimate/smooth o slice o transform)(array)
    try:
        data = numpy.asarray(transform(self.array))[columns]
    except IndexError:
        raise MissingDataError("columns {0!r} are not suitable to index the transformed array, possibly not eneough data".format(columns))
    if data.shape[-1] == 0:
        raise MissingDataError("There is no data to be plotted.")
    # Row 0-1 of ``a`` hold the mean-decimated x,y; remaining rows hold errors.
    a = numpy.zeros((data.shape[0], maxpoints), dtype=numpy.float64)
    a[0:2] = self.decimate("mean", data[0:2], maxpoints=maxpoints)
    error_data = numpy.vstack((data[0], data[2:]))
    if error_method == "percentile":
        # Normalise so upper_per is always the larger percentile bound.
        if percentile > 50:
            upper_per = percentile
            lower_per = 100 - percentile
        else:
            upper_per = 100 - percentile
            lower_per = percentile
        # demean generally does not make sense with the percentiles (but for analysing
        # the regularised data itself we use this as a flag --- see below!)
        upper = a[2:] = self.decimate("percentile", error_data, maxpoints=maxpoints,
                                      per=upper_per, demean=False)[1:]
        lower = self.decimate("percentile", error_data, maxpoints=maxpoints,
                              per=lower_per, demean=False)[1:]
    else:
        a[2:] = self.decimate(error_method, error_data, maxpoints=maxpoints,
                              demean=demean)[1:]
        lower = None

    # now deal with infs, nans etc AFTER all transformations (needed for plotting across inf/nan)
    ma = numpy.ma.MaskedArray(a, mask=numpy.logical_not(numpy.isfinite(a)))
    if lower is not None:
        mlower = numpy.ma.MaskedArray(lower, mask=numpy.logical_not(numpy.isfinite(lower)))

    # finally plot
    X = ma[0]          # abscissa set separately
    Y = ma[1]
    # Assign error columns: 4 rows means x,y,dx,dy; 3 rows means x,y,dy.
    try:
        kwargs['yerr'] = ma[3]
        kwargs['xerr'] = ma[2]
    except IndexError:
        try:
            kwargs['yerr'] = ma[2]
        except IndexError:
            raise TypeError("Either too few columns selected or data does not have a error column")

    if ax is None:
        ax = plt.gca()
    if filled:
        # can only plot dy
        if error_method == "percentile":
            if demean:
                # signal that we are looking at percentiles of an observable and not error
                y1 = mlower[-1]
                y2 = kwargs['yerr']
            else:
                # percentiles of real errors (>0)
                y1 = Y - mlower[-1]
                y2 = Y + kwargs['yerr']
        else:
            y1 = Y - kwargs['yerr']
            y2 = Y + kwargs['yerr']
        ax.fill_between(X, y1, y2, color=color, alpha=fill_alpha)
    else:
        if error_method == "percentile":
            # errorbars extend to different lengths;
            if demean:
                kwargs['yerr'] = numpy.vstack((mlower[-1], kwargs['yerr']))
            else:
                kwargs['yerr'] = numpy.vstack((Y - mlower[-1], Y + kwargs['yerr']))
            try:
                # xerr only makes sense when the data is a real
                # error so we don't even bother with demean=?
                kwargs['xerr'] = numpy.vstack((X - mlower[0], X + kwargs['xerr']))
            except (KeyError, IndexError):
                pass
        ax.errorbar(X, Y, **kwargs)

    # clean up args for plot
    for kw in "yerr", "xerr", "capsize", "ecolor", "elinewidth", "fmt":
        kwargs.pop(kw, None)
    kwargs['alpha'] = 1.0
    ax.plot(X, Y, color=color, **kwargs)

    return ax
[ "def", "errorbar", "(", "self", ",", "*", "*", "kwargs", ")", ":", "ax", "=", "kwargs", ".", "pop", "(", "'ax'", ",", "None", ")", "color", "=", "kwargs", ".", "pop", "(", "'color'", ",", "'black'", ")", "filled", "=", "kwargs", ".", "pop", "(", ...
errorbar plot for a single time series with errors. Set *columns* keyword to select [x, y, dy] or [x, y, dx, dy], e.g. ``columns=[0,1,2]``. See :meth:`XVG.plot` for details. Only a single timeseries can be plotted and the user needs to select the appropriate columns with the *columns* keyword. By default, the data are decimated (see :meth:`XVG.plot`) for the default of *maxpoints* = 10000 by averaging data in *maxpoints* bins. x,y,dx,dy data can plotted with error bars in the x- and y-dimension (use *filled* = ``False``). For x,y,dy use *filled* = ``True`` to fill the region between y±dy. *fill_alpha* determines the transparency of the fill color. *filled* = ``False`` will draw lines for the error bars. Additional keywords are passed to :func:`pylab.errorbar`. By default, the errors are decimated by plotting the 5% and 95% percentile of the data in each bin. The percentile can be changed with the *percentile* keyword; e.g. *percentile* = 1 will plot the 1% and 99% perentile (as will *percentile* = 99). The *error_method* keyword can be used to compute errors as the root mean square sum (*error_method* = "rms") across each bin instead of percentiles ("percentile"). The value of the keyword *demean* is applied to the decimation of error data alone. .. SeeAlso:: :meth:`XVG.plot` lists keywords common to both methods.
[ "errorbar", "plot", "for", "a", "single", "time", "series", "with", "errors", "." ]
python
valid
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L274-L294
def linear_interpolation(x, xp, fp, **kwargs):
    """Multi-dimensional linear interpolation.

    Returns the multi-dimensional piecewise linear interpolant to a function
    with given discrete data points (xp, fp), evaluated at x.

    Note that *N and *M indicate zero or more dimensions.

    Args:
        x: An array of shape [*N], the x-coordinates of the interpolated values.
        xp: An np.array of shape [D], the x-coordinates of the data points,
            must be increasing.
        fp: An np.array of shape [D, *M], the y-coordinates of the data points.
        **kwargs: Keywords for np.interp.

    Returns:
        An array of shape [*N, *M], the interpolated values.
    """
    # Flatten the trailing dimensions of fp so each column is a 1-D series
    # that np.interp can handle.
    columns = fp.reshape([fp.shape[0], -1]).transpose()
    interpolated = np.stack(
        [np.interp(x, xp, series, **kwargs) for series in columns]
    ).transpose()
    # Restore the trailing [*M] shape and match the original float32 dtype.
    return interpolated.reshape(x.shape[:1] + fp.shape[1:]).astype(np.float32)
[ "def", "linear_interpolation", "(", "x", ",", "xp", ",", "fp", ",", "*", "*", "kwargs", ")", ":", "yp", "=", "fp", ".", "reshape", "(", "[", "fp", ".", "shape", "[", "0", "]", ",", "-", "1", "]", ")", ".", "transpose", "(", ")", "y", "=", "...
Multi-dimensional linear interpolation. Returns the multi-dimensional piecewise linear interpolant to a function with given discrete data points (xp, fp), evaluated at x. Note that *N and *M indicate zero or more dimensions. Args: x: An array of shape [*N], the x-coordinates of the interpolated values. xp: An np.array of shape [D], the x-coordinates of the data points, must be increasing. fp: An np.array of shape [D, *M], the y-coordinates of the data points. **kwargs: Keywords for np.interp. Returns: An array of shape [*N, *M], the interpolated values.
[ "Multi", "-", "dimensional", "linear", "interpolation", "." ]
python
train
Azure/azure-storage-python
azure-storage-queue/azure/storage/queue/sharedaccesssignature.py
https://github.com/Azure/azure-storage-python/blob/52327354b192cbcf6b7905118ec6b5d57fa46275/azure-storage-queue/azure/storage/queue/sharedaccesssignature.py#L31-L81
def generate_queue(self, queue_name, permission=None, expiry=None,
                   start=None, id=None, ip=None, protocol=None):
    '''
    Generates a shared access signature for the queue.
    Use the returned signature with the sas_token parameter of QueueService.

    :param str queue_name:
        Name of queue.
    :param QueuePermissions permission:
        The permissions associated with the shared access signature. The
        user is restricted to operations allowed by the permissions.
        Permissions must be ordered read, add, update, process.
        Required unless an id is given referencing a stored access policy
        which contains this field. This field must be omitted if it has been
        specified in an associated stored access policy.
    :param expiry:
        The time at which the shared access signature becomes invalid.
        Required unless an id is given referencing a stored access policy
        which contains this field. This field must be omitted if it has
        been specified in an associated stored access policy. Azure will always
        convert values to UTC. If a date is passed in without timezone info, it
        is assumed to be UTC.
    :type expiry: datetime or str
    :param start:
        The time at which the shared access signature becomes valid. If
        omitted, start time for this call is assumed to be the time when the
        storage service receives the request. Azure will always convert values
        to UTC. If a date is passed in without timezone info, it is assumed to
        be UTC.
    :type start: datetime or str
    :param str id:
        A unique value up to 64 characters in length that correlates to a
        stored access policy. To create a stored access policy, use
        set_blob_service_properties.
    :param str ip:
        Specifies an IP address or a range of IP addresses from which to accept
        requests. If the IP address from which the request originates does not
        match the IP address or address range specified on the SAS token, the
        request is not authenticated. For example, specifying sip=168.1.5.65 or
        sip=168.1.5.60-168.1.5.70 on the SAS restricts the request to those IP
        addresses.
    :param str protocol:
        Specifies the protocol permitted for a request made. The default value
        is https,http. See :class:`~azure.storage.common.models.Protocol` for
        possible values.
    '''
    sas = _SharedAccessHelper()
    # Common SAS fields: permissions, validity window, ip restriction,
    # protocol, and the storage service version.
    sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
    # Optional reference to a stored access policy.
    sas.add_id(id)
    # Sign over the queue resource path with the account key.
    sas.add_resource_signature(self.account_name, self.account_key, 'queue', queue_name)

    return sas.get_token()
[ "def", "generate_queue", "(", "self", ",", "queue_name", ",", "permission", "=", "None", ",", "expiry", "=", "None", ",", "start", "=", "None", ",", "id", "=", "None", ",", "ip", "=", "None", ",", "protocol", "=", "None", ")", ":", "sas", "=", "_Sh...
Generates a shared access signature for the queue. Use the returned signature with the sas_token parameter of QueueService. :param str queue_name: Name of queue. :param QueuePermissions permission: The permissions associated with the shared access signature. The user is restricted to operations allowed by the permissions. Permissions must be ordered read, add, update, process. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. :param expiry: The time at which the shared access signature becomes invalid. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type expiry: datetime or str :param start: The time at which the shared access signature becomes valid. If omitted, start time for this call is assumed to be the time when the storage service receives the request. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type start: datetime or str :param str id: A unique value up to 64 characters in length that correlates to a stored access policy. To create a stored access policy, use set_blob_service_properties. :param str ip: Specifies an IP address or a range of IP addresses from which to accept requests. If the IP address from which the request originates does not match the IP address or address range specified on the SAS token, the request is not authenticated. For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS restricts the request to those IP addresses. :param str protocol: Specifies the protocol permitted for a request made. The default value is https,http. 
See :class:`~azure.storage.common.models.Protocol` for possible values.
[ "Generates", "a", "shared", "access", "signature", "for", "the", "queue", ".", "Use", "the", "returned", "signature", "with", "the", "sas_token", "parameter", "of", "QueueService", "." ]
python
train
cloudtools/stacker
stacker/actions/graph.py
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/actions/graph.py#L14-L24
def each_step(graph):
    """Returns an iterator that yields each step and it's direct
    dependencies.
    """
    # Walk the topological order back-to-front so dependencies are
    # yielded before the steps that require them.
    for step in reversed(graph.topological_sort()):
        yield step, graph.downstream(step.name)
[ "def", "each_step", "(", "graph", ")", ":", "steps", "=", "graph", ".", "topological_sort", "(", ")", "steps", ".", "reverse", "(", ")", "for", "step", "in", "steps", ":", "deps", "=", "graph", ".", "downstream", "(", "step", ".", "name", ")", "yield...
Returns an iterator that yields each step and it's direct dependencies.
[ "Returns", "an", "iterator", "that", "yields", "each", "step", "and", "it", "s", "direct", "dependencies", "." ]
python
train
saltstack/salt
salt/modules/sysbench.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/sysbench.py#L59-L85
def cpu(): ''' Tests for the CPU performance of minions. CLI Examples: .. code-block:: bash salt '*' sysbench.cpu ''' # Test data max_primes = [500, 1000, 2500, 5000] # Initializing the test variables test_command = 'sysbench --test=cpu --cpu-max-prime={0} run' result = None ret_val = {} # Test beings! for primes in max_primes: key = 'Prime numbers limit: {0}'.format(primes) run_command = test_command.format(primes) result = __salt__['cmd.run'](run_command) ret_val[key] = _parser(result) return ret_val
[ "def", "cpu", "(", ")", ":", "# Test data", "max_primes", "=", "[", "500", ",", "1000", ",", "2500", ",", "5000", "]", "# Initializing the test variables", "test_command", "=", "'sysbench --test=cpu --cpu-max-prime={0} run'", "result", "=", "None", "ret_val", "=", ...
Tests for the CPU performance of minions. CLI Examples: .. code-block:: bash salt '*' sysbench.cpu
[ "Tests", "for", "the", "CPU", "performance", "of", "minions", "." ]
python
train
pandas-dev/pandas
pandas/core/indexes/multi.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1234-L1267
def _hashed_indexing_key(self, key): """ validate and return the hash for the provided key *this is internal for use for the cython routines* Parameters ---------- key : string or tuple Returns ------- np.uint64 Notes ----- we need to stringify if we have mixed levels """ from pandas.core.util.hashing import hash_tuples, hash_tuple if not isinstance(key, tuple): return hash_tuples(key) if not len(key) == self.nlevels: raise KeyError def f(k, stringify): if stringify and not isinstance(k, str): k = str(k) return k key = tuple(f(k, stringify) for k, stringify in zip(key, self._have_mixed_levels)) return hash_tuple(key)
[ "def", "_hashed_indexing_key", "(", "self", ",", "key", ")", ":", "from", "pandas", ".", "core", ".", "util", ".", "hashing", "import", "hash_tuples", ",", "hash_tuple", "if", "not", "isinstance", "(", "key", ",", "tuple", ")", ":", "return", "hash_tuples"...
validate and return the hash for the provided key *this is internal for use for the cython routines* Parameters ---------- key : string or tuple Returns ------- np.uint64 Notes ----- we need to stringify if we have mixed levels
[ "validate", "and", "return", "the", "hash", "for", "the", "provided", "key" ]
python
train
CartoDB/cartoframes
cartoframes/context.py
https://github.com/CartoDB/cartoframes/blob/c94238a545f3dec45963dac3892540942b6f0df8/cartoframes/context.py#L1580-L1601
def _check_query(self, query, style_cols=None): """Checks if query from Layer or QueryLayer is valid""" try: self.sql_client.send( utils.minify_sql(( 'EXPLAIN', 'SELECT', ' {style_cols}{comma}', ' the_geom, the_geom_webmercator', 'FROM ({query}) _wrap;', )).format(query=query, comma=',' if style_cols else '', style_cols=(','.join(style_cols) if style_cols else '')), do_post=False) except Exception as err: raise ValueError(('Layer query `{query}` and/or style column(s) ' '{cols} are not valid: {err}.' '').format(query=query, cols=', '.join(['`{}`'.format(c) for c in style_cols]), err=err))
[ "def", "_check_query", "(", "self", ",", "query", ",", "style_cols", "=", "None", ")", ":", "try", ":", "self", ".", "sql_client", ".", "send", "(", "utils", ".", "minify_sql", "(", "(", "'EXPLAIN'", ",", "'SELECT'", ",", "' {style_cols}{comma}'", ",", ...
Checks if query from Layer or QueryLayer is valid
[ "Checks", "if", "query", "from", "Layer", "or", "QueryLayer", "is", "valid" ]
python
train
DenisCarriere/geocoder
geocoder/geonames.py
https://github.com/DenisCarriere/geocoder/blob/39b9999ec70e61da9fa52fe9fe82a261ad70fa8b/geocoder/geonames.py#L125-L141
def _catch_errors(self, json_response): """ Changed: removed check on number of elements: - totalResultsCount not sytematically returned (e.g in hierarchy) - done in base.py """ status = json_response.get('status') if status: message = status.get('message') value = status.get('value') custom_messages = { 10: 'Invalid credentials', 18: 'Do not use the demo account for your application', } self.error = custom_messages.get(value, message) LOGGER.error("Error %s from JSON %s", self.error, json_response) return self.error
[ "def", "_catch_errors", "(", "self", ",", "json_response", ")", ":", "status", "=", "json_response", ".", "get", "(", "'status'", ")", "if", "status", ":", "message", "=", "status", ".", "get", "(", "'message'", ")", "value", "=", "status", ".", "get", ...
Changed: removed check on number of elements: - totalResultsCount not sytematically returned (e.g in hierarchy) - done in base.py
[ "Changed", ":", "removed", "check", "on", "number", "of", "elements", ":", "-", "totalResultsCount", "not", "sytematically", "returned", "(", "e", ".", "g", "in", "hierarchy", ")", "-", "done", "in", "base", ".", "py" ]
python
train
django-extensions/django-extensions
django_extensions/__init__.py
https://github.com/django-extensions/django-extensions/blob/7e0bef97ea6cb7f9eea5e2528e3a985a83a7b9b8/django_extensions/__init__.py#L5-L15
def get_version(version): """Dynamically calculate the version based on VERSION tuple.""" if len(version) > 2 and version[2] is not None: if isinstance(version[2], int): str_version = "%s.%s.%s" % version[:3] else: str_version = "%s.%s_%s" % version[:3] else: str_version = "%s.%s" % version[:2] return str_version
[ "def", "get_version", "(", "version", ")", ":", "if", "len", "(", "version", ")", ">", "2", "and", "version", "[", "2", "]", "is", "not", "None", ":", "if", "isinstance", "(", "version", "[", "2", "]", ",", "int", ")", ":", "str_version", "=", "\...
Dynamically calculate the version based on VERSION tuple.
[ "Dynamically", "calculate", "the", "version", "based", "on", "VERSION", "tuple", "." ]
python
train
michael-lazar/rtv
rtv/packages/praw/__init__.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L1078-L1088
def get_sticky(self, subreddit, bottom=False): """Return a Submission object for the sticky of the subreddit. :param bottom: Get the top or bottom sticky. If the subreddit has only a single sticky, it is considered the top one. """ url = self.config['sticky'].format(subreddit=six.text_type(subreddit)) param = {'num': 2} if bottom else None return objects.Submission.from_json(self.request_json(url, params=param))
[ "def", "get_sticky", "(", "self", ",", "subreddit", ",", "bottom", "=", "False", ")", ":", "url", "=", "self", ".", "config", "[", "'sticky'", "]", ".", "format", "(", "subreddit", "=", "six", ".", "text_type", "(", "subreddit", ")", ")", "param", "=...
Return a Submission object for the sticky of the subreddit. :param bottom: Get the top or bottom sticky. If the subreddit has only a single sticky, it is considered the top one.
[ "Return", "a", "Submission", "object", "for", "the", "sticky", "of", "the", "subreddit", "." ]
python
train
mitsei/dlkit
dlkit/json_/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L1213-L1223
def get_principal_credit_string_metadata(self): """Gets the metadata for the principal credit string. return: (osid.Metadata) - metadata for the credit string *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template metadata = dict(self._mdata['principal_credit_string']) metadata.update({'existing_string_values': self._my_map['principalCreditString']}) return Metadata(**metadata)
[ "def", "get_principal_credit_string_metadata", "(", "self", ")", ":", "# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template", "metadata", "=", "dict", "(", "self", ".", "_mdata", "[", "'principal_credit_string'", "]", ")", "metadata", ".", "u...
Gets the metadata for the principal credit string. return: (osid.Metadata) - metadata for the credit string *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "metadata", "for", "the", "principal", "credit", "string", "." ]
python
train
geertj/gruvi
lib/gruvi/ssl.py
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/ssl.py#L194-L234
def feed_appdata(self, data, offset=0): """Feed plaintext data into the pipe. Return an (ssldata, offset) tuple. The ssldata element is a list of buffers containing record level data that needs to be sent to the remote SSL instance. The offset is the number of plaintext bytes that were processed, which may be less than the length of data. NOTE: In case of short writes, this call MUST be retried with the SAME buffer passed into the *data* argument (i.e. the ``id()`` must be the same). This is an OpenSSL requirement. A further particularity is that a short write will always have offset == 0, because the _ssl module does not enable partial writes. And even though the offset is zero, there will still be encrypted data in ssldata. """ if self._state == self.S_UNWRAPPED: # pass through data in unwrapped mode return ([data[offset:]] if offset < len(data) else [], len(data)) ssldata = [] view = memoryview(data) while True: self._need_ssldata = False try: if offset < len(view): offset += self._sslobj.write(view[offset:]) except ssl.SSLError as e: # It is not allowed to call write() after unwrap() until the # close_notify is acknowledged. We return the condition to the # caller as a short write. if sslcompat.get_reason(e) == 'PROTOCOL_IS_SHUTDOWN': e.errno = ssl.SSL_ERROR_WANT_READ if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE, ssl.SSL_ERROR_SYSCALL): raise self._need_ssldata = e.errno == ssl.SSL_ERROR_WANT_READ # See if there's any record level data back for us. if self._outgoing.pending: ssldata.append(self._outgoing.read()) if offset == len(view) or self._need_ssldata: break return (ssldata, offset)
[ "def", "feed_appdata", "(", "self", ",", "data", ",", "offset", "=", "0", ")", ":", "if", "self", ".", "_state", "==", "self", ".", "S_UNWRAPPED", ":", "# pass through data in unwrapped mode", "return", "(", "[", "data", "[", "offset", ":", "]", "]", "if...
Feed plaintext data into the pipe. Return an (ssldata, offset) tuple. The ssldata element is a list of buffers containing record level data that needs to be sent to the remote SSL instance. The offset is the number of plaintext bytes that were processed, which may be less than the length of data. NOTE: In case of short writes, this call MUST be retried with the SAME buffer passed into the *data* argument (i.e. the ``id()`` must be the same). This is an OpenSSL requirement. A further particularity is that a short write will always have offset == 0, because the _ssl module does not enable partial writes. And even though the offset is zero, there will still be encrypted data in ssldata.
[ "Feed", "plaintext", "data", "into", "the", "pipe", "." ]
python
train
rca/cmdline
src/cmdline/logconfig.py
https://github.com/rca/cmdline/blob/c01990aa1781c4d435c91c67962fb6ad92b7b579/src/cmdline/logconfig.py#L15-L43
def setup_logging(fail_silently=False): """ Setup logging configuration Finds the most user-facing log config on disk and uses it """ config = None paths = list(get_config_paths(filename='logconfig.yml', reversed=True)) for path in paths: if not os.path.exists(path): continue with open(path, 'rt') as f: config = yaml.safe_load(f.read()) LOG_LEVEL = os.environ.get('LOG_LEVEL') if LOG_LEVEL: config['root']['level'] = LOG_LEVEL.upper() config['handlers']['console']['level'] = LOG_LEVEL.upper() logging.config.dictConfig(config) break else: if not fail_silently: raise LogconfigError('Unable to find logconfig in {}'.format(paths)) return config
[ "def", "setup_logging", "(", "fail_silently", "=", "False", ")", ":", "config", "=", "None", "paths", "=", "list", "(", "get_config_paths", "(", "filename", "=", "'logconfig.yml'", ",", "reversed", "=", "True", ")", ")", "for", "path", "in", "paths", ":", ...
Setup logging configuration Finds the most user-facing log config on disk and uses it
[ "Setup", "logging", "configuration" ]
python
train
mandiant/ioc_writer
ioc_writer/utils/xmlutils.py
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/utils/xmlutils.py#L30-L57
def read_xml(filename): """ Use et to read in a xml file, or string, into a Element object. :param filename: File to parse. :return: lxml._elementTree object or None """ parser = et.XMLParser(remove_blank_text=True) isfile=False try: isfile = os.path.exists(filename) except ValueError as e: if 'path too long for Windows' in str(e): pass else: raise try: if isfile: return et.parse(filename, parser) else: r = et.fromstring(filename, parser) return r.getroottree() except IOError: log.exception('unable to open file [[}]'.format(filename)) except et.XMLSyntaxError: log.exception('unable to parse XML [{}]'.format(filename)) return None return None
[ "def", "read_xml", "(", "filename", ")", ":", "parser", "=", "et", ".", "XMLParser", "(", "remove_blank_text", "=", "True", ")", "isfile", "=", "False", "try", ":", "isfile", "=", "os", ".", "path", ".", "exists", "(", "filename", ")", "except", "Value...
Use et to read in a xml file, or string, into a Element object. :param filename: File to parse. :return: lxml._elementTree object or None
[ "Use", "et", "to", "read", "in", "a", "xml", "file", "or", "string", "into", "a", "Element", "object", "." ]
python
train
saltstack/salt
salt/states/neutron_subnet.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/neutron_subnet.py#L62-L139
def present(name, auth=None, **kwargs): ''' Ensure a subnet exists and is up-to-date name Name of the subnet network_name_or_id The unique name or ID of the attached network. If a non-unique name is supplied, an exception is raised. allocation_pools A list of dictionaries of the start and end addresses for the allocation pools gateway_ip The gateway IP address. dns_nameservers A list of DNS name servers for the subnet. host_routes A list of host route dictionaries for the subnet. ipv6_ra_mode IPv6 Router Advertisement mode. Valid values are: ‘dhcpv6-stateful’, ‘dhcpv6-stateless’, or ‘slaac’. ipv6_address_mode IPv6 address mode. Valid values are: ‘dhcpv6-stateful’, ‘dhcpv6-stateless’, or ‘slaac’. ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} kwargs = __utils__['args.clean_kwargs'](**kwargs) __salt__['neutronng.setup_clouds'](auth) kwargs['subnet_name'] = name subnet = __salt__['neutronng.subnet_get'](name=name) if subnet is None: if __opts__['test']: ret['result'] = None ret['changes'] = kwargs ret['comment'] = 'Subnet will be created.' return ret new_subnet = __salt__['neutronng.subnet_create'](**kwargs) ret['changes'] = new_subnet ret['comment'] = 'Created subnet' return ret changes = __salt__['neutronng.compare_changes'](subnet, **kwargs) if changes: if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes ret['comment'] = 'Project will be updated.' return ret # update_subnet does not support changing cidr, # so we have to delete and recreate the subnet in this case. if 'cidr' in changes or 'tenant_id' in changes: __salt__['neutronng.subnet_delete'](name=name) new_subnet = __salt__['neutronng.subnet_create'](**kwargs) ret['changes'] = new_subnet ret['comment'] = 'Deleted and recreated subnet' return ret __salt__['neutronng.subnet_update'](**kwargs) ret['changes'].update(changes) ret['comment'] = 'Updated subnet' return ret
[ "def", "present", "(", "name", ",", "auth", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "kwargs", "=", ...
Ensure a subnet exists and is up-to-date name Name of the subnet network_name_or_id The unique name or ID of the attached network. If a non-unique name is supplied, an exception is raised. allocation_pools A list of dictionaries of the start and end addresses for the allocation pools gateway_ip The gateway IP address. dns_nameservers A list of DNS name servers for the subnet. host_routes A list of host route dictionaries for the subnet. ipv6_ra_mode IPv6 Router Advertisement mode. Valid values are: ‘dhcpv6-stateful’, ‘dhcpv6-stateless’, or ‘slaac’. ipv6_address_mode IPv6 address mode. Valid values are: ‘dhcpv6-stateful’, ‘dhcpv6-stateless’, or ‘slaac’.
[ "Ensure", "a", "subnet", "exists", "and", "is", "up", "-", "to", "-", "date" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/research/transformer_vae.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_vae.py#L62-L76
def attend(x, source, hparams, name): """Self-attention layer with source as memory antecedent.""" with tf.variable_scope(name): x = tf.squeeze(x, axis=2) if len(source.get_shape()) > 3: source = tf.squeeze(source, axis=2) source = common_attention.add_timing_signal_1d(source) y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), source, None, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout) res = common_layers.layer_postprocess(x, y, hparams) return tf.expand_dims(res, axis=2)
[ "def", "attend", "(", "x", ",", "source", ",", "hparams", ",", "name", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "x", "=", "tf", ".", "squeeze", "(", "x", ",", "axis", "=", "2", ")", "if", "len", "(", "source", ".", ...
Self-attention layer with source as memory antecedent.
[ "Self", "-", "attention", "layer", "with", "source", "as", "memory", "antecedent", "." ]
python
train
cuihantao/andes
andes/variables/varout.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/variables/varout.py#L37-L95
def store(self, t, step): """ Record the state/algeb values at time t to self.vars """ max_cache = int(self.system.tds.config.max_cache) if len(self.vars) >= max_cache > 0: self.dump() self.vars = list() self.t = list() self.k = list() logger.debug( 'varout cache cleared at simulation t = {:g}.'.format( self.system.dae.t)) self._mode = 'a' var_data = matrix([self.system.dae.x, self.system.dae.y]) # ===== This code block is deprecated ===== self.t.append(t) self.k.append(step) self.vars.append(var_data) # ========================================= # clear data cache if written to disk if self.np_nrows >= max_cache > 0: self.dump_np_vars() self.np_vars = np.zeros(self._np_block_shape) self.np_nrows = 0 self.np_t = np.zeros((self._np_block_rows,)) self.np_k = np.zeros((self._np_block_rows,)) logger.debug( 'np_vars cache cleared at simulation t = {:g}.'.format( self.system.dae.t)) self._mode = 'a' # initialize before first-time adding data if self.np_nrows == 0: self.np_ncols = len(var_data) self._np_block_shape = (self._np_block_rows, self.np_ncols) self.np_vars = np.zeros(self._np_block_shape) self.np_t = np.zeros((self._np_block_rows,)) self.np_k = np.zeros((self._np_block_rows,)) # adding data to the matrix # self.np_vars[self.np_nrows, 0] = t self.np_t[self.np_nrows] = t self.np_k[self.np_nrows] = step self.np_vars[self.np_nrows, :] = np.array(var_data).reshape((-1)) self.np_nrows += 1 # check if matrix extension is needed if self.np_nrows >= self.np_vars.shape[0]: self.np_vars = np.concatenate([self.np_vars, np.zeros(self._np_block_shape)], axis=0) self.np_t = np.concatenate([self.np_t, np.zeros((self._np_block_rows,))], axis=0) self.np_k = np.concatenate([self.np_k, np.zeros((self._np_block_rows,))], axis=0) # remove the post-computed variables from the variable list if self.system.tds.config.compute_flows: self.system.dae.y = self.system.dae.y[:self.system.dae.m]
[ "def", "store", "(", "self", ",", "t", ",", "step", ")", ":", "max_cache", "=", "int", "(", "self", ".", "system", ".", "tds", ".", "config", ".", "max_cache", ")", "if", "len", "(", "self", ".", "vars", ")", ">=", "max_cache", ">", "0", ":", "...
Record the state/algeb values at time t to self.vars
[ "Record", "the", "state", "/", "algeb", "values", "at", "time", "t", "to", "self", ".", "vars" ]
python
train
aliyun/aliyun-odps-python-sdk
odps/df/expr/window.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/expr/window.py#L375-L391
def cumcount(expr, sort=None, ascending=True, unique=False, preceding=None, following=None): """ Calculate cumulative count of a sequence expression. :param expr: expression for calculation :param sort: name of the sort column :param ascending: whether to sort in ascending order :param unique: whether to eliminate duplicate entries :param preceding: the start point of a window :param following: the end point of a window :return: calculated column """ data_type = types.int64 return _cumulative_op(expr, CumCount, sort=sort, ascending=ascending, unique=unique, preceding=preceding, following=following, data_type=data_type)
[ "def", "cumcount", "(", "expr", ",", "sort", "=", "None", ",", "ascending", "=", "True", ",", "unique", "=", "False", ",", "preceding", "=", "None", ",", "following", "=", "None", ")", ":", "data_type", "=", "types", ".", "int64", "return", "_cumulativ...
Calculate cumulative count of a sequence expression. :param expr: expression for calculation :param sort: name of the sort column :param ascending: whether to sort in ascending order :param unique: whether to eliminate duplicate entries :param preceding: the start point of a window :param following: the end point of a window :return: calculated column
[ "Calculate", "cumulative", "count", "of", "a", "sequence", "expression", "." ]
python
train
kislyuk/aegea
aegea/packages/github3/orgs.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/orgs.py#L289-L305
def add_repo(self, repo, team): """Add ``repo`` to ``team``. .. note:: This method is of complexity O(n). This iterates over all teams in your organization and only adds the repo when the team name matches the team parameter above. If you want constant time, you should retrieve the team and call ``add_repo`` on that team directly. :param str repo: (required), form: 'user/repo' :param str team: (required), team name """ for t in self.iter_teams(): if team == t.name: return t.add_repo(repo) return False
[ "def", "add_repo", "(", "self", ",", "repo", ",", "team", ")", ":", "for", "t", "in", "self", ".", "iter_teams", "(", ")", ":", "if", "team", "==", "t", ".", "name", ":", "return", "t", ".", "add_repo", "(", "repo", ")", "return", "False" ]
Add ``repo`` to ``team``. .. note:: This method is of complexity O(n). This iterates over all teams in your organization and only adds the repo when the team name matches the team parameter above. If you want constant time, you should retrieve the team and call ``add_repo`` on that team directly. :param str repo: (required), form: 'user/repo' :param str team: (required), team name
[ "Add", "repo", "to", "team", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/orm_inspect.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/orm_inspect.py#L127-L133
def from_attrdict(cls, attrdict: OrderedNamespace) -> object: """ Builds a new instance of the ORM object from values in an attrdict. """ dictionary = attrdict.__dict__ # noinspection PyArgumentList return cls(**dictionary)
[ "def", "from_attrdict", "(", "cls", ",", "attrdict", ":", "OrderedNamespace", ")", "->", "object", ":", "dictionary", "=", "attrdict", ".", "__dict__", "# noinspection PyArgumentList", "return", "cls", "(", "*", "*", "dictionary", ")" ]
Builds a new instance of the ORM object from values in an attrdict.
[ "Builds", "a", "new", "instance", "of", "the", "ORM", "object", "from", "values", "in", "an", "attrdict", "." ]
python
train
praekelt/django-moderator
moderator/admin.py
https://github.com/praekelt/django-moderator/blob/72f1d5259128ff5a1a0341d4a573bfd561ba4665/moderator/admin.py#L38-L53
def formfield_for_foreignkey(self, db_field, request=None, **kwargs): """ Limit canned reply options to those with same site as comment. """ field = super(CommentReplyAdmin, self).\ formfield_for_foreignkey(db_field, request, **kwargs) comment_id = request.GET.get(self.fk_name, None) if db_field.name == 'canned_reply' and comment_id: comment_id = comment_id.split(',') comment_sites = Comment.objects.filter(id__in=comment_id)\ .values('site')\ .distinct() field.queryset = field.queryset.filter(Q(site__in=comment_sites) | Q(site__isnull=True)) return field
[ "def", "formfield_for_foreignkey", "(", "self", ",", "db_field", ",", "request", "=", "None", ",", "*", "*", "kwargs", ")", ":", "field", "=", "super", "(", "CommentReplyAdmin", ",", "self", ")", ".", "formfield_for_foreignkey", "(", "db_field", ",", "reques...
Limit canned reply options to those with same site as comment.
[ "Limit", "canned", "reply", "options", "to", "those", "with", "same", "site", "as", "comment", "." ]
python
train
inveniosoftware/invenio-admin
invenio_admin/ext.py
https://github.com/inveniosoftware/invenio-admin/blob/b5ff8f7de66d1d6b67efc9f81ff094eb2428f969/invenio_admin/ext.py#L53-L84
def load_entry_point_group(self, entry_point_group): """Load administration interface from entry point group. :param str entry_point_group: Name of the entry point group. """ for ep in pkg_resources.iter_entry_points(group=entry_point_group): admin_ep = dict(ep.load()) keys = tuple( k in admin_ep for k in ('model', 'modelview', 'view_class')) if keys == (False, False, True): self.register_view( admin_ep.pop('view_class'), *admin_ep.pop('args', []), **admin_ep.pop('kwargs', {}) ) elif keys == (True, True, False): warnings.warn( 'Usage of model and modelview kwargs are deprecated in ' 'favor of view_class, args and kwargs.', PendingDeprecationWarning ) self.register_view( admin_ep.pop('modelview'), admin_ep.pop('model'), admin_ep.pop('session', db.session), **admin_ep ) else: raise Exception( 'Admin entry point dictionary must contain ' 'either "view_class" OR "model" and "modelview" keys.')
[ "def", "load_entry_point_group", "(", "self", ",", "entry_point_group", ")", ":", "for", "ep", "in", "pkg_resources", ".", "iter_entry_points", "(", "group", "=", "entry_point_group", ")", ":", "admin_ep", "=", "dict", "(", "ep", ".", "load", "(", ")", ")", ...
Load administration interface from entry point group. :param str entry_point_group: Name of the entry point group.
[ "Load", "administration", "interface", "from", "entry", "point", "group", "." ]
python
train
saltstack/salt
salt/modules/k8s.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/k8s.py#L295-L339
def label_absent(name, node=None, apiserver_url=None): ''' .. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Get salt minion ID node = _guess_node_id(node) # Try to get kubernetes master apiserver_url = _guess_apiserver(apiserver_url) if apiserver_url is None: return False # Get all labels old_labels = _get_labels(node, apiserver_url) # Prepare a temp labels dict labels = dict([(key, value) for key, value in old_labels.items() if key != name]) # Compare old labels and what we want if labels == old_labels: # Label already absent ret['comment'] = "Label {0} already absent".format(name) else: # Label needs to be delete res = _set_labels(node, apiserver_url, labels) if res.get('status') == 409: # there is an update during operation, need to retry log.debug("Got 409, will try later") ret['changes'] = {} ret['comment'] = "Could not delete label {0}, please retry".format(name) else: ret['changes'] = {"deleted": name} ret['comment'] = "Label {0} absent".format(name) return ret
[ "def", "label_absent", "(", "name", ",", "node", "=", "None", ",", "apiserver_url", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "# G...
.. versionadded:: 2016.3.0 Delete label to the current node CLI Example: .. code-block:: bash salt '*' k8s.label_absent hw/disktype salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local
[ "..", "versionadded", "::", "2016", ".", "3", ".", "0" ]
python
train
RI-imaging/qpformat
qpformat/file_formats/__init__.py
https://github.com/RI-imaging/qpformat/blob/364e29d7d9e8b9f1d7a4a25c753d1baf9d73d5eb/qpformat/file_formats/__init__.py#L168-L173
def get_qpimage_raw(self, idx): """Return QPImage without background correction""" ds = self._get_dataset(idx) qpi = ds.get_qpimage_raw() qpi["identifier"] = self.get_identifier(idx) return qpi
[ "def", "get_qpimage_raw", "(", "self", ",", "idx", ")", ":", "ds", "=", "self", ".", "_get_dataset", "(", "idx", ")", "qpi", "=", "ds", ".", "get_qpimage_raw", "(", ")", "qpi", "[", "\"identifier\"", "]", "=", "self", ".", "get_identifier", "(", "idx",...
Return QPImage without background correction
[ "Return", "QPImage", "without", "background", "correction" ]
python
train
Stranger6667/postmarker
postmarker/logging.py
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/logging.py#L11-L24
def get_logger(name, verbosity, stream): """ Returns simple console logger. """ logger = logging.getLogger(name) logger.setLevel( {0: DEFAULT_LOGGING_LEVEL, 1: logging.INFO, 2: logging.DEBUG}.get(min(2, verbosity), DEFAULT_LOGGING_LEVEL) ) logger.handlers = [] handler = logging.StreamHandler(stream) handler.setLevel(logging.DEBUG) handler.setFormatter(logging.Formatter(LOG_FORMAT)) logger.addHandler(handler) return logger
[ "def", "get_logger", "(", "name", ",", "verbosity", ",", "stream", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "logger", ".", "setLevel", "(", "{", "0", ":", "DEFAULT_LOGGING_LEVEL", ",", "1", ":", "logging", ".", "INFO", ",...
Returns simple console logger.
[ "Returns", "simple", "console", "logger", "." ]
python
train
sepandhaghighi/pycm
pycm/pycm_overall_func.py
https://github.com/sepandhaghighi/pycm/blob/cb03258afd6a821d10acba73c965aaac174bedcd/pycm/pycm_overall_func.py#L136-L168
def convex_combination(classes, TP, TOP, P, class_name, modified=False): """ Calculate Overall_CEN coefficient. :param classes: classes :type classes : list :param TP: true Positive Dict For All Classes :type TP : dict :param TOP: test outcome positive :type TOP : dict :param P: condition positive :type P : dict :param class_name: reviewed class name :type class_name : any valid type :param modified : modified mode flag :type modified : bool :return: coefficient as float """ try: class_number = len(classes) alpha = 1 if class_number == 2: alpha = 0 matrix_sum = sum(list(TOP.values())) TP_sum = sum(list(TP.values())) up = TOP[class_name] + P[class_name] down = 2 * matrix_sum if modified: down -= (alpha * TP_sum) up -= TP[class_name] return up / down except Exception: return "None"
[ "def", "convex_combination", "(", "classes", ",", "TP", ",", "TOP", ",", "P", ",", "class_name", ",", "modified", "=", "False", ")", ":", "try", ":", "class_number", "=", "len", "(", "classes", ")", "alpha", "=", "1", "if", "class_number", "==", "2", ...
Calculate Overall_CEN coefficient. :param classes: classes :type classes : list :param TP: true Positive Dict For All Classes :type TP : dict :param TOP: test outcome positive :type TOP : dict :param P: condition positive :type P : dict :param class_name: reviewed class name :type class_name : any valid type :param modified : modified mode flag :type modified : bool :return: coefficient as float
[ "Calculate", "Overall_CEN", "coefficient", "." ]
python
train
Microsoft/nni
examples/trials/weight_sharing/ga_squad/util.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/trials/weight_sharing/ga_squad/util.py#L46-L52
def dropout(tensor, drop_prob, is_training): ''' Dropout except test. ''' if not is_training: return tensor return tf.nn.dropout(tensor, 1.0 - drop_prob)
[ "def", "dropout", "(", "tensor", ",", "drop_prob", ",", "is_training", ")", ":", "if", "not", "is_training", ":", "return", "tensor", "return", "tf", ".", "nn", ".", "dropout", "(", "tensor", ",", "1.0", "-", "drop_prob", ")" ]
Dropout except test.
[ "Dropout", "except", "test", "." ]
python
train
pycontribs/pyrax
pyrax/base_identity.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/base_identity.py#L411-L420
def set_credentials(self, username, password=None, region=None, tenant_id=None, authenticate=False): """Sets the username and password directly.""" self.username = username self.password = password self.tenant_id = tenant_id if region: self.region = region if authenticate: self.authenticate()
[ "def", "set_credentials", "(", "self", ",", "username", ",", "password", "=", "None", ",", "region", "=", "None", ",", "tenant_id", "=", "None", ",", "authenticate", "=", "False", ")", ":", "self", ".", "username", "=", "username", "self", ".", "password...
Sets the username and password directly.
[ "Sets", "the", "username", "and", "password", "directly", "." ]
python
train
anjishnu/ask-alexa-pykit
examples/twitter/twitter.py
https://github.com/anjishnu/ask-alexa-pykit/blob/a47c278ca7a60532bbe1a9b789f6c37e609fea8b/examples/twitter/twitter.py#L350-L357
def geo_search(user_id, search_location): """ Search for a location - free form """ url = "https://api.twitter.com/1.1/geo/search.json" params = {"query" : search_location } response = make_twitter_request(url, user_id, params).json() return response
[ "def", "geo_search", "(", "user_id", ",", "search_location", ")", ":", "url", "=", "\"https://api.twitter.com/1.1/geo/search.json\"", "params", "=", "{", "\"query\"", ":", "search_location", "}", "response", "=", "make_twitter_request", "(", "url", ",", "user_id", "...
Search for a location - free form
[ "Search", "for", "a", "location", "-", "free", "form" ]
python
train
diux-dev/ncluster
ncluster/backend.py
https://github.com/diux-dev/ncluster/blob/2fd359621896717197b479c7174d06d80df1529b/ncluster/backend.py#L111-L133
def wait_for_file(self, fn: str, max_wait_sec: int = 3600 * 24 * 365, check_interval: float = 0.02) -> bool: """ Waits for file maximum of max_wait_sec. Returns True if file was detected within specified max_wait_sec Args: fn: filename on task machine max_wait_sec: how long to wait in seconds check_interval: how often to check in seconds Returns: False if waiting was was cut short by max_wait_sec limit, True otherwise """ print("Waiting for file", fn) start_time = time.time() while True: if time.time() - start_time > max_wait_sec: util.log(f"Timeout exceeded ({max_wait_sec} sec) for {fn}") return False if not self.exists(fn): time.sleep(check_interval) continue else: break return True
[ "def", "wait_for_file", "(", "self", ",", "fn", ":", "str", ",", "max_wait_sec", ":", "int", "=", "3600", "*", "24", "*", "365", ",", "check_interval", ":", "float", "=", "0.02", ")", "->", "bool", ":", "print", "(", "\"Waiting for file\"", ",", "fn", ...
Waits for file maximum of max_wait_sec. Returns True if file was detected within specified max_wait_sec Args: fn: filename on task machine max_wait_sec: how long to wait in seconds check_interval: how often to check in seconds Returns: False if waiting was was cut short by max_wait_sec limit, True otherwise
[ "Waits", "for", "file", "maximum", "of", "max_wait_sec", ".", "Returns", "True", "if", "file", "was", "detected", "within", "specified", "max_wait_sec", "Args", ":", "fn", ":", "filename", "on", "task", "machine", "max_wait_sec", ":", "how", "long", "to", "w...
python
train
vovanec/httputil
httputil/httputil.py
https://github.com/vovanec/httputil/blob/0b8dab5a23166cceb7dbc2a1dbc802ab5b311347/httputil/httputil.py#L157-L180
def to_chunks(stream_or_generator): """This generator function receives file-like or generator as input and returns generator. :param file|__generator[bytes] stream_or_generator: readable stream or generator. :rtype: __generator[bytes] :raise: TypeError """ if isinstance(stream_or_generator, types.GeneratorType): yield from stream_or_generator elif hasattr(stream_or_generator, 'read'): while True: chunk = stream_or_generator.read(CHUNK_SIZE) if not chunk: break # no more data yield chunk else: raise TypeError('Input must be either readable or generator.')
[ "def", "to_chunks", "(", "stream_or_generator", ")", ":", "if", "isinstance", "(", "stream_or_generator", ",", "types", ".", "GeneratorType", ")", ":", "yield", "from", "stream_or_generator", "elif", "hasattr", "(", "stream_or_generator", ",", "'read'", ")", ":", ...
This generator function receives file-like or generator as input and returns generator. :param file|__generator[bytes] stream_or_generator: readable stream or generator. :rtype: __generator[bytes] :raise: TypeError
[ "This", "generator", "function", "receives", "file", "-", "like", "or", "generator", "as", "input", "and", "returns", "generator", "." ]
python
train
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/core_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L3351-L3374
def connect_patch_namespaced_service_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501 """connect_patch_namespaced_service_proxy_with_path # noqa: E501 connect PATCH requests to proxy of Service # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_patch_namespaced_service_proxy_with_path(name, namespace, path, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ServiceProxyOptions (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str path: path to the resource (required) :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy. :return: str If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.connect_patch_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501 else: (data) = self.connect_patch_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501 return data
[ "def", "connect_patch_namespaced_service_proxy_with_path", "(", "self", ",", "name", ",", "namespace", ",", "path", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", ...
connect_patch_namespaced_service_proxy_with_path # noqa: E501 connect PATCH requests to proxy of Service # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_patch_namespaced_service_proxy_with_path(name, namespace, path, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ServiceProxyOptions (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str path: path to the resource (required) :param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy. :return: str If the method is called asynchronously, returns the request thread.
[ "connect_patch_namespaced_service_proxy_with_path", "#", "noqa", ":", "E501" ]
python
train
aleju/imgaug
imgaug/augmenters/contrast.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmenters/contrast.py#L41-L105
def adjust_contrast_gamma(arr, gamma): """ Adjust contrast by scaling each pixel value to ``255 * ((I_ij/255)**gamma)``. dtype support:: * ``uint8``: yes; fully tested (1) (2) (3) * ``uint16``: yes; tested (2) (3) * ``uint32``: yes; tested (2) (3) * ``uint64``: yes; tested (2) (3) (4) * ``int8``: limited; tested (2) (3) (5) * ``int16``: limited; tested (2) (3) (5) * ``int32``: limited; tested (2) (3) (5) * ``int64``: limited; tested (2) (3) (4) (5) * ``float16``: limited; tested (5) * ``float32``: limited; tested (5) * ``float64``: limited; tested (5) * ``float128``: no (6) * ``bool``: no (7) - (1) Handled by ``cv2``. Other dtypes are handled by ``skimage``. - (2) Normalization is done as ``I_ij/max``, where ``max`` is the maximum value of the dtype, e.g. 255 for ``uint8``. The normalization is reversed afterwards, e.g. ``result*255`` for ``uint8``. - (3) Integer-like values are not rounded after applying the contrast adjustment equation (before inverting the normalization to 0.0-1.0 space), i.e. projection from continuous space to discrete happens according to floor function. - (4) Note that scikit-image doc says that integers are converted to ``float64`` values before applying the contrast normalization method. This might lead to inaccuracies for large 64bit integer values. Tests showed no indication of that happening though. - (5) Must not contain negative values. Values >=0 are fully supported. - (6) Leads to error in scikit-image. - (7) Does not make sense for contrast adjustments. Parameters ---------- arr : numpy.ndarray Array for which to adjust the contrast. Dtype ``uint8`` is fastest. gamma : number Exponent for the contrast adjustment. Higher values darken the image. Returns ------- numpy.ndarray Array with adjusted contrast. 
""" # int8 is also possible according to docs # https://docs.opencv.org/3.0-beta/modules/core/doc/operations_on_arrays.html#cv2.LUT , but here it seemed # like `d` was 0 for CV_8S, causing that to fail if arr.dtype.name == "uint8": min_value, _center_value, max_value = iadt.get_value_range_of_dtype(arr.dtype) dynamic_range = max_value - min_value value_range = np.linspace(0, 1.0, num=dynamic_range+1, dtype=np.float32) # 255 * ((I_ij/255)**gamma) # using np.float32(.) here still works when the input is a numpy array of size 1 table = (min_value + (value_range ** np.float32(gamma)) * dynamic_range) arr_aug = cv2.LUT(arr, np.clip(table, min_value, max_value).astype(arr.dtype)) if arr.ndim == 3 and arr_aug.ndim == 2: return arr_aug[..., np.newaxis] return arr_aug else: return ski_exposure.adjust_gamma(arr, gamma)
[ "def", "adjust_contrast_gamma", "(", "arr", ",", "gamma", ")", ":", "# int8 is also possible according to docs", "# https://docs.opencv.org/3.0-beta/modules/core/doc/operations_on_arrays.html#cv2.LUT , but here it seemed", "# like `d` was 0 for CV_8S, causing that to fail", "if", "arr", "....
Adjust contrast by scaling each pixel value to ``255 * ((I_ij/255)**gamma)``. dtype support:: * ``uint8``: yes; fully tested (1) (2) (3) * ``uint16``: yes; tested (2) (3) * ``uint32``: yes; tested (2) (3) * ``uint64``: yes; tested (2) (3) (4) * ``int8``: limited; tested (2) (3) (5) * ``int16``: limited; tested (2) (3) (5) * ``int32``: limited; tested (2) (3) (5) * ``int64``: limited; tested (2) (3) (4) (5) * ``float16``: limited; tested (5) * ``float32``: limited; tested (5) * ``float64``: limited; tested (5) * ``float128``: no (6) * ``bool``: no (7) - (1) Handled by ``cv2``. Other dtypes are handled by ``skimage``. - (2) Normalization is done as ``I_ij/max``, where ``max`` is the maximum value of the dtype, e.g. 255 for ``uint8``. The normalization is reversed afterwards, e.g. ``result*255`` for ``uint8``. - (3) Integer-like values are not rounded after applying the contrast adjustment equation (before inverting the normalization to 0.0-1.0 space), i.e. projection from continuous space to discrete happens according to floor function. - (4) Note that scikit-image doc says that integers are converted to ``float64`` values before applying the contrast normalization method. This might lead to inaccuracies for large 64bit integer values. Tests showed no indication of that happening though. - (5) Must not contain negative values. Values >=0 are fully supported. - (6) Leads to error in scikit-image. - (7) Does not make sense for contrast adjustments. Parameters ---------- arr : numpy.ndarray Array for which to adjust the contrast. Dtype ``uint8`` is fastest. gamma : number Exponent for the contrast adjustment. Higher values darken the image. Returns ------- numpy.ndarray Array with adjusted contrast.
[ "Adjust", "contrast", "by", "scaling", "each", "pixel", "value", "to", "255", "*", "((", "I_ij", "/", "255", ")", "**", "gamma", ")", "." ]
python
valid
Hironsan/anago
anago/utils.py
https://github.com/Hironsan/anago/blob/66a97f91c41f9613b736892e9762dccb9c28f623/anago/utils.py#L27-L75
def load_data_and_labels(filename, encoding='utf-8'): """Loads data and label from a file. Args: filename (str): path to the file. encoding (str): file encoding format. The file format is tab-separated values. A blank line is required at the end of a sentence. For example: ``` EU B-ORG rejects O German B-MISC call O to O boycott O British B-MISC lamb O . O Peter B-PER Blackburn I-PER ... ``` Returns: tuple(numpy array, numpy array): data and labels. Example: >>> filename = 'conll2003/en/ner/train.txt' >>> data, labels = load_data_and_labels(filename) """ sents, labels = [], [] words, tags = [], [] with open(filename, encoding=encoding) as f: for line in f: line = line.rstrip() if line: word, tag = line.split('\t') words.append(word) tags.append(tag) else: sents.append(words) labels.append(tags) words, tags = [], [] return sents, labels
[ "def", "load_data_and_labels", "(", "filename", ",", "encoding", "=", "'utf-8'", ")", ":", "sents", ",", "labels", "=", "[", "]", ",", "[", "]", "words", ",", "tags", "=", "[", "]", ",", "[", "]", "with", "open", "(", "filename", ",", "encoding", "...
Loads data and label from a file. Args: filename (str): path to the file. encoding (str): file encoding format. The file format is tab-separated values. A blank line is required at the end of a sentence. For example: ``` EU B-ORG rejects O German B-MISC call O to O boycott O British B-MISC lamb O . O Peter B-PER Blackburn I-PER ... ``` Returns: tuple(numpy array, numpy array): data and labels. Example: >>> filename = 'conll2003/en/ner/train.txt' >>> data, labels = load_data_and_labels(filename)
[ "Loads", "data", "and", "label", "from", "a", "file", "." ]
python
train
toumorokoshi/transmute-core
transmute_core/contenttype_serializers/serializer_set.py
https://github.com/toumorokoshi/transmute-core/blob/a2c26625d5d8bab37e00038f9d615a26167fc7f4/transmute_core/contenttype_serializers/serializer_set.py#L41-L52
def keys(self): """ return a list of the content types this set supports. this is not a complete list: serializers can accept more than one content type. However, it is a good representation of the class of content types supported. """ return_value = [] for s in self.serializers: return_value += s.content_type return return_value
[ "def", "keys", "(", "self", ")", ":", "return_value", "=", "[", "]", "for", "s", "in", "self", ".", "serializers", ":", "return_value", "+=", "s", ".", "content_type", "return", "return_value" ]
return a list of the content types this set supports. this is not a complete list: serializers can accept more than one content type. However, it is a good representation of the class of content types supported.
[ "return", "a", "list", "of", "the", "content", "types", "this", "set", "supports", "." ]
python
train
ribozz/sphinx-argparse
sphinxarg/markdown.py
https://github.com/ribozz/sphinx-argparse/blob/178672cd5c846440ff7ecd695e3708feea13e4b4/sphinxarg/markdown.py#L199-L211
def section(node): """ A section in reStructuredText, which needs a title (the first child) This is a custom type """ title = '' # All sections need an id if node.first_child is not None: if node.first_child.t == u'heading': title = node.first_child.first_child.literal o = nodes.section(ids=[title], names=[title]) for n in MarkDown(node): o += n return o
[ "def", "section", "(", "node", ")", ":", "title", "=", "''", "# All sections need an id", "if", "node", ".", "first_child", "is", "not", "None", ":", "if", "node", ".", "first_child", ".", "t", "==", "u'heading'", ":", "title", "=", "node", ".", "first_c...
A section in reStructuredText, which needs a title (the first child) This is a custom type
[ "A", "section", "in", "reStructuredText", "which", "needs", "a", "title", "(", "the", "first", "child", ")", "This", "is", "a", "custom", "type" ]
python
train
CivicSpleen/ambry
ambry/identity.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/identity.py#L459-L472
def sub_path(self): """The path of the partition source, excluding the bundle path parts. Includes the revision. """ try: return os.path.join(*(self._local_parts())) except TypeError as e: raise TypeError( "Path failed for partition {} : {}".format( self.name, e.message))
[ "def", "sub_path", "(", "self", ")", ":", "try", ":", "return", "os", ".", "path", ".", "join", "(", "*", "(", "self", ".", "_local_parts", "(", ")", ")", ")", "except", "TypeError", "as", "e", ":", "raise", "TypeError", "(", "\"Path failed for partiti...
The path of the partition source, excluding the bundle path parts. Includes the revision.
[ "The", "path", "of", "the", "partition", "source", "excluding", "the", "bundle", "path", "parts", "." ]
python
train
PyFilesystem/pyfilesystem2
fs/copy.py
https://github.com/PyFilesystem/pyfilesystem2/blob/047f3593f297d1442194cda3da7a7335bcc9c14a/fs/copy.py#L50-L80
def copy_fs_if_newer( src_fs, # type: Union[FS, Text] dst_fs, # type: Union[FS, Text] walker=None, # type: Optional[Walker] on_copy=None, # type: Optional[_OnCopy] workers=0, # type: int ): # type: (...) -> None """Copy the contents of one filesystem to another, checking times. If both source and destination files exist, the copy is executed only if the source file is newer than the destination file. In case modification times of source or destination files are not available, copy file is always executed. Arguments: src_fs (FS or str): Source filesystem (URL or instance). dst_fs (FS or str): Destination filesystem (URL or instance). walker (~fs.walk.Walker, optional): A walker object that will be used to scan for files in ``src_fs``. Set this if you only want to consider a sub-set of the resources in ``src_fs``. on_copy (callable):A function callback called after a single file copy is executed. Expected signature is ``(src_fs, src_path, dst_fs, dst_path)``. workers (int): Use ``worker`` threads to copy data, or ``0`` (default) for a single-threaded copy. """ return copy_dir_if_newer( src_fs, "/", dst_fs, "/", walker=walker, on_copy=on_copy, workers=workers )
[ "def", "copy_fs_if_newer", "(", "src_fs", ",", "# type: Union[FS, Text]", "dst_fs", ",", "# type: Union[FS, Text]", "walker", "=", "None", ",", "# type: Optional[Walker]", "on_copy", "=", "None", ",", "# type: Optional[_OnCopy]", "workers", "=", "0", ",", "# type: int",...
Copy the contents of one filesystem to another, checking times. If both source and destination files exist, the copy is executed only if the source file is newer than the destination file. In case modification times of source or destination files are not available, copy file is always executed. Arguments: src_fs (FS or str): Source filesystem (URL or instance). dst_fs (FS or str): Destination filesystem (URL or instance). walker (~fs.walk.Walker, optional): A walker object that will be used to scan for files in ``src_fs``. Set this if you only want to consider a sub-set of the resources in ``src_fs``. on_copy (callable):A function callback called after a single file copy is executed. Expected signature is ``(src_fs, src_path, dst_fs, dst_path)``. workers (int): Use ``worker`` threads to copy data, or ``0`` (default) for a single-threaded copy.
[ "Copy", "the", "contents", "of", "one", "filesystem", "to", "another", "checking", "times", "." ]
python
train
yahoo/TensorFlowOnSpark
examples/cifar10/cifar10.py
https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/cifar10/cifar10.py#L142-L160
def distorted_inputs(): """Construct distorted input for CIFAR training using the Reader ops. Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size. Raises: ValueError: If no data_dir """ if not FLAGS.data_dir: raise ValueError('Please supply a data_dir') data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin') images, labels = cifar10_input.distorted_inputs(data_dir=data_dir, batch_size=FLAGS.batch_size) if FLAGS.use_fp16: images = tf.cast(images, tf.float16) labels = tf.cast(labels, tf.float16) return images, labels
[ "def", "distorted_inputs", "(", ")", ":", "if", "not", "FLAGS", ".", "data_dir", ":", "raise", "ValueError", "(", "'Please supply a data_dir'", ")", "data_dir", "=", "os", ".", "path", ".", "join", "(", "FLAGS", ".", "data_dir", ",", "'cifar-10-batches-bin'", ...
Construct distorted input for CIFAR training using the Reader ops. Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size. Raises: ValueError: If no data_dir
[ "Construct", "distorted", "input", "for", "CIFAR", "training", "using", "the", "Reader", "ops", "." ]
python
train
blue-yonder/tsfresh
tsfresh/feature_extraction/feature_calculators.py
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L727-L737
def count_below_mean(x): """ Returns the number of values in x that are lower than the mean of x :param x: the time series to calculate the feature of :type x: numpy.ndarray :return: the value of this feature :return type: float """ m = np.mean(x) return np.where(x < m)[0].size
[ "def", "count_below_mean", "(", "x", ")", ":", "m", "=", "np", ".", "mean", "(", "x", ")", "return", "np", ".", "where", "(", "x", "<", "m", ")", "[", "0", "]", ".", "size" ]
Returns the number of values in x that are lower than the mean of x :param x: the time series to calculate the feature of :type x: numpy.ndarray :return: the value of this feature :return type: float
[ "Returns", "the", "number", "of", "values", "in", "x", "that", "are", "lower", "than", "the", "mean", "of", "x" ]
python
train
pywbem/pywbem
pywbem/_listener.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/_listener.py#L244-L405
def do_POST(self): """ This method will be called for each POST request to one of the listener ports. It parses the CIM-XML export message and delivers the contained CIM indication to the stored listener object. """ # Accept header check described in DSP0200 accept = self.headers.get('Accept', 'text/xml') if accept not in ('text/xml', 'application/xml', '*/*'): self.send_http_error( 406, 'header-mismatch', _format("Invalid Accept header value: {0} (need text/xml, " "application/xml or */*)", accept)) return # Accept-Charset header check described in DSP0200 accept_charset = self.headers.get('Accept-Charset', 'UTF-8') tq_list = re.findall(TOKEN_QUALITY_FINDALL_PATTERN, accept_charset) found = False if tq_list is not None: for token, quality in tq_list: if token.lower() in ('utf-8', '*'): found = True break if not found: self.send_http_error( 406, 'header-mismatch', _format("Invalid Accept-Charset header value: {0} " "(need UTF-8 or *)", accept_charset)) return # Accept-Encoding header check described in DSP0200 accept_encoding = self.headers.get('Accept-Encoding', 'Identity') tq_list = re.findall(TOKEN_QUALITY_FINDALL_PATTERN, accept_encoding) identity_acceptable = False identity_found = False if tq_list is not None: for token, quality in tq_list: quality = 1 if quality == '' else float(quality) if token.lower() == 'identity': identity_found = True if quality > 0: identity_acceptable = True break if not identity_found: for token, quality in tq_list: quality = 1 if quality == '' else float(quality) if token == '*' and quality > 0: identity_acceptable = True break if not identity_acceptable: self.send_http_error( 406, 'header-mismatch', _format("Invalid Accept-Encoding header value: {0} " "(need Identity to be acceptable)", accept_encoding)) return # Accept-Language header check described in DSP0200. # Ignored, because this WBEM listener does not support multiple # languages, and hence any language is allowed to be returned. 
# Accept-Range header check described in DSP0200 accept_range = self.headers.get('Accept-Range', None) if accept_range is not None: self.send_http_error( 406, 'header-mismatch', _format("Accept-Range header is not permitted {0}", accept_range)) return # Content-Type header check described in DSP0200 content_type = self.headers.get('Content-Type', None) if content_type is None: self.send_http_error( 406, 'header-mismatch', "Content-Type header is required") return tc_list = re.findall(TOKEN_CHARSET_FINDALL_PATTERN, content_type) found = False if tc_list is not None: for token, charset in tc_list: if token.lower() in ('text/xml', 'application/xml') and \ (charset == '' or charset.lower() == 'utf-8'): found = True break if not found: self.send_http_error( 406, 'header-mismatch', _format("Invalid Content-Type header value: {0} " "(need text/xml or application/xml with " "charset=utf-8 or empty)", content_type)) return # Content-Encoding header check described in DSP0200 content_encoding = self.headers.get('Content-Encoding', 'identity') if content_encoding.lower() != 'identity': self.send_http_error( 406, 'header-mismatch', _format("Invalid Content-Encoding header value: {0}" "(listener supports only identity)", content_encoding)) return # Content-Language header check described in DSP0200. # Ignored, because this WBEM listener does not support multiple # languages, and hence any language is allowed in the request. # The following headers are ignored. They are not allowed to be used # by servers, but listeners are not required to reject them: # Content-Range, Expires, If-Range, Range. 
# Start processing the request content_len = int(self.headers.get('Content-Length', 0)) body = self.rfile.read(content_len) try: msgid, methodname, params = self.parse_export_request(body) except (CIMXMLParseError, XMLParseError) as exc: self.send_http_error(400, "request-not-well-formed", str(exc)) return except VersionError as exc: if str(exc).startswith("DTD"): self.send_http_error(400, "unsupported-dtd-version", str(exc)) elif str(exc).startswith("Protocol"): self.send_http_error(400, "unsupported-protocol-version", str(exc)) else: self.send_http_error(400, "unsupported-version", str(exc)) return if methodname == 'ExportIndication': if len(params) != 1 or 'NewIndication' not in params: self.send_error_response( msgid, methodname, CIM_ERR_INVALID_PARAMETER, _format("Expecting one parameter NewIndication, got {0!A}", params.keys())) return indication_inst = params['NewIndication'] if not isinstance(indication_inst, CIMInstance): self.send_error_response( msgid, methodname, CIM_ERR_INVALID_PARAMETER, _format("NewIndication parameter is not a CIM instance, " "but {0!A}", indication_inst)) return # server.listener created in WBEMListener.start function self.server.listener.deliver_indication(indication_inst, self.client_address[0]) self.send_success_response(msgid, methodname) else: self.send_error_response( msgid, methodname, CIM_ERR_NOT_SUPPORTED, _format("Unknown export method: {0!A}", methodname))
[ "def", "do_POST", "(", "self", ")", ":", "# Accept header check described in DSP0200", "accept", "=", "self", ".", "headers", ".", "get", "(", "'Accept'", ",", "'text/xml'", ")", "if", "accept", "not", "in", "(", "'text/xml'", ",", "'application/xml'", ",", "'...
This method will be called for each POST request to one of the listener ports. It parses the CIM-XML export message and delivers the contained CIM indication to the stored listener object.
[ "This", "method", "will", "be", "called", "for", "each", "POST", "request", "to", "one", "of", "the", "listener", "ports", "." ]
python
train
apache/airflow
airflow/contrib/hooks/gcs_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L382-L445
def create_bucket(self, bucket_name, resource=None, storage_class='MULTI_REGIONAL', location='US', project_id=None, labels=None ): """ Creates a new bucket. Google Cloud Storage uses a flat namespace, so you can't create a bucket with a name that is already in use. .. seealso:: For more information, see Bucket Naming Guidelines: https://cloud.google.com/storage/docs/bucketnaming.html#requirements :param bucket_name: The name of the bucket. :type bucket_name: str :param resource: An optional dict with parameters for creating the bucket. For information on available parameters, see Cloud Storage API doc: https://cloud.google.com/storage/docs/json_api/v1/buckets/insert :type resource: dict :param storage_class: This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include - ``MULTI_REGIONAL`` - ``REGIONAL`` - ``STANDARD`` - ``NEARLINE`` - ``COLDLINE``. If this value is not specified when the bucket is created, it will default to STANDARD. :type storage_class: str :param location: The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. .. seealso:: https://developers.google.com/storage/docs/bucket-locations :type location: str :param project_id: The ID of the GCP Project. :type project_id: str :param labels: User-provided labels, in key/value pairs. :type labels: dict :return: If successful, it returns the ``id`` of the bucket. """ self.log.info('Creating Bucket: %s; Location: %s; Storage Class: %s', bucket_name, location, storage_class) client = self.get_conn() bucket = client.bucket(bucket_name=bucket_name) bucket_resource = resource or {} for item in bucket_resource: if item != "name": bucket._patch_property(name=item, value=resource[item]) bucket.storage_class = storage_class bucket.labels = labels or {} bucket.create(project=project_id, location=location) return bucket.id
[ "def", "create_bucket", "(", "self", ",", "bucket_name", ",", "resource", "=", "None", ",", "storage_class", "=", "'MULTI_REGIONAL'", ",", "location", "=", "'US'", ",", "project_id", "=", "None", ",", "labels", "=", "None", ")", ":", "self", ".", "log", ...
Creates a new bucket. Google Cloud Storage uses a flat namespace, so you can't create a bucket with a name that is already in use. .. seealso:: For more information, see Bucket Naming Guidelines: https://cloud.google.com/storage/docs/bucketnaming.html#requirements :param bucket_name: The name of the bucket. :type bucket_name: str :param resource: An optional dict with parameters for creating the bucket. For information on available parameters, see Cloud Storage API doc: https://cloud.google.com/storage/docs/json_api/v1/buckets/insert :type resource: dict :param storage_class: This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include - ``MULTI_REGIONAL`` - ``REGIONAL`` - ``STANDARD`` - ``NEARLINE`` - ``COLDLINE``. If this value is not specified when the bucket is created, it will default to STANDARD. :type storage_class: str :param location: The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. .. seealso:: https://developers.google.com/storage/docs/bucket-locations :type location: str :param project_id: The ID of the GCP Project. :type project_id: str :param labels: User-provided labels, in key/value pairs. :type labels: dict :return: If successful, it returns the ``id`` of the bucket.
[ "Creates", "a", "new", "bucket", ".", "Google", "Cloud", "Storage", "uses", "a", "flat", "namespace", "so", "you", "can", "t", "create", "a", "bucket", "with", "a", "name", "that", "is", "already", "in", "use", "." ]
python
test
rosenbrockc/fortpy
fortpy/tramp.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/tramp.py#L163-L178
def _read_check(self, filepath): """Returns the path of a file on the *local* system that can be read from. If the filepath is on a remote server, the file is first copied locally.""" if self.is_ssh(filepath): self._check_ftp() #First we need to generate a file path on the local system to #copy the file to. source = self._get_remote(filepath) target = self._get_hashed_path(filepath) self.ftp.get(source, target) #Now we can just read it with the normal python commands. else: target = filepath return target
[ "def", "_read_check", "(", "self", ",", "filepath", ")", ":", "if", "self", ".", "is_ssh", "(", "filepath", ")", ":", "self", ".", "_check_ftp", "(", ")", "#First we need to generate a file path on the local system to", "#copy the file to.", "source", "=", "self", ...
Returns the path of a file on the *local* system that can be read from. If the filepath is on a remote server, the file is first copied locally.
[ "Returns", "the", "path", "of", "a", "file", "on", "the", "*", "local", "*", "system", "that", "can", "be", "read", "from", ".", "If", "the", "filepath", "is", "on", "a", "remote", "server", "the", "file", "is", "first", "copied", "locally", "." ]
python
train
fermiPy/fermipy
fermipy/tsmap.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/tsmap.py#L507-L523
def f_cash(x, counts, bkg, model): """ Wrapper for cash statistics, that defines the model function. Parameters ---------- x : float Model amplitude. counts : `~numpy.ndarray` Count map slice, where model is defined. bkg : `~numpy.ndarray` Background map slice, where model is defined. model : `~numpy.ndarray` Source template (multiplied with exposure). """ return 2.0 * poisson_log_like(counts, bkg + x * model)
[ "def", "f_cash", "(", "x", ",", "counts", ",", "bkg", ",", "model", ")", ":", "return", "2.0", "*", "poisson_log_like", "(", "counts", ",", "bkg", "+", "x", "*", "model", ")" ]
Wrapper for cash statistics, that defines the model function. Parameters ---------- x : float Model amplitude. counts : `~numpy.ndarray` Count map slice, where model is defined. bkg : `~numpy.ndarray` Background map slice, where model is defined. model : `~numpy.ndarray` Source template (multiplied with exposure).
[ "Wrapper", "for", "cash", "statistics", "that", "defines", "the", "model", "function", "." ]
python
train
dwkim78/upsilon
upsilon/extract_features/extract_features.py
https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L168-L273
def get_period_LS(self, date, mag, n_threads, min_period): """ Period finding using the Lomb-Scargle algorithm. Finding two periods. The second period is estimated after whitening the first period. Calculating various other features as well using derived periods. Parameters ---------- date : array_like An array of observed date, in days. mag : array_like An array of observed magnitude. n_threads : int The number of threads to use. min_period : float The minimum period to calculate. """ # DO NOT CHANGE THESE PARAMETERS. oversampling = 3. hifac = int((max(date) - min(date)) / len(date) / min_period * 2.) # Minimum hifac if hifac < 100: hifac = 100 # Lomb-Scargle. fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac, n_threads) self.f = fx[jmax] self.period = 1. / self.f self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax) self.period_log10FAP = \ np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax]) # self.f_SNR1 = fy[jmax] / np.median(fy) self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy) # Fit Fourier Series of order 3. order = 3 # Initial guess of Fourier coefficients. p0 = np.ones(order * 2 + 1) date_period = (date % self.period) / self.period p1, success = leastsq(self.residuals, p0, args=(date_period, mag, order)) # fitted_y = self.FourierSeries(p1, date_period, order) # print p1, self.mean, self.median # plt.plot(date_period, self.mag, 'b+') # plt.show() # Derive Fourier features for the first period. # Petersen, J. O., 1986, A&A self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2) self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude self.f_phase = np.arctan(-p1[1] / p1[2]) self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase """ # Derive a second period. # Whitening a light curve. residual_mag = mag - fitted_y # Lomb-Scargle again to find the second period. 
omega_top, power_top = search_frequencies(date, residual_mag, err, #LS_kwargs={'generalized':True, 'subtract_mean':True}, n_eval=5000, n_retry=3, n_save=50) self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0] self.f2 = 1. / self.period2 self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \ * (len(self.date) - 1) / 2. # Fit Fourier Series again. p0 = [1.] * order * 2 date_period = (date % self.period) / self.period p2, success = leastsq(self.residuals, p0, args=(date_period, residual_mag, order)) fitted_y = self.FourierSeries(p2, date_period, order) #plt.plot(date%self.period2, residual_mag, 'b+') #plt.show() # Derive Fourier features for the first second. self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2) self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp self.f2_phase = np.arctan(-p2[1] / p2[2]) self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase # Calculate features using the first and second periods. self.f12_ratio = self.f2 / self.f1 self.f12_remain = self.f1 % self.f2 \ if self.f1 > self.f2 else self.f2 % self.f1 self.f12_amp = self.f2_amp / self.f1_amp self.f12_phase = self.f2_phase - self.f1_phase """
[ "def", "get_period_LS", "(", "self", ",", "date", ",", "mag", ",", "n_threads", ",", "min_period", ")", ":", "# DO NOT CHANGE THESE PARAMETERS.", "oversampling", "=", "3.", "hifac", "=", "int", "(", "(", "max", "(", "date", ")", "-", "min", "(", "date", ...
Period finding using the Lomb-Scargle algorithm. Finding two periods. The second period is estimated after whitening the first period. Calculating various other features as well using derived periods. Parameters ---------- date : array_like An array of observed date, in days. mag : array_like An array of observed magnitude. n_threads : int The number of threads to use. min_period : float The minimum period to calculate.
[ "Period", "finding", "using", "the", "Lomb", "-", "Scargle", "algorithm", "." ]
python
train
dpkp/kafka-python
kafka/metrics/stats/sensor.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/metrics/stats/sensor.py#L93-L111
def add_compound(self, compound_stat, config=None): """ Register a compound statistic with this sensor which yields multiple measurable quantities (like a histogram) Arguments: stat (AbstractCompoundStat): The stat to register config (MetricConfig): The configuration for this stat. If None then the stat will use the default configuration for this sensor. """ if not compound_stat: raise ValueError('compound stat must be non-empty') self._stats.append(compound_stat) for named_measurable in compound_stat.stats(): metric = KafkaMetric(named_measurable.name, named_measurable.stat, config or self._config) self._registry.register_metric(metric) self._metrics.append(metric)
[ "def", "add_compound", "(", "self", ",", "compound_stat", ",", "config", "=", "None", ")", ":", "if", "not", "compound_stat", ":", "raise", "ValueError", "(", "'compound stat must be non-empty'", ")", "self", ".", "_stats", ".", "append", "(", "compound_stat", ...
Register a compound statistic with this sensor which yields multiple measurable quantities (like a histogram) Arguments: stat (AbstractCompoundStat): The stat to register config (MetricConfig): The configuration for this stat. If None then the stat will use the default configuration for this sensor.
[ "Register", "a", "compound", "statistic", "with", "this", "sensor", "which", "yields", "multiple", "measurable", "quantities", "(", "like", "a", "histogram", ")" ]
python
train
census-instrumentation/opencensus-python
contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/trace_exporter/__init__.py
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/trace_exporter/__init__.py#L118-L139
def set_attribute_label(span, resource_type, resource_labels, attribute_key, canonical_key=None, label_value_prefix=''): """Set a label to span that can be used for tracing. :param span: Span object :param resource_type: resource type :param resource_labels: collection of labels :param attribute_key: actual label key :param canonical_key: exporter specific label key, Optional :param label_value_prefix: exporter specific label value prefix, Optional """ if attribute_key in resource_labels: if canonical_key is None: canonical_key = attribute_key pair = {RESOURCE_LABEL % (resource_type, canonical_key): label_value_prefix + resource_labels[attribute_key] } pair_attrs = Attributes(pair).format_attributes_json()\ .get('attributeMap') _update_attr_map(span, pair_attrs)
[ "def", "set_attribute_label", "(", "span", ",", "resource_type", ",", "resource_labels", ",", "attribute_key", ",", "canonical_key", "=", "None", ",", "label_value_prefix", "=", "''", ")", ":", "if", "attribute_key", "in", "resource_labels", ":", "if", "canonical_...
Set a label to span that can be used for tracing. :param span: Span object :param resource_type: resource type :param resource_labels: collection of labels :param attribute_key: actual label key :param canonical_key: exporter specific label key, Optional :param label_value_prefix: exporter specific label value prefix, Optional
[ "Set", "a", "label", "to", "span", "that", "can", "be", "used", "for", "tracing", ".", ":", "param", "span", ":", "Span", "object", ":", "param", "resource_type", ":", "resource", "type", ":", "param", "resource_labels", ":", "collection", "of", "labels", ...
python
train
PredixDev/predixpy
predix/data/eventhub/publisher.py
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/data/eventhub/publisher.py#L171-L187
def add_message(self, id, body, tags=False): """ add messages to the rx_queue :param id: str message Id :param body: str the message body :param tags: dict[string->string] tags to be associated with the message :return: self """ if not tags: tags = {} try: self._tx_queue_lock.acquire() self._tx_queue.append( EventHub_pb2.Message(id=id, body=body, tags=tags, zone_id=self.eventhub_client.zone_id)) finally: self._tx_queue_lock.release() return self
[ "def", "add_message", "(", "self", ",", "id", ",", "body", ",", "tags", "=", "False", ")", ":", "if", "not", "tags", ":", "tags", "=", "{", "}", "try", ":", "self", ".", "_tx_queue_lock", ".", "acquire", "(", ")", "self", ".", "_tx_queue", ".", "...
add messages to the rx_queue :param id: str message Id :param body: str the message body :param tags: dict[string->string] tags to be associated with the message :return: self
[ "add", "messages", "to", "the", "rx_queue", ":", "param", "id", ":", "str", "message", "Id", ":", "param", "body", ":", "str", "the", "message", "body", ":", "param", "tags", ":", "dict", "[", "string", "-", ">", "string", "]", "tags", "to", "be", ...
python
train
Spinmob/spinmob
_plotting_mess.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_plotting_mess.py#L774-L803
def xy_files(xscript=0, yscript='d[1]', eyscript=None, exscript=None, paths=None, g=None, **kwargs): """ This will load a bunch of data files, generate data based on the supplied scripts, and then plot the ydata versus xdata. Parameters ---------- xscript=0 Script for x data yscript='d[1]' Script for y data eyscript=None Script for y error exscript=None Script for x error paths=None List of paths to open. g=None Optional dictionary of globals for the scripts See spinmob.plot.xy.data() for additional optional arguments. See spinmob.data.databox.execute_script() for more information about scripts. Common additional parameters ---------------------------- filters="*.*" Set the file filters for the dialog. """ return files(xscript, yscript, eyscript, exscript, plotter=xy_databoxes, paths=paths, g=g, **kwargs)
[ "def", "xy_files", "(", "xscript", "=", "0", ",", "yscript", "=", "'d[1]'", ",", "eyscript", "=", "None", ",", "exscript", "=", "None", ",", "paths", "=", "None", ",", "g", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "files", "(", "...
This will load a bunch of data files, generate data based on the supplied scripts, and then plot the ydata versus xdata. Parameters ---------- xscript=0 Script for x data yscript='d[1]' Script for y data eyscript=None Script for y error exscript=None Script for x error paths=None List of paths to open. g=None Optional dictionary of globals for the scripts See spinmob.plot.xy.data() for additional optional arguments. See spinmob.data.databox.execute_script() for more information about scripts. Common additional parameters ---------------------------- filters="*.*" Set the file filters for the dialog.
[ "This", "will", "load", "a", "bunch", "of", "data", "files", "generate", "data", "based", "on", "the", "supplied", "scripts", "and", "then", "plot", "the", "ydata", "versus", "xdata", "." ]
python
train
inasafe/inasafe
safe/gui/tools/wizard/step_fc90_analysis.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_fc90_analysis.py#L228-L252
def read_settings(self): """Set the IF state from QSettings.""" extent = setting('user_extent', None, str) if extent: extent = QgsGeometry.fromWkt(extent) if not extent.isGeosValid(): extent = None crs = setting('user_extent_crs', None, str) if crs: crs = QgsCoordinateReferenceSystem(crs) if not crs.isValid(): crs = None mode = setting('analysis_extents_mode', HAZARD_EXPOSURE_VIEW) if crs and extent and mode == HAZARD_EXPOSURE_BOUNDINGBOX: self.extent.set_user_extent(extent, crs) self.extent.show_rubber_bands = setting( 'showRubberBands', False, bool) self.zoom_to_impact_flag = setting('setZoomToImpactFlag', True, bool) # whether exposure layer should be hidden after model completes self.hide_exposure_flag = setting('setHideExposureFlag', False, bool)
[ "def", "read_settings", "(", "self", ")", ":", "extent", "=", "setting", "(", "'user_extent'", ",", "None", ",", "str", ")", "if", "extent", ":", "extent", "=", "QgsGeometry", ".", "fromWkt", "(", "extent", ")", "if", "not", "extent", ".", "isGeosValid",...
Set the IF state from QSettings.
[ "Set", "the", "IF", "state", "from", "QSettings", "." ]
python
train
ml4ai/delphi
delphi/AnalysisGraph.py
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/AnalysisGraph.py#L905-L917
def get_subgraph_for_concept_pair( self, source: str, target: str, cutoff: Optional[int] = None ): """ Get subgraph comprised of simple paths between the source and the target. Args: source target cutoff """ paths = nx.all_simple_paths(self, source, target, cutoff=cutoff) return AnalysisGraph(self.subgraph(set(chain.from_iterable(paths))))
[ "def", "get_subgraph_for_concept_pair", "(", "self", ",", "source", ":", "str", ",", "target", ":", "str", ",", "cutoff", ":", "Optional", "[", "int", "]", "=", "None", ")", ":", "paths", "=", "nx", ".", "all_simple_paths", "(", "self", ",", "source", ...
Get subgraph comprised of simple paths between the source and the target. Args: source target cutoff
[ "Get", "subgraph", "comprised", "of", "simple", "paths", "between", "the", "source", "and", "the", "target", "." ]
python
train
apache/incubator-mxnet
python/mxnet/gluon/parameter.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L476-L494
def data(self, ctx=None): """Returns a copy of this parameter on one context. Must have been initialized on this context before. For sparse parameters, use :py:meth:`Parameter.row_sparse_data` instead. Parameters ---------- ctx : Context Desired context. Returns ------- NDArray on ctx """ if self._stype != 'default': raise RuntimeError("Cannot return a copy of Parameter '%s' on ctx %s via data() " \ "because its storage type is %s. Please use row_sparse_data() " \ "instead." % (self.name, str(ctx), self._stype)) return self._check_and_get(self._data, ctx)
[ "def", "data", "(", "self", ",", "ctx", "=", "None", ")", ":", "if", "self", ".", "_stype", "!=", "'default'", ":", "raise", "RuntimeError", "(", "\"Cannot return a copy of Parameter '%s' on ctx %s via data() \"", "\"because its storage type is %s. Please use row_sparse_dat...
Returns a copy of this parameter on one context. Must have been initialized on this context before. For sparse parameters, use :py:meth:`Parameter.row_sparse_data` instead. Parameters ---------- ctx : Context Desired context. Returns ------- NDArray on ctx
[ "Returns", "a", "copy", "of", "this", "parameter", "on", "one", "context", ".", "Must", "have", "been", "initialized", "on", "this", "context", "before", ".", "For", "sparse", "parameters", "use", ":", "py", ":", "meth", ":", "Parameter", ".", "row_sparse_...
python
train
vcatalano/py-authorize
authorize/apis/authorize_api.py
https://github.com/vcatalano/py-authorize/blob/4d000b5a1ff2d8e7e955b83dab9d6c6a495c2851/authorize/apis/authorize_api.py#L53-L76
def _make_call(self, call): """Make a call to the Authorize.net server with the XML.""" try: request = urllib2.Request(self.config.environment, E.tostring(call)) request.add_header('Content-Type', 'text/xml') response = urllib2.urlopen(request).read() response = E.fromstring(response) response_json = parse_response(response) except urllib2.HTTPError: raise AuthorizeConnectionError('Error processing XML request.') # Exception handling for transaction response errors. try: error = response_json.transaction_response.errors[0] raise AuthorizeResponseError(error.error_code, error.error_text, response_json) except (KeyError, AttributeError): # Attempt to access transaction response errors pass # Throw an exception for invalid calls. This makes error handling easier. if response_json.messages[0].result_code != 'Ok': error = response_json.messages[0].message raise AuthorizeResponseError(error.code, error.text, response_json) return response_json
[ "def", "_make_call", "(", "self", ",", "call", ")", ":", "try", ":", "request", "=", "urllib2", ".", "Request", "(", "self", ".", "config", ".", "environment", ",", "E", ".", "tostring", "(", "call", ")", ")", "request", ".", "add_header", "(", "'Con...
Make a call to the Authorize.net server with the XML.
[ "Make", "a", "call", "to", "the", "Authorize", ".", "net", "server", "with", "the", "XML", "." ]
python
train
eumis/pyviews
pyviews/rendering/pipeline.py
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/rendering/pipeline.py#L98-L100
def call_set_attr(node: Node, key: str, value): """Calls node setter""" node.set_attr(key, value)
[ "def", "call_set_attr", "(", "node", ":", "Node", ",", "key", ":", "str", ",", "value", ")", ":", "node", ".", "set_attr", "(", "key", ",", "value", ")" ]
Calls node setter
[ "Calls", "node", "setter" ]
python
train
ynop/audiomate
audiomate/annotations/label_list.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/annotations/label_list.py#L242-L262
def label_values(self): """ Return a list of all occuring label values. Returns: list: Lexicographically sorted list (str) of label values. Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14), >>> Label('d', 15, 18) >>> ]) >>> ll.label_values() ['a', 'b', 'c', 'd'] """ all_labels = set([l.value for l in self]) return sorted(all_labels)
[ "def", "label_values", "(", "self", ")", ":", "all_labels", "=", "set", "(", "[", "l", ".", "value", "for", "l", "in", "self", "]", ")", "return", "sorted", "(", "all_labels", ")" ]
Return a list of all occuring label values. Returns: list: Lexicographically sorted list (str) of label values. Example: >>> ll = LabelList(labels=[ >>> Label('a', 3.2, 4.5), >>> Label('b', 5.1, 8.9), >>> Label('c', 7.2, 10.5), >>> Label('d', 10.5, 14), >>> Label('d', 15, 18) >>> ]) >>> ll.label_values() ['a', 'b', 'c', 'd']
[ "Return", "a", "list", "of", "all", "occuring", "label", "values", "." ]
python
train
saltstack/salt
salt/modules/mount.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mount.py#L577-L617
def fstab(config='/etc/fstab'): ''' .. versionchanged:: 2016.3.2 List the contents of the fstab CLI Example: .. code-block:: bash salt '*' mount.fstab ''' ret = {} if not os.path.isfile(config): return ret with salt.utils.files.fopen(config) as ifile: for line in ifile: line = salt.utils.stringutils.to_unicode(line) try: if __grains__['kernel'] == 'SunOS': # Note: comments use in default vfstab file! if line[0] == '#': continue entry = _vfstab_entry.dict_from_line( line) else: entry = _fstab_entry.dict_from_line( line, _fstab_entry.compatibility_keys) entry['opts'] = entry['opts'].split(',') while entry['name'] in ret: entry['name'] += '_' ret[entry.pop('name')] = entry except _fstab_entry.ParseError: pass except _vfstab_entry.ParseError: pass return ret
[ "def", "fstab", "(", "config", "=", "'/etc/fstab'", ")", ":", "ret", "=", "{", "}", "if", "not", "os", ".", "path", ".", "isfile", "(", "config", ")", ":", "return", "ret", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "config", ...
.. versionchanged:: 2016.3.2 List the contents of the fstab CLI Example: .. code-block:: bash salt '*' mount.fstab
[ "..", "versionchanged", "::", "2016", ".", "3", ".", "2" ]
python
train
LuminosoInsight/langcodes
langcodes/__init__.py
https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L1162-L1229
def best_match(desired_language: {str, Language}, supported_languages: list, min_score: int=75) -> (str, int): """ You have software that supports any of the `supported_languages`. You want to use `desired_language`. This function lets you choose the right language, even if there isn't an exact match. Returns: - The best-matching language code, which will be one of the `supported_languages` or 'und' - The score of the match, from 0 to 100 `min_score` sets the minimum match score. If all languages match with a lower score than that, the result will be 'und' with a score of 0. When there is a tie for the best matching language, the first one in the tie will be used. Setting `min_score` lower will enable more things to match, at the cost of possibly mis-handling data or upsetting users. Read the documentation for :func:`tag_match_score` to understand what the numbers mean. >>> best_match('fr', ['de', 'en', 'fr']) ('fr', 100) >>> best_match('sh', ['hr', 'bs', 'sr-Latn', 'sr-Cyrl']) ('sr-Latn', 100) >>> best_match('zh-CN', ['zh-Hant', 'zh-Hans', 'gan', 'nan']) ('zh-Hans', 100) >>> best_match('zh-CN', ['cmn-Hant', 'cmn-Hans', 'gan', 'nan']) ('cmn-Hans', 100) >>> best_match('pt', ['pt-BR', 'pt-PT']) ('pt-BR', 100) >>> best_match('en-AU', ['en-GB', 'en-US']) ('en-GB', 96) >>> best_match('es-MX', ['es-ES', 'es-419', 'en-US']) ('es-419', 96) >>> best_match('es-MX', ['es-PU', 'es-AR', 'es-PY']) ('es-PU', 95) >>> best_match('es-MX', ['es-AR', 'es-PU', 'es-PY']) ('es-AR', 95) >>> best_match('zsm', ['id', 'mhp']) ('id', 86) >>> best_match('eu', ['el', 'en', 'es']) ('es', 90) >>> best_match('eu', ['el', 'en', 'es'], min_score=92) ('und', 0) """ # Quickly return if the desired language is directly supported if desired_language in supported_languages: return desired_language, 100 # Reduce the desired language to a standard form that could also match desired_language = standardize_tag(desired_language) if desired_language in supported_languages: return desired_language, 100 
match_scores = [ (supported, tag_match_score(desired_language, supported)) for supported in supported_languages ] match_scores = [ (supported, score) for (supported, score) in match_scores if score >= min_score ] + [('und', 0)] match_scores.sort(key=lambda item: -item[1]) return match_scores[0]
[ "def", "best_match", "(", "desired_language", ":", "{", "str", ",", "Language", "}", ",", "supported_languages", ":", "list", ",", "min_score", ":", "int", "=", "75", ")", "->", "(", "str", ",", "int", ")", ":", "# Quickly return if the desired language is dir...
You have software that supports any of the `supported_languages`. You want to use `desired_language`. This function lets you choose the right language, even if there isn't an exact match. Returns: - The best-matching language code, which will be one of the `supported_languages` or 'und' - The score of the match, from 0 to 100 `min_score` sets the minimum match score. If all languages match with a lower score than that, the result will be 'und' with a score of 0. When there is a tie for the best matching language, the first one in the tie will be used. Setting `min_score` lower will enable more things to match, at the cost of possibly mis-handling data or upsetting users. Read the documentation for :func:`tag_match_score` to understand what the numbers mean. >>> best_match('fr', ['de', 'en', 'fr']) ('fr', 100) >>> best_match('sh', ['hr', 'bs', 'sr-Latn', 'sr-Cyrl']) ('sr-Latn', 100) >>> best_match('zh-CN', ['zh-Hant', 'zh-Hans', 'gan', 'nan']) ('zh-Hans', 100) >>> best_match('zh-CN', ['cmn-Hant', 'cmn-Hans', 'gan', 'nan']) ('cmn-Hans', 100) >>> best_match('pt', ['pt-BR', 'pt-PT']) ('pt-BR', 100) >>> best_match('en-AU', ['en-GB', 'en-US']) ('en-GB', 96) >>> best_match('es-MX', ['es-ES', 'es-419', 'en-US']) ('es-419', 96) >>> best_match('es-MX', ['es-PU', 'es-AR', 'es-PY']) ('es-PU', 95) >>> best_match('es-MX', ['es-AR', 'es-PU', 'es-PY']) ('es-AR', 95) >>> best_match('zsm', ['id', 'mhp']) ('id', 86) >>> best_match('eu', ['el', 'en', 'es']) ('es', 90) >>> best_match('eu', ['el', 'en', 'es'], min_score=92) ('und', 0)
[ "You", "have", "software", "that", "supports", "any", "of", "the", "supported_languages", ".", "You", "want", "to", "use", "desired_language", ".", "This", "function", "lets", "you", "choose", "the", "right", "language", "even", "if", "there", "isn", "t", "a...
python
train
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L2918-L2931
def complete_confirmation(self, confirmation_id, complete_dict): """ Completes an confirmation :param complete_dict: the complete dict with the template id :param confirmation_id: the confirmation id :return: Response """ return self._create_put_request( resource=CONFIRMATIONS, billomat_id=confirmation_id, command=COMPLETE, send_data=complete_dict )
[ "def", "complete_confirmation", "(", "self", ",", "confirmation_id", ",", "complete_dict", ")", ":", "return", "self", ".", "_create_put_request", "(", "resource", "=", "CONFIRMATIONS", ",", "billomat_id", "=", "confirmation_id", ",", "command", "=", "COMPLETE", "...
Completes an confirmation :param complete_dict: the complete dict with the template id :param confirmation_id: the confirmation id :return: Response
[ "Completes", "an", "confirmation" ]
python
train
schul-cloud/resources-api-v1
generators/python_client/schul_cloud_resources_api_v1/schema/__init__.py
https://github.com/schul-cloud/resources-api-v1/blob/58b2d7ba13669fa013ef81c0ffcffbf6b3fdb52d/generators/python_client/schul_cloud_resources_api_v1/schema/__init__.py#L14-L21
def _get_json_content_from_folder(folder): """yield objects from json files in the folder and subfolders.""" for dirpath, dirnames, filenames in os.walk(folder): for filename in filenames: if filename.lower().endswith(".json"): filepath = os.path.join(dirpath, filename) with open(filepath, "rb") as file: yield json.loads(file.read().decode("UTF-8"))
[ "def", "_get_json_content_from_folder", "(", "folder", ")", ":", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "folder", ")", ":", "for", "filename", "in", "filenames", ":", "if", "filename", ".", "lower", "(", ")", "...
yield objects from json files in the folder and subfolders.
[ "yield", "objects", "from", "json", "files", "in", "the", "folder", "and", "subfolders", "." ]
python
test
booktype/python-ooxml
ooxml/serialize.py
https://github.com/booktype/python-ooxml/blob/b56990a5bee2e1bc46839cec5161ff3726dc4d87/ooxml/serialize.py#L123-L157
def close_list(ctx, root): """Close already opened list if needed. This will try to see if it is needed to close already opened list. :Args: - ctx (:class:`Context`): Context object - root (Element): lxml element representing current position. :Returns: lxml element where future content should be placed. """ try: n = len(ctx.in_list) if n <= 0: return root elem = root while n > 0: while True: if elem.tag in ['ul', 'ol', 'td']: elem = elem.getparent() break elem = elem.getparent() n -= 1 ctx.in_list = [] return elem except: return None
[ "def", "close_list", "(", "ctx", ",", "root", ")", ":", "try", ":", "n", "=", "len", "(", "ctx", ".", "in_list", ")", "if", "n", "<=", "0", ":", "return", "root", "elem", "=", "root", "while", "n", ">", "0", ":", "while", "True", ":", "if", "...
Close already opened list if needed. This will try to see if it is needed to close already opened list. :Args: - ctx (:class:`Context`): Context object - root (Element): lxml element representing current position. :Returns: lxml element where future content should be placed.
[ "Close", "already", "opened", "list", "if", "needed", "." ]
python
train
yjzhang/uncurl_python
uncurl/qual2quant.py
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/qual2quant.py#L43-L49
def binarize(qualitative): """ binarizes an expression dataset. """ thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1))/2.0 binarized = qualitative > thresholds.reshape((len(thresholds), 1)).repeat(8,1) return binarized.astype(int)
[ "def", "binarize", "(", "qualitative", ")", ":", "thresholds", "=", "qualitative", ".", "min", "(", "1", ")", "+", "(", "qualitative", ".", "max", "(", "1", ")", "-", "qualitative", ".", "min", "(", "1", ")", ")", "/", "2.0", "binarized", "=", "qua...
binarizes an expression dataset.
[ "binarizes", "an", "expression", "dataset", "." ]
python
train
inspirehep/inspire-crawler
inspire_crawler/models.py
https://github.com/inspirehep/inspire-crawler/blob/36d5cc0cd87cc597ba80e680b7de7254b120173a/inspire_crawler/models.py#L84-L96
def create(cls, job_id, spider, workflow, results=None, logs=None, status=JobStatus.PENDING): """Create a new entry for a scheduled crawler job.""" obj = cls( job_id=job_id, spider=spider, workflow=workflow, results=results, logs=logs, status=status, ) db.session.add(obj) return obj
[ "def", "create", "(", "cls", ",", "job_id", ",", "spider", ",", "workflow", ",", "results", "=", "None", ",", "logs", "=", "None", ",", "status", "=", "JobStatus", ".", "PENDING", ")", ":", "obj", "=", "cls", "(", "job_id", "=", "job_id", ",", "spi...
Create a new entry for a scheduled crawler job.
[ "Create", "a", "new", "entry", "for", "a", "scheduled", "crawler", "job", "." ]
python
train
google/grr
grr/core/grr_response_core/lib/plist.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/plist.py#L108-L115
def _AtNonLeaf(self, attr_value, path): """Makes dictionaries expandable when dealing with plists.""" if isinstance(attr_value, dict): for value in self.Expand(attr_value, path[1:]): yield value else: for v in objectfilter.ValueExpander._AtNonLeaf(self, attr_value, path): yield v
[ "def", "_AtNonLeaf", "(", "self", ",", "attr_value", ",", "path", ")", ":", "if", "isinstance", "(", "attr_value", ",", "dict", ")", ":", "for", "value", "in", "self", ".", "Expand", "(", "attr_value", ",", "path", "[", "1", ":", "]", ")", ":", "yi...
Makes dictionaries expandable when dealing with plists.
[ "Makes", "dictionaries", "expandable", "when", "dealing", "with", "plists", "." ]
python
train
SavinaRoja/OpenAccess_EPUB
src/openaccess_epub/navigation/__init__.py
https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/navigation/__init__.py#L185-L266
def render_EPUB2(self, location): """ Creates the NCX specified file for EPUB2 """ def make_navlabel(text): """ Creates and returns a navLabel element with the supplied text. """ navlabel = etree.Element('navLabel') navlabel_text = etree.SubElement(navlabel, 'text') navlabel_text.text = text return navlabel def make_navMap(nav=None): if nav is None: nav_element = etree.Element('navMap') for nav_point in self.nav: nav_element.append(make_navMap(nav=nav_point)) else: nav_element = etree.Element('navPoint') nav_element.attrib['id'] = nav.id nav_element.attrib['playOrder'] = nav.playOrder nav_element.append(make_navlabel(nav.label)) content_element = etree.SubElement(nav_element, 'content') content_element.attrib['src'] = nav.source for child in nav.children: nav_element.append(make_navMap(nav=child)) return nav_element root = etree.XML('''\ <?xml version="1.0"?>\ <ncx version="2005-1" xmlns="http://www.daisy.org/z3986/2005/ncx/">\ <head>\ <meta name="dtb:uid" content="{uid}"/>\ <meta name="dtb:depth" content="{depth}"/>\ <meta name="dtb:totalPageCount" content="0"/>\ <meta name="dtb:maxPageNumber" content="0"/>\ <meta name="dtb:generator" content="OpenAccess_EPUB {version}"/>\ </head>\ </ncx>'''.format(**{'uid': ','.join(self.all_dois), 'depth': self.nav_depth, 'version': __version__})) document = etree.ElementTree(root) ncx = document.getroot() #Create the docTitle element doctitle = etree.SubElement(ncx, 'docTitle') doctitle_text = etree.SubElement(doctitle, 'text') doctitle_text.text = self.title #Create the docAuthor elements for contributor in self.contributors: if contributor.role == 'author': docauthor = etree.SubElement(ncx, 'docAuthor') docauthor_text = etree.SubElement(docauthor, 'text') docauthor_text.text = contributor.name #Create the navMap element ncx.append(make_navMap()) if self.figures_list: navlist = etree.SubElement(ncx, 'navList') navlist.append(make_navlabel('List of Figures')) for nav_pt in self.figures_list: navtarget = 
etree.SubElement(navlist, 'navTarget') navtarget.attrib['id'] = nav_pt.id navtarget.append(self.make_navlabel(nav_pt.label)) content = etree.SubElement(navtarget, 'content') content.attrib['src'] = nav_pt.source if self.tables_list: navlist = etree.SubElement(ncx, 'navList') navlist.append(make_navlabel('List of Tables')) for nav_pt in self.tables_list: navtarget = etree.SubElement(navlist, 'navTarget') navtarget.attrib['id'] = nav_pt.id navtarget.append(self.make_navlabel(nav_pt.label)) content = etree.SubElement(navtarget, 'content') content.attrib['src'] = nav_pt.source with open(os.path.join(location, 'EPUB', 'toc.ncx'), 'wb') as output: output.write(etree.tostring(document, encoding='utf-8', pretty_print=True))
[ "def", "render_EPUB2", "(", "self", ",", "location", ")", ":", "def", "make_navlabel", "(", "text", ")", ":", "\"\"\"\n Creates and returns a navLabel element with the supplied text.\n \"\"\"", "navlabel", "=", "etree", ".", "Element", "(", "'navLabel'...
Creates the NCX specified file for EPUB2
[ "Creates", "the", "NCX", "specified", "file", "for", "EPUB2" ]
python
train
CalebBell/ht
ht/hx.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/hx.py#L557-L604
def calc_Cmin(mh, mc, Cph, Cpc): r'''Returns the heat capacity rate for the minimum stream having flows `mh` and `mc`, with averaged heat capacities `Cph` and `Cpc`. .. math:: C_c = m_cC_{p,c} C_h = m_h C_{p,h} C_{min} = \min(C_c, C_h) Parameters ---------- mh : float Mass flow rate of hot stream, [kg/s] mc : float Mass flow rate of cold stream, [kg/s] Cph : float Averaged heat capacity of hot stream, [J/kg/K] Cpc : float Averaged heat capacity of cold stream, [J/kg/K] Returns ------- Cmin : float The heat capacity rate of the smaller fluid, [W/K] Notes ----- Used with the effectiveness method for heat exchanger design. Technically, it doesn't matter if the hot and cold streams are in the right order for the input, but it is easiest to use this function when the order is specified. Examples -------- >>> calc_Cmin(mh=22., mc=5.5, Cph=2200, Cpc=4400.) 24200.0 References ---------- .. [1] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ: Wiley, 2011. ''' Ch = mh*Cph Cc = mc*Cpc return min(Ch, Cc)
[ "def", "calc_Cmin", "(", "mh", ",", "mc", ",", "Cph", ",", "Cpc", ")", ":", "Ch", "=", "mh", "*", "Cph", "Cc", "=", "mc", "*", "Cpc", "return", "min", "(", "Ch", ",", "Cc", ")" ]
r'''Returns the heat capacity rate for the minimum stream having flows `mh` and `mc`, with averaged heat capacities `Cph` and `Cpc`. .. math:: C_c = m_cC_{p,c} C_h = m_h C_{p,h} C_{min} = \min(C_c, C_h) Parameters ---------- mh : float Mass flow rate of hot stream, [kg/s] mc : float Mass flow rate of cold stream, [kg/s] Cph : float Averaged heat capacity of hot stream, [J/kg/K] Cpc : float Averaged heat capacity of cold stream, [J/kg/K] Returns ------- Cmin : float The heat capacity rate of the smaller fluid, [W/K] Notes ----- Used with the effectiveness method for heat exchanger design. Technically, it doesn't matter if the hot and cold streams are in the right order for the input, but it is easiest to use this function when the order is specified. Examples -------- >>> calc_Cmin(mh=22., mc=5.5, Cph=2200, Cpc=4400.) 24200.0 References ---------- .. [1] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ: Wiley, 2011.
[ "r", "Returns", "the", "heat", "capacity", "rate", "for", "the", "minimum", "stream", "having", "flows", "mh", "and", "mc", "with", "averaged", "heat", "capacities", "Cph", "and", "Cpc", "." ]
python
train
Erotemic/utool
utool/util_list.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1477-L1507
def sortedby(item_list, key_list, reverse=False): """ sorts ``item_list`` using key_list Args: list_ (list): list to sort key_list (list): list to sort by reverse (bool): sort order is descending (largest first) if reverse is True else acscending (smallest first) Returns: list : ``list_`` sorted by the values of another ``list``. defaults to ascending order SeeAlso: sortedby2 Examples: >>> # ENABLE_DOCTEST >>> import utool >>> list_ = [1, 2, 3, 4, 5] >>> key_list = [2, 5, 3, 1, 5] >>> result = utool.sortedby(list_, key_list, reverse=True) >>> print(result) [5, 2, 3, 1, 4] """ assert len(item_list) == len(key_list), ( 'Expected same len. Got: %r != %r' % (len(item_list), len(key_list))) sorted_list = [item for (key, item) in sorted(list(zip(key_list, item_list)), reverse=reverse)] return sorted_list
[ "def", "sortedby", "(", "item_list", ",", "key_list", ",", "reverse", "=", "False", ")", ":", "assert", "len", "(", "item_list", ")", "==", "len", "(", "key_list", ")", ",", "(", "'Expected same len. Got: %r != %r'", "%", "(", "len", "(", "item_list", ")",...
sorts ``item_list`` using key_list Args: list_ (list): list to sort key_list (list): list to sort by reverse (bool): sort order is descending (largest first) if reverse is True else acscending (smallest first) Returns: list : ``list_`` sorted by the values of another ``list``. defaults to ascending order SeeAlso: sortedby2 Examples: >>> # ENABLE_DOCTEST >>> import utool >>> list_ = [1, 2, 3, 4, 5] >>> key_list = [2, 5, 3, 1, 5] >>> result = utool.sortedby(list_, key_list, reverse=True) >>> print(result) [5, 2, 3, 1, 4]
[ "sorts", "item_list", "using", "key_list" ]
python
train
atbaker/imgur-uploader
imgur_uploader.py
https://github.com/atbaker/imgur-uploader/blob/4e663265c18b53a1d178fb2cfd569a75e2efea5b/imgur_uploader.py#L8-L24
def upload_gif(gif): """Uploads an image file to Imgur""" client_id = os.environ.get('IMGUR_API_ID') client_secret = os.environ.get('IMGUR_API_SECRET') if client_id is None or client_secret is None: click.echo('Cannot upload - could not find IMGUR_API_ID or IMGUR_API_SECRET environment variables') return client = ImgurClient(client_id, client_secret) click.echo('Uploading file {}'.format(click.format_filename(gif))) response = client.upload_from_path(gif) click.echo('File uploaded - see your gif at {}'.format(response['link']))
[ "def", "upload_gif", "(", "gif", ")", ":", "client_id", "=", "os", ".", "environ", ".", "get", "(", "'IMGUR_API_ID'", ")", "client_secret", "=", "os", ".", "environ", ".", "get", "(", "'IMGUR_API_SECRET'", ")", "if", "client_id", "is", "None", "or", "cli...
Uploads an image file to Imgur
[ "Uploads", "an", "image", "file", "to", "Imgur" ]
python
valid
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py#L1241-L1258
def hide_routemap_holder_route_map_content_continue_holder_cont(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy") route_map = ET.SubElement(hide_routemap_holder, "route-map") name_key = ET.SubElement(route_map, "name") name_key.text = kwargs.pop('name') action_rm_key = ET.SubElement(route_map, "action-rm") action_rm_key.text = kwargs.pop('action_rm') instance_key = ET.SubElement(route_map, "instance") instance_key.text = kwargs.pop('instance') content = ET.SubElement(route_map, "content") continue_holder = ET.SubElement(content, "continue-holder") cont = ET.SubElement(continue_holder, "continue") callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "hide_routemap_holder_route_map_content_continue_holder_cont", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "hide_routemap_holder", "=", "ET", ".", "SubElement", "(", "config", ",", "\"hide-rou...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
mozilla/crontabber
crontabber/connection_factory.py
https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/connection_factory.py#L129-L143
def close_connection(self, connection, force=False): """overriding the baseclass function, this routine will decline to close a connection at the end of a transaction context. This allows for reuse of connections.""" if force: try: connection.close() except self.operational_exceptions: self.config.logger.error('ConnectionFactory - failed closing') for name, conn in self.pool.iteritems(): if conn is connection: break del self.pool[name] else: pass
[ "def", "close_connection", "(", "self", ",", "connection", ",", "force", "=", "False", ")", ":", "if", "force", ":", "try", ":", "connection", ".", "close", "(", ")", "except", "self", ".", "operational_exceptions", ":", "self", ".", "config", ".", "logg...
overriding the baseclass function, this routine will decline to close a connection at the end of a transaction context. This allows for reuse of connections.
[ "overriding", "the", "baseclass", "function", "this", "routine", "will", "decline", "to", "close", "a", "connection", "at", "the", "end", "of", "a", "transaction", "context", ".", "This", "allows", "for", "reuse", "of", "connections", "." ]
python
train
casebeer/factual
factual/v2/responses.py
https://github.com/casebeer/factual/blob/f2795a8c9fd447c5d62887ae0f960481ce13be84/factual/v2/responses.py#L20-L24
def records(self): '''Return a list of dicts corresponding to the data returned by Factual.''' if self._records == None: self._records = self._get_records() return self._records
[ "def", "records", "(", "self", ")", ":", "if", "self", ".", "_records", "==", "None", ":", "self", ".", "_records", "=", "self", ".", "_get_records", "(", ")", "return", "self", ".", "_records" ]
Return a list of dicts corresponding to the data returned by Factual.
[ "Return", "a", "list", "of", "dicts", "corresponding", "to", "the", "data", "returned", "by", "Factual", "." ]
python
train
quodlibet/mutagen
mutagen/id3/_tags.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/id3/_tags.py#L117-L167
def determine_bpi(data, frames, EMPTY=b"\x00" * 10): """Takes id3v2.4 frame data and determines if ints or bitpaddedints should be used for parsing. Needed because iTunes used to write normal ints for frame sizes. """ # count number of tags found as BitPaddedInt and how far past o = 0 asbpi = 0 while o < len(data) - 10: part = data[o:o + 10] if part == EMPTY: bpioff = -((len(data) - o) % 10) break name, size, flags = struct.unpack('>4sLH', part) size = BitPaddedInt(size) o += 10 + size if PY3: try: name = name.decode("ascii") except UnicodeDecodeError: continue if name in frames: asbpi += 1 else: bpioff = o - len(data) # count number of tags found as int and how far past o = 0 asint = 0 while o < len(data) - 10: part = data[o:o + 10] if part == EMPTY: intoff = -((len(data) - o) % 10) break name, size, flags = struct.unpack('>4sLH', part) o += 10 + size if PY3: try: name = name.decode("ascii") except UnicodeDecodeError: continue if name in frames: asint += 1 else: intoff = o - len(data) # if more tags as int, or equal and bpi is past and int is not if asint > asbpi or (asint == asbpi and (bpioff >= 1 and intoff <= 1)): return int return BitPaddedInt
[ "def", "determine_bpi", "(", "data", ",", "frames", ",", "EMPTY", "=", "b\"\\x00\"", "*", "10", ")", ":", "# count number of tags found as BitPaddedInt and how far past", "o", "=", "0", "asbpi", "=", "0", "while", "o", "<", "len", "(", "data", ")", "-", "10"...
Takes id3v2.4 frame data and determines if ints or bitpaddedints should be used for parsing. Needed because iTunes used to write normal ints for frame sizes.
[ "Takes", "id3v2", ".", "4", "frame", "data", "and", "determines", "if", "ints", "or", "bitpaddedints", "should", "be", "used", "for", "parsing", ".", "Needed", "because", "iTunes", "used", "to", "write", "normal", "ints", "for", "frame", "sizes", "." ]
python
train
resync/resync
resync/hashes.py
https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/hashes.py#L64-L83
def compute_for_file(self, file, block_size=2**14): """Compute hash digests for a file. Calculate the hashes based on one read through the file. Optional block_size parameter controls memory used to do calculations. This should be a multiple of 128 bytes. """ self.initialize_hashes() f = open(file, 'rb') while True: data = f.read(block_size) if not data: break if (self.md5_calc is not None): self.md5_calc.update(data) if (self.sha1_calc is not None): self.sha1_calc.update(data) if (self.sha256_calc is not None): self.sha256_calc.update(data) f.close()
[ "def", "compute_for_file", "(", "self", ",", "file", ",", "block_size", "=", "2", "**", "14", ")", ":", "self", ".", "initialize_hashes", "(", ")", "f", "=", "open", "(", "file", ",", "'rb'", ")", "while", "True", ":", "data", "=", "f", ".", "read"...
Compute hash digests for a file. Calculate the hashes based on one read through the file. Optional block_size parameter controls memory used to do calculations. This should be a multiple of 128 bytes.
[ "Compute", "hash", "digests", "for", "a", "file", "." ]
python
train
inveniosoftware/invenio-migrator
invenio_migrator/cli.py
https://github.com/inveniosoftware/invenio-migrator/blob/6902c6968a39b747d15e32363f43b7dffe2622c2/invenio_migrator/cli.py#L44-L60
def _loadrecord(record_dump, source_type, eager=False): """Load a single record into the database. :param record_dump: Record dump. :type record_dump: dict :param source_type: 'json' or 'marcxml' :param eager: If ``True`` execute the task synchronously. """ if eager: import_record.s(record_dump, source_type=source_type).apply(throw=True) elif current_migrator.records_post_task: chain( import_record.s(record_dump, source_type=source_type), current_migrator.records_post_task.s() )() else: import_record.delay(record_dump, source_type=source_type)
[ "def", "_loadrecord", "(", "record_dump", ",", "source_type", ",", "eager", "=", "False", ")", ":", "if", "eager", ":", "import_record", ".", "s", "(", "record_dump", ",", "source_type", "=", "source_type", ")", ".", "apply", "(", "throw", "=", "True", "...
Load a single record into the database. :param record_dump: Record dump. :type record_dump: dict :param source_type: 'json' or 'marcxml' :param eager: If ``True`` execute the task synchronously.
[ "Load", "a", "single", "record", "into", "the", "database", "." ]
python
test
wbond/certvalidator
certvalidator/validate.py
https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/validate.py#L1784-L1839
def _find_cert_in_list(cert, issuer, certificate_list, crl_issuer): """ Looks for a cert in the list of revoked certificates :param cert: An asn1crypto.x509.Certificate object of the cert being checked :param issuer: An asn1crypto.x509.Certificate object of the cert issuer :param certificate_list: An ans1crypto.crl.CertificateList object to look in for the cert :param crl_issuer: An asn1crypto.x509.Certificate object of the CRL issuer :return: A tuple of (None, None) if not present, otherwise a tuple of (asn1crypto.x509.Time object, asn1crypto.crl.CRLReason object) representing the date/time the object was revoked and why """ revoked_certificates = certificate_list['tbs_cert_list']['revoked_certificates'] cert_serial = cert.serial_number issuer_name = issuer.subject known_extensions = set([ 'crl_reason', 'hold_instruction_code', 'invalidity_date', 'certificate_issuer' ]) last_issuer_name = crl_issuer.subject for revoked_cert in revoked_certificates: # If any unknown critical extensions, the entry can not be used if revoked_cert.critical_extensions - known_extensions: raise NotImplementedError() if revoked_cert.issuer_name and revoked_cert.issuer_name != last_issuer_name: last_issuer_name = revoked_cert.issuer_name if last_issuer_name != issuer_name: continue if revoked_cert['user_certificate'].native != cert_serial: continue if not revoked_cert.crl_reason_value: crl_reason = crl.CRLReason('unspecified') else: crl_reason = revoked_cert.crl_reason_value return (revoked_cert['revocation_date'], crl_reason) return (None, None)
[ "def", "_find_cert_in_list", "(", "cert", ",", "issuer", ",", "certificate_list", ",", "crl_issuer", ")", ":", "revoked_certificates", "=", "certificate_list", "[", "'tbs_cert_list'", "]", "[", "'revoked_certificates'", "]", "cert_serial", "=", "cert", ".", "serial_...
Looks for a cert in the list of revoked certificates :param cert: An asn1crypto.x509.Certificate object of the cert being checked :param issuer: An asn1crypto.x509.Certificate object of the cert issuer :param certificate_list: An ans1crypto.crl.CertificateList object to look in for the cert :param crl_issuer: An asn1crypto.x509.Certificate object of the CRL issuer :return: A tuple of (None, None) if not present, otherwise a tuple of (asn1crypto.x509.Time object, asn1crypto.crl.CRLReason object) representing the date/time the object was revoked and why
[ "Looks", "for", "a", "cert", "in", "the", "list", "of", "revoked", "certificates" ]
python
train
ly0/baidupcsapi
baidupcsapi/api.py
https://github.com/ly0/baidupcsapi/blob/6f6feeef0767a75b3b968924727460eb09242d76/baidupcsapi/api.py#L1443-L1465
def add_download_task(self, source_url, remote_path, selected_idx=(), **kwargs): """ :param source_url: 离线下载目标的URL :param remote_path: 欲保存到百度网盘的目录, 注意以 / 打头 :param selected_idx: 在 BT 或者磁力链的下载类型中, 选择哪些idx下载, 不填写为全部 添加离线任务,支持所有百度网盘支持的类型 """ if source_url.startswith('magnet:?'): print('Magnet: "%s"' % source_url) return self.add_magnet_task(source_url, remote_path, selected_idx, **kwargs) elif source_url.endswith('.torrent'): print('BitTorrent: "%s"' % source_url) return self.add_torrent_task(source_url, remote_path, selected_idx, **kwargs) else: print('Others: "%s"' % source_url) data = { 'method': 'add_task', 'source_url': source_url, 'save_path': remote_path, } url = 'http://{0}/rest/2.0/services/cloud_dl'.format(BAIDUPAN_SERVER) return self._request('services/cloud_dl', 'add_task', url=url, data=data, **kwargs)
[ "def", "add_download_task", "(", "self", ",", "source_url", ",", "remote_path", ",", "selected_idx", "=", "(", ")", ",", "*", "*", "kwargs", ")", ":", "if", "source_url", ".", "startswith", "(", "'magnet:?'", ")", ":", "print", "(", "'Magnet: \"%s\"'", "%"...
:param source_url: 离线下载目标的URL :param remote_path: 欲保存到百度网盘的目录, 注意以 / 打头 :param selected_idx: 在 BT 或者磁力链的下载类型中, 选择哪些idx下载, 不填写为全部 添加离线任务,支持所有百度网盘支持的类型
[ ":", "param", "source_url", ":", "离线下载目标的URL", ":", "param", "remote_path", ":", "欲保存到百度网盘的目录", "注意以", "/", "打头", ":", "param", "selected_idx", ":", "在", "BT", "或者磁力链的下载类型中", "选择哪些idx下载", "不填写为全部", "添加离线任务,支持所有百度网盘支持的类型" ]
python
train
PythonSanSebastian/docstamp
docstamp/template.py
https://github.com/PythonSanSebastian/docstamp/blob/b43808f2e15351b0b2f0b7eade9c7ef319c9e646/docstamp/template.py#L142-L163
def save_content(self, file_path, encoding='utf-8'): """ Save the content of the .txt file in a text file. Parameters ---------- file_path: str Path to the output file. """ if self.file_content_ is None: msg = 'Template content has not been updated. \ Please fill the template before rendering it.' log.exception(msg) raise ValueError(msg) try: write_to_file(file_path, content=self.file_content_, encoding=encoding) except Exception as exc: msg = 'Document of type {} got an error when \ writing content.'.format(self.__class__) log.exception(msg) raise Exception(msg) from exc
[ "def", "save_content", "(", "self", ",", "file_path", ",", "encoding", "=", "'utf-8'", ")", ":", "if", "self", ".", "file_content_", "is", "None", ":", "msg", "=", "'Template content has not been updated. \\\n Please fill the template before rendering it.'...
Save the content of the .txt file in a text file. Parameters ---------- file_path: str Path to the output file.
[ "Save", "the", "content", "of", "the", ".", "txt", "file", "in", "a", "text", "file", "." ]
python
test
adamrehn/ue4cli
ue4cli/ThirdPartyLibraryDetails.py
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/ThirdPartyLibraryDetails.py#L112-L116
def getLibraryFiles(self, engineRoot, delimiter=' '): """ Returns the list of library files for this library, joined using the specified delimiter """ return delimiter.join(self.resolveRoot(self.libs, engineRoot))
[ "def", "getLibraryFiles", "(", "self", ",", "engineRoot", ",", "delimiter", "=", "' '", ")", ":", "return", "delimiter", ".", "join", "(", "self", ".", "resolveRoot", "(", "self", ".", "libs", ",", "engineRoot", ")", ")" ]
Returns the list of library files for this library, joined using the specified delimiter
[ "Returns", "the", "list", "of", "library", "files", "for", "this", "library", "joined", "using", "the", "specified", "delimiter" ]
python
train
python-astrodynamics/spacetrack
spacetrack/aio.py
https://github.com/python-astrodynamics/spacetrack/blob/18f63b7de989a31b983d140a11418e01bd6fd398/spacetrack/aio.py#L70-L213
async def generic_request(self, class_, iter_lines=False, iter_content=False, controller=None, parse_types=False, **kwargs): """Generic Space-Track query coroutine. The request class methods use this method internally; the public API is as follows: .. code-block:: python st.tle_publish(*args, **st) st.basicspacedata.tle_publish(*args, **st) st.file(*args, **st) st.fileshare.file(*args, **st) st.spephemeris.file(*args, **st) They resolve to the following calls respectively: .. code-block:: python st.generic_request('tle_publish', *args, **st) st.generic_request('tle_publish', *args, controller='basicspacedata', **st) st.generic_request('file', *args, **st) st.generic_request('file', *args, controller='fileshare', **st) st.generic_request('file', *args, controller='spephemeris', **st) Parameters: class_: Space-Track request class name iter_lines: Yield result line by line iter_content: Yield result in 100 KiB chunks. controller: Optionally specify request controller to use. parse_types: Parse string values in response according to type given in predicate information, e.g. ``'2017-01-01'`` -> ``datetime.date(2017, 1, 1)``. **kwargs: These keywords must match the predicate fields on Space-Track. You may check valid keywords with the following snippet: .. code-block:: python spacetrack = AsyncSpaceTrackClient(...) await spacetrack.tle.get_predicates() # or await spacetrack.get_predicates('tle') See :func:`~spacetrack.operators._stringify_predicate_value` for which Python objects are converted appropriately. Yields: Lines—stripped of newline characters—if ``iter_lines=True`` Yields: 100 KiB chunks if ``iter_content=True`` Returns: Parsed JSON object, unless ``format`` keyword argument is passed. .. warning:: Passing ``format='json'`` will return the JSON **unparsed**. Do not set ``format`` if you want the parsed JSON object returned! 
""" if iter_lines and iter_content: raise ValueError('iter_lines and iter_content cannot both be True') if 'format' in kwargs and parse_types: raise ValueError('parse_types can only be used if format is unset.') if controller is None: controller = self._find_controller(class_) else: classes = self.request_controllers.get(controller, None) if classes is None: raise ValueError( 'Unknown request controller {!r}'.format(controller)) if class_ not in classes: raise ValueError( 'Unknown request class {!r} for controller {!r}' .format(class_, controller)) # Decode unicode unless class == download, including conversion of # CRLF newlines to LF. decode = (class_ != 'download') if not decode and iter_lines: error = ( 'iter_lines disabled for binary data, since CRLF newlines ' 'split over chunk boundaries would yield extra blank lines. ' 'Use iter_content=True instead.') raise ValueError(error) await self.authenticate() url = ('{0}{1}/query/class/{2}' .format(self.base_url, controller, class_)) offline_check = (class_, controller) in self.offline_predicates valid_fields = {p.name for p in self.rest_predicates} predicates = None if not offline_check: predicates = await self.get_predicates(class_) predicate_fields = {p.name for p in predicates} valid_fields = predicate_fields | {p.name for p in self.rest_predicates} else: valid_fields |= self.offline_predicates[(class_, controller)] for key, value in kwargs.items(): if key not in valid_fields: raise TypeError( "'{class_}' got an unexpected argument '{key}'" .format(class_=class_, key=key)) value = _stringify_predicate_value(value) url += '/{key}/{value}'.format(key=key, value=value) logger.debug(url) resp = await self._ratelimited_get(url) await _raise_for_status(resp) if iter_lines: return _AsyncLineIterator(resp, decode_unicode=decode) elif iter_content: return _AsyncChunkIterator(resp, decode_unicode=decode) else: # If format is specified, return that format unparsed. Otherwise, # parse the default JSON response. 
if 'format' in kwargs: if decode: # Replace CRLF newlines with LF, Python will handle platform # specific newlines if written to file. data = await resp.text() data = data.replace('\r', '') else: data = await resp.read() return data else: data = await resp.json() if predicates is None or not parse_types: return data else: return self._parse_types(data, predicates)
[ "async", "def", "generic_request", "(", "self", ",", "class_", ",", "iter_lines", "=", "False", ",", "iter_content", "=", "False", ",", "controller", "=", "None", ",", "parse_types", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "iter_lines", "a...
Generic Space-Track query coroutine. The request class methods use this method internally; the public API is as follows: .. code-block:: python st.tle_publish(*args, **st) st.basicspacedata.tle_publish(*args, **st) st.file(*args, **st) st.fileshare.file(*args, **st) st.spephemeris.file(*args, **st) They resolve to the following calls respectively: .. code-block:: python st.generic_request('tle_publish', *args, **st) st.generic_request('tle_publish', *args, controller='basicspacedata', **st) st.generic_request('file', *args, **st) st.generic_request('file', *args, controller='fileshare', **st) st.generic_request('file', *args, controller='spephemeris', **st) Parameters: class_: Space-Track request class name iter_lines: Yield result line by line iter_content: Yield result in 100 KiB chunks. controller: Optionally specify request controller to use. parse_types: Parse string values in response according to type given in predicate information, e.g. ``'2017-01-01'`` -> ``datetime.date(2017, 1, 1)``. **kwargs: These keywords must match the predicate fields on Space-Track. You may check valid keywords with the following snippet: .. code-block:: python spacetrack = AsyncSpaceTrackClient(...) await spacetrack.tle.get_predicates() # or await spacetrack.get_predicates('tle') See :func:`~spacetrack.operators._stringify_predicate_value` for which Python objects are converted appropriately. Yields: Lines—stripped of newline characters—if ``iter_lines=True`` Yields: 100 KiB chunks if ``iter_content=True`` Returns: Parsed JSON object, unless ``format`` keyword argument is passed. .. warning:: Passing ``format='json'`` will return the JSON **unparsed**. Do not set ``format`` if you want the parsed JSON object returned!
[ "Generic", "Space", "-", "Track", "query", "coroutine", "." ]
python
train
odlgroup/odl
odl/contrib/datasets/mri/tugraz.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/datasets/mri/tugraz.py#L130-L158
def mri_head_reco_op_32_channel(): """Reconstruction operator for 32 channel MRI of a head. This is a T2 weighted TSE scan of a healthy volunteer. The reconstruction operator is the sum of the modulus of each channel. See the data source with DOI `10.5281/zenodo.800527`_ or the `project webpage`_ for further information. See Also -------- mri_head_data_32_channel References ---------- .. _10.5281/zenodo.800529: https://zenodo.org/record/800527 .. _project webpage: http://imsc.uni-graz.at/mobis/internal/\ platform_aktuell.html """ # To get the same rotation as in the reference article space = odl.uniform_discr(min_pt=[-115.2, -115.2], max_pt=[115.2, 115.2], shape=[256, 256], dtype=complex) trafo = odl.trafos.FourierTransform(space) return odl.ReductionOperator(odl.ComplexModulus(space) * trafo.inverse, 32)
[ "def", "mri_head_reco_op_32_channel", "(", ")", ":", "# To get the same rotation as in the reference article", "space", "=", "odl", ".", "uniform_discr", "(", "min_pt", "=", "[", "-", "115.2", ",", "-", "115.2", "]", ",", "max_pt", "=", "[", "115.2", ",", "115.2...
Reconstruction operator for 32 channel MRI of a head. This is a T2 weighted TSE scan of a healthy volunteer. The reconstruction operator is the sum of the modulus of each channel. See the data source with DOI `10.5281/zenodo.800527`_ or the `project webpage`_ for further information. See Also -------- mri_head_data_32_channel References ---------- .. _10.5281/zenodo.800529: https://zenodo.org/record/800527 .. _project webpage: http://imsc.uni-graz.at/mobis/internal/\ platform_aktuell.html
[ "Reconstruction", "operator", "for", "32", "channel", "MRI", "of", "a", "head", "." ]
python
train
nuSTORM/gnomon
gnomon/Graph.py
https://github.com/nuSTORM/gnomon/blob/7616486ecd6e26b76f677c380e62db1c0ade558a/gnomon/Graph.py#L29-L40
def CreateVertices(self, points): """ Returns a dictionary object with keys that are 2tuples represnting a point. """ gr = digraph() for z, x, Q in points: node = (z, x, Q) gr.add_nodes([node]) return gr
[ "def", "CreateVertices", "(", "self", ",", "points", ")", ":", "gr", "=", "digraph", "(", ")", "for", "z", ",", "x", ",", "Q", "in", "points", ":", "node", "=", "(", "z", ",", "x", ",", "Q", ")", "gr", ".", "add_nodes", "(", "[", "node", "]",...
Returns a dictionary object with keys that are 2tuples represnting a point.
[ "Returns", "a", "dictionary", "object", "with", "keys", "that", "are", "2tuples", "represnting", "a", "point", "." ]
python
train
MAVENSDC/cdflib
cdflib/cdfread.py
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfread.py#L177-L239
def varinq(self, variable): """ Returns a dictionary that shows the basic variable information. This information includes +-----------------+--------------------------------------------------------------------------------+ | ['Variable'] | the name of the variable | +-----------------+--------------------------------------------------------------------------------+ | ['Num'] | the variable number | +-----------------+--------------------------------------------------------------------------------+ | ['Var_Type'] | the variable type: zVariable or rVariable | +-----------------+--------------------------------------------------------------------------------+ | ['Data_Type'] | the variable's CDF data type | +-----------------+--------------------------------------------------------------------------------+ | ['Num_Elements']| the number of elements of the variable | +-----------------+--------------------------------------------------------------------------------+ | ['Num_Dims'] | the dimensionality of the variable record | +-----------------+--------------------------------------------------------------------------------+ | ['Dim_Sizes'] | the shape of the variable record | +-----------------+--------------------------------------------------------------------------------+ | ['Sparse'] | the variable's record sparseness | +-----------------+--------------------------------------------------------------------------------+ | ['Last_Rec'] | the maximum written record number (0-based) | +-----------------+--------------------------------------------------------------------------------+ | ['Dim_Vary'] | the dimensional variance(s) | +-----------------+--------------------------------------------------------------------------------+ | ['Rec_Vary'] | the record variance | +-----------------+--------------------------------------------------------------------------------+ | ['Pad'] | the padded value if set | 
+-----------------+--------------------------------------------------------------------------------+ | ['Compress'] | the GZIP compression level, 0 to 9. 0 if not compressed | +-----------------+--------------------------------------------------------------------------------+ | ['Block_Factor']| the blocking factor if the variable is compressed | +-----------------+--------------------------------------------------------------------------------+ Parameters ---------- variable : """ vdr_info = self.varget(variable=variable, inq=True) if vdr_info is None: raise KeyError("Variable {} not found.".format(variable)) var = {} var['Variable'] = vdr_info['name'] var['Num'] = vdr_info['variable_number'] var['Var_Type'] = CDF._variable_token(vdr_info['section_type']) var['Data_Type'] = vdr_info['data_type'] var['Data_Type_Description'] = CDF._datatype_token(vdr_info['data_type']) var['Num_Elements'] = vdr_info['num_elements'] var['Num_Dims'] = vdr_info['num_dims'] var['Dim_Sizes'] = vdr_info['dim_sizes'] var['Sparse'] = CDF._sparse_token(vdr_info['sparse']) var['Last_Rec'] = vdr_info['max_records'] var['Rec_Vary'] = vdr_info['record_vary'] var['Dim_Vary'] = vdr_info['dim_vary'] if ('pad' in vdr_info): var['Pad'] = vdr_info['pad'] var['Compress'] = vdr_info['compression_level'] if ('blocking_factor' in vdr_info): var['Block_Factor'] = vdr_info['blocking_factor'] return var
[ "def", "varinq", "(", "self", ",", "variable", ")", ":", "vdr_info", "=", "self", ".", "varget", "(", "variable", "=", "variable", ",", "inq", "=", "True", ")", "if", "vdr_info", "is", "None", ":", "raise", "KeyError", "(", "\"Variable {} not found.\"", ...
Returns a dictionary that shows the basic variable information. This information includes +-----------------+--------------------------------------------------------------------------------+ | ['Variable'] | the name of the variable | +-----------------+--------------------------------------------------------------------------------+ | ['Num'] | the variable number | +-----------------+--------------------------------------------------------------------------------+ | ['Var_Type'] | the variable type: zVariable or rVariable | +-----------------+--------------------------------------------------------------------------------+ | ['Data_Type'] | the variable's CDF data type | +-----------------+--------------------------------------------------------------------------------+ | ['Num_Elements']| the number of elements of the variable | +-----------------+--------------------------------------------------------------------------------+ | ['Num_Dims'] | the dimensionality of the variable record | +-----------------+--------------------------------------------------------------------------------+ | ['Dim_Sizes'] | the shape of the variable record | +-----------------+--------------------------------------------------------------------------------+ | ['Sparse'] | the variable's record sparseness | +-----------------+--------------------------------------------------------------------------------+ | ['Last_Rec'] | the maximum written record number (0-based) | +-----------------+--------------------------------------------------------------------------------+ | ['Dim_Vary'] | the dimensional variance(s) | +-----------------+--------------------------------------------------------------------------------+ | ['Rec_Vary'] | the record variance | +-----------------+--------------------------------------------------------------------------------+ | ['Pad'] | the padded value if set | 
+-----------------+--------------------------------------------------------------------------------+ | ['Compress'] | the GZIP compression level, 0 to 9. 0 if not compressed | +-----------------+--------------------------------------------------------------------------------+ | ['Block_Factor']| the blocking factor if the variable is compressed | +-----------------+--------------------------------------------------------------------------------+ Parameters ---------- variable :
[ "Returns", "a", "dictionary", "that", "shows", "the", "basic", "variable", "information", "." ]
python
train
psss/did
did/plugins/jira.py
https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/plugins/jira.py#L115-L126
def updated(self, user, options): """ True if the issue was commented by given user """ for comment in self.comments: created = dateutil.parser.parse(comment["created"]).date() try: if (comment["author"]["emailAddress"] == user.email and created >= options.since.date and created < options.until.date): return True except KeyError: pass return False
[ "def", "updated", "(", "self", ",", "user", ",", "options", ")", ":", "for", "comment", "in", "self", ".", "comments", ":", "created", "=", "dateutil", ".", "parser", ".", "parse", "(", "comment", "[", "\"created\"", "]", ")", ".", "date", "(", ")", ...
True if the issue was commented by given user
[ "True", "if", "the", "issue", "was", "commented", "by", "given", "user" ]
python
train
tylucaskelley/licenser
licenser/licenser.py
https://github.com/tylucaskelley/licenser/blob/6b7394fdaab7707c4c33201c4d023097452b46bc/licenser/licenser.py#L172-L189
def get_license(name): ''' Returns the closest match to the requested license. Arguments: - name (str) License to use Returns: - (str) License that most closely matches the 'name' parameter ''' filenames = os.listdir(cwd + licenses_loc) licenses = dict(zip(filenames, [-1] * len(filenames))) for l in licenses: licenses[l] = compute_distance(name, l) return min(licenses, key=(lambda k: licenses[k]))
[ "def", "get_license", "(", "name", ")", ":", "filenames", "=", "os", ".", "listdir", "(", "cwd", "+", "licenses_loc", ")", "licenses", "=", "dict", "(", "zip", "(", "filenames", ",", "[", "-", "1", "]", "*", "len", "(", "filenames", ")", ")", ")", ...
Returns the closest match to the requested license. Arguments: - name (str) License to use Returns: - (str) License that most closely matches the 'name' parameter
[ "Returns", "the", "closest", "match", "to", "the", "requested", "license", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2618-L2671
def masked_within_block_local_attention_1d(q, k, v, block_length=64, name=None): """Attention to the source and a neighborhood to the left within a block. The sequence is divided into blocks of length block_length. Attention for a given query position can only see memory positions less than or equal to the query position in the corresponding block. Args: q: a Tensor with shape [batch, heads, length, depth_k] k: a Tensor with shape [batch, heads, length, depth_k] v: a Tensor with shape [batch, heads, length, depth_v] block_length: an integer name: an optional string Returns: a Tensor of shape [batch, heads, length, depth_v] """ with tf.variable_scope( name, default_name="within_local_attention_1d", values=[q, k, v]): batch, heads, length, depth_k = common_layers.shape_list(q) depth_v = common_layers.shape_list(v)[-1] if isinstance(block_length, tf.Tensor): const = tf.contrib.util.constant_value(block_length) if const is not None: block_length = int(const) # Pad query, key, value to ensure multiple of block length. original_length = length padding_size = tf.mod(-length, block_length) length += padding_size padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]] q = tf.pad(q, padding) k = tf.pad(k, padding) v = tf.pad(v, padding) # Compute attention for all subsequent query blocks. num_blocks = tf.div(length, block_length) q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k]) k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k]) v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v]) # [batch, heads, num_blocks, block_length, block_length] attention = tf.matmul(q, k, transpose_b=True) attention += tf.reshape(attention_bias_lower_triangle(block_length), [1, 1, 1, block_length, block_length]) attention = tf.nn.softmax(attention) # [batch, heads, num_blocks, block_length, depth_v] output = tf.matmul(attention, v) output = tf.reshape(output, [batch, heads, -1, depth_v]) # Remove the padding if introduced. 
output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) output.set_shape([None if isinstance(dim, tf.Tensor) else dim for dim in (batch, heads, length, depth_v)]) return output
[ "def", "masked_within_block_local_attention_1d", "(", "q", ",", "k", ",", "v", ",", "block_length", "=", "64", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"within_local_attention_1d\"", ",",...
Attention to the source and a neighborhood to the left within a block. The sequence is divided into blocks of length block_length. Attention for a given query position can only see memory positions less than or equal to the query position in the corresponding block. Args: q: a Tensor with shape [batch, heads, length, depth_k] k: a Tensor with shape [batch, heads, length, depth_k] v: a Tensor with shape [batch, heads, length, depth_v] block_length: an integer name: an optional string Returns: a Tensor of shape [batch, heads, length, depth_v]
[ "Attention", "to", "the", "source", "and", "a", "neighborhood", "to", "the", "left", "within", "a", "block", "." ]
python
train
softwarefactory-project/rdopkg
rdopkg/utils/specfile.py
https://github.com/softwarefactory-project/rdopkg/blob/2d2bed4e7cd329558a36d0dd404ec4ac8f9f254c/rdopkg/utils/specfile.py#L125-L145
def release_parts(version): """ Split RPM Release string into (numeric X.Y.Z part, milestone, rest). :returns: a three-element tuple (number, milestone, rest). If we cannot determine the "milestone" or "rest", those will be an empty string. """ numver, tail = version_parts(version) if numver and not re.match(r'\d', numver): # entire release is macro a la %{release} tail = numver numver = '' m = re.match(r'(\.?(?:%\{\?milestone\}|[^%.]+))(.*)$', tail) if m: milestone = m.group(1) rest = m.group(2) else: milestone = '' rest = tail return numver, milestone, rest
[ "def", "release_parts", "(", "version", ")", ":", "numver", ",", "tail", "=", "version_parts", "(", "version", ")", "if", "numver", "and", "not", "re", ".", "match", "(", "r'\\d'", ",", "numver", ")", ":", "# entire release is macro a la %{release}", "tail", ...
Split RPM Release string into (numeric X.Y.Z part, milestone, rest). :returns: a three-element tuple (number, milestone, rest). If we cannot determine the "milestone" or "rest", those will be an empty string.
[ "Split", "RPM", "Release", "string", "into", "(", "numeric", "X", ".", "Y", ".", "Z", "part", "milestone", "rest", ")", "." ]
python
train
mitsei/dlkit
dlkit/json_/types.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/types.py#L109-L122
def get_type_data(self, name): """Return dictionary representation of type.""" try: return { 'authority': 'DLKIT', 'namespace': 'relationship.Relationship', 'identifier': name.lower(), 'domain': 'Generic Types', 'display_name': name.title() + ' Type', 'display_label': name.title(), 'description': ('The ' + name.title() + ' Type.') } except IndexError: raise NotFound('RelationshipType: ' + name.title())
[ "def", "get_type_data", "(", "self", ",", "name", ")", ":", "try", ":", "return", "{", "'authority'", ":", "'DLKIT'", ",", "'namespace'", ":", "'relationship.Relationship'", ",", "'identifier'", ":", "name", ".", "lower", "(", ")", ",", "'domain'", ":", "'...
Return dictionary representation of type.
[ "Return", "dictionary", "representation", "of", "type", "." ]
python
train