repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
bio2bel/bio2bel
src/bio2bel/manager/cli_manager.py
https://github.com/bio2bel/bio2bel/blob/d80762d891fa18b248709ff0b0f97ebb65ec64c2/src/bio2bel/manager/cli_manager.py#L23-L41
def get_cli(cls) -> click.Group:
    """Build the :mod:`click` command group for a manager class.

    :param Type[AbstractManager] cls: A Manager class
    :return: The ``main`` click group function for the manager
    """
    help_text = 'Default connection at {}\n\nusing Bio2BEL v{}'.format(cls._get_connection(), get_version())

    @click.group(help=help_text)
    @click.option('-c', '--connection', default=cls._get_connection(),
                  help='Defaults to {}'.format(cls._get_connection()))
    @click.pass_context
    def main(ctx, connection):
        """Bio2BEL CLI."""
        logging.basicConfig(level=logging.INFO,
                            format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        logging.getLogger('bio2bel.utils').setLevel(logging.WARNING)
        ctx.obj = cls(connection=connection)

    return main
[ "def", "get_cli", "(", "cls", ")", "->", "click", ".", "Group", ":", "group_help", "=", "'Default connection at {}\\n\\nusing Bio2BEL v{}'", ".", "format", "(", "cls", ".", "_get_connection", "(", ")", ",", "get_version", "(", ")", ")", "@", "click", ".", "g...
Build a :mod:`click` CLI main function. :param Type[AbstractManager] cls: A Manager class :return: The main function for click
[ "Build", "a", ":", "mod", ":", "click", "CLI", "main", "function", "." ]
python
valid
saltstack/salt
salt/modules/saltutil.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/saltutil.py#L1175-L1204
def clear_cache(days=-1):
    '''
    Forcibly removes all caches on a minion.

    .. versionadded:: 2014.7.0

    WARNING: The safest way to clear a minion cache is by first stopping
    the minion and then deleting the cache files before restarting it.

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.clear_cache days=7
    '''
    # Files modified before this cutoff are removed. With the default
    # days=-1 the cutoff lies in the future, so every cache file goes.
    cutoff = time.time() - days * 24 * 60 * 60
    cache_root = __opts__['cachedir']
    for dirpath, _subdirs, filenames in salt.utils.files.safe_walk(cache_root, followlinks=False):
        for filename in filenames:
            try:
                full_path = os.path.join(dirpath, filename)
                if os.path.getmtime(full_path) < cutoff:
                    os.remove(full_path)
            except OSError as exc:
                log.error('Attempt to clear cache with saltutil.clear_cache FAILED with: %s', exc)
                return False
    return True
[ "def", "clear_cache", "(", "days", "=", "-", "1", ")", ":", "threshold", "=", "time", ".", "time", "(", ")", "-", "days", "*", "24", "*", "60", "*", "60", "for", "root", ",", "dirs", ",", "files", "in", "salt", ".", "utils", ".", "files", ".", ...
Forcibly removes all caches on a minion. .. versionadded:: 2014.7.0 WARNING: The safest way to clear a minion cache is by first stopping the minion and then deleting the cache files before restarting it. CLI Example: .. code-block:: bash salt '*' saltutil.clear_cache days=7
[ "Forcibly", "removes", "all", "caches", "on", "a", "minion", "." ]
python
train
mrstephenneal/mysql-toolkit
mysql/toolkit/datatypes/numeric.py
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/datatypes/numeric.py#L41-L48
def _is_numeric_data(self, data_type):
    """Test whether ``self.data`` fits the given numeric data type.

    On a match, sets ``self.type`` (upper-cased type name) and ``self.len``
    (digit count of the value) and returns True; otherwise returns None
    implicitly, preserving the original contract.

    :param data_type: Key into the ``DATA_TYPES`` registry; the entry must
        carry 'min', 'max' and 'type' fields.
    """
    dt = DATA_TYPES[data_type]
    # BUG FIX: use explicit None checks. The original truthiness test
    # (`if dt['min'] and dt['max']`) silently skipped any type whose
    # lower bound is 0 (e.g. unsigned integer types).
    if dt['min'] is not None and dt['max'] is not None:
        if type(self.data) is dt['type'] and dt['min'] < self.data < dt['max']:
            self.type = data_type.upper()
            self.len = len(str(self.data))
            return True
[ "def", "_is_numeric_data", "(", "self", ",", "data_type", ")", ":", "dt", "=", "DATA_TYPES", "[", "data_type", "]", "if", "dt", "[", "'min'", "]", "and", "dt", "[", "'max'", "]", ":", "if", "type", "(", "self", ".", "data", ")", "is", "dt", "[", ...
Private method for testing text data types.
[ "Private", "method", "for", "testing", "text", "data", "types", "." ]
python
train
saltstack/salt
salt/modules/consul.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/consul.py#L540-L594
def agent_maintenance(consul_url=None, token=None, **kwargs):
    '''
    Manages node maintenance mode

    :param consul_url: The Consul server URL.
    :param enable: The enable flag is required.
        Acceptable values are either true
        (to enter maintenance mode) or
        false (to resume normal operation).
    :param reason: If provided, its value should be a text string
                   explaining the reason for placing the node into
                   maintenance mode.
    :return: Boolean and message indicating success or failure.

    CLI Example:

    .. code-block:: bash

        salt '*' consul.agent_maintenance enable='False' reason='Upgrade in progress'

    '''
    ret = {}
    query_params = {}

    if not consul_url:
        consul_url = _get_config()
        if not consul_url:
            log.error('No Consul URL found.')
            ret['message'] = 'No Consul URL found.'
            ret['res'] = False
            return ret

    # 'enable' is mandatory; bail out early with a failure result.
    if 'enable' in kwargs:
        query_params['enable'] = kwargs['enable']
    else:
        ret['message'] = 'Required parameter "enable" is missing.'
        ret['res'] = False
        return ret

    # 'reason' is optional free text.
    if 'reason' in kwargs:
        query_params['reason'] = kwargs['reason']

    function = 'agent/maintenance'
    res = _query(consul_url=consul_url,
                 function=function,
                 token=token,
                 method='PUT',
                 query_params=query_params)
    if res['res']:
        ret['res'] = True
        ret['message'] = ('Agent maintenance mode '
                          '{0}ed.'.format(kwargs['enable']))
    else:
        # BUG FIX: the failure branch previously set ``ret['res'] = True``,
        # contradicting the documented Boolean success/failure contract.
        ret['res'] = False
        ret['message'] = 'Unable to change maintenance mode for agent.'
    return ret
[ "def", "agent_maintenance", "(", "consul_url", "=", "None", ",", "token", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "}", "query_params", "=", "{", "}", "if", "not", "consul_url", ":", "consul_url", "=", "_get_config", "(", ")", ...
Manages node maintenance mode :param consul_url: The Consul server URL. :param enable: The enable flag is required. Acceptable values are either true (to enter maintenance mode) or false (to resume normal operation). :param reason: If provided, its value should be a text string explaining the reason for placing the node into maintenance mode. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_maintenance enable='False' reason='Upgrade in progress'
[ "Manages", "node", "maintenance", "mode" ]
python
train
istresearch/scrapy-cluster
rest/rest_service.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/rest/rest_service.py#L480-L491
def _close_thread(self, thread, thread_name): """Closes daemon threads @param thread: the thread to close @param thread_name: a human readable name of the thread """ if thread is not None and thread.isAlive(): self.logger.debug("Waiting for {} thread to close".format(thread_name)) thread.join(timeout=self.settings['DAEMON_THREAD_JOIN_TIMEOUT']) if thread.isAlive(): self.logger.warn("{} daemon thread unable to be shutdown" " within timeout".format(thread_name))
[ "def", "_close_thread", "(", "self", ",", "thread", ",", "thread_name", ")", ":", "if", "thread", "is", "not", "None", "and", "thread", ".", "isAlive", "(", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Waiting for {} thread to close\"", ".", "for...
Closes daemon threads @param thread: the thread to close @param thread_name: a human readable name of the thread
[ "Closes", "daemon", "threads" ]
python
train
nielstron/pysyncthru
pysyncthru/__init__.py
https://github.com/nielstron/pysyncthru/blob/850a85ba0a74cbd5c408102bb02fd005d8b61ffb/pysyncthru/__init__.py#L131-L144
def input_tray_status(self, filter_supported: bool = True) -> Dict[int, Any]:
    """Return the state of input trays 1-4, keyed by tray number.

    When ``filter_supported`` is True, trays reported as unsupported
    (``opt`` == 0) are omitted from the result.
    """
    trays: Dict[int, Any] = {}
    for index in range(1, 5):
        try:
            status = self.data.get('{}{}'.format(SyncThru.TRAY, index), {})
            # Skip unsupported trays unless the caller asked for everything.
            if not (filter_supported and status.get('opt', 0) == 0):
                trays[index] = status
        except (KeyError, AttributeError):
            # Malformed or missing data for this tray: report it as empty.
            trays[index] = {}
    return trays
[ "def", "input_tray_status", "(", "self", ",", "filter_supported", ":", "bool", "=", "True", ")", "->", "Dict", "[", "int", ",", "Any", "]", ":", "tray_status", "=", "{", "}", "for", "i", "in", "range", "(", "1", ",", "5", ")", ":", "try", ":", "t...
Return the state of all input trays.
[ "Return", "the", "state", "of", "all", "input", "trays", "." ]
python
train
pschmitt/pyteleloisirs
pyteleloisirs/pyteleloisirs.py
https://github.com/pschmitt/pyteleloisirs/blob/d63610fd3729862455ac42afca440469f8063fba/pyteleloisirs/pyteleloisirs.py#L235-L249
async def async_get_current_program(channel, no_cache=False):
    '''
    Get the current program info for *channel*.

    Returns the guide entry whose start/end window contains the current
    time, or None when the guide is unavailable or no entry matches.
    '''
    chan = await async_determine_channel(channel)
    guide = await async_get_program_guide(chan, no_cache)
    if not guide:
        _LOGGER.warning('Could not retrieve TV program for %s', channel)
        return
    now = datetime.datetime.now()
    for prog in guide:
        start = prog.get('start_time')
        end = prog.get('end_time')
        # ROBUSTNESS FIX: skip guide entries missing either timestamp.
        # The original compared None against a datetime, which raises
        # TypeError on Python 3.
        if start is None or end is None:
            continue
        if start < now < end:
            return prog
[ "async", "def", "async_get_current_program", "(", "channel", ",", "no_cache", "=", "False", ")", ":", "chan", "=", "await", "async_determine_channel", "(", "channel", ")", "guide", "=", "await", "async_get_program_guide", "(", "chan", ",", "no_cache", ")", "if",...
Get the current program info
[ "Get", "the", "current", "program", "info" ]
python
train
flyte/upnpclient
upnpclient/ssdp.py
https://github.com/flyte/upnpclient/blob/5529b950df33c0eaf0c24a9a307cf00fe627d0ad/upnpclient/ssdp.py#L7-L22
def discover(timeout=5):
    """
    Discover UPnP devices on the network and return them as a list of
    `upnp.Device` instances. Servers that fail to instantiate are logged
    and silently skipped.
    """
    found = {}
    for entry in scan(timeout):
        location = entry.location
        if location in found:
            continue
        try:
            found[location] = Device(location)
        except Exception as exc:
            _getLogger("ssdp").error("Error '%s' for %s", exc, location)
    return list(found.values())
[ "def", "discover", "(", "timeout", "=", "5", ")", ":", "devices", "=", "{", "}", "for", "entry", "in", "scan", "(", "timeout", ")", ":", "if", "entry", ".", "location", "in", "devices", ":", "continue", "try", ":", "devices", "[", "entry", ".", "lo...
Convenience method to discover UPnP devices on the network. Returns a list of `upnp.Device` instances. Any invalid servers are silently ignored.
[ "Convenience", "method", "to", "discover", "UPnP", "devices", "on", "the", "network", ".", "Returns", "a", "list", "of", "upnp", ".", "Device", "instances", ".", "Any", "invalid", "servers", "are", "silently", "ignored", "." ]
python
train
yhat/db.py
db/utils.py
https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/utils.py#L5-L8
def profile_path(profile_id, profile):
    """Return the full path to the given profile for the current user.

    The filename is the concatenation of *profile_id* and *profile*,
    resolved under the user's home directory.
    """
    home_dir = os.path.expanduser("~")
    filename = profile_id + profile
    return os.path.join(home_dir, filename)
[ "def", "profile_path", "(", "profile_id", ",", "profile", ")", ":", "user", "=", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", "return", "os", ".", "path", ".", "join", "(", "user", ",", "profile_id", "+", "profile", ")" ]
Create full path to the given profile for the current user.
[ "Create", "full", "path", "to", "given", "provide", "for", "the", "current", "user", "." ]
python
train
QualiSystems/vCenterShell
package/cloudshell/cp/vcenter/commands/command_orchestrator.py
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/commands/command_orchestrator.py#L523-L551
def orchestration_save(self, context, mode="shallow", custom_params=None):
    """ Create a snapshot with a unique timestamp-based name and return
    SavedResults serialized as JSON.

    :param context: resource context of the vCenterShell
    :param mode: Snapshot save mode, default shallow. Currently not in use
    :param custom_params: Set of custom parameters to be supported in the future
    :return: SavedResults serialized as JSON
    :rtype: SavedResults
    """
    remote_resource = self._parse_remote_model(context)

    # Snapshot name is the creation timestamp, unique to the microsecond.
    created_date = datetime.now()
    snapshot_name = created_date.strftime('%y_%m_%d %H_%M_%S_%f')
    snapshot_path = self.save_snapshot(context=context, snapshot_name=snapshot_name)
    snapshot_path = self._strip_double_quotes(snapshot_path)

    artifact = OrchestrationSavedArtifact()
    artifact.artifact_type = 'vcenter_snapshot'
    artifact.identifier = snapshot_path

    artifacts_info = OrchestrationSavedArtifactsInfo(
        resource_name=remote_resource.cloud_provider,
        created_date=created_date,
        restore_rules={'requires_same_resource': True},
        saved_artifact=artifact)

    save_result = OrchestrationSaveResult(artifacts_info)
    return set_command_result(result=save_result, unpicklable=False)
[ "def", "orchestration_save", "(", "self", ",", "context", ",", "mode", "=", "\"shallow\"", ",", "custom_params", "=", "None", ")", ":", "resource_details", "=", "self", ".", "_parse_remote_model", "(", "context", ")", "created_date", "=", "datetime", ".", "now...
Creates a snapshot with a unique name and returns SavedResults as JSON :param context: resource context of the vCenterShell :param mode: Snapshot save mode, default shallow. Currently not in use :param custom_params: Set of custom parameter to be supported in the future :return: SavedResults serialized as JSON :rtype: SavedResults
[ "Creates", "a", "snapshot", "with", "a", "unique", "name", "and", "returns", "SavedResults", "as", "JSON", ":", "param", "context", ":", "resource", "context", "of", "the", "vCenterShell", ":", "param", "mode", ":", "Snapshot", "save", "mode", "default", "sh...
python
train
cga-harvard/Hypermap-Registry
hypermap/aggregator/utils.py
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/utils.py#L435-L445
def get_sanitized_endpoint(url):
    """
    Sanitize an endpoint URL by removing unneeded trailing parts.

    For ESRI endpoints, everything after the '/rest/services' marker is
    dropped; other URLs are only stripped of trailing whitespace.
    """
    cleaned = url.rstrip()
    marker = '/rest/services'
    if marker in cleaned:
        # Truncate right after the first occurrence of the marker.
        end = cleaned.index(marker) + len(marker)
        cleaned = cleaned[:end]
    return cleaned
[ "def", "get_sanitized_endpoint", "(", "url", ")", ":", "# sanitize esri", "sanitized_url", "=", "url", ".", "rstrip", "(", ")", "esri_string", "=", "'/rest/services'", "if", "esri_string", "in", "url", ":", "match", "=", "re", ".", "search", "(", "esri_string"...
Sanitize an endpoint by removing unneeded parameters
[ "Sanitize", "an", "endpoint", "as", "removing", "unneeded", "parameters" ]
python
train
lark-parser/lark
lark/tree.py
https://github.com/lark-parser/lark/blob/a798dec77907e74520dd7e90c7b6a4acc680633a/lark/tree.py#L144-L180
def pydot__tree_to_png(tree, filename, rankdir="LR"):
    """Render *tree* (data+children, without meta) as a colorful PNG.

    Possible values for `rankdir` are "TB", "LR", "BT", "RL", corresponding
    to directed graphs drawn from top to bottom, from left to right, from
    bottom to top, and from right to left, respectively.

    See: https://www.graphviz.org/doc/info/attrs.html#k:rankdir
    """
    import pydot
    graph = pydot.Dot(graph_type='digraph', rankdir=rankdir)

    # Mutable counter shared by the closures; each node gets a fresh id.
    counter = [0]

    def _next_id():
        node_id = counter[0]
        counter[0] += 1
        return node_id

    def _leaf_node(leaf):
        node = pydot.Node(_next_id(), label=repr(leaf))
        graph.add_node(node)
        return node

    def _branch_node(subtree):
        # Deterministic fill color from the node data; OR with 0x808080
        # keeps each channel bright enough to read the label.
        fill = (hash(subtree.data) & 0xffffff) | 0x808080
        # Children are created first so their ids precede the parent's,
        # matching the original numbering.
        children = [_branch_node(child) if isinstance(child, Tree) else _leaf_node(child)
                    for child in subtree.children]
        node = pydot.Node(_next_id(), style="filled", fillcolor="#%x" % fill, label=subtree.data)
        graph.add_node(node)
        for child_node in children:
            graph.add_edge(pydot.Edge(node, child_node))
        return node

    _branch_node(tree)
    graph.write_png(filename)
[ "def", "pydot__tree_to_png", "(", "tree", ",", "filename", ",", "rankdir", "=", "\"LR\"", ")", ":", "import", "pydot", "graph", "=", "pydot", ".", "Dot", "(", "graph_type", "=", "'digraph'", ",", "rankdir", "=", "rankdir", ")", "i", "=", "[", "0", "]",...
Creates a colorful image that represents the tree (data+children, without meta) Possible values for `rankdir` are "TB", "LR", "BT", "RL", corresponding to directed graphs drawn from top to bottom, from left to right, from bottom to top, and from right to left, respectively. See: https://www.graphviz.org/doc/info/attrs.html#k:rankdir
[ "Creates", "a", "colorful", "image", "that", "represents", "the", "tree", "(", "data", "+", "children", "without", "meta", ")" ]
python
train
Ouranosinc/xclim
xclim/utils.py
https://github.com/Ouranosinc/xclim/blob/2080d139188bd8de2aeca097a025c2d89d6e0e09/xclim/utils.py#L692-L717
def adjust_doy_calendar(source, target):
    """Interpolate from one set of dayofyear range to another calendar.

    Interpolate an array defined over a `dayofyear` range (say 1 to 360)
    to another `dayofyear` range (say 1 to 365).

    Parameters
    ----------
    source : xarray.DataArray
      Array with `dayofyear` coordinates.
    target : xarray.DataArray
      Array with `time` coordinate.

    Returns
    -------
    xarray.DataArray
      Interpolated source array over coordinates spanning the target
      `dayofyear` range.
    """
    source_doy_max = source.dayofyear.max()
    target_doy_max = infer_doy_max(target)

    # Calendars already agree: nothing to interpolate.
    if source_doy_max == target_doy_max:
        return source

    return _interpolate_doy_calendar(source, target_doy_max)
[ "def", "adjust_doy_calendar", "(", "source", ",", "target", ")", ":", "doy_max_source", "=", "source", ".", "dayofyear", ".", "max", "(", ")", "doy_max", "=", "infer_doy_max", "(", "target", ")", "if", "doy_max_source", "==", "doy_max", ":", "return", "sourc...
Interpolate from one set of dayofyear range to another calendar. Interpolate an array defined over a `dayofyear` range (say 1 to 360) to another `dayofyear` range (say 1 to 365). Parameters ---------- source : xarray.DataArray Array with `dayofyear` coordinates. target : xarray.DataArray Array with `time` coordinate. Returns ------- xarray.DataArray Interpolated source array over coordinates spanning the target `dayofyear` range.
[ "Interpolate", "from", "one", "set", "of", "dayofyear", "range", "to", "another", "calendar", "." ]
python
train
skorch-dev/skorch
skorch/history.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/history.py#L27-L37
def _getitem(item, i):
    """Extract value or values from a dict.

    Covers the case of a single key or multiple keys. Missing keys are
    replaced by the ``_none`` placeholder instead of raising.

    :param item: dict to read from.
    :param i: a single key, or a list/tuple of keys.
    :return: the single value, or a collection of values matching the
        container type of ``i`` (list of keys -> list, tuple -> tuple).
    """
    if not isinstance(i, (tuple, list)):
        return item.get(i, _none)
    # BUG FIX: the result container must mirror the type of the *key
    # collection* ``i``. The original tested ``isinstance(item, list)``,
    # but ``item`` is a dict, so a list of keys always produced a tuple.
    type_ = list if isinstance(i, list) else tuple
    return type_(item.get(j, _none) for j in i)
[ "def", "_getitem", "(", "item", ",", "i", ")", ":", "if", "not", "isinstance", "(", "i", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "item", ".", "get", "(", "i", ",", "_none", ")", "type_", "=", "list", "if", "isinstance", "(", "i...
Extract value or values from dicts. Covers the case of a single key or multiple keys. If not found, return placeholders instead.
[ "Extract", "value", "or", "values", "from", "dicts", "." ]
python
train
googleapis/google-cloud-python
storage/google/cloud/storage/blob.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/blob.py#L730-L759
def _get_writable_metadata(self):
    """Get the object / blob metadata which is writable.

    This is intended to be used when creating a new object / blob.

    See the `API reference docs`_ for more information, the fields
    marked as writable are:

    * ``acl``
    * ``cacheControl``
    * ``contentDisposition``
    * ``contentEncoding``
    * ``contentLanguage``
    * ``contentType``
    * ``crc32c``
    * ``md5Hash``
    * ``metadata``
    * ``name``
    * ``storageClass``

    For now, we don't support ``acl``, access control lists should be
    managed directly through :class:`ObjectACL` methods.
    """
    # NOTE: This assumes `self.name` is unicode.
    metadata = {"name": self.name}
    # Only include fields that were both changed and are writable.
    metadata.update(
        (key, self._properties[key])
        for key in self._changes
        if key in _WRITABLE_FIELDS
    )
    return metadata
[ "def", "_get_writable_metadata", "(", "self", ")", ":", "# NOTE: This assumes `self.name` is unicode.", "object_metadata", "=", "{", "\"name\"", ":", "self", ".", "name", "}", "for", "key", "in", "self", ".", "_changes", ":", "if", "key", "in", "_WRITABLE_FIELDS",...
Get the object / blob metadata which is writable. This is intended to be used when creating a new object / blob. See the `API reference docs`_ for more information, the fields marked as writable are: * ``acl`` * ``cacheControl`` * ``contentDisposition`` * ``contentEncoding`` * ``contentLanguage`` * ``contentType`` * ``crc32c`` * ``md5Hash`` * ``metadata`` * ``name`` * ``storageClass`` For now, we don't support ``acl``, access control lists should be managed directly through :class:`ObjectACL` methods.
[ "Get", "the", "object", "/", "blob", "metadata", "which", "is", "writable", "." ]
python
train
saltstack/salt
salt/modules/btrfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/btrfs.py#L802-L832
def subvolume_find_new(name, last_gen):
    '''
    List the recently modified files in a subvolume

    name
        Name of the subvolume

    last_gen
        Last transid marker from where to compare

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.subvolume_find_new /var/volumes/tmp 1024
    '''
    res = __salt__['cmd.run_all'](['btrfs', 'subvolume', 'find-new', name, last_gen])
    salt.utils.fsutils._verify_run(res)

    lines = res['stdout'].splitlines()
    # Filenames are the last token of each "inode ..." line.
    modified_files = [line.split()[-1] for line in lines if line.startswith('inode')]
    # The final output line carries the last transid as its last token.
    last_transid = lines[-1].split()[-1]

    return {
        'files': modified_files,
        'transid': last_transid,
    }
[ "def", "subvolume_find_new", "(", "name", ",", "last_gen", ")", ":", "cmd", "=", "[", "'btrfs'", ",", "'subvolume'", ",", "'find-new'", ",", "name", ",", "last_gen", "]", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ")", "salt", ".", ...
List the recently modified files in a subvolume name Name of the subvolume last_gen Last transid marker from where to compare CLI Example: .. code-block:: bash salt '*' btrfs.subvolume_find_new /var/volumes/tmp 1024
[ "List", "the", "recently", "modified", "files", "in", "a", "subvolume" ]
python
train
sprockets/sprockets-influxdb
sprockets_influxdb.py
https://github.com/sprockets/sprockets-influxdb/blob/cce73481b8f26b02e65e3f9914a9a22eceff3063/sprockets_influxdb.py#L423-L435
def set_timeout(milliseconds):
    """Override the maximum duration to wait for submitting measurements
    to InfluxDB.

    Stops any pending submission timeout and schedules a new one on the
    current IOLoop.

    :param int milliseconds: Maximum wait in milliseconds

    """
    global _timeout, _timeout_interval
    LOGGER.debug('Setting batch wait timeout to %i ms', milliseconds)
    # Remember the interval so future reschedules use the new value.
    _timeout_interval = milliseconds
    # Cancel the currently pending timeout, if any, before rescheduling.
    _maybe_stop_timeout()
    # NOTE(review): Tornado's IOLoop.add_timeout expects an absolute
    # deadline (time or timedelta), but a raw millisecond count is passed
    # here — as written this looks like it would fire immediately.
    # Confirm against the module's other add_timeout call sites.
    _timeout = ioloop.IOLoop.current().add_timeout(milliseconds, _on_timeout)
[ "def", "set_timeout", "(", "milliseconds", ")", ":", "global", "_timeout", ",", "_timeout_interval", "LOGGER", ".", "debug", "(", "'Setting batch wait timeout to %i ms'", ",", "milliseconds", ")", "_timeout_interval", "=", "milliseconds", "_maybe_stop_timeout", "(", ")"...
Override the maximum duration to wait for submitting measurements to InfluxDB. :param int milliseconds: Maximum wait in milliseconds
[ "Override", "the", "maximum", "duration", "to", "wait", "for", "submitting", "measurements", "to", "InfluxDB", "." ]
python
train
azavea/python-omgeo
omgeo/services/us_census.py
https://github.com/azavea/python-omgeo/blob/40f4e006f087dbc795a5d954ffa2c0eab433f8c9/omgeo/services/us_census.py#L53-L78
def _street_addr_from_response(self, match): """Construct a street address (no city, region, etc.) from a geocoder response. :param match: The match object returned by the geocoder. """ # Same caveat as above regarding the ordering of these fields; the # documentation is not explicit about the correct ordering for # reconstructing a full address, but implies that this is the ordering. ordered_fields = ['preQualifier', 'preDirection', 'preType', 'streetName', 'suffixType', 'suffixDirection', 'suffixQualifier'] result = [] # The address components only contain a from and to address, not the # actual number of the address that was matched, so we need to cheat a # bit and extract it from the full address string. This is likely to # miss some edge cases (hopefully only a few since this is a US-only # geocoder). addr_num_re = re.match(r'([0-9]+)', match['matchedAddress']) if not addr_num_re: # Give up return '' result.append(addr_num_re.group(0)) for field in ordered_fields: result.append(match['addressComponents'].get(field, '')) if any(result): return ' '.join([s for s in result if s]) # Filter out empty strings. else: return ''
[ "def", "_street_addr_from_response", "(", "self", ",", "match", ")", ":", "# Same caveat as above regarding the ordering of these fields; the", "# documentation is not explicit about the correct ordering for", "# reconstructing a full address, but implies that this is the ordering.", "ordered_...
Construct a street address (no city, region, etc.) from a geocoder response. :param match: The match object returned by the geocoder.
[ "Construct", "a", "street", "address", "(", "no", "city", "region", "etc", ".", ")", "from", "a", "geocoder", "response", "." ]
python
train
mangalam-research/selenic
selenic/util.py
https://github.com/mangalam-research/selenic/blob/2284c68e15fa3d34b88aa2eec1a2e8ecd37f44ad/selenic/util.py#L479-L525
def locations_within(a, b, tolerance):
    """
    Verifies whether two positions are the same, within a tolerance.

    The two locations must be dictionaries that have the same keys. If a
    key is present in one but not in the other, this is an error. The
    values must be integers or anything convertible to an integer through
    ``int``. (If you need floating point precision, this is not the
    function for you.)

    Do not rely on this function to determine whether two objects have
    the same keys: if the locations are within tolerance then the keys
    match, but otherwise nothing can be inferred, because the function
    returns as soon as it knows the two locations are **not** within
    tolerance.

    :param a: First position.
    :type a: :class:`dict`
    :param b: Second position.
    :type b: :class:`dict`
    :param tolerance: The tolerance within which the two positions must be.
    :return: An empty string if the comparison is successful. Otherwise,
             the string contains a description of the differences.
    :rtype: :class:`str`
    :raises ValueError: When a key is present in one object but not the
                        other.
    """
    differences = ''
    # Work on a copy of b so keys can be consumed as they are matched.
    remaining = dict(b)
    for key, value in a.items():
        if key not in remaining:
            raise ValueError("b does not have the key: " + key)
        left = int(value)
        right = int(remaining.pop(key))
        if abs(left - right) > tolerance:
            differences += 'key {0} differs: {1} {2}'.format(key, left, right)
    if remaining:
        raise ValueError("keys in b not seen in a: " + ", ".join(remaining.keys()))
    return differences
[ "def", "locations_within", "(", "a", ",", "b", ",", "tolerance", ")", ":", "ret", "=", "''", "# Clone b so that we can destroy it.", "b", "=", "dict", "(", "b", ")", "for", "(", "key", ",", "value", ")", "in", "a", ".", "items", "(", ")", ":", "if", ...
Verifies whether two positions are the same. A tolerance value determines how close the two positions must be to be considered "same". The two locations must be dictionaries that have the same keys. If a key is present in one but not in the other, this is an error. The values must be integers or anything that can be converted to an integer through ``int``. (If somehow you need floating point precision, this is not the function for you.) Do not rely on this function to determine whether two object have the same keys. If the function finds the locations to be within tolerances, then the two objects have the same keys. Otherwise, you cannot infer anything regarding the keys because the function will return as soon as it knows that the two locations are **not** within tolerance. :param a: First position. :type a: :class:`dict` :param b: Second position. :type b: :class:`dict` :param tolerance: The tolerance within which the two positions must be. :return: An empty string if the comparison is successful. Otherwise, the string contains a description of the differences. :rtype: :class:`str` :raises ValueError: When a key is present in one object but not the other.
[ "Verifies", "whether", "two", "positions", "are", "the", "same", ".", "A", "tolerance", "value", "determines", "how", "close", "the", "two", "positions", "must", "be", "to", "be", "considered", "same", "." ]
python
train
GaretJax/lancet
lancet/commands/workflow.py
https://github.com/GaretJax/lancet/blob/cf438c5c6166b18ee0dc5ffce55220793019bb95/lancet/commands/workflow.py#L158-L179
def pause(ctx):
    """
    Pause work on the current issue.

    This command puts the issue in the configured paused status and stops
    the current Harvest timer.
    """
    lancet = ctx.obj
    paused_status = lancet.config.get("tracker", "paused_status")

    # Look up the active issue and the tracker transition that moves it
    # into the paused status, then apply that transition.
    issue = get_issue(lancet)
    transition = get_transition(ctx, lancet, issue, paused_status)
    set_issue_status(lancet, issue, paused_status, transition)

    # Stop the running Harvest timer.
    with taskstatus("Pausing harvest timer") as ts:
        lancet.timer.pause()
        ts.ok("Harvest timer paused")
[ "def", "pause", "(", "ctx", ")", ":", "lancet", "=", "ctx", ".", "obj", "paused_status", "=", "lancet", ".", "config", ".", "get", "(", "\"tracker\"", ",", "\"paused_status\"", ")", "# Get the issue", "issue", "=", "get_issue", "(", "lancet", ")", "# Make ...
Pause work on the current issue. This command puts the issue in the configured paused status and stops the current Harvest timer.
[ "Pause", "work", "on", "the", "current", "issue", "." ]
python
train
scanny/python-pptx
pptx/shapes/shapetree.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/shapes/shapetree.py#L867-L873
def _pic(self):
    """Create and return the new `p:pic` element referencing the video."""
    make_video_pic = CT_Picture.new_video_pic
    return make_video_pic(
        self._shape_id, self._shape_name,
        self._video_rId, self._media_rId, self._poster_frame_rId,
        self._x, self._y, self._cx, self._cy,
    )
[ "def", "_pic", "(", "self", ")", ":", "return", "CT_Picture", ".", "new_video_pic", "(", "self", ".", "_shape_id", ",", "self", ".", "_shape_name", ",", "self", ".", "_video_rId", ",", "self", ".", "_media_rId", ",", "self", ".", "_poster_frame_rId", ",", ...
Return the new `p:pic` element referencing the video.
[ "Return", "the", "new", "p", ":", "pic", "element", "referencing", "the", "video", "." ]
python
train
opencobra/cobrapy
cobra/io/json.py
https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/io/json.py#L69-L112
def save_json_model(model, filename, sort=False, pretty=False, **kwargs): """ Write the cobra model to a file in JSON format. ``kwargs`` are passed on to ``json.dump``. Parameters ---------- model : cobra.Model The cobra model to represent. filename : str or file-like File path or descriptor that the JSON representation should be written to. sort : bool, optional Whether to sort the metabolites, reactions, and genes or maintain the order defined in the model. pretty : bool, optional Whether to format the JSON more compactly (default) or in a more verbose but easier to read fashion. Can be partially overwritten by the ``kwargs``. See Also -------- to_json : Return a string representation. json.dump : Base function. """ obj = model_to_dict(model, sort=sort) obj[u"version"] = JSON_SPEC if pretty: dump_opts = { "indent": 4, "separators": (",", ": "), "sort_keys": True, "allow_nan": False} else: dump_opts = { "indent": 0, "separators": (",", ":"), "sort_keys": False, "allow_nan": False} dump_opts.update(**kwargs) if isinstance(filename, string_types): with open(filename, "w") as file_handle: json.dump(obj, file_handle, **dump_opts) else: json.dump(obj, filename, **dump_opts)
[ "def", "save_json_model", "(", "model", ",", "filename", ",", "sort", "=", "False", ",", "pretty", "=", "False", ",", "*", "*", "kwargs", ")", ":", "obj", "=", "model_to_dict", "(", "model", ",", "sort", "=", "sort", ")", "obj", "[", "u\"version\"", ...
Write the cobra model to a file in JSON format. ``kwargs`` are passed on to ``json.dump``. Parameters ---------- model : cobra.Model The cobra model to represent. filename : str or file-like File path or descriptor that the JSON representation should be written to. sort : bool, optional Whether to sort the metabolites, reactions, and genes or maintain the order defined in the model. pretty : bool, optional Whether to format the JSON more compactly (default) or in a more verbose but easier to read fashion. Can be partially overwritten by the ``kwargs``. See Also -------- to_json : Return a string representation. json.dump : Base function.
[ "Write", "the", "cobra", "model", "to", "a", "file", "in", "JSON", "format", "." ]
python
valid
RLBot/RLBot
src/main/python/rlbot/gui/qt_root.py
https://github.com/RLBot/RLBot/blob/3f9b6bec8b9baf4dcfff0f6cf3103c8744ac6234/src/main/python/rlbot/gui/qt_root.py#L79-L95
def fixed_indices(self): """ Agents in the GUI might not have following overall indices, thereby a file saved through the GUI would cause other bots to start than the GUI when ran :return: CustomConfig instance, copy of the overall config which has the indices sorted out """ config = self.overall_config.copy() used_indices = sorted(self.index_manager.numbers) not_used_indices = [e for e in range(MAX_PLAYERS) if e not in used_indices] order = used_indices + not_used_indices header = config[PARTICIPANT_CONFIGURATION_HEADER] for name, config_value in header.values.items(): old_values = list(config_value.value) for i in range(MAX_PLAYERS): config_value.set_value(old_values[order[i]], index=i) return config
[ "def", "fixed_indices", "(", "self", ")", ":", "config", "=", "self", ".", "overall_config", ".", "copy", "(", ")", "used_indices", "=", "sorted", "(", "self", ".", "index_manager", ".", "numbers", ")", "not_used_indices", "=", "[", "e", "for", "e", "in"...
Agents in the GUI might not have following overall indices, thereby a file saved through the GUI would cause other bots to start than the GUI when ran :return: CustomConfig instance, copy of the overall config which has the indices sorted out
[ "Agents", "in", "the", "GUI", "might", "not", "have", "following", "overall", "indices", "thereby", "a", "file", "saved", "through", "the", "GUI", "would", "cause", "other", "bots", "to", "start", "than", "the", "GUI", "when", "ran", ":", "return", ":", ...
python
train
saltstack/salt
salt/modules/boto_iam.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_iam.py#L567-L593
def delete_group_policy(group_name, policy_name, region=None, key=None, keyid=None, profile=None): ''' Delete a group policy. CLI Example:: .. code-block:: bash salt myminion boto_iam.delete_group_policy mygroup mypolicy ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False _policy = get_group_policy( group_name, policy_name, region, key, keyid, profile ) if not _policy: return True try: conn.delete_group_policy(group_name, policy_name) log.info('Successfully deleted policy %s for IAM group %s.', policy_name, group_name) return True except boto.exception.BotoServerError as e: log.debug(e) log.error('Failed to delete policy %s for IAM group %s.', policy_name, group_name) return False
[ "def", "delete_group_policy", "(", "group_name", ",", "policy_name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "ke...
Delete a group policy. CLI Example:: .. code-block:: bash salt myminion boto_iam.delete_group_policy mygroup mypolicy
[ "Delete", "a", "group", "policy", "." ]
python
train
OCA/openupgradelib
openupgradelib/openupgrade.py
https://github.com/OCA/openupgradelib/blob/b220b6498075d62c1b64073cc934513a465cfd85/openupgradelib/openupgrade.py#L836-L956
def set_defaults(cr, pool, default_spec, force=False, use_orm=False): """ Set default value. Useful for fields that are newly required. Uses orm, so call from the post script. :param pool: you can pass 'env' as well. :param default_spec: a hash with model names as keys. Values are lists \ of tuples (field, value). None as a value has a special meaning: it \ assigns the default value. If this value is provided by a function, the \ function is called as the user that created the resource. :param force: overwrite existing values. To be used for assigning a non- \ default value (presumably in the case of a new column). The ORM assigns \ the default value as declared in the model in an earlier stage of the \ process. Beware of issues with resources loaded from new data that \ actually do require the model's default, in combination with the post \ script possible being run multiple times. :param use_orm: If set to True, the write operation of the default value \ will be triggered using ORM instead on an SQL clause (default). 
""" def write_value(ids, field, value): logger.debug( "model %s, field %s: setting default value of resources %s to %s", model, field, ids, unicode(value)) if use_orm: if version_info[0] <= 7: for res_id in ids: # Iterating over ids here as a workaround for lp:1131653 obj.write(cr, SUPERUSER_ID, [res_id], {field: value}) else: if api and isinstance(pool, api.Environment): obj.browse(ids).write({field: value}) else: obj.write(cr, SUPERUSER_ID, ids, {field: value}) else: query, params = "UPDATE %s SET %s = %%s WHERE id IN %%s" % ( obj._table, field), (value, tuple(ids)) # handle fields inherited from somewhere else if version_info[0] >= 10: columns = obj._fields else: columns = obj._columns if field not in columns: query, params = None, None for model_name in obj._inherits: if obj._inherit_fields[field][0] != model_name: continue col = obj._inherits[model_name] # this is blatantly stolen and adapted from # https://github.com/OCA/OCB/blob/def7db0b93e45eda7b51b3b61 # bae1e975d07968b/openerp/osv/orm.py#L4307 nids = [] for sub_ids in cr.split_for_in_conditions(ids): cr.execute( 'SELECT DISTINCT %s FROM %s WHERE id IN %%s' % ( col, obj._table), (sub_ids,)) nids.extend(x for x, in cr.fetchall()) query, params = "UPDATE %s SET %s = %%s WHERE id IN %%s" %\ (pool[model_name]._table, field), (value, tuple(nids)) if not query: do_raise("Can't set default for %s on %s!" 
% ( field, obj._name)) # cope with really big tables for sub_ids in cr.split_for_in_conditions(params[1]): cr.execute(query, (params[0], sub_ids)) for model in default_spec.keys(): try: obj = pool[model] except KeyError: do_raise( "Migration: error setting default, no such model: %s" % model) for field, value in default_spec[model]: domain = not force and [(field, '=', False)] or [] if api and isinstance(pool, api.Environment): ids = obj.search(domain).ids else: ids = obj.search(cr, SUPERUSER_ID, domain) if not ids: continue if value is None: if version_info[0] > 7: if api and isinstance(pool, api.Environment): value = obj.default_get([field]).get(field) else: value = obj.default_get( cr, SUPERUSER_ID, [field]).get(field) if value: write_value(ids, field, value) else: # For older versions, compute defaults per user anymore # if the default is a method. If we need this in newer # versions, make it a parameter. if field in obj._defaults: if not callable(obj._defaults[field]): write_value(ids, field, obj._defaults[field]) else: cr.execute( "SELECT id, COALESCE(create_uid, 1) FROM %s " % obj._table + "WHERE id in %s", (tuple(ids),)) # Execute the function once per user_id user_id_map = {} for row in cr.fetchall(): user_id_map.setdefault(row[1], []).append( row[0]) for user_id in user_id_map: write_value( user_id_map[user_id], field, obj._defaults[field]( obj, cr, user_id, None)) else: error = ( "OpenUpgrade: error setting default, field %s " "with None default value not in %s' _defaults" % ( field, model)) logger.error(error) # this exc. seems to get lost in a higher up try block except_orm("OpenUpgrade", error) else: write_value(ids, field, value)
[ "def", "set_defaults", "(", "cr", ",", "pool", ",", "default_spec", ",", "force", "=", "False", ",", "use_orm", "=", "False", ")", ":", "def", "write_value", "(", "ids", ",", "field", ",", "value", ")", ":", "logger", ".", "debug", "(", "\"model %s, fi...
Set default value. Useful for fields that are newly required. Uses orm, so call from the post script. :param pool: you can pass 'env' as well. :param default_spec: a hash with model names as keys. Values are lists \ of tuples (field, value). None as a value has a special meaning: it \ assigns the default value. If this value is provided by a function, the \ function is called as the user that created the resource. :param force: overwrite existing values. To be used for assigning a non- \ default value (presumably in the case of a new column). The ORM assigns \ the default value as declared in the model in an earlier stage of the \ process. Beware of issues with resources loaded from new data that \ actually do require the model's default, in combination with the post \ script possible being run multiple times. :param use_orm: If set to True, the write operation of the default value \ will be triggered using ORM instead on an SQL clause (default).
[ "Set", "default", "value", ".", "Useful", "for", "fields", "that", "are", "newly", "required", ".", "Uses", "orm", "so", "call", "from", "the", "post", "script", "." ]
python
train
PmagPy/PmagPy
dialogs/pmag_er_magic_dialogs.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L577-L681
def InitSiteCheck(self): """make an interactive grid in which users can edit site names as well as which location a site belongs to""" self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER) text = """Step 3: Check that all sites are correctly named, and that they belong to the correct location. Fill in the additional columns with controlled vocabularies. The columns for class, lithology, and type can take multiple values in the form of a colon-delimited list. You may use the drop-down menus to add as many values as needed in these columns. (see the help button for more details) note: Changes to site_class, site_lithology, or site_type will overwrite er_samples.txt However, you will be able to edit sample_class, sample_lithology, and sample_type in step 4 **Denotes controlled vocabulary""" label = wx.StaticText(self.panel, label=text) #self.Data_hierarchy = self.ErMagic.Data_hierarchy self.sites = sorted(self.er_magic_data.make_name_list(self.er_magic_data.sites)) #for val in ['er_citation_names', 'er_location_name', 'er_site_name', 'site_class', 'site_lithology', 'site_type', 'site_definition', 'site_lat', 'site_lon']: # # try: # self.er_magic_data.headers['site']['er'][0].remove(val) # except ValueError: # pass self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'site', self.er_magic_data.headers, self.panel, 'location') self.site_grid = self.grid_builder.make_grid(incl_pmag=False) self.site_grid.InitUI() self.grid_builder.add_data_to_grid(self.site_grid, 'site', incl_pmag=False) self.grid = self.site_grid # populate site_definition as 's' by default if no value is provided (indicates that site is single, not composite) rows = self.site_grid.GetNumberRows() col = 6 for row in range(rows): cell = self.site_grid.GetCellValue(row, col) if not cell: self.site_grid.SetCellValue(row, col, 's') # initialize all needed drop-down menus locations = sorted(self.er_magic_data.make_name_list(self.er_magic_data.locations)) self.drop_down_menu = 
drop_down_menus.Menus("site", self, self.site_grid, locations) ### Create Buttons ### hbox_one = wx.BoxSizer(wx.HORIZONTAL) self.addLocButton = wx.Button(self.panel, label="Add a new location") self.Bind(wx.EVT_BUTTON, self.on_addLocButton, self.addLocButton) hbox_one.Add(self.addLocButton, flag=wx.RIGHT, border=10) self.helpButton = wx.Button(self.panel, label="Help") self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicSiteHelp.html"), self.helpButton) hbox_one.Add(self.helpButton) hboxok = wx.BoxSizer(wx.HORIZONTAL) self.saveButton = wx.Button(self.panel, id=-1, label='Save') self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.site_grid), self.saveButton) self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel') self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton) self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue') self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.site_grid, next_dia=self.InitSampCheck), self.continueButton) self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back") previous_dia = self.InitSampCheck self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia=previous_dia), self.backButton) hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10) hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10) hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10) hboxok.Add(self.backButton) # hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'site', self.grid) self.deleteRowButton = hboxgrid.deleteRowButton self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid) ### Make Containers ### vbox = wx.BoxSizer(wx.VERTICAL) vbox.Add(label, flag=wx.ALIGN_CENTER|wx.BOTTOM|wx.TOP, border=20) vbox.Add(hbox_one, flag=wx.BOTTOM|wx.LEFT, border=10) vbox.Add(hboxok, flag=wx.BOTTOM|wx.LEFT, border=10) vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.LEFT, border=10) vbox.Add(self.site_grid, flag=wx.ALL|wx.EXPAND, border=10) # 
EXPAND ?? vbox.AddSpacer(20) self.hbox_all = wx.BoxSizer(wx.HORIZONTAL) self.hbox_all.AddSpacer(20) self.hbox_all.Add(vbox) self.hbox_all.AddSpacer(20) self.panel.SetSizer(self.hbox_all) #if sys.platform in ['win32', 'win64']: # self.panel.SetScrollbars(20, 20, 50, 50) self.hbox_all.Fit(self) self.Centre() self.Show() # this combination prevents a display error that (without the fix) only resolves on manually resizing the window self.site_grid.ForceRefresh() self.panel.Refresh() self.Hide() self.Show()
[ "def", "InitSiteCheck", "(", "self", ")", ":", "self", ".", "panel", "=", "wx", ".", "Panel", "(", "self", ",", "style", "=", "wx", ".", "SIMPLE_BORDER", ")", "text", "=", "\"\"\"Step 3:\nCheck that all sites are correctly named, and that they belong to the correct lo...
make an interactive grid in which users can edit site names as well as which location a site belongs to
[ "make", "an", "interactive", "grid", "in", "which", "users", "can", "edit", "site", "names", "as", "well", "as", "which", "location", "a", "site", "belongs", "to" ]
python
train
kshlm/gant
gant/utils/docker_helper.py
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L61-L71
def container_by_name(self, name): """ Returns container with given name """ if not name: return None # docker prepends a '/' to container names in the container dict name = '/'+name return next((container for container in self.containers(all=True) if name in container['Names']), None)
[ "def", "container_by_name", "(", "self", ",", "name", ")", ":", "if", "not", "name", ":", "return", "None", "# docker prepends a '/' to container names in the container dict", "name", "=", "'/'", "+", "name", "return", "next", "(", "(", "container", "for", "contai...
Returns container with given name
[ "Returns", "container", "with", "given", "name" ]
python
train
notifiers/notifiers
notifiers/logging.py
https://github.com/notifiers/notifiers/blob/6dd8aafff86935dbb4763db9c56f9cdd7fc08b65/notifiers/logging.py#L41-L52
def emit(self, record): """ Override the :meth:`~logging.Handler.emit` method that takes the ``msg`` attribute from the log record passed :param record: :class:`logging.LogRecord` """ data = copy.deepcopy(self.defaults) data["message"] = self.format(record) try: self.provider.notify(raise_on_errors=True, **data) except Exception: self.handleError(record)
[ "def", "emit", "(", "self", ",", "record", ")", ":", "data", "=", "copy", ".", "deepcopy", "(", "self", ".", "defaults", ")", "data", "[", "\"message\"", "]", "=", "self", ".", "format", "(", "record", ")", "try", ":", "self", ".", "provider", ".",...
Override the :meth:`~logging.Handler.emit` method that takes the ``msg`` attribute from the log record passed :param record: :class:`logging.LogRecord`
[ "Override", "the", ":", "meth", ":", "~logging", ".", "Handler", ".", "emit", "method", "that", "takes", "the", "msg", "attribute", "from", "the", "log", "record", "passed" ]
python
train
brbsix/subnuker
subnuker.py
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L283-L292
def renumber(self): """Re-number cells.""" num = 0 for cell in self.cells: cell_split = cell.splitlines() if len(cell_split) >= 2: num += 1 cell_split[0] = str(num) yield '\n'.join(cell_split)
[ "def", "renumber", "(", "self", ")", ":", "num", "=", "0", "for", "cell", "in", "self", ".", "cells", ":", "cell_split", "=", "cell", ".", "splitlines", "(", ")", "if", "len", "(", "cell_split", ")", ">=", "2", ":", "num", "+=", "1", "cell_split", ...
Re-number cells.
[ "Re", "-", "number", "cells", "." ]
python
train
briney/abutils
abutils/core/pair.py
https://github.com/briney/abutils/blob/944755fc7d28bfc7d4f1ffad94ca0bf9d74ec54b/abutils/core/pair.py#L305-L325
def fasta(self, key='vdj_nt', append_chain=True): ''' Returns the sequence pair as a fasta string. If the Pair object contains both heavy and light chain sequences, both will be returned as a single string. By default, the fasta string contains the 'vdj_nt' sequence for each chain. To change, use the <key> option to select an alternate sequence. By default, the chain (heavy or light) will be appended to the sequence name: >MySequence_heavy To just use the pair name (which will result in duplicate sequence names for Pair objects with both heavy and light chains), set <append_chain> to False. ''' fastas = [] for s, chain in [(self.heavy, 'heavy'), (self.light, 'light')]: if s is not None: c = '_{}'.format(chain) if append_chain else '' fastas.append('>{}{}\n{}'.format(s['seq_id'], c, s[key])) return '\n'.join(fastas)
[ "def", "fasta", "(", "self", ",", "key", "=", "'vdj_nt'", ",", "append_chain", "=", "True", ")", ":", "fastas", "=", "[", "]", "for", "s", ",", "chain", "in", "[", "(", "self", ".", "heavy", ",", "'heavy'", ")", ",", "(", "self", ".", "light", ...
Returns the sequence pair as a fasta string. If the Pair object contains both heavy and light chain sequences, both will be returned as a single string. By default, the fasta string contains the 'vdj_nt' sequence for each chain. To change, use the <key> option to select an alternate sequence. By default, the chain (heavy or light) will be appended to the sequence name: >MySequence_heavy To just use the pair name (which will result in duplicate sequence names for Pair objects with both heavy and light chains), set <append_chain> to False.
[ "Returns", "the", "sequence", "pair", "as", "a", "fasta", "string", ".", "If", "the", "Pair", "object", "contains", "both", "heavy", "and", "light", "chain", "sequences", "both", "will", "be", "returned", "as", "a", "single", "string", "." ]
python
train
proycon/clam
clam/common/parameters.py
https://github.com/proycon/clam/blob/09d15cfc26d7cbe0f5976cdd5424dc446d10dbf3/clam/common/parameters.py#L459-L487
def xml(self, indent = ""): """This methods renders an XML representation of this parameter, along with its selected value, and feedback on validation errors""" xml = indent + "<" + self.__class__.__name__ xml += ' id="'+self.id + '"' xml += ' name="'+xmlescape(self.name) + '"' xml += ' description="'+xmlescape(self.description) + '"' if self.paramflag: xml += ' flag="'+self.paramflag + '"' if self.multi: xml += ' multi="true"' for key, value in self.kwargs.items(): if key != 'choices' and key != 'default' and key != 'flag' and key != 'paramflag': if isinstance(value, bool): xml += ' ' + key + '="' + str(int(value))+ '"' elif isinstance(value, list): xml += ' ' + key + '="'+",".join(value)+ '"' else: xml += ' ' + key + '="'+xmlescape(value)+ '"' if self.error: xml += ' error="'+self.error + '"' xml += ">" for key, value in self.choices: if self.value == key or (isinstance(self.value ,list) and key in self.value): xml += " <choice id=\""+key+"\" selected=\"1\">" + xmlescape(value) + "</choice>" else: xml += " <choice id=\""+key+"\">" + xmlescape(value) + "</choice>" xml += "</" + self.__class__.__name__ + ">" return xml
[ "def", "xml", "(", "self", ",", "indent", "=", "\"\"", ")", ":", "xml", "=", "indent", "+", "\"<\"", "+", "self", ".", "__class__", ".", "__name__", "xml", "+=", "' id=\"'", "+", "self", ".", "id", "+", "'\"'", "xml", "+=", "' name=\"'", "+", "xmle...
This methods renders an XML representation of this parameter, along with its selected value, and feedback on validation errors
[ "This", "methods", "renders", "an", "XML", "representation", "of", "this", "parameter", "along", "with", "its", "selected", "value", "and", "feedback", "on", "validation", "errors" ]
python
train
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L117-L149
def insert(self, data): """Insert a row into the .csv file. Parameters ---------- data : :obj:`dict` A dictionary mapping keys (header strings) to values. Returns ------- int The UID for the new row. Raises ------ Exception If the value for a given header is not of the appropriate type. """ row = {key:self._default_entry for key in self._headers} row['_uid'] = self._get_new_uid() for key, val in data.items(): if key in ('_uid', '_default'): logging.warn("Cannot manually set columns _uid or _default of a row! Given data: {0}".format(data)) continue if not isinstance(val, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]]): raise Exception('Data type mismatch for column {0}. Expected: {1}, got: {2}'.format(key, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]], type(val))) row[key] = val self._table.append(row) self._save() return row['_uid']
[ "def", "insert", "(", "self", ",", "data", ")", ":", "row", "=", "{", "key", ":", "self", ".", "_default_entry", "for", "key", "in", "self", ".", "_headers", "}", "row", "[", "'_uid'", "]", "=", "self", ".", "_get_new_uid", "(", ")", "for", "key", ...
Insert a row into the .csv file. Parameters ---------- data : :obj:`dict` A dictionary mapping keys (header strings) to values. Returns ------- int The UID for the new row. Raises ------ Exception If the value for a given header is not of the appropriate type.
[ "Insert", "a", "row", "into", "the", ".", "csv", "file", "." ]
python
train
crazy-canux/arguspy
scripts/check_mssql.py
https://github.com/crazy-canux/arguspy/blob/e9486b5df61978a990d56bf43de35f3a4cdefcc3/scripts/check_mssql.py#L475-L485
def main(): """Register your own mode and handle method here.""" plugin = Register() if plugin.args.option == 'sql': plugin.sql_handle() elif plugin.args.option == 'database-used': plugin.database_used_handle() elif plugin.args.option == 'databaselog-used': plugin.database_log_used_handle() else: plugin.unknown("Unknown actions.")
[ "def", "main", "(", ")", ":", "plugin", "=", "Register", "(", ")", "if", "plugin", ".", "args", ".", "option", "==", "'sql'", ":", "plugin", ".", "sql_handle", "(", ")", "elif", "plugin", ".", "args", ".", "option", "==", "'database-used'", ":", "plu...
Register your own mode and handle method here.
[ "Register", "your", "own", "mode", "and", "handle", "method", "here", "." ]
python
valid
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/query.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/query.py#L165-L206
def select(self, field_paths): """Project documents matching query to a limited set of fields. See :meth:`~.firestore_v1beta1.client.Client.field_path` for more information on **field paths**. If the current query already has a projection set (i.e. has already called :meth:`~.firestore_v1beta1.query.Query.select`), this will overwrite it. Args: field_paths (Iterable[str, ...]): An iterable of field paths (``.``-delimited list of field names) to use as a projection of document fields in the query results. Returns: ~.firestore_v1beta1.query.Query: A "projected" query. Acts as a copy of the current query, modified with the newly added projection. Raises: ValueError: If any ``field_path`` is invalid. """ field_paths = list(field_paths) for field_path in field_paths: field_path_module.split_field_path(field_path) # raises new_projection = query_pb2.StructuredQuery.Projection( fields=[ query_pb2.StructuredQuery.FieldReference(field_path=field_path) for field_path in field_paths ] ) return self.__class__( self._parent, projection=new_projection, field_filters=self._field_filters, orders=self._orders, limit=self._limit, offset=self._offset, start_at=self._start_at, end_at=self._end_at, )
[ "def", "select", "(", "self", ",", "field_paths", ")", ":", "field_paths", "=", "list", "(", "field_paths", ")", "for", "field_path", "in", "field_paths", ":", "field_path_module", ".", "split_field_path", "(", "field_path", ")", "# raises", "new_projection", "=...
Project documents matching query to a limited set of fields. See :meth:`~.firestore_v1beta1.client.Client.field_path` for more information on **field paths**. If the current query already has a projection set (i.e. has already called :meth:`~.firestore_v1beta1.query.Query.select`), this will overwrite it. Args: field_paths (Iterable[str, ...]): An iterable of field paths (``.``-delimited list of field names) to use as a projection of document fields in the query results. Returns: ~.firestore_v1beta1.query.Query: A "projected" query. Acts as a copy of the current query, modified with the newly added projection. Raises: ValueError: If any ``field_path`` is invalid.
[ "Project", "documents", "matching", "query", "to", "a", "limited", "set", "of", "fields", "." ]
python
train
HewlettPackard/python-hpOneView
hpOneView/resources/fc_sans/san_managers.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/fc_sans/san_managers.py#L161-L173
def get_by_name(self, name): """ Gets a SAN Manager by name. Args: name: Name of the SAN Manager Returns: dict: SAN Manager. """ san_managers = self._client.get_all() result = [x for x in san_managers if x['name'] == name] return result[0] if result else None
[ "def", "get_by_name", "(", "self", ",", "name", ")", ":", "san_managers", "=", "self", ".", "_client", ".", "get_all", "(", ")", "result", "=", "[", "x", "for", "x", "in", "san_managers", "if", "x", "[", "'name'", "]", "==", "name", "]", "return", ...
Gets a SAN Manager by name. Args: name: Name of the SAN Manager Returns: dict: SAN Manager.
[ "Gets", "a", "SAN", "Manager", "by", "name", "." ]
python
train
vtkiorg/vtki
vtki/filters.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/filters.py#L897-L911
def triangulate(dataset): """ Returns an all triangle mesh. More complex polygons will be broken down into triangles. Returns ------- mesh : vtki.UnstructuredGrid Mesh containing only triangles. """ alg = vtk.vtkDataSetTriangleFilter() alg.SetInputData(dataset) alg.Update() return _get_output(alg)
[ "def", "triangulate", "(", "dataset", ")", ":", "alg", "=", "vtk", ".", "vtkDataSetTriangleFilter", "(", ")", "alg", ".", "SetInputData", "(", "dataset", ")", "alg", ".", "Update", "(", ")", "return", "_get_output", "(", "alg", ")" ]
Returns an all triangle mesh. More complex polygons will be broken down into triangles. Returns ------- mesh : vtki.UnstructuredGrid Mesh containing only triangles.
[ "Returns", "an", "all", "triangle", "mesh", ".", "More", "complex", "polygons", "will", "be", "broken", "down", "into", "triangles", "." ]
python
train
ioos/cc-plugin-ncei
cc_plugin_ncei/ncei_base.py
https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_base.py#L92-L115
def _check_min_max_range(self, var, test_ctx): """ Checks that either both valid_min and valid_max exist, or valid_range exists. """ if 'valid_range' in var.ncattrs(): test_ctx.assert_true(var.valid_range.dtype == var.dtype and len(var.valid_range) == 2 and var.valid_range[0] <= var.valid_range[1], "valid_range must be a two element vector of min followed by max with the same data type as {}".format(var.name) ) else: for bound in ('valid_min', 'valid_max'): v_bound = getattr(var, bound, '') warn_msg = '{} attribute should exist, have the same type as {}, and not be empty or valid_range should be defined'.format(bound, var.name) # need to special case str attributes since they aren't directly # comparable to numpy dtypes if isinstance(v_bound, six.string_types): test_ctx.assert_true(v_bound != '' and var.dtype.char == 'S', warn_msg) # otherwise compare the numpy types directly else: test_ctx.assert_true(v_bound.dtype == var.dtype, warn_msg) return test_ctx
[ "def", "_check_min_max_range", "(", "self", ",", "var", ",", "test_ctx", ")", ":", "if", "'valid_range'", "in", "var", ".", "ncattrs", "(", ")", ":", "test_ctx", ".", "assert_true", "(", "var", ".", "valid_range", ".", "dtype", "==", "var", ".", "dtype",...
Checks that either both valid_min and valid_max exist, or valid_range exists.
[ "Checks", "that", "either", "both", "valid_min", "and", "valid_max", "exist", "or", "valid_range", "exists", "." ]
python
train
abilian/abilian-core
abilian/i18n.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/i18n.py#L323-L343
def get_template_i18n(template_name, locale): """Build template list with preceding locale if found.""" if locale is None: return [template_name] template_list = [] parts = template_name.rsplit(".", 1) root = parts[0] suffix = parts[1] if locale.territory is not None: locale_string = "_".join([locale.language, locale.territory]) localized_template_path = ".".join([root, locale_string, suffix]) template_list.append(localized_template_path) localized_template_path = ".".join([root, locale.language, suffix]) template_list.append(localized_template_path) # append the default template_list.append(template_name) return template_list
[ "def", "get_template_i18n", "(", "template_name", ",", "locale", ")", ":", "if", "locale", "is", "None", ":", "return", "[", "template_name", "]", "template_list", "=", "[", "]", "parts", "=", "template_name", ".", "rsplit", "(", "\".\"", ",", "1", ")", ...
Build template list with preceding locale if found.
[ "Build", "template", "list", "with", "preceding", "locale", "if", "found", "." ]
python
train
edx/XBlock
xblock/core.py
https://github.com/edx/XBlock/blob/368bf46e2c0ee69bbb21817f428c4684936e18ee/xblock/core.py#L157-L174
def load_tagged_classes(cls, tag, fail_silently=True): """ Produce a sequence of all XBlock classes tagged with `tag`. fail_silently causes the code to simply log warnings if a plugin cannot import. The goal is to be able to use part of libraries from an XBlock (and thus have it installed), even if the overall XBlock cannot be used (e.g. depends on Django in a non-Django application). There is diagreement about whether this is a good idea, or whether we should see failures early (e.g. on startup or first page load), and in what contexts. Hence, the flag. """ # Allow this method to access the `_class_tags` # pylint: disable=W0212 for name, class_ in cls.load_classes(fail_silently): if tag in class_._class_tags: yield name, class_
[ "def", "load_tagged_classes", "(", "cls", ",", "tag", ",", "fail_silently", "=", "True", ")", ":", "# Allow this method to access the `_class_tags`", "# pylint: disable=W0212", "for", "name", ",", "class_", "in", "cls", ".", "load_classes", "(", "fail_silently", ")", ...
Produce a sequence of all XBlock classes tagged with `tag`. fail_silently causes the code to simply log warnings if a plugin cannot import. The goal is to be able to use part of libraries from an XBlock (and thus have it installed), even if the overall XBlock cannot be used (e.g. depends on Django in a non-Django application). There is diagreement about whether this is a good idea, or whether we should see failures early (e.g. on startup or first page load), and in what contexts. Hence, the flag.
[ "Produce", "a", "sequence", "of", "all", "XBlock", "classes", "tagged", "with", "tag", "." ]
python
train
pudo/jsongraph
jsongraph/context.py
https://github.com/pudo/jsongraph/blob/35e4f397dbe69cd5553cf9cb9ab98859c3620f03/jsongraph/context.py#L29-L36
def add(self, schema, data): """ Stage ``data`` as a set of statements, based on the given ``schema`` definition. """ binding = self.get_binding(schema, data) uri, triples = triplify(binding) for triple in triples: self.graph.add(triple) return uri
[ "def", "add", "(", "self", ",", "schema", ",", "data", ")", ":", "binding", "=", "self", ".", "get_binding", "(", "schema", ",", "data", ")", "uri", ",", "triples", "=", "triplify", "(", "binding", ")", "for", "triple", "in", "triples", ":", "self", ...
Stage ``data`` as a set of statements, based on the given ``schema`` definition.
[ "Stage", "data", "as", "a", "set", "of", "statements", "based", "on", "the", "given", "schema", "definition", "." ]
python
train
thespacedoctor/astrocalc
astrocalc/coords/unit_conversion.py
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/coords/unit_conversion.py#L451-L503
def ra_dec_to_cartesian( self, ra, dec): """*Convert an RA, DEC coordinate set to x, y, z cartesian coordinates* **Key Arguments:** - ``ra`` -- right ascension in sexegesimal or decimal degress. - ``dec`` -- declination in sexegesimal or decimal degress. **Return:** - ``cartesians`` -- tuple of (x, y, z) coordinates .. todo:: - replace calculate_cartesians in all code **Usage:** .. code-block:: python from astrocalc.coords import unit_conversion converter = unit_conversion( log=log ) x, y, z = converter.ra_dec_to_cartesian( ra="23 45 21.23232", dec="+01:58:5.45341" ) print x, y, z # OUTPUT: 0.9973699780687104, -0.06382462462791459, 0.034344492110465606 """ self.log.info('starting the ``ra_dec_to_cartesian`` method') ra = self.ra_sexegesimal_to_decimal( ra=ra ) dec = self.dec_sexegesimal_to_decimal( dec=dec ) ra = math.radians(ra) dec = math.radians(dec) cos_dec = math.cos(dec) cx = math.cos(ra) * cos_dec cy = math.sin(ra) * cos_dec cz = math.sin(dec) cartesians = (cx, cy, cz) self.log.info('completed the ``ra_dec_to_cartesian`` method') return cartesians
[ "def", "ra_dec_to_cartesian", "(", "self", ",", "ra", ",", "dec", ")", ":", "self", ".", "log", ".", "info", "(", "'starting the ``ra_dec_to_cartesian`` method'", ")", "ra", "=", "self", ".", "ra_sexegesimal_to_decimal", "(", "ra", "=", "ra", ")", "dec", "="...
*Convert an RA, DEC coordinate set to x, y, z cartesian coordinates* **Key Arguments:** - ``ra`` -- right ascension in sexegesimal or decimal degress. - ``dec`` -- declination in sexegesimal or decimal degress. **Return:** - ``cartesians`` -- tuple of (x, y, z) coordinates .. todo:: - replace calculate_cartesians in all code **Usage:** .. code-block:: python from astrocalc.coords import unit_conversion converter = unit_conversion( log=log ) x, y, z = converter.ra_dec_to_cartesian( ra="23 45 21.23232", dec="+01:58:5.45341" ) print x, y, z # OUTPUT: 0.9973699780687104, -0.06382462462791459, 0.034344492110465606
[ "*", "Convert", "an", "RA", "DEC", "coordinate", "set", "to", "x", "y", "z", "cartesian", "coordinates", "*" ]
python
train
JensRantil/rewind
rewind/server/eventstores.py
https://github.com/JensRantil/rewind/blob/7f645d20186c1db55cfe53a0310c9fd6292f91ea/rewind/server/eventstores.py#L803-L823
def _find_batch_containing_event(self, uuid): """Find the batch number that contains a certain event. Parameters: uuid -- the event uuid to search for. returns -- a batch number, or None if not found. """ if self.estore.key_exists(uuid): # Reusing already opened DB if possible return self.batchno else: for batchno in range(self.batchno - 1, -1, -1): # Iterating backwards here because we are more likely to find # the event in an later archive, than earlier. db = self._open_event_store(batchno) with contextlib.closing(db): if db.key_exists(uuid): return batchno return None
[ "def", "_find_batch_containing_event", "(", "self", ",", "uuid", ")", ":", "if", "self", ".", "estore", ".", "key_exists", "(", "uuid", ")", ":", "# Reusing already opened DB if possible", "return", "self", ".", "batchno", "else", ":", "for", "batchno", "in", ...
Find the batch number that contains a certain event. Parameters: uuid -- the event uuid to search for. returns -- a batch number, or None if not found.
[ "Find", "the", "batch", "number", "that", "contains", "a", "certain", "event", "." ]
python
train
digmore/pypushed
pushed/pushed.py
https://github.com/digmore/pypushed/blob/4240fc27323b89d59f0c652dcea4b65f78437c5b/pushed/pushed.py#L24-L35
def push_app(self, content, content_url=None): '''Push a notification to a Pushed application. Param: content -> content of Pushed notification message content_url (optional) -> enrich message with URL Returns Shipment ID as string ''' parameters = { 'app_key': self.app_key, 'app_secret': self.app_secret } return self._push(content, 'app', parameters, content_url)
[ "def", "push_app", "(", "self", ",", "content", ",", "content_url", "=", "None", ")", ":", "parameters", "=", "{", "'app_key'", ":", "self", ".", "app_key", ",", "'app_secret'", ":", "self", ".", "app_secret", "}", "return", "self", ".", "_push", "(", ...
Push a notification to a Pushed application. Param: content -> content of Pushed notification message content_url (optional) -> enrich message with URL Returns Shipment ID as string
[ "Push", "a", "notification", "to", "a", "Pushed", "application", "." ]
python
train
mrstephenneal/mysql-toolkit
mysql/toolkit/components/connector.py
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/connector.py#L74-L87
def _connect(self, config): """Establish a connection with a MySQL database.""" if 'connection_timeout' not in self._config: self._config['connection_timeout'] = 480 try: self._cnx = connect(**config) self._cursor = self._cnx.cursor() self._printer('\tMySQL DB connection established with db', config['database']) except Error as err: if err.errno == errorcode.ER_ACCESS_DENIED_ERROR: print("Something is wrong with your user name or password") elif err.errno == errorcode.ER_BAD_DB_ERROR: print("Database does not exist") raise err
[ "def", "_connect", "(", "self", ",", "config", ")", ":", "if", "'connection_timeout'", "not", "in", "self", ".", "_config", ":", "self", ".", "_config", "[", "'connection_timeout'", "]", "=", "480", "try", ":", "self", ".", "_cnx", "=", "connect", "(", ...
Establish a connection with a MySQL database.
[ "Establish", "a", "connection", "with", "a", "MySQL", "database", "." ]
python
train
pantsbuild/pants
src/python/pants/backend/jvm/tasks/run_jvm_prep_command.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/run_jvm_prep_command.py#L50-L57
def prepare(cls, options, round_manager): """ :API: public """ super(RunJvmPrepCommandBase, cls).prepare(options, round_manager) round_manager.require_data('compile_classpath') if not cls.classpath_product_only: round_manager.require_data('runtime_classpath')
[ "def", "prepare", "(", "cls", ",", "options", ",", "round_manager", ")", ":", "super", "(", "RunJvmPrepCommandBase", ",", "cls", ")", ".", "prepare", "(", "options", ",", "round_manager", ")", "round_manager", ".", "require_data", "(", "'compile_classpath'", "...
:API: public
[ ":", "API", ":", "public" ]
python
train
senaite/senaite.core
bika/lims/browser/publish/emailview.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/publish/emailview.py#L91-L97
def fail(self, message, status=500, **kw): """Set a JSON error object and a status to the response """ self.request.response.setStatus(status) result = {"success": False, "errors": message, "status": status} result.update(kw) return result
[ "def", "fail", "(", "self", ",", "message", ",", "status", "=", "500", ",", "*", "*", "kw", ")", ":", "self", ".", "request", ".", "response", ".", "setStatus", "(", "status", ")", "result", "=", "{", "\"success\"", ":", "False", ",", "\"errors\"", ...
Set a JSON error object and a status to the response
[ "Set", "a", "JSON", "error", "object", "and", "a", "status", "to", "the", "response" ]
python
train
nicodv/kmodes
kmodes/kmodes.py
https://github.com/nicodv/kmodes/blob/cdb19fe5448aba1bf501626694bb52e68eafab45/kmodes/kmodes.py#L21-L50
def init_huang(X, n_clusters, dissim, random_state): """Initialize centroids according to method by Huang [1997].""" n_attrs = X.shape[1] centroids = np.empty((n_clusters, n_attrs), dtype='object') # determine frequencies of attributes for iattr in range(n_attrs): freq = defaultdict(int) for curattr in X[:, iattr]: freq[curattr] += 1 # Sample centroids using the probabilities of attributes. # (I assume that's what's meant in the Huang [1998] paper; it works, # at least) # Note: sampling using population in static list with as many choices # as frequency counts. Since the counts are small integers, # memory consumption is low. choices = [chc for chc, wght in freq.items() for _ in range(wght)] # So that we are consistent between Python versions, # each with different dict ordering. choices = sorted(choices) centroids[:, iattr] = random_state.choice(choices, n_clusters) # The previously chosen centroids could result in empty clusters, # so set centroid to closest point in X. for ik in range(n_clusters): ndx = np.argsort(dissim(X, centroids[ik])) # We want the centroid to be unique, if possible. while np.all(X[ndx[0]] == centroids, axis=1).any() and ndx.shape[0] > 1: ndx = np.delete(ndx, 0) centroids[ik] = X[ndx[0]] return centroids
[ "def", "init_huang", "(", "X", ",", "n_clusters", ",", "dissim", ",", "random_state", ")", ":", "n_attrs", "=", "X", ".", "shape", "[", "1", "]", "centroids", "=", "np", ".", "empty", "(", "(", "n_clusters", ",", "n_attrs", ")", ",", "dtype", "=", ...
Initialize centroids according to method by Huang [1997].
[ "Initialize", "centroids", "according", "to", "method", "by", "Huang", "[", "1997", "]", "." ]
python
train
django-danceschool/django-danceschool
danceschool/financial/helpers.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/financial/helpers.py#L280-L508
def createExpenseItemsForEvents(request=None, datetimeTuple=None, rule=None, event=None): ''' For each StaffMember-related Repeated Expense Rule, look for EventStaffMember instances in the designated time window that do not already have expenses associated with them. For hourly rental expenses, then generate new expenses that are associated with this rule. For non-hourly expenses, generate new expenses based on the non-overlapping intervals of days, weeks or months for which there is not already an ExpenseItem associated with the rule in question. ''' # This is used repeatedly, so it is put at the top submissionUser = getattr(request, 'user', None) # Return the number of new expense items created generate_count = 0 # First, construct the set of rules that need to be checked for affiliated events rule_filters = Q(disabled=False) & Q(rentalRate__gt=0) & \ Q(Q(staffmemberwageinfo__isnull=False) | Q(staffdefaultwage__isnull=False)) if rule: rule_filters = rule_filters & Q(id=rule.id) rulesToCheck = RepeatedExpenseRule.objects.filter( rule_filters).distinct().order_by( '-staffmemberwageinfo__category', '-staffdefaultwage__category' ) # These are the filters placed on Events that overlap the window in which # expenses are being generated. event_timefilters = Q() if datetimeTuple and len(datetimeTuple) == 2: timelist = list(datetimeTuple) timelist.sort() event_timefilters = event_timefilters & ( Q(event__startTime__gte=timelist[0]) & Q(event__startTime__lte=timelist[1]) ) if event: event_timefilters = event_timefilters & Q(event__id=event.id) # Now, we loop through the set of rules that need to be applied, then loop # through the Events in the window in question that involved the staff # member indicated by the rule. 
for rule in rulesToCheck: staffMember = getattr(rule, 'staffMember', None) staffCategory = getattr(rule, 'category', None) # No need to continue if expenses are not to be generated if ( (not staffMember and not staffCategory) or ( not staffMember and not getConstant('financial__autoGenerateFromStaffCategoryDefaults') ) ): continue # For construction of expense descriptions replacements = { 'type': _('Staff'), 'to': _('payment to'), 'for': _('for'), } # This is the generic category for all Event staff, but it may be overridden below expense_category = getConstant('financial__otherStaffExpenseCat') if staffCategory: if staffMember: # This staff member in this category eventstaff_filter = Q(staffMember=staffMember) & Q(category=staffCategory) elif getConstant('financial__autoGenerateFromStaffCategoryDefaults'): # Any staff member who does not already have a rule specified this category eventstaff_filter = ( Q(category=staffCategory) & ~Q(staffMember__expenserules__category=staffCategory) ) replacements['type'] = staffCategory.name # For standard categories of staff, map the EventStaffCategory to # an ExpenseCategory using the stored constants. Otherwise, the # ExpenseCategory is a generic one. if staffCategory == getConstant('general__eventStaffCategoryAssistant'): expense_category = getConstant('financial__assistantClassInstructionExpenseCat') elif staffCategory in [ getConstant('general__eventStaffCategoryInstructor'), getConstant('general__eventStaffCategorySubstitute') ]: expense_category = getConstant('financial__classInstructionExpenseCat') else: # We don't want to generate duplicate expenses when there is both a category-limited # rule and a non-limited rule for the same person, so we have to construct the list # of categories that are to be excluded if no category is specified by this rule. 
coveredCategories = list(staffMember.expenserules.filter( category__isnull=False).values_list('category__id', flat=True)) eventstaff_filter = Q(staffMember=staffMember) & ~Q(category__id__in=coveredCategories) if rule.advanceDays is not None: if rule.advanceDaysReference == RepeatedExpenseRule.MilestoneChoices.end: event_timefilters = event_timefilters & Q( event__endTime__lte=timezone.now() + timedelta(days=rule.advanceDays) ) elif rule.advanceDaysReference == RepeatedExpenseRule.MilestoneChoices.start: event_timefilters = event_timefilters & Q( event__startTime__lte=timezone.now() + timedelta(days=rule.advanceDays) ) if rule.priorDays is not None: if rule.priorDaysReference == RepeatedExpenseRule.MilestoneChoices.end: event_timefilters = event_timefilters & Q( event__endTime__gte=timezone.now() - timedelta(days=rule.priorDays) ) elif rule.priorDaysReference == RepeatedExpenseRule.MilestoneChoices.start: event_timefilters = event_timefilters & Q( event__startTime__gte=timezone.now() - timedelta(days=rule.priorDays) ) if rule.startDate: event_timefilters = event_timefilters & Q(event__startTime__gte=timezone.now().replace( year=rule.startDate.year, month=rule.startDate.month, day=rule.startDate.day, hour=0, minute=0, second=0, microsecond=0, )) if rule.endDate: event_timefilters = event_timefilters & Q(event__startTime__lte=timezone.now().replace( year=rule.endDate.year, month=rule.endDate.month, day=rule.endDate.day, hour=0, minute=0, second=0, microsecond=0, )) # Loop through EventStaffMembers for which there are not already # directly allocated expenses under this rule, and create new # ExpenseItems for them depending on whether the rule requires hourly # expenses or non-hourly ones to be generated. 
staffers = EventStaffMember.objects.filter(eventstaff_filter & event_timefilters).exclude( Q(event__expenseitem__expenseRule=rule)).distinct() if rule.applyRateRule == rule.RateRuleChoices.hourly: for staffer in staffers: # Hourly expenses are always generated without checking for # overlapping windows, because the periods over which hourly # expenses are defined are disjoint. However, hourly expenses # are allocated directly to events, so we just need to create # expenses for any events that do not already have an Expense # Item generate under this rule. replacements['event'] = staffer.event.name replacements['name'] = staffer.staffMember.fullName replacements['dates'] = staffer.event.startTime.strftime('%Y-%m-%d') if ( staffer.event.startTime.strftime('%Y-%m-%d') != staffer.event.endTime.strftime('%Y-%m-%d') ): replacements['dates'] += ' %s %s' % ( _('to'), staffer.event.endTime.strftime('%Y-%m-%d') ) # Find or create the TransactionParty associated with the staff member. staffer_party = TransactionParty.objects.get_or_create( staffMember=staffer.staffMember, defaults={ 'name': staffer.staffMember.fullName, 'user': getattr(staffer.staffMember, 'userAccount', None) } )[0] params = { 'event': staffer.event, 'category': expense_category, 'expenseRule': rule, 'description': '%(type)s %(to)s %(name)s %(for)s: %(event)s, %(dates)s' % \ replacements, 'submissionUser': submissionUser, 'hours': staffer.netHours, 'wageRate': rule.rentalRate, 'total': staffer.netHours * rule.rentalRate, 'accrualDate': staffer.event.startTime, 'payTo': staffer_party, } ExpenseItem.objects.create(**params) generate_count += 1 else: # Non-hourly expenses are generated by constructing the time # intervals in which the occurrence occurs, and removing from that # interval any intervals in which an expense has already been # generated under this rule (so, for example, monthly rentals will # now show up multiple times). 
So, we just need to construct the set # of intervals for which to construct expenses. We first need to # split the set of EventStaffMember objects by StaffMember (in case # this rule is not person-specific) and then run this provedure # separated by StaffMember. members = StaffMember.objects.filter(eventstaffmember__in=staffers) for member in members: events = [x.event for x in staffers.filter(staffMember=member)] # Find or create the TransactionParty associated with the staff member. staffer_party = TransactionParty.objects.get_or_create( staffMember=member, defaults={ 'name': member.fullName, 'user': getattr(member, 'userAccount', None) } )[0] intervals = [ (x.localStartTime, x.localEndTime) for x in EventOccurrence.objects.filter(event__in=events) ] remaining_intervals = rule.getWindowsAndTotals(intervals) for startTime, endTime, total, description in remaining_intervals: replacements['when'] = description replacements['name'] = member.fullName params = { 'category': expense_category, 'expenseRule': rule, 'periodStart': startTime, 'periodEnd': endTime, 'description': '%(type)s %(to)s %(name)s %(for)s %(when)s' % replacements, 'submissionUser': submissionUser, 'total': total, 'accrualDate': startTime, 'payTo': staffer_party, } ExpenseItem.objects.create(**params) generate_count += 1 rulesToCheck.update(lastRun=timezone.now()) return generate_count
[ "def", "createExpenseItemsForEvents", "(", "request", "=", "None", ",", "datetimeTuple", "=", "None", ",", "rule", "=", "None", ",", "event", "=", "None", ")", ":", "# This is used repeatedly, so it is put at the top\r", "submissionUser", "=", "getattr", "(", "reque...
For each StaffMember-related Repeated Expense Rule, look for EventStaffMember instances in the designated time window that do not already have expenses associated with them. For hourly rental expenses, then generate new expenses that are associated with this rule. For non-hourly expenses, generate new expenses based on the non-overlapping intervals of days, weeks or months for which there is not already an ExpenseItem associated with the rule in question.
[ "For", "each", "StaffMember", "-", "related", "Repeated", "Expense", "Rule", "look", "for", "EventStaffMember", "instances", "in", "the", "designated", "time", "window", "that", "do", "not", "already", "have", "expenses", "associated", "with", "them", ".", "For"...
python
train
stevelittlefish/littlefish
littlefish/viewutil.py
https://github.com/stevelittlefish/littlefish/blob/6deee7f81fab30716c743efe2e94e786c6e17016/littlefish/viewutil.py#L15-L63
def internal_error(exception, template_path, is_admin, db=None): """ Render an "internal error" page. The following variables will be populated when rendering the template: title: The page title message: The body of the error message to display to the user preformat: Boolean stating whether to wrap the error message in a pre As well as rendering the error message to the user, this will also log the exception :param exception: The exception that was caught :param template_path: The template to render (i.e. "main/error.html") :param is_admin: Can the logged in user always view detailed error reports? :param db: The Flask-SQLAlchemy instance :return: Flask Response """ if db: try: db.session.rollback() except: # noqa: E722 pass title = str(exception) message = traceback.format_exc() preformat = True log.error('Exception caught: {}\n{}'.format(title, message)) if current_app.config.get('TEST_MODE'): show_detailed_error = True message = 'Note: You are seeing this error message because the server is in test mode.\n\n{}'.format(message) elif is_admin: show_detailed_error = True message = 'Note: You are seeing this error message because you are a member of staff.\n\n{}'.format(message) else: title = '500 Internal Server Error' message = 'Something went wrong while processing your request.' preformat = False show_detailed_error = False try: return render_template(template_path, title=title, message=message, preformat=preformat, exception=exception, is_admin=is_admin, show_detailed_error=show_detailed_error), 500 except: # noqa: E722 log.exception('Error rendering error page!') return '500 Internal Server Error', 500
[ "def", "internal_error", "(", "exception", ",", "template_path", ",", "is_admin", ",", "db", "=", "None", ")", ":", "if", "db", ":", "try", ":", "db", ".", "session", ".", "rollback", "(", ")", "except", ":", "# noqa: E722", "pass", "title", "=", "str"...
Render an "internal error" page. The following variables will be populated when rendering the template: title: The page title message: The body of the error message to display to the user preformat: Boolean stating whether to wrap the error message in a pre As well as rendering the error message to the user, this will also log the exception :param exception: The exception that was caught :param template_path: The template to render (i.e. "main/error.html") :param is_admin: Can the logged in user always view detailed error reports? :param db: The Flask-SQLAlchemy instance :return: Flask Response
[ "Render", "an", "internal", "error", "page", ".", "The", "following", "variables", "will", "be", "populated", "when", "rendering", "the", "template", ":", "title", ":", "The", "page", "title", "message", ":", "The", "body", "of", "the", "error", "message", ...
python
test
ellethee/argparseinator
argparseinator/utils.py
https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/utils.py#L228-L247
def get_shared(func): """ return shared. """ shared = [] if not hasattr(func, '__cls__'): return shared if not hasattr(func.__cls__, '__shared_arguments__'): return shared if hasattr(func, '__no_share__'): if func.__no_share__ is True: return shared else: shared += [ s for s in func.__cls__.__shared_arguments__ if (s[0][-1].replace('--', '').replace('-', '_')) not in func.__no_share__] else: shared = func.__cls__.__shared_arguments__ return shared
[ "def", "get_shared", "(", "func", ")", ":", "shared", "=", "[", "]", "if", "not", "hasattr", "(", "func", ",", "'__cls__'", ")", ":", "return", "shared", "if", "not", "hasattr", "(", "func", ".", "__cls__", ",", "'__shared_arguments__'", ")", ":", "ret...
return shared.
[ "return", "shared", "." ]
python
train
echonest/pyechonest
pyechonest/catalog.py
https://github.com/echonest/pyechonest/blob/d8c7af6c1da699b50b2f4b1bd3c0febe72e7f1ee/pyechonest/catalog.py#L293-L331
def get_feed(self, buckets=None, since=None, results=15, start=0): """ Returns feed (news, blogs, reviews, audio, video) for the catalog artists; response depends on requested buckets Args: Kwargs: buckets (list): A list of strings specifying which feed items to retrieve results (int): An integer number of results to return start (int): An integer starting value for the result set Returns: A list of news, blogs, reviews, audio or video document dicts; Example: >>> c <catalog - my_artists> >>> c.get_feed(results=15) {u'date_found': u'2011-02-06T07:50:25', u'date_posted': u'2011-02-06T07:50:23', u'id': u'caec686c0dff361e4c53dceb58fb9d2f', u'name': u'Linkin Park \u2013 \u201cWaiting For The End\u201d + \u201cWhen They Come For Me\u201d 2/5 SNL', u'references': [{u'artist_id': u'ARQUMH41187B9AF699', u'artist_name': u'Linkin Park'}], u'summary': u'<span>Linkin</span> <span>Park</span> performed "Waiting For The End" and "When They Come For Me" on Saturday Night Live. Watch the videos below and pick up their album A Thousand Suns on iTunes, Amazon MP3, CD Social Bookmarking ... ', u'type': u'blogs', u'url': u'http://theaudioperv.com/2011/02/06/linkin-park-waiting-for-the-end-when-they-come-for-me-25-snl/'} >>> """ kwargs = {} kwargs['bucket'] = buckets or [] if since: kwargs['since']=since response = self.get_attribute("feed", results=results, start=start, **kwargs) rval = ResultList(response['feed']) return rval
[ "def", "get_feed", "(", "self", ",", "buckets", "=", "None", ",", "since", "=", "None", ",", "results", "=", "15", ",", "start", "=", "0", ")", ":", "kwargs", "=", "{", "}", "kwargs", "[", "'bucket'", "]", "=", "buckets", "or", "[", "]", "if", ...
Returns feed (news, blogs, reviews, audio, video) for the catalog artists; response depends on requested buckets Args: Kwargs: buckets (list): A list of strings specifying which feed items to retrieve results (int): An integer number of results to return start (int): An integer starting value for the result set Returns: A list of news, blogs, reviews, audio or video document dicts; Example: >>> c <catalog - my_artists> >>> c.get_feed(results=15) {u'date_found': u'2011-02-06T07:50:25', u'date_posted': u'2011-02-06T07:50:23', u'id': u'caec686c0dff361e4c53dceb58fb9d2f', u'name': u'Linkin Park \u2013 \u201cWaiting For The End\u201d + \u201cWhen They Come For Me\u201d 2/5 SNL', u'references': [{u'artist_id': u'ARQUMH41187B9AF699', u'artist_name': u'Linkin Park'}], u'summary': u'<span>Linkin</span> <span>Park</span> performed "Waiting For The End" and "When They Come For Me" on Saturday Night Live. Watch the videos below and pick up their album A Thousand Suns on iTunes, Amazon MP3, CD Social Bookmarking ... ', u'type': u'blogs', u'url': u'http://theaudioperv.com/2011/02/06/linkin-park-waiting-for-the-end-when-they-come-for-me-25-snl/'} >>>
[ "Returns", "feed", "(", "news", "blogs", "reviews", "audio", "video", ")", "for", "the", "catalog", "artists", ";", "response", "depends", "on", "requested", "buckets" ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/visuals/transforms/linear.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/transforms/linear.py#L223-L242
def from_mapping(cls, x0, x1): """ Create an STTransform from the given mapping See `set_mapping` for details. Parameters ---------- x0 : array-like Start. x1 : array-like End. Returns ------- t : instance of STTransform The transform. """ t = cls() t.set_mapping(x0, x1) return t
[ "def", "from_mapping", "(", "cls", ",", "x0", ",", "x1", ")", ":", "t", "=", "cls", "(", ")", "t", ".", "set_mapping", "(", "x0", ",", "x1", ")", "return", "t" ]
Create an STTransform from the given mapping See `set_mapping` for details. Parameters ---------- x0 : array-like Start. x1 : array-like End. Returns ------- t : instance of STTransform The transform.
[ "Create", "an", "STTransform", "from", "the", "given", "mapping" ]
python
train
stephrdev/django-formwizard
formwizard/views.py
https://github.com/stephrdev/django-formwizard/blob/7b35165f0340aae4e8302d5b05b0cb443f6c9904/formwizard/views.py#L463-L475
def get_next_step(self, step=None): """ Returns the next step after the given `step`. If no more steps are available, None will be returned. If the `step` argument is None, the current step will be determined automatically. """ if step is None: step = self.steps.current form_list = self.get_form_list() key = form_list.keyOrder.index(step) + 1 if len(form_list.keyOrder) > key: return form_list.keyOrder[key] return None
[ "def", "get_next_step", "(", "self", ",", "step", "=", "None", ")", ":", "if", "step", "is", "None", ":", "step", "=", "self", ".", "steps", ".", "current", "form_list", "=", "self", ".", "get_form_list", "(", ")", "key", "=", "form_list", ".", "keyO...
Returns the next step after the given `step`. If no more steps are available, None will be returned. If the `step` argument is None, the current step will be determined automatically.
[ "Returns", "the", "next", "step", "after", "the", "given", "step", ".", "If", "no", "more", "steps", "are", "available", "None", "will", "be", "returned", ".", "If", "the", "step", "argument", "is", "None", "the", "current", "step", "will", "be", "determ...
python
train
quodlibet/mutagen
mutagen/easymp4.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/easymp4.py#L83-L101
def RegisterTextKey(cls, key, atomid): """Register a text key. If the key you need to register is a simple one-to-one mapping of MP4 atom name to EasyMP4Tags key, then you can use this function:: EasyMP4Tags.RegisterTextKey("artist", "\xa9ART") """ def getter(tags, key): return tags[atomid] def setter(tags, key, value): tags[atomid] = value def deleter(tags, key): del(tags[atomid]) cls.RegisterKey(key, getter, setter, deleter)
[ "def", "RegisterTextKey", "(", "cls", ",", "key", ",", "atomid", ")", ":", "def", "getter", "(", "tags", ",", "key", ")", ":", "return", "tags", "[", "atomid", "]", "def", "setter", "(", "tags", ",", "key", ",", "value", ")", ":", "tags", "[", "a...
Register a text key. If the key you need to register is a simple one-to-one mapping of MP4 atom name to EasyMP4Tags key, then you can use this function:: EasyMP4Tags.RegisterTextKey("artist", "\xa9ART")
[ "Register", "a", "text", "key", "." ]
python
train
saltstack/salt
salt/key.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/key.py#L314-L325
def _check_minions_directories(self): ''' Return the minion keys directory paths ''' minions_accepted = os.path.join(self.opts['pki_dir'], self.ACC) minions_pre = os.path.join(self.opts['pki_dir'], self.PEND) minions_rejected = os.path.join(self.opts['pki_dir'], self.REJ) minions_denied = os.path.join(self.opts['pki_dir'], self.DEN) return minions_accepted, minions_pre, minions_rejected, minions_denied
[ "def", "_check_minions_directories", "(", "self", ")", ":", "minions_accepted", "=", "os", ".", "path", ".", "join", "(", "self", ".", "opts", "[", "'pki_dir'", "]", ",", "self", ".", "ACC", ")", "minions_pre", "=", "os", ".", "path", ".", "join", "(",...
Return the minion keys directory paths
[ "Return", "the", "minion", "keys", "directory", "paths" ]
python
train
rq/rq-scheduler
rq_scheduler/scheduler.py
https://github.com/rq/rq-scheduler/blob/ee60c19e42a46ba787f762733a0036aa0cf2f7b7/rq_scheduler/scheduler.py#L60-L66
def register_death(self): """Registers its own death.""" self.log.info('Registering death') with self.connection.pipeline() as p: p.hset(self.scheduler_key, 'death', time.time()) p.expire(self.scheduler_key, 60) p.execute()
[ "def", "register_death", "(", "self", ")", ":", "self", ".", "log", ".", "info", "(", "'Registering death'", ")", "with", "self", ".", "connection", ".", "pipeline", "(", ")", "as", "p", ":", "p", ".", "hset", "(", "self", ".", "scheduler_key", ",", ...
Registers its own death.
[ "Registers", "its", "own", "death", "." ]
python
train
draperjames/qtpandas
qtpandas/views/BigIntSpinbox.py
https://github.com/draperjames/qtpandas/blob/64294fb69f1839e53dee5ea453337266bfaf24f4/qtpandas/views/BigIntSpinbox.py#L57-L72
def setValue(self, value): """setter function to _lineEdit.text. Sets minimum/maximum as new value if value is out of bounds. Args: value (int/long): new value to set. Returns True if all went fine. """ if value >= self.minimum() and value <= self.maximum(): self._lineEdit.setText(str(value)) elif value < self.minimum(): self._lineEdit.setText(str(self.minimum())) elif value > self.maximum(): self._lineEdit.setText(str(self.maximum())) return True
[ "def", "setValue", "(", "self", ",", "value", ")", ":", "if", "value", ">=", "self", ".", "minimum", "(", ")", "and", "value", "<=", "self", ".", "maximum", "(", ")", ":", "self", ".", "_lineEdit", ".", "setText", "(", "str", "(", "value", ")", "...
setter function to _lineEdit.text. Sets minimum/maximum as new value if value is out of bounds. Args: value (int/long): new value to set. Returns True if all went fine.
[ "setter", "function", "to", "_lineEdit", ".", "text", ".", "Sets", "minimum", "/", "maximum", "as", "new", "value", "if", "value", "is", "out", "of", "bounds", "." ]
python
train
click-contrib/click-configfile
tasks/clean.py
https://github.com/click-contrib/click-configfile/blob/a616204cb9944125fd5051556f27a7ccef611e22/tasks/clean.py#L191-L205
def path_glob(pattern, current_dir=None): """Use pathlib for ant-like patterns, like: "**/*.py" :param pattern: File/directory pattern to use (as string). :param current_dir: Current working directory (as Path, pathlib.Path, str) :return Resolved Path (as path.Path). """ if not current_dir: current_dir = pathlib.Path.cwd() elif not isinstance(current_dir, pathlib.Path): # -- CASE: string, path.Path (string-like) current_dir = pathlib.Path(str(current_dir)) for p in current_dir.glob(pattern): yield Path(str(p))
[ "def", "path_glob", "(", "pattern", ",", "current_dir", "=", "None", ")", ":", "if", "not", "current_dir", ":", "current_dir", "=", "pathlib", ".", "Path", ".", "cwd", "(", ")", "elif", "not", "isinstance", "(", "current_dir", ",", "pathlib", ".", "Path"...
Use pathlib for ant-like patterns, like: "**/*.py" :param pattern: File/directory pattern to use (as string). :param current_dir: Current working directory (as Path, pathlib.Path, str) :return Resolved Path (as path.Path).
[ "Use", "pathlib", "for", "ant", "-", "like", "patterns", "like", ":", "**", "/", "*", ".", "py" ]
python
train
seb-m/tss
tss.py
https://github.com/seb-m/tss/blob/ab45176b8585ba6bbbcaeffd21ec0c63f615dce0/tss.py#L253-L307
def reconstruct_secret(shares, strict_mode=True): """ shares must be a container with a sufficient number of well-formatted shares used to reconstruct the secret value. If any share format is invalid a TSSError exception is raised. If strict_mode is False all combinations of shares are tried in order to reconstruct the secret. Otherwise this function raises an exception TSSError on the first error encountered (either a duplicate share was detected or the provided hash value didn't match the one computed from the recovered secret). This function must return the secret value or raise TSSError. """ ref_header = None data_shares = [] for share in shares: share = encode(share) if len(share) < 20: raise TSSError('share format invalid') header = parse_header(share[:20]) if ref_header is None: ref_header = header if header[2] > len(shares): raise TSSError('not enough shares for reconstructing secret') if ref_header != header: raise TSSError('invalid share headers %s' % header) data_share = share[20:] if len(data_share) != header[3]: raise TSSError('invalid share data size %d (expected %d)' % \ (len(data_share), header[3])) data_shares.append(data_share) for combination in itertools.combinations(data_shares, ref_header[2]): secret = bytearray() u = [byte_to_ord(share[0]) for share in combination] if len(dict().fromkeys(u)) != len(u): if strict_mode: raise TSSError('invalid share with duplicate index') else: continue for i in range(1, ref_header[3]): v = [byte_to_ord(share[i]) for share in combination] secret.append(lagrange_interpolation(u, v)) secret = bytes(secret) if ref_header[1] != Hash.NONE: d = Hash.to_func(ref_header[1])() digestsize = digest_size(d) d.update(secret[:-digestsize]) if len(secret) < digestsize or d.digest() != secret[-digestsize:]: if strict_mode: raise TSSError('hash values mismatch') else: continue return secret[:-digestsize] return secret raise TSSError('not enough valid shares for reconstructing the secret')
[ "def", "reconstruct_secret", "(", "shares", ",", "strict_mode", "=", "True", ")", ":", "ref_header", "=", "None", "data_shares", "=", "[", "]", "for", "share", "in", "shares", ":", "share", "=", "encode", "(", "share", ")", "if", "len", "(", "share", "...
shares must be a container with a sufficient number of well-formatted shares used to reconstruct the secret value. If any share format is invalid a TSSError exception is raised. If strict_mode is False all combinations of shares are tried in order to reconstruct the secret. Otherwise this function raises an exception TSSError on the first error encountered (either a duplicate share was detected or the provided hash value didn't match the one computed from the recovered secret). This function must return the secret value or raise TSSError.
[ "shares", "must", "be", "a", "container", "with", "a", "sufficient", "number", "of", "well", "-", "formatted", "shares", "used", "to", "reconstruct", "the", "secret", "value", ".", "If", "any", "share", "format", "is", "invalid", "a", "TSSError", "exception"...
python
train
paramiko/paramiko
paramiko/sftp_handle.py
https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/sftp_handle.py#L180-L187
def _get_next_files(self): """ Used by the SFTP server code to retrieve a cached directory listing. """ fnlist = self.__files[:16] self.__files = self.__files[16:] return fnlist
[ "def", "_get_next_files", "(", "self", ")", ":", "fnlist", "=", "self", ".", "__files", "[", ":", "16", "]", "self", ".", "__files", "=", "self", ".", "__files", "[", "16", ":", "]", "return", "fnlist" ]
Used by the SFTP server code to retrieve a cached directory listing.
[ "Used", "by", "the", "SFTP", "server", "code", "to", "retrieve", "a", "cached", "directory", "listing", "." ]
python
train
salu133445/pypianoroll
pypianoroll/utilities.py
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/utilities.py#L154-L167
def parse(filepath, beat_resolution=24, name='unknown'): """ Return a :class:`pypianoroll.Multitrack` object loaded from a MIDI (.mid, .midi, .MID, .MIDI) file. Parameters ---------- filepath : str The file path to the MIDI file. """ if not filepath.endswith(('.mid', '.midi', '.MID', '.MIDI')): raise ValueError("Only MIDI files are supported") return Multitrack(filepath, beat_resolution=beat_resolution, name=name)
[ "def", "parse", "(", "filepath", ",", "beat_resolution", "=", "24", ",", "name", "=", "'unknown'", ")", ":", "if", "not", "filepath", ".", "endswith", "(", "(", "'.mid'", ",", "'.midi'", ",", "'.MID'", ",", "'.MIDI'", ")", ")", ":", "raise", "ValueErro...
Return a :class:`pypianoroll.Multitrack` object loaded from a MIDI (.mid, .midi, .MID, .MIDI) file. Parameters ---------- filepath : str The file path to the MIDI file.
[ "Return", "a", ":", "class", ":", "pypianoroll", ".", "Multitrack", "object", "loaded", "from", "a", "MIDI", "(", ".", "mid", ".", "midi", ".", "MID", ".", "MIDI", ")", "file", "." ]
python
train
shichao-an/twitter-photos
twphotos/photos.py
https://github.com/shichao-an/twitter-photos/blob/32de6e8805edcbb431d08af861e9d2f0ab221106/twphotos/photos.py#L62-L84
def get(self, count=None, since_id=None, silent=False): """ Get all photos from the user or members of the list :param count: Number of tweets to try and retrieve. If None, return all photos since `since_id` :param since_id: An integer specifying the oldest tweet id """ if not silent: print('Retrieving photos from Twitter API...') self.auth_user = self.verify_credentials().screen_name self.since_ids = read_since_ids(self.users) for user in self.users: if self.increment: since_id = self.since_ids.get(user) photos = self.load(user=user, count=count, since_id=since_id, num=self.num) self.photos[user] = photos[:self.num] self._total += len(self.photos[user]) if not photos and user in self.max_ids: del self.max_ids[user] return self.photos
[ "def", "get", "(", "self", ",", "count", "=", "None", ",", "since_id", "=", "None", ",", "silent", "=", "False", ")", ":", "if", "not", "silent", ":", "print", "(", "'Retrieving photos from Twitter API...'", ")", "self", ".", "auth_user", "=", "self", "....
Get all photos from the user or members of the list :param count: Number of tweets to try and retrieve. If None, return all photos since `since_id` :param since_id: An integer specifying the oldest tweet id
[ "Get", "all", "photos", "from", "the", "user", "or", "members", "of", "the", "list", ":", "param", "count", ":", "Number", "of", "tweets", "to", "try", "and", "retrieve", ".", "If", "None", "return", "all", "photos", "since", "since_id", ":", "param", ...
python
train
helium/helium-python
helium/resource.py
https://github.com/helium/helium-python/blob/db73480b143da4fc48e95c4414bd69c576a3a390/helium/resource.py#L445-L456
def delete(self): """Delete the resource. Returns: True if the delete is successful. Will throw an error if other errors occur """ session = self._session url = session._build_url(self._resource_path(), self.id) return session.delete(url, CB.boolean(204))
[ "def", "delete", "(", "self", ")", ":", "session", "=", "self", ".", "_session", "url", "=", "session", ".", "_build_url", "(", "self", ".", "_resource_path", "(", ")", ",", "self", ".", "id", ")", "return", "session", ".", "delete", "(", "url", ",",...
Delete the resource. Returns: True if the delete is successful. Will throw an error if other errors occur
[ "Delete", "the", "resource", "." ]
python
train
rembish/cfb
cfb/exceptions.py
https://github.com/rembish/cfb/blob/9bcd75caad4353b186ce518d2da78aeeb52d3131/cfb/exceptions.py#L48-L55
def raise_if(self, exception, message, *args, **kwargs): """ If current exception has smaller priority than minimum, subclass of this class only warns user, otherwise normal exception will be raised. """ if issubclass(exception, self.minimum_defect): raise exception(*args, **kwargs) warn(message, SyntaxWarning, *args, **kwargs)
[ "def", "raise_if", "(", "self", ",", "exception", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "issubclass", "(", "exception", ",", "self", ".", "minimum_defect", ")", ":", "raise", "exception", "(", "*", "args", ",", "*...
If current exception has smaller priority than minimum, subclass of this class only warns user, otherwise normal exception will be raised.
[ "If", "current", "exception", "has", "smaller", "priority", "than", "minimum", "subclass", "of", "this", "class", "only", "warns", "user", "otherwise", "normal", "exception", "will", "be", "raised", "." ]
python
train
novopl/peltak
src/peltak/logic/root.py
https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/logic/root.py#L114-L128
def init(quick): # type: () -> None """ Create an empty pelconf.yaml from template """ config_file = 'pelconf.yaml' prompt = "-- <35>{} <32>already exists. Wipe it?<0>".format(config_file) if exists(config_file) and not click.confirm(shell.fmt(prompt)): log.info("Canceled") return form = InitForm().run(quick=quick) log.info('Writing <35>{}'.format(config_file)) pelconf_template = conf.load_template('pelconf.yaml') fs.write_file(config_file, pelconf_template.format(**form.values))
[ "def", "init", "(", "quick", ")", ":", "# type: () -> None", "config_file", "=", "'pelconf.yaml'", "prompt", "=", "\"-- <35>{} <32>already exists. Wipe it?<0>\"", ".", "format", "(", "config_file", ")", "if", "exists", "(", "config_file", ")", "and", "not", "click",...
Create an empty pelconf.yaml from template
[ "Create", "an", "empty", "pelconf", ".", "yaml", "from", "template" ]
python
train
saltstack/salt
salt/renderers/aws_kms.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/renderers/aws_kms.py#L162-L176
def _api_decrypt(): ''' Return the response dictionary from the KMS decrypt API call. ''' kms = _kms() data_key = _cfg_data_key() try: return kms.decrypt(CiphertextBlob=data_key) except botocore.exceptions.ClientError as orig_exc: error_code = orig_exc.response.get('Error', {}).get('Code', '') if error_code != 'InvalidCiphertextException': raise err_msg = 'aws_kms:data_key is not a valid KMS data key' config_error = salt.exceptions.SaltConfigurationError(err_msg) six.raise_from(config_error, orig_exc)
[ "def", "_api_decrypt", "(", ")", ":", "kms", "=", "_kms", "(", ")", "data_key", "=", "_cfg_data_key", "(", ")", "try", ":", "return", "kms", ".", "decrypt", "(", "CiphertextBlob", "=", "data_key", ")", "except", "botocore", ".", "exceptions", ".", "Clien...
Return the response dictionary from the KMS decrypt API call.
[ "Return", "the", "response", "dictionary", "from", "the", "KMS", "decrypt", "API", "call", "." ]
python
train
StackStorm/pybind
pybind/nos/v6_0_2f/brocade_lag_rpc/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/brocade_lag_rpc/__init__.py#L106-L131
def _set_get_port_channel_detail(self, v, load=False): """ Setter method for get_port_channel_detail, mapped from YANG variable /brocade_lag_rpc/get_port_channel_detail (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_get_port_channel_detail is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_get_port_channel_detail() directly. YANG Description: A function that returns Link Aggregation Control configuration parameters for all the port channels in the system. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=get_port_channel_detail.get_port_channel_detail, is_leaf=True, yang_name="get-port-channel-detail", rest_name="get-port-channel-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'getlacpportchanneldetails-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-lag', defining_module='brocade-lag', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """get_port_channel_detail must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=get_port_channel_detail.get_port_channel_detail, is_leaf=True, yang_name="get-port-channel-detail", rest_name="get-port-channel-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'getlacpportchanneldetails-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-lag', defining_module='brocade-lag', yang_type='rpc', is_config=True)""", }) self.__get_port_channel_detail = t if hasattr(self, '_set'): self._set()
[ "def", "_set_get_port_channel_detail", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ...
Setter method for get_port_channel_detail, mapped from YANG variable /brocade_lag_rpc/get_port_channel_detail (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_get_port_channel_detail is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_get_port_channel_detail() directly. YANG Description: A function that returns Link Aggregation Control configuration parameters for all the port channels in the system.
[ "Setter", "method", "for", "get_port_channel_detail", "mapped", "from", "YANG", "variable", "/", "brocade_lag_rpc", "/", "get_port_channel_detail", "(", "rpc", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "t...
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/generator/mavgen_objc.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/generator/mavgen_objc.py#L14-L113
def generate_mavlink(directory, xml): '''generate MVMavlink header and implementation''' f = open(os.path.join(directory, "MVMavlink.h"), mode='w') t.write(f,''' // // MVMavlink.h // MAVLink communications protocol built from ${basename}.xml // // Created on ${parse_time} by mavgen_objc.py // http://qgroundcontrol.org/mavlink // #import "MVMessage.h" ${{message_definition_files:#import "MV${name_camel_case}Messages.h" }} @class MVMavlink; @protocol MVMessage; @protocol MVMavlinkDelegate <NSObject> /*! Method called on the delegate when a full message has been received. Note that this may be called multiple times when parseData: is called, if the data passed to parseData: contains multiple messages. @param mavlink The MVMavlink object calling this method @param message The id<MVMessage> class containing the parsed message */ - (void)mavlink:(MVMavlink *)mavlink didGetMessage:(id<MVMessage>)message; /*! Method called on the delegate when data should be sent. @param mavlink The MVMavlink object calling this method @param data NSData object containing the bytes to be sent */ - (BOOL)mavlink:(MVMavlink *)mavlink shouldWriteData:(NSData *)data; @end /*! Class for parsing and sending instances of id<MVMessage> @discussion MVMavlink receives a stream of bytes via the parseData: method and calls the delegate method mavlink:didGetMessage: each time a message is fully parsed. Users of MVMavlink can call parseData: anytime they get new data, even if that data does not contain a complete message. */ @interface MVMavlink : NSObject @property (weak, nonatomic) id<MVMavlinkDelegate> delegate; /*! Parse byte data received from a MAVLink byte stream. @param data NSData containing the received bytes */ - (void)parseData:(NSData *)data; /*! Compile MVMessage object into a bytes and pass to the delegate for sending. 
@param message Object conforming to the MVMessage protocol that represents the data to be sent @return YES if message sending was successful */ - (BOOL)sendMessage:(id<MVMessage>)message; @end ''', xml) f.close() f = open(os.path.join(directory, "MVMavlink.m"), mode='w') t.write(f,''' // // MVMavlink.m // MAVLink communications protocol built from ${basename}.xml // // Created by mavgen_objc.py // http://qgroundcontrol.org/mavlink // #import "MVMavlink.h" @implementation MVMavlink - (void)parseData:(NSData *)data { mavlink_message_t msg; mavlink_status_t status; char *bytes = (char *)[data bytes]; for (NSInteger i = 0; i < [data length]; ++i) { if (mavlink_parse_char(MAVLINK_COMM_0, bytes[i], &msg, &status)) { // Packet received id<MVMessage> message = [MVMessage messageWithCMessage:msg]; [_delegate mavlink:self didGetMessage:message]; } } } - (BOOL)sendMessage:(id<MVMessage>)message { return [_delegate mavlink:self shouldWriteData:[message data]]; } @end ''', xml) f.close()
[ "def", "generate_mavlink", "(", "directory", ",", "xml", ")", ":", "f", "=", "open", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "\"MVMavlink.h\"", ")", ",", "mode", "=", "'w'", ")", "t", ".", "write", "(", "f", ",", "'''\n//\n// MVM...
generate MVMavlink header and implementation
[ "generate", "MVMavlink", "header", "and", "implementation" ]
python
train
woolfson-group/isambard
isambard/ampal/protein.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/protein.py#L585-L684
def n_join(self, other, psi=-40.76, omega=-178.25, phi=-65.07, o_c_n_angle=None, c_n_ca_angle=None, c_n_length=None, relabel=True): """Joins other to self at the N-terminus via a peptide bond. Notes ----- This function directly modifies self. It does not return a new object. Parameters ---------- other: Residue or Polypeptide psi: float Psi torsion angle (degrees) between final `Residue` of other and first `Residue` of self. omega: float Omega torsion angle (degrees) between final `Residue` of other and first `Residue` of self. phi: float Phi torsion angle (degrees) between final `Residue` of other and first `Residue` of self. o_c_n_angle: float or None Desired angle between O, C (final `Residue` of other) and N (first `Residue` of self) atoms. If `None`, default value is taken from `ideal_backbone_bond_angles`. c_n_ca_angle: float or None Desired angle between C (final `Residue` of other) and N, CA (first `Residue` of self) atoms. If `None`, default value is taken from `ideal_backbone_bond_angles`. c_n_length: float or None Desired peptide bond length between final `Residue` of other and first `Residue` of self. If None, default value is taken from ideal_backbone_bond_lengths. relabel: bool If True, relabel_all is run on self before returning. 
Raises ------ TypeError: If other is not a `Residue` or a `Polypeptide` """ if isinstance(other, Residue): other = Polypeptide([other]) if not isinstance(other, Polypeptide): raise TypeError( 'Only Polypeptide or Residue objects can be joined to a Polypeptide') if abs(omega) >= 90: peptide_conformation = 'trans' else: peptide_conformation = 'cis' if o_c_n_angle is None: o_c_n_angle = ideal_backbone_bond_angles[peptide_conformation]['o_c_n'] if c_n_ca_angle is None: c_n_ca_angle = ideal_backbone_bond_angles[peptide_conformation]['c_n_ca'] if c_n_length is None: c_n_length = ideal_backbone_bond_lengths['c_n'] r1 = self[0] r1_n = r1['N']._vector r1_ca = r1['CA']._vector r1_c = r1['C']._vector # p1 is point that will be used to position the C atom of r2. p1 = r1_ca[:] # rotate p1 by c_n_ca_angle, about axis perpendicular to the # r1_n, r1_ca, r1_c plane, passing through r1_ca. axis = numpy.cross((r1_ca - r1_n), (r1_c - r1_n)) q = Quaternion.angle_and_axis(angle=c_n_ca_angle, axis=axis) p1 = q.rotate_vector(v=p1, point=r1_n) # Ensure p1 is separated from r1_n by the correct distance. p1 = r1_n + (c_n_length * unit_vector(p1 - r1_n)) # translate other so that its final C atom is at p1 other.translate(vector=(p1 - other[-1]['C']._vector)) # Force CA-C=O-N to be in a plane, and fix O=C-N angle accordingly measured_dihedral = dihedral( other[-1]['CA'], other[-1]['C'], other[-1]['O'], r1['N']) desired_dihedral = 180.0 axis = other[-1]['O'] - other[-1]['C'] other.rotate(angle=(measured_dihedral - desired_dihedral), axis=axis, point=other[-1]['C']._vector) axis = (numpy.cross(other[-1]['O'] - other[-1] ['C'], r1['N'] - other[-1]['C'])) measured_o_c_n = angle_between_vectors( other[-1]['O'] - other[-1]['C'], r1['N'] - other[-1]['C']) other.rotate(angle=(measured_o_c_n - o_c_n_angle), axis=axis, point=other[-1]['C']._vector) # rotate other to obtain desired phi, omega, psi values at the join. 
measured_phi = dihedral(other[-1]['C'], r1['N'], r1['CA'], r1['C']) other.rotate(angle=(phi - measured_phi), axis=(r1_n - r1_ca), point=r1_ca) measured_omega = dihedral( other[-1]['CA'], other[-1]['C'], r1['N'], r1['CA']) other.rotate(angle=(measured_omega - omega), axis=(r1['N'] - other[-1]['C']), point=r1_n) measured_psi = dihedral( other[-1]['N'], other[-1]['CA'], other[-1]['C'], r1['N']) other.rotate(angle=-(measured_psi - psi), axis=(other[-1]['CA'] - other[-1]['C']), point=other[-1]['CA']._vector) self._monomers = other._monomers + self._monomers if relabel: self.relabel_all() self.tags['assigned_ff'] = False return
[ "def", "n_join", "(", "self", ",", "other", ",", "psi", "=", "-", "40.76", ",", "omega", "=", "-", "178.25", ",", "phi", "=", "-", "65.07", ",", "o_c_n_angle", "=", "None", ",", "c_n_ca_angle", "=", "None", ",", "c_n_length", "=", "None", ",", "rel...
Joins other to self at the N-terminus via a peptide bond. Notes ----- This function directly modifies self. It does not return a new object. Parameters ---------- other: Residue or Polypeptide psi: float Psi torsion angle (degrees) between final `Residue` of other and first `Residue` of self. omega: float Omega torsion angle (degrees) between final `Residue` of other and first `Residue` of self. phi: float Phi torsion angle (degrees) between final `Residue` of other and first `Residue` of self. o_c_n_angle: float or None Desired angle between O, C (final `Residue` of other) and N (first `Residue` of self) atoms. If `None`, default value is taken from `ideal_backbone_bond_angles`. c_n_ca_angle: float or None Desired angle between C (final `Residue` of other) and N, CA (first `Residue` of self) atoms. If `None`, default value is taken from `ideal_backbone_bond_angles`. c_n_length: float or None Desired peptide bond length between final `Residue` of other and first `Residue` of self. If None, default value is taken from ideal_backbone_bond_lengths. relabel: bool If True, relabel_all is run on self before returning. Raises ------ TypeError: If other is not a `Residue` or a `Polypeptide`
[ "Joins", "other", "to", "self", "at", "the", "N", "-", "terminus", "via", "a", "peptide", "bond", "." ]
python
train
AtteqCom/zsl
src/zsl/utils/deploy/js_model_generator.py
https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/utils/deploy/js_model_generator.py#L56-L70
def _map_table_name(self, model_names): """ Pre foregin_keys potrbejeme pre z nazvu tabulky zistit class, tak si to namapujme """ for model in model_names: if isinstance(model, tuple): model = model[0] try: model_cls = getattr(self.models, model) self.table_to_class[class_mapper(model_cls).tables[0].name] = model except AttributeError: pass
[ "def", "_map_table_name", "(", "self", ",", "model_names", ")", ":", "for", "model", "in", "model_names", ":", "if", "isinstance", "(", "model", ",", "tuple", ")", ":", "model", "=", "model", "[", "0", "]", "try", ":", "model_cls", "=", "getattr", "(",...
Pre foregin_keys potrbejeme pre z nazvu tabulky zistit class, tak si to namapujme
[ "Pre", "foregin_keys", "potrbejeme", "pre", "z", "nazvu", "tabulky", "zistit", "class", "tak", "si", "to", "namapujme" ]
python
train
Erotemic/utool
utool/util_sysreq.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_sysreq.py#L85-L104
def get_local_dist_packages_dir(): """ Attempts to work around virtualenvs and find the system dist_pacakges. Essentially this is implmenented as a lookuptable """ import utool as ut if not ut.in_virtual_env(): # Non venv case return get_site_packages_dir() else: candidates = [] if ut.LINUX: candidates += [ '/usr/local/lib/python2.7/dist-packages', ] else: raise NotImplementedError() for path in candidates: if ut.checkpath(path): return path
[ "def", "get_local_dist_packages_dir", "(", ")", ":", "import", "utool", "as", "ut", "if", "not", "ut", ".", "in_virtual_env", "(", ")", ":", "# Non venv case", "return", "get_site_packages_dir", "(", ")", "else", ":", "candidates", "=", "[", "]", "if", "ut",...
Attempts to work around virtualenvs and find the system dist_pacakges. Essentially this is implmenented as a lookuptable
[ "Attempts", "to", "work", "around", "virtualenvs", "and", "find", "the", "system", "dist_pacakges", ".", "Essentially", "this", "is", "implmenented", "as", "a", "lookuptable" ]
python
train
thejunglejane/datums
datums/models/base.py
https://github.com/thejunglejane/datums/blob/2250b365e37ba952c2426edc615c1487afabae6e/datums/models/base.py#L161-L172
def update(self, response, **kwargs): ''' If a record matching the instance already exists in the database, update both the column and venue column attributes, else create a new record. ''' response_cls = super( LocationResponseClassLegacyAccessor, self)._get_instance(**kwargs) if response_cls: setattr(response_cls, self.column, self.accessor(response)) setattr( response_cls, self.venue_column, self.venue_accessor(response)) _action_and_commit(response_cls, session.add)
[ "def", "update", "(", "self", ",", "response", ",", "*", "*", "kwargs", ")", ":", "response_cls", "=", "super", "(", "LocationResponseClassLegacyAccessor", ",", "self", ")", ".", "_get_instance", "(", "*", "*", "kwargs", ")", "if", "response_cls", ":", "se...
If a record matching the instance already exists in the database, update both the column and venue column attributes, else create a new record.
[ "If", "a", "record", "matching", "the", "instance", "already", "exists", "in", "the", "database", "update", "both", "the", "column", "and", "venue", "column", "attributes", "else", "create", "a", "new", "record", "." ]
python
train
dbtsai/python-mimeparse
mimeparse.py
https://github.com/dbtsai/python-mimeparse/blob/cf605c0994149b1a1936b3a8a597203fe3fbb62e/mimeparse.py#L42-L66
def parse_media_range(range): """Parse a media-range into its component parts. Carves up a media range and returns a tuple of the (type, subtype, params) where 'params' is a dictionary of all the parameters for the media range. For example, the media range 'application/*;q=0.5' would get parsed into: ('application', '*', {'q', '0.5'}) In addition this function also guarantees that there is a value for 'q' in the params dictionary, filling it in with a proper default if necessary. :rtype: (str,str,dict) """ (type, subtype, params) = parse_mime_type(range) params.setdefault('q', params.pop('Q', None)) # q is case insensitive try: if not params['q'] or not 0 <= float(params['q']) <= 1: params['q'] = '1' except ValueError: # from float() params['q'] = '1' return (type, subtype, params)
[ "def", "parse_media_range", "(", "range", ")", ":", "(", "type", ",", "subtype", ",", "params", ")", "=", "parse_mime_type", "(", "range", ")", "params", ".", "setdefault", "(", "'q'", ",", "params", ".", "pop", "(", "'Q'", ",", "None", ")", ")", "# ...
Parse a media-range into its component parts. Carves up a media range and returns a tuple of the (type, subtype, params) where 'params' is a dictionary of all the parameters for the media range. For example, the media range 'application/*;q=0.5' would get parsed into: ('application', '*', {'q', '0.5'}) In addition this function also guarantees that there is a value for 'q' in the params dictionary, filling it in with a proper default if necessary. :rtype: (str,str,dict)
[ "Parse", "a", "media", "-", "range", "into", "its", "component", "parts", "." ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L938-L944
def _update_partition_srvc_node_ip(self, tenant_name, srvc_ip, vrf_prof=None, part_name=None): """Function to update srvc_node address of partition. """ self.dcnm_obj.update_project(tenant_name, part_name, service_node_ip=srvc_ip, vrf_prof=vrf_prof, desc="Service Partition")
[ "def", "_update_partition_srvc_node_ip", "(", "self", ",", "tenant_name", ",", "srvc_ip", ",", "vrf_prof", "=", "None", ",", "part_name", "=", "None", ")", ":", "self", ".", "dcnm_obj", ".", "update_project", "(", "tenant_name", ",", "part_name", ",", "service...
Function to update srvc_node address of partition.
[ "Function", "to", "update", "srvc_node", "address", "of", "partition", "." ]
python
train
great-expectations/great_expectations
great_expectations/dataset/sqlalchemy_dataset.py
https://github.com/great-expectations/great_expectations/blob/08385c40529d4f14a1c46916788aecc47f33ee9d/great_expectations/dataset/sqlalchemy_dataset.py#L30-L160
def column_map_expectation(cls, func): """For SqlAlchemy, this decorator allows individual column_map_expectations to simply return the filter that describes the expected condition on their data. The decorator will then use that filter to obtain unexpected elements, relevant counts, and return the formatted object. """ if PY3: argspec = inspect.getfullargspec(func)[0][1:] else: argspec = inspect.getargspec(func)[0][1:] @cls.expectation(argspec) @wraps(func) def inner_wrapper(self, column, mostly=None, result_format=None, *args, **kwargs): if result_format is None: result_format = self.default_expectation_args["result_format"] result_format = parse_result_format(result_format) if result_format['result_format'] == 'COMPLETE': warnings.warn("Setting result format to COMPLETE for a SqlAlchemyDataset can be dangerous because it will not limit the number of returned results.") unexpected_count_limit = None else: unexpected_count_limit = result_format['partial_unexpected_count'] expected_condition = func(self, column, *args, **kwargs) # FIXME Temporary Fix for counting missing values # Added to compensate when an ignore_values argument is added to the expectation ignore_values = [None] if func.__name__ in ['expect_column_values_to_not_be_null', 'expect_column_values_to_be_null']: ignore_values = [] result_format['partial_unexpected_count'] = 0 # Optimization to avoid meaningless computation for these expectations count_query = sa.select([ sa.func.count().label('element_count'), sa.func.sum( sa.case([(sa.or_( sa.column(column).in_(ignore_values), # Below is necessary b/c sa.in_() uses `==` but None != None # But we only consider this if None is actually in the list of ignore values sa.column(column).is_(None) if None in ignore_values else False), 1)], else_=0) ).label('null_count'), sa.func.sum( sa.case([ ( sa.and_( sa.not_(expected_condition), sa.case([ ( sa.column(column).is_(None), False ) ], else_=True) if None in ignore_values else True ), 1 ) ], else_=0) 
).label('unexpected_count') ]).select_from(self._table) count_results = dict(self.engine.execute(count_query).fetchone()) # Handle case of empty table gracefully: if "element_count" not in count_results or count_results["element_count"] is None: count_results["element_count"] = 0 if "null_count" not in count_results or count_results["null_count"] is None: count_results["null_count"] = 0 if "unexpected_count" not in count_results or count_results["unexpected_count"] is None: count_results["unexpected_count"] = 0 # Retrieve unexpected values unexpected_query_results = self.engine.execute( sa.select([sa.column(column)]).select_from(self._table).where( sa.and_(sa.not_(expected_condition), sa.or_( # SA normally evaluates `== None` as `IS NONE`. However `sa.in_()` # replaces `None` as `NULL` in the list and incorrectly uses `== NULL` sa.case([ ( sa.column(column).is_(None), False ) ], else_=True) if None in ignore_values else False, # Ignore any other values that are in the ignore list sa.column(column).in_(ignore_values) == False)) ).limit(unexpected_count_limit) ) nonnull_count = count_results['element_count'] - \ count_results['null_count'] if "output_strftime_format" in kwargs: output_strftime_format = kwargs["output_strftime_format"] maybe_limited_unexpected_list = [] for x in unexpected_query_results.fetchall(): if isinstance(x[column], string_types): col = parse(x[column]) else: col = x[column] maybe_limited_unexpected_list.append(datetime.strftime(col, output_strftime_format)) else: maybe_limited_unexpected_list = [x[column] for x in unexpected_query_results.fetchall()] success_count = nonnull_count - count_results['unexpected_count'] success, percent_success = self._calc_map_expectation_success( success_count, nonnull_count, mostly) return_obj = self._format_map_output( result_format, success, count_results['element_count'], nonnull_count, count_results['unexpected_count'], maybe_limited_unexpected_list, None ) if func.__name__ in 
['expect_column_values_to_not_be_null', 'expect_column_values_to_be_null']: # These results are unnecessary for the above expectations del return_obj['result']['unexpected_percent_nonmissing'] try: del return_obj['result']['partial_unexpected_counts'] except KeyError: pass return return_obj inner_wrapper.__name__ = func.__name__ inner_wrapper.__doc__ = func.__doc__ return inner_wrapper
[ "def", "column_map_expectation", "(", "cls", ",", "func", ")", ":", "if", "PY3", ":", "argspec", "=", "inspect", ".", "getfullargspec", "(", "func", ")", "[", "0", "]", "[", "1", ":", "]", "else", ":", "argspec", "=", "inspect", ".", "getargspec", "(...
For SqlAlchemy, this decorator allows individual column_map_expectations to simply return the filter that describes the expected condition on their data. The decorator will then use that filter to obtain unexpected elements, relevant counts, and return the formatted object.
[ "For", "SqlAlchemy", "this", "decorator", "allows", "individual", "column_map_expectations", "to", "simply", "return", "the", "filter", "that", "describes", "the", "expected", "condition", "on", "their", "data", "." ]
python
train
ilevkivskyi/typing_inspect
typing_inspect.py
https://github.com/ilevkivskyi/typing_inspect/blob/fd81278cc440b6003f8298bcb22d5bc0f82ee3cd/typing_inspect.py#L228-L254
def get_parameters(tp): """Return type parameters of a parameterizable type as a tuple in lexicographic order. Parameterizable types are generic types, unions, tuple types and callable types. Examples:: get_parameters(int) == () get_parameters(Generic) == () get_parameters(Union) == () get_parameters(List[int]) == () get_parameters(Generic[T]) == (T,) get_parameters(Tuple[List[T], List[S_co]]) == (T, S_co) get_parameters(Union[S_co, Tuple[T, T]][int, U]) == (U,) get_parameters(Mapping[T, Tuple[S_co, T]]) == (T, S_co) """ if NEW_TYPING: if (isinstance(tp, _GenericAlias) or isinstance(tp, type) and issubclass(tp, Generic) and tp is not Generic): return tp.__parameters__ return () if ( is_generic_type(tp) or is_union_type(tp) or is_callable_type(tp) or is_tuple_type(tp) ): return tp.__parameters__ if tp.__parameters__ is not None else () return ()
[ "def", "get_parameters", "(", "tp", ")", ":", "if", "NEW_TYPING", ":", "if", "(", "isinstance", "(", "tp", ",", "_GenericAlias", ")", "or", "isinstance", "(", "tp", ",", "type", ")", "and", "issubclass", "(", "tp", ",", "Generic", ")", "and", "tp", "...
Return type parameters of a parameterizable type as a tuple in lexicographic order. Parameterizable types are generic types, unions, tuple types and callable types. Examples:: get_parameters(int) == () get_parameters(Generic) == () get_parameters(Union) == () get_parameters(List[int]) == () get_parameters(Generic[T]) == (T,) get_parameters(Tuple[List[T], List[S_co]]) == (T, S_co) get_parameters(Union[S_co, Tuple[T, T]][int, U]) == (U,) get_parameters(Mapping[T, Tuple[S_co, T]]) == (T, S_co)
[ "Return", "type", "parameters", "of", "a", "parameterizable", "type", "as", "a", "tuple", "in", "lexicographic", "order", ".", "Parameterizable", "types", "are", "generic", "types", "unions", "tuple", "types", "and", "callable", "types", ".", "Examples", "::" ]
python
train
edx/edx-enterprise
enterprise/api/v1/serializers.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/api/v1/serializers.py#L618-L633
def validate_lms_user_id(self, value): """ Validates the lms_user_id, if is given, to see if there is an existing EnterpriseCustomerUser for it. """ enterprise_customer = self.context.get('enterprise_customer') try: # Ensure the given user is associated with the enterprise. return models.EnterpriseCustomerUser.objects.get( user_id=value, enterprise_customer=enterprise_customer ) except models.EnterpriseCustomerUser.DoesNotExist: pass return None
[ "def", "validate_lms_user_id", "(", "self", ",", "value", ")", ":", "enterprise_customer", "=", "self", ".", "context", ".", "get", "(", "'enterprise_customer'", ")", "try", ":", "# Ensure the given user is associated with the enterprise.", "return", "models", ".", "E...
Validates the lms_user_id, if is given, to see if there is an existing EnterpriseCustomerUser for it.
[ "Validates", "the", "lms_user_id", "if", "is", "given", "to", "see", "if", "there", "is", "an", "existing", "EnterpriseCustomerUser", "for", "it", "." ]
python
valid
tBuLi/symfit
symfit/core/support.py
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/support.py#L162-L188
def sympy_to_scipy(func, vars, params): """ Convert a symbolic expression to one scipy digs. Not used by ``symfit`` any more. :param func: sympy expression :param vars: variables :param params: parameters :return: Scipy-style function to be used for numerical evaluation of the model. """ lambda_func = sympy_to_py(func, vars, params) def f(x, p): """ Scipy style function. :param x: list of arrays, NxM :param p: tuple of parameter values. """ x = np.atleast_2d(x) y = [x[i] for i in range(len(x))] if len(x[0]) else [] try: ans = lambda_func(*(y + list(p))) except TypeError: # Possibly this is a constant function in which case it only has Parameters. ans = lambda_func(*list(p))# * np.ones(x_shape) return ans return f
[ "def", "sympy_to_scipy", "(", "func", ",", "vars", ",", "params", ")", ":", "lambda_func", "=", "sympy_to_py", "(", "func", ",", "vars", ",", "params", ")", "def", "f", "(", "x", ",", "p", ")", ":", "\"\"\"\n Scipy style function.\n\n :param x: l...
Convert a symbolic expression to one scipy digs. Not used by ``symfit`` any more. :param func: sympy expression :param vars: variables :param params: parameters :return: Scipy-style function to be used for numerical evaluation of the model.
[ "Convert", "a", "symbolic", "expression", "to", "one", "scipy", "digs", ".", "Not", "used", "by", "symfit", "any", "more", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/interactiveshell.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/interactiveshell.py#L2692-L2735
def run_code(self, code_obj): """Execute a code object. When an exception occurs, self.showtraceback() is called to display a traceback. Parameters ---------- code_obj : code object A compiled code object, to be executed Returns ------- False : successful execution. True : an error occurred. """ # Set our own excepthook in case the user code tries to call it # directly, so that the IPython crash handler doesn't get triggered old_excepthook,sys.excepthook = sys.excepthook, self.excepthook # we save the original sys.excepthook in the instance, in case config # code (such as magics) needs access to it. self.sys_excepthook = old_excepthook outflag = 1 # happens in more places, so it's easier as default try: try: self.hooks.pre_run_code_hook() #rprint('Running code', repr(code_obj)) # dbg exec code_obj in self.user_global_ns, self.user_ns finally: # Reset our crash handler in place sys.excepthook = old_excepthook except SystemExit: self.showtraceback(exception_only=True) warn("To exit: use 'exit', 'quit', or Ctrl-D.", level=1) except self.custom_exceptions: etype,value,tb = sys.exc_info() self.CustomTB(etype,value,tb) except: self.showtraceback() else: outflag = 0 return outflag
[ "def", "run_code", "(", "self", ",", "code_obj", ")", ":", "# Set our own excepthook in case the user code tries to call it", "# directly, so that the IPython crash handler doesn't get triggered", "old_excepthook", ",", "sys", ".", "excepthook", "=", "sys", ".", "excepthook", "...
Execute a code object. When an exception occurs, self.showtraceback() is called to display a traceback. Parameters ---------- code_obj : code object A compiled code object, to be executed Returns ------- False : successful execution. True : an error occurred.
[ "Execute", "a", "code", "object", "." ]
python
test
nchopin/particles
particles/kalman.py
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/kalman.py#L163-L187
def predict_step(F, covX, filt): """Predictive step of Kalman filter. Parameters ---------- F: (dx, dx) numpy array Mean of X_t | X_{t-1} is F * X_{t-1} covX: (dx, dx) numpy array covariance of X_t | X_{t-1} filt: MeanAndCov object filtering distribution at time t-1 Returns ------- pred: MeanAndCov object predictive distribution at time t Note ---- filt.mean may either be of shape (dx,) or (N, dx); in the latter case N predictive steps are performed in parallel. """ pred_mean = np.matmul(filt.mean, F.T) pred_cov = dotdot(F, filt.cov, F.T) + covX return MeanAndCov(mean=pred_mean, cov=pred_cov)
[ "def", "predict_step", "(", "F", ",", "covX", ",", "filt", ")", ":", "pred_mean", "=", "np", ".", "matmul", "(", "filt", ".", "mean", ",", "F", ".", "T", ")", "pred_cov", "=", "dotdot", "(", "F", ",", "filt", ".", "cov", ",", "F", ".", "T", "...
Predictive step of Kalman filter. Parameters ---------- F: (dx, dx) numpy array Mean of X_t | X_{t-1} is F * X_{t-1} covX: (dx, dx) numpy array covariance of X_t | X_{t-1} filt: MeanAndCov object filtering distribution at time t-1 Returns ------- pred: MeanAndCov object predictive distribution at time t Note ---- filt.mean may either be of shape (dx,) or (N, dx); in the latter case N predictive steps are performed in parallel.
[ "Predictive", "step", "of", "Kalman", "filter", "." ]
python
train
ladybug-tools/ladybug
ladybug/sunpath.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/sunpath.py#L96-L100
def latitude(self, value): """Set latitude value.""" self._latitude = math.radians(float(value)) assert -self.PI / 2 <= self._latitude <= self.PI / 2, \ "latitude value should be between -90..90."
[ "def", "latitude", "(", "self", ",", "value", ")", ":", "self", ".", "_latitude", "=", "math", ".", "radians", "(", "float", "(", "value", ")", ")", "assert", "-", "self", ".", "PI", "/", "2", "<=", "self", ".", "_latitude", "<=", "self", ".", "P...
Set latitude value.
[ "Set", "latitude", "value", "." ]
python
train
DMSC-Instrument-Data/lewis
src/lewis/adapters/modbus.py
https://github.com/DMSC-Instrument-Data/lewis/blob/931d96b8c761550a6a58f6e61e202690db04233a/src/lewis/adapters/modbus.py#L218-L229
def is_valid(self): """ Check integrity and validity of this frame. :return: bool True if this frame is structurally valid. """ conditions = [ self.protocol_id == 0, # Modbus always uses protocol 0 2 <= self.length <= 260, # Absolute length limits len(self.data) == self.length - 2, # Total length matches data length ] return all(conditions)
[ "def", "is_valid", "(", "self", ")", ":", "conditions", "=", "[", "self", ".", "protocol_id", "==", "0", ",", "# Modbus always uses protocol 0", "2", "<=", "self", ".", "length", "<=", "260", ",", "# Absolute length limits", "len", "(", "self", ".", "data", ...
Check integrity and validity of this frame. :return: bool True if this frame is structurally valid.
[ "Check", "integrity", "and", "validity", "of", "this", "frame", "." ]
python
train
weld-project/weld
python/grizzly/grizzly/seriesweld.py
https://github.com/weld-project/weld/blob/8ddd6db6b28878bef0892da44b1d2002b564389c/python/grizzly/grizzly/seriesweld.py#L192-L213
def lower(self): """Summary Returns: TYPE: Description """ # TODO : Bug in nested map operating on strings # TODO : Check that self.weld_type is a string type vectype = self.weld_type if isinstance(vectype, WeldVec): elem_type = vectype.elemType if isinstance(elem_type, WeldChar): return SeriesWeld( grizzly_impl.to_lower( self.expr, elem_type ), self.weld_type, self.df, self.column_name ) raise Exception("Cannot call to_lower on non string type")
[ "def", "lower", "(", "self", ")", ":", "# TODO : Bug in nested map operating on strings", "# TODO : Check that self.weld_type is a string type", "vectype", "=", "self", ".", "weld_type", "if", "isinstance", "(", "vectype", ",", "WeldVec", ")", ":", "elem_type", "=", "ve...
Summary Returns: TYPE: Description
[ "Summary" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_maps_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_maps_ext.py#L66-L78
def maps_get_rules_output_rules_rulename(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") maps_get_rules = ET.Element("maps_get_rules") config = maps_get_rules output = ET.SubElement(maps_get_rules, "output") rules = ET.SubElement(output, "rules") rulename = ET.SubElement(rules, "rulename") rulename.text = kwargs.pop('rulename') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "maps_get_rules_output_rules_rulename", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "maps_get_rules", "=", "ET", ".", "Element", "(", "\"maps_get_rules\"", ")", "config", "=", "maps_get_ru...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
inveniosoftware-attic/invenio-utils
invenio_utils/shell.py
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/shell.py#L378-L400
def split_cli_ids_arg(value): """ Split ids given in the command line Possible formats are: * 1 * 1,2,3,4 * 1-5,20,30,40 Returns respectively * set([1]) * set([1,2,3,4]) * set([1,2,3,4,5,20,30,40]) """ def parse(el): el = el.strip() if not el: ret = [] elif '-' in el: start, end = el.split('-', 1) ret = xrange(int(start), int(end) + 1) else: ret = [int(el)] return ret return set(chain(*(parse(c) for c in value.split(',') if c.strip())))
[ "def", "split_cli_ids_arg", "(", "value", ")", ":", "def", "parse", "(", "el", ")", ":", "el", "=", "el", ".", "strip", "(", ")", "if", "not", "el", ":", "ret", "=", "[", "]", "elif", "'-'", "in", "el", ":", "start", ",", "end", "=", "el", "....
Split ids given in the command line Possible formats are: * 1 * 1,2,3,4 * 1-5,20,30,40 Returns respectively * set([1]) * set([1,2,3,4]) * set([1,2,3,4,5,20,30,40])
[ "Split", "ids", "given", "in", "the", "command", "line", "Possible", "formats", "are", ":", "*", "1", "*", "1", "2", "3", "4", "*", "1", "-", "5", "20", "30", "40", "Returns", "respectively", "*", "set", "(", "[", "1", "]", ")", "*", "set", "("...
python
train
PyMySQL/PyMySQL
pymysql/cursors.py
https://github.com/PyMySQL/PyMySQL/blob/3674bc6fd064bf88524e839c07690e8c35223709/pymysql/cursors.py#L128-L142
def mogrify(self, query, args=None): """ Returns the exact string that is sent to the database by calling the execute() method. This method follows the extension to the DB API 2.0 followed by Psycopg. """ conn = self._get_db() if PY2: # Use bytes on Python 2 always query = self._ensure_bytes(query, encoding=conn.encoding) if args is not None: query = query % self._escape_args(args, conn) return query
[ "def", "mogrify", "(", "self", ",", "query", ",", "args", "=", "None", ")", ":", "conn", "=", "self", ".", "_get_db", "(", ")", "if", "PY2", ":", "# Use bytes on Python 2 always", "query", "=", "self", ".", "_ensure_bytes", "(", "query", ",", "encoding",...
Returns the exact string that is sent to the database by calling the execute() method. This method follows the extension to the DB API 2.0 followed by Psycopg.
[ "Returns", "the", "exact", "string", "that", "is", "sent", "to", "the", "database", "by", "calling", "the", "execute", "()", "method", "." ]
python
train
mollie/mollie-api-python
mollie/api/resources/orders.py
https://github.com/mollie/mollie-api-python/blob/307836b70f0439c066718f1e375fa333dc6e5d77/mollie/api/resources/orders.py#L20-L32
def delete(self, order_id, data=None): """Cancel order and return the order object. Deleting an order causes the order status to change to canceled. The updated order object is returned. """ if not order_id or not order_id.startswith(self.RESOURCE_ID_PREFIX): raise IdentifierError( "Invalid order ID: '{id}'. An order ID should start with '{prefix}'.".format( id=order_id, prefix=self.RESOURCE_ID_PREFIX) ) result = super(Orders, self).delete(order_id, data) return self.get_resource_object(result)
[ "def", "delete", "(", "self", ",", "order_id", ",", "data", "=", "None", ")", ":", "if", "not", "order_id", "or", "not", "order_id", ".", "startswith", "(", "self", ".", "RESOURCE_ID_PREFIX", ")", ":", "raise", "IdentifierError", "(", "\"Invalid order ID: '{...
Cancel order and return the order object. Deleting an order causes the order status to change to canceled. The updated order object is returned.
[ "Cancel", "order", "and", "return", "the", "order", "object", "." ]
python
train
Clinical-Genomics/scout
scout/server/blueprints/variants/controllers.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/variants/controllers.py#L205-L277
def parse_variant(store, institute_obj, case_obj, variant_obj, update=False, genome_build='37', get_compounds = True): """Parse information about variants. - Adds information about compounds - Updates the information about compounds if necessary and 'update=True' Args: store(scout.adapter.MongoAdapter) institute_obj(scout.models.Institute) case_obj(scout.models.Case) variant_obj(scout.models.Variant) update(bool): If variant should be updated in database genome_build(str) """ has_changed = False compounds = variant_obj.get('compounds', []) if compounds and get_compounds: # Check if we need to add compound information # If it is the first time the case is viewed we fill in some compound information if 'not_loaded' not in compounds[0]: new_compounds = store.update_variant_compounds(variant_obj) variant_obj['compounds'] = new_compounds has_changed = True # sort compounds on combined rank score variant_obj['compounds'] = sorted(variant_obj['compounds'], key=lambda compound: -compound['combined_score']) # Update the hgnc symbols if they are incorrect variant_genes = variant_obj.get('genes') if variant_genes is not None: for gene_obj in variant_genes: # If there is no hgnc id there is nothin we can do if not gene_obj['hgnc_id']: continue # Else we collect the gene object and check the id if gene_obj.get('hgnc_symbol') is None: hgnc_gene = store.hgnc_gene(gene_obj['hgnc_id'], build=genome_build) if not hgnc_gene: continue has_changed = True gene_obj['hgnc_symbol'] = hgnc_gene['hgnc_symbol'] # We update the variant if some information was missing from loading # Or if symbold in reference genes have changed if update and has_changed: variant_obj = store.update_variant(variant_obj) variant_obj['comments'] = store.events(institute_obj, case=case_obj, variant_id=variant_obj['variant_id'], comments=True) if variant_genes: variant_obj.update(get_predictions(variant_genes)) if variant_obj.get('category') == 'cancer': variant_obj.update(get_variant_info(variant_genes)) for 
compound_obj in compounds: compound_obj.update(get_predictions(compound_obj.get('genes', []))) if isinstance(variant_obj.get('acmg_classification'), int): acmg_code = ACMG_MAP[variant_obj['acmg_classification']] variant_obj['acmg_classification'] = ACMG_COMPLETE_MAP[acmg_code] # convert length for SV variants variant_length = variant_obj.get('length') variant_obj['length'] = {100000000000: 'inf', -1: 'n.d.'}.get(variant_length, variant_length) if not 'end_chrom' in variant_obj: variant_obj['end_chrom'] = variant_obj['chromosome'] return variant_obj
[ "def", "parse_variant", "(", "store", ",", "institute_obj", ",", "case_obj", ",", "variant_obj", ",", "update", "=", "False", ",", "genome_build", "=", "'37'", ",", "get_compounds", "=", "True", ")", ":", "has_changed", "=", "False", "compounds", "=", "varia...
Parse information about variants. - Adds information about compounds - Updates the information about compounds if necessary and 'update=True' Args: store(scout.adapter.MongoAdapter) institute_obj(scout.models.Institute) case_obj(scout.models.Case) variant_obj(scout.models.Variant) update(bool): If variant should be updated in database genome_build(str)
[ "Parse", "information", "about", "variants", "." ]
python
test
fastai/fastai
fastai/callbacks/tensorboard.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callbacks/tensorboard.py#L330-L333
def _write_median_gradient(self)->None: "Writes the median of the gradients to Tensorboard." median_gradient = statistics.median(x.data.median() for x in self.gradients) self._add_gradient_scalar('median_gradient', scalar_value=median_gradient)
[ "def", "_write_median_gradient", "(", "self", ")", "->", "None", ":", "median_gradient", "=", "statistics", ".", "median", "(", "x", ".", "data", ".", "median", "(", ")", "for", "x", "in", "self", ".", "gradients", ")", "self", ".", "_add_gradient_scalar",...
Writes the median of the gradients to Tensorboard.
[ "Writes", "the", "median", "of", "the", "gradients", "to", "Tensorboard", "." ]
python
train
apple/turicreate
src/unity/python/turicreate/data_structures/gframe.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/gframe.py#L321-L333
def num_rows(self): """ Returns the number of rows. Returns ------- out : int Number of rows in the SFrame. """ if self._is_vertex_frame(): return self.__graph__.summary()['num_vertices'] elif self._is_edge_frame(): return self.__graph__.summary()['num_edges']
[ "def", "num_rows", "(", "self", ")", ":", "if", "self", ".", "_is_vertex_frame", "(", ")", ":", "return", "self", ".", "__graph__", ".", "summary", "(", ")", "[", "'num_vertices'", "]", "elif", "self", ".", "_is_edge_frame", "(", ")", ":", "return", "s...
Returns the number of rows. Returns ------- out : int Number of rows in the SFrame.
[ "Returns", "the", "number", "of", "rows", "." ]
python
train
bsmurphy/PyKrige
pykrige/uk.py
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/uk.py#L618-L633
def get_variogram_points(self): """Returns both the lags and the variogram function evaluated at each of them. The evaluation of the variogram function and the lags are produced internally. This method is convenient when the user wants to access to the lags and the resulting variogram (according to the model provided) for further analysis. Returns ------- (tuple) tuple containing: lags (array) - the lags at which the variogram was evaluated variogram (array) - the variogram function evaluated at the lags """ return self.lags, self.variogram_function(self.variogram_model_parameters, self.lags)
[ "def", "get_variogram_points", "(", "self", ")", ":", "return", "self", ".", "lags", ",", "self", ".", "variogram_function", "(", "self", ".", "variogram_model_parameters", ",", "self", ".", "lags", ")" ]
Returns both the lags and the variogram function evaluated at each of them. The evaluation of the variogram function and the lags are produced internally. This method is convenient when the user wants to access to the lags and the resulting variogram (according to the model provided) for further analysis. Returns ------- (tuple) tuple containing: lags (array) - the lags at which the variogram was evaluated variogram (array) - the variogram function evaluated at the lags
[ "Returns", "both", "the", "lags", "and", "the", "variogram", "function", "evaluated", "at", "each", "of", "them", ".", "The", "evaluation", "of", "the", "variogram", "function", "and", "the", "lags", "are", "produced", "internally", ".", "This", "method", "i...
python
train
pmuller/versions
versions/constraints.py
https://github.com/pmuller/versions/blob/951bc3fd99b6a675190f11ee0752af1d7ff5b440/versions/constraints.py#L109-L224
def merge(constraints): """Merge ``constraints``. It removes dupplicate, pruned and merged constraints. :param constraints: Current constraints. :type constraints: Iterable of :class:`.Constraint` objects. :rtype: :func:`list` of :class:`.Constraint` objects. :raises: :exc:`.ExclusiveConstraints` """ # Dictionary :class:`Operator`: set of :class:`Version`. operators = defaultdict(set) for constraint in constraints: operators[constraint.operator].add(constraint.version) # Get most recent version required by > constraints. if gt in operators: gt_ver = sorted(operators[gt])[-1] else: gt_ver = None # Get most recent version required by >= constraints. if ge in operators: ge_ver = sorted(operators[ge])[-1] else: ge_ver = None # Get least recent version required by < constraints. if lt in operators: lt_ver = sorted(operators[lt])[0] else: lt_ver = None # Get least recent version required by <= constraints. if le in operators: le_ver = sorted(operators[le])[0] else: le_ver = None # Most restrictive LT/LE constraint. l_constraint = None if le_ver: if lt_ver: le_constraint = Constraint(le, le_ver) lt_constraint = Constraint(lt, lt_ver) if le_ver < lt_ver: # <= 1, < 2 l_constraint = le_constraint l_less_restrictive_c = lt_constraint else: # <= 2, < 1 # <= 2, < 2 l_constraint = lt_constraint l_less_restrictive_c = le_constraint LOGGER.debug('Removed constraint %s because it is less ' 'restrictive than %s', l_less_restrictive_c, l_constraint) else: l_constraint = Constraint(le, le_ver) elif lt_ver: l_constraint = Constraint(lt, lt_ver) # Most restrictive GT/GE constraint. 
g_constraint = None if ge_ver: if gt_ver: gt_constraint = Constraint(gt, gt_ver) ge_constraint = Constraint(ge, ge_ver) if ge_ver <= gt_ver: # >= 1, > 2 # >= 2, > 2 g_constraint = gt_constraint g_less_restrictive_c = ge_constraint else: # >= 2, > 1 g_constraint = ge_constraint g_less_restrictive_c = gt_constraint LOGGER.debug('Removed constraint %s because it is less ' 'restrictive than %s', g_less_restrictive_c, g_constraint) else: g_constraint = Constraint(ge, ge_ver) elif gt_ver: g_constraint = Constraint(gt, gt_ver) # Check if g_constraint and l_constraint are conflicting if g_constraint and l_constraint: if g_constraint.version == l_constraint.version: if g_constraint.operator == ge and l_constraint.operator == le: # Merge >= and <= constraints on same version to a == # constraint operators[eq].add(g_constraint.version) LOGGER.debug('Merged constraints: %s and %s into ==%s', l_constraint, g_constraint, g_constraint.version) l_constraint, g_constraint = None, None else: raise ExclusiveConstraints(g_constraint, [l_constraint]) elif g_constraint.version > l_constraint.version: raise ExclusiveConstraints(g_constraint, [l_constraint]) ne_constraints = [Constraint(ne, v) for v in operators[ne]] eq_constraints = [Constraint(eq, v) for v in operators[eq]] if eq_constraints: eq_constraint = eq_constraints.pop() # An eq constraint conflicts with other constraints if g_constraint or l_constraint or ne_constraints or eq_constraints: conflict_list = [c for c in (g_constraint, l_constraint) if c] conflict_list.extend(ne_constraints) conflict_list.extend(eq_constraints) raise ExclusiveConstraints(eq_constraint, conflict_list) return [eq_constraint] else: constraints = ne_constraints + [g_constraint, l_constraint] return [c for c in constraints if c]
[ "def", "merge", "(", "constraints", ")", ":", "# Dictionary :class:`Operator`: set of :class:`Version`.", "operators", "=", "defaultdict", "(", "set", ")", "for", "constraint", "in", "constraints", ":", "operators", "[", "constraint", ".", "operator", "]", ".", "add...
Merge ``constraints``. It removes dupplicate, pruned and merged constraints. :param constraints: Current constraints. :type constraints: Iterable of :class:`.Constraint` objects. :rtype: :func:`list` of :class:`.Constraint` objects. :raises: :exc:`.ExclusiveConstraints`
[ "Merge", "constraints", "." ]
python
train
ONSdigital/sdc-cryptography
sdc/crypto/scripts/generate_keys.py
https://github.com/ONSdigital/sdc-cryptography/blob/846feb2b27b1c62d35ff2c290c05abcead68b23c/sdc/crypto/scripts/generate_keys.py#L137-L164
def get_private_key(platform, service, purpose, key_use, version, private_key, keys_folder): ''' Loads a private key from the file system and adds it to a dict of keys :param keys: A dict of keys :param platform the platform the key is for :param service the service the key is for :param key_use what the key is used for :param version the version of the key :param purpose: The purpose of the private key :param private_key: The name of the private key to add :param keys_folder: The location on disk where the key exists :param kid_override: This allows the caller to override the generated KID value :return: None ''' private_key_data = get_file_contents(keys_folder, private_key) private_key = load_pem_private_key(private_key_data.encode(), None, backend=backend) pub_key = private_key.public_key() pub_bytes = pub_key.public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo) kid = _generate_kid_from_key(pub_bytes.decode()) key = _create_key(platform=platform, service=service, key_use=key_use, key_type="private", purpose=purpose, version=version, public_key=pub_bytes.decode(), private_key=private_key_data) return kid, key
[ "def", "get_private_key", "(", "platform", ",", "service", ",", "purpose", ",", "key_use", ",", "version", ",", "private_key", ",", "keys_folder", ")", ":", "private_key_data", "=", "get_file_contents", "(", "keys_folder", ",", "private_key", ")", "private_key", ...
Loads a private key from the file system and adds it to a dict of keys :param keys: A dict of keys :param platform the platform the key is for :param service the service the key is for :param key_use what the key is used for :param version the version of the key :param purpose: The purpose of the private key :param private_key: The name of the private key to add :param keys_folder: The location on disk where the key exists :param kid_override: This allows the caller to override the generated KID value :return: None
[ "Loads", "a", "private", "key", "from", "the", "file", "system", "and", "adds", "it", "to", "a", "dict", "of", "keys", ":", "param", "keys", ":", "A", "dict", "of", "keys", ":", "param", "platform", "the", "platform", "the", "key", "is", "for", ":", ...
python
test
harabchuk/kibana-dashboard-api
kibana_dashboard_api/paneltools.py
https://github.com/harabchuk/kibana-dashboard-api/blob/8a13d5078fa92fb73f06498757ba9f51632e8a23/kibana_dashboard_api/paneltools.py#L12-L25
def find_shape(bottom_lines, max_len): """ Finds a shape of lowest horizontal lines with step=1 :param bottom_lines: :param max_len: :return: list of levels (row values), list indexes are columns """ shape = [1] * max_len for i in range(max_len): for line in bottom_lines: if line[0] <= i + 1 < line[2]: shape[i] = line[1] break return shape
[ "def", "find_shape", "(", "bottom_lines", ",", "max_len", ")", ":", "shape", "=", "[", "1", "]", "*", "max_len", "for", "i", "in", "range", "(", "max_len", ")", ":", "for", "line", "in", "bottom_lines", ":", "if", "line", "[", "0", "]", "<=", "i", ...
Finds a shape of lowest horizontal lines with step=1 :param bottom_lines: :param max_len: :return: list of levels (row values), list indexes are columns
[ "Finds", "a", "shape", "of", "lowest", "horizontal", "lines", "with", "step", "=", "1", ":", "param", "bottom_lines", ":", ":", "param", "max_len", ":", ":", "return", ":", "list", "of", "levels", "(", "row", "values", ")", "list", "indexes", "are", "c...
python
train
bcbio/bcbio-nextgen
scripts/bcbio_nextgen_install.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/bcbio_nextgen_install.py#L119-L137
def install_anaconda_python(args): """Provide isolated installation of Anaconda python for running bcbio-nextgen. http://docs.continuum.io/anaconda/index.html """ anaconda_dir = os.path.join(args.datadir, "anaconda") bindir = os.path.join(anaconda_dir, "bin") conda = os.path.join(bindir, "conda") if not os.path.exists(anaconda_dir) or not os.path.exists(conda): if os.path.exists(anaconda_dir): shutil.rmtree(anaconda_dir) dist = args.distribution if args.distribution else _guess_distribution() url = REMOTES["anaconda"] % ("MacOSX" if dist.lower() == "macosx" else "Linux") if not os.path.exists(os.path.basename(url)): subprocess.check_call(["wget", "--progress=dot:mega", "--no-check-certificate", url]) subprocess.check_call("bash %s -b -p %s" % (os.path.basename(url), anaconda_dir), shell=True) return {"conda": conda, "pip": os.path.join(bindir, "pip"), "dir": anaconda_dir}
[ "def", "install_anaconda_python", "(", "args", ")", ":", "anaconda_dir", "=", "os", ".", "path", ".", "join", "(", "args", ".", "datadir", ",", "\"anaconda\"", ")", "bindir", "=", "os", ".", "path", ".", "join", "(", "anaconda_dir", ",", "\"bin\"", ")", ...
Provide isolated installation of Anaconda python for running bcbio-nextgen. http://docs.continuum.io/anaconda/index.html
[ "Provide", "isolated", "installation", "of", "Anaconda", "python", "for", "running", "bcbio", "-", "nextgen", ".", "http", ":", "//", "docs", ".", "continuum", ".", "io", "/", "anaconda", "/", "index", ".", "html" ]
python
train
thomasdelaet/python-velbus
velbus/messages/kwh_status.py
https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/messages/kwh_status.py#L25-L44
def populate(self, priority, address, rtr, data): """ -DB1 last 2 bits = channel -DB1 first 6 bist = pulses -DB2-5 = pulse counter -DB6-7 = ms/pulse :return: None """ assert isinstance(data, bytes) self.needs_no_rtr(rtr) self.needs_data(data, 7) self.set_attributes(priority, address, rtr) self.channel = (data[0] & 0x03) +1 self.pulses = (data[0] >> 2) * 100 self.counter = (data[1] << 24) + (data[2] << 16) + (data[3] << 8) + data[4] self.kwh = float(float(self.counter)/self.pulses) self.delay = (data[5] << 8) + data[6] self.watt = float((1000 * 1000 * 3600) / (self.delay * self.pulses)) if self.watt < 55: self.watt = 0
[ "def", "populate", "(", "self", ",", "priority", ",", "address", ",", "rtr", ",", "data", ")", ":", "assert", "isinstance", "(", "data", ",", "bytes", ")", "self", ".", "needs_no_rtr", "(", "rtr", ")", "self", ".", "needs_data", "(", "data", ",", "7"...
-DB1 last 2 bits = channel -DB1 first 6 bist = pulses -DB2-5 = pulse counter -DB6-7 = ms/pulse :return: None
[ "-", "DB1", "last", "2", "bits", "=", "channel", "-", "DB1", "first", "6", "bist", "=", "pulses", "-", "DB2", "-", "5", "=", "pulse", "counter", "-", "DB6", "-", "7", "=", "ms", "/", "pulse", ":", "return", ":", "None" ]
python
train
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L4559-L4572
def MoveEndpointByRange(self, srcEndPoint: int, textRange: 'TextRange', targetEndPoint: int, waitTime: float = OPERATION_WAIT_TIME) -> bool: """ Call IUIAutomationTextRange::MoveEndpointByRange. Move one endpoint of the current text range to the specified endpoint of a second text range. srcEndPoint: int, a value in class `TextPatternRangeEndpoint`. textRange: `TextRange`. targetEndPoint: int, a value in class `TextPatternRangeEndpoint`. waitTime: float. Return bool, True if succeed otherwise False. Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-moveendpointbyrange """ ret = self.textRange.MoveEndpointByRange(srcEndPoint, textRange.textRange, targetEndPoint) == S_OK time.sleep(waitTime) return ret
[ "def", "MoveEndpointByRange", "(", "self", ",", "srcEndPoint", ":", "int", ",", "textRange", ":", "'TextRange'", ",", "targetEndPoint", ":", "int", ",", "waitTime", ":", "float", "=", "OPERATION_WAIT_TIME", ")", "->", "bool", ":", "ret", "=", "self", ".", ...
Call IUIAutomationTextRange::MoveEndpointByRange. Move one endpoint of the current text range to the specified endpoint of a second text range. srcEndPoint: int, a value in class `TextPatternRangeEndpoint`. textRange: `TextRange`. targetEndPoint: int, a value in class `TextPatternRangeEndpoint`. waitTime: float. Return bool, True if succeed otherwise False. Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-moveendpointbyrange
[ "Call", "IUIAutomationTextRange", "::", "MoveEndpointByRange", ".", "Move", "one", "endpoint", "of", "the", "current", "text", "range", "to", "the", "specified", "endpoint", "of", "a", "second", "text", "range", ".", "srcEndPoint", ":", "int", "a", "value", "i...
python
valid
fedora-infra/fedora-messaging
fedora_messaging/config.py
https://github.com/fedora-infra/fedora-messaging/blob/be3e88534e2b15d579bcd24f9c4b7e795cb7e0b7/fedora_messaging/config.py#L421-L439
def validate_client_properties(props): """ Validate the client properties setting. This will add the "version", "information", and "product" keys if they are missing. All other keys are application-specific. Raises: exceptions.ConfigurationException: If any of the basic keys are overridden. """ for key in ("version", "information", "product"): # Nested dictionaries are not merged so key can be missing if key not in props: props[key] = DEFAULTS["client_properties"][key] # Don't let users override these as they identify this library in AMQP if props[key] != DEFAULTS["client_properties"][key]: raise exceptions.ConfigurationException( '"{}" is a reserved keyword in client_properties'.format(key) )
[ "def", "validate_client_properties", "(", "props", ")", ":", "for", "key", "in", "(", "\"version\"", ",", "\"information\"", ",", "\"product\"", ")", ":", "# Nested dictionaries are not merged so key can be missing", "if", "key", "not", "in", "props", ":", "props", ...
Validate the client properties setting. This will add the "version", "information", and "product" keys if they are missing. All other keys are application-specific. Raises: exceptions.ConfigurationException: If any of the basic keys are overridden.
[ "Validate", "the", "client", "properties", "setting", "." ]
python
train
maartenbreddels/ipyvolume
ipyvolume/pylab.py
https://github.com/maartenbreddels/ipyvolume/blob/e68b72852b61276f8e6793bc8811f5b2432a155f/ipyvolume/pylab.py#L1158-L1182
def view(azimuth=None, elevation=None, distance=None): """Set camera angles and distance and return the current. :param float azimuth: rotation around the axis pointing up in degrees :param float elevation: rotation where +90 means 'up', -90 means 'down', in degrees :param float distance: radial distance from the center to the camera. """ fig = gcf() # first calculate the current values x, y, z = fig.camera.position r = np.sqrt(x ** 2 + y ** 2 + z ** 2) az = np.degrees(np.arctan2(x, z)) el = np.degrees(np.arcsin(y / r)) if azimuth is None: azimuth = az if elevation is None: elevation = el if distance is None: distance = r cosaz = np.cos(np.radians(azimuth)) sinaz = np.sin(np.radians(azimuth)) sine = np.sin(np.radians(elevation)) cose = np.cos(np.radians(elevation)) fig.camera.position = (distance * sinaz * cose, distance * sine, distance * cosaz * cose) return azimuth, elevation, distance
[ "def", "view", "(", "azimuth", "=", "None", ",", "elevation", "=", "None", ",", "distance", "=", "None", ")", ":", "fig", "=", "gcf", "(", ")", "# first calculate the current values", "x", ",", "y", ",", "z", "=", "fig", ".", "camera", ".", "position",...
Set camera angles and distance and return the current. :param float azimuth: rotation around the axis pointing up in degrees :param float elevation: rotation where +90 means 'up', -90 means 'down', in degrees :param float distance: radial distance from the center to the camera.
[ "Set", "camera", "angles", "and", "distance", "and", "return", "the", "current", "." ]
python
train
mayfield/cellulario
cellulario/iocell.py
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L248-L252
def clean(self): """ Run all of the cleaners added by the user. """ if self.cleaners: yield from asyncio.wait([x() for x in self.cleaners], loop=self.loop)
[ "def", "clean", "(", "self", ")", ":", "if", "self", ".", "cleaners", ":", "yield", "from", "asyncio", ".", "wait", "(", "[", "x", "(", ")", "for", "x", "in", "self", ".", "cleaners", "]", ",", "loop", "=", "self", ".", "loop", ")" ]
Run all of the cleaners added by the user.
[ "Run", "all", "of", "the", "cleaners", "added", "by", "the", "user", "." ]
python
train