Dataset schema (one row per field: name, dtype, min/max string length or class count):

    field               dtype           min    max
    nwo                 stringlengths   5      91
    sha                 stringlengths   40     40
    path                stringlengths   5      174
    language            stringclasses   1 value
    identifier          stringlengths   1      120
    parameters          stringlengths   0      3.15k
    argument_list       stringclasses   1 value
    return_statement    stringlengths   0      24.1k
    docstring           stringlengths   0      27.3k
    docstring_summary   stringlengths   0      13.8k
    docstring_tokens    sequence
    function            stringlengths   22     139k
    function_tokens     sequence
    url                 stringlengths   87     283
LumaPictures/pymel
fa88a3f4fa18e09bb8aa9bdf4dab53d984bada72
pymel/util/utilitytypes.py
python
metaReadOnlyAttr.__setattr__
(cls, name, value)
overload __setattr__ to forbid modification of read only class info
overload __setattr__ to forbid modification of read only class info
[ "overload", "__setattr__", "to", "forbid", "modification", "of", "read", "only", "class", "info" ]
def __setattr__(cls, name, value):  # @NoSelf
    """ overload __setattr__ to forbid modification of read only class info """
    readonly = {}
    for c in inspect.getmro(cls):
        if hasattr(c, '__readonly__'):
            readonly.update(c.__readonly__)
    if name in readonly:
        raise AttributeError("attribute %s is a read only class attribute and cannot be modified on class %s" % (name, cls.__name__))
    else:
        super(metaReadOnlyAttr, cls).__setattr__(name, value)
[ "def", "__setattr__", "(", "cls", ",", "name", ",", "value", ")", ":", "# @NoSelf", "readonly", "=", "{", "}", "for", "c", "in", "inspect", ".", "getmro", "(", "cls", ")", ":", "if", "hasattr", "(", "c", ",", "'__readonly__'", ")", ":", "readonly", ".", "update", "(", "c", ".", "__readonly__", ")", "if", "name", "in", "readonly", ":", "raise", "AttributeError", "(", "\"attribute %s is a read only class attribute and cannot be modified on class %s\"", "%", "(", "name", ",", "cls", ".", "__name__", ")", ")", "else", ":", "super", "(", "metaReadOnlyAttr", ",", "cls", ")", ".", "__setattr__", "(", "name", ",", "value", ")" ]
https://github.com/LumaPictures/pymel/blob/fa88a3f4fa18e09bb8aa9bdf4dab53d984bada72/pymel/util/utilitytypes.py#L292-L301
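A usage sketch for the record above. The protocol (a `__readonly__` dict collected across the MRO) follows directly from the function body, but the minimal metaclass and the `Constants` class below are illustrative stand-ins rather than pymel's actual classes, so the snippet runs standalone (Python 3 syntax):

    import inspect

    class MetaReadOnly(type):
        # same logic as metaReadOnlyAttr.__setattr__ in the record above
        def __setattr__(cls, name, value):
            readonly = {}
            for c in inspect.getmro(cls):
                if hasattr(c, '__readonly__'):
                    readonly.update(c.__readonly__)
            if name in readonly:
                raise AttributeError("attribute %s is read only on class %s" % (name, cls.__name__))
            super().__setattr__(name, value)

    class Constants(metaclass=MetaReadOnly):
        __readonly__ = {'PI': True}
        PI = 3.14159

    Constants.E = 2.71828        # allowed: not listed in __readonly__
    try:
        Constants.PI = 3.0       # blocked by the metaclass
    except AttributeError as err:
        print(err)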
whyliam/whyliam.workflows.youdao
2dfa7f1de56419dab1c2e70c1a27e5e13ba25a5c
urllib3/util/retry.py
python
Retry.increment
( self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None, )
return new_retry
Return a new Retry object with incremented retry counters. :param response: A response object, or None, if the server did not return a response. :type response: :class:`~urllib3.response.HTTPResponse` :param Exception error: An error encountered during the request, or None if the response was received successfully. :return: A new ``Retry`` object.
Return a new Retry object with incremented retry counters.
[ "Return", "a", "new", "Retry", "object", "with", "incremented", "retry", "counters", "." ]
def increment(
    self,
    method=None,
    url=None,
    response=None,
    error=None,
    _pool=None,
    _stacktrace=None,
):
    """Return a new Retry object with incremented retry counters.

    :param response: A response object, or None, if the server did not
        return a response.
    :type response: :class:`~urllib3.response.HTTPResponse`
    :param Exception error: An error encountered during the request, or
        None if the response was received successfully.

    :return: A new ``Retry`` object.
    """
    if self.total is False and error:
        # Disabled, indicate to re-raise the error.
        raise six.reraise(type(error), error, _stacktrace)

    total = self.total
    if total is not None:
        total -= 1

    connect = self.connect
    read = self.read
    redirect = self.redirect
    status_count = self.status
    cause = "unknown"
    status = None
    redirect_location = None

    if error and self._is_connection_error(error):
        # Connect retry?
        if connect is False:
            raise six.reraise(type(error), error, _stacktrace)
        elif connect is not None:
            connect -= 1

    elif error and self._is_read_error(error):
        # Read retry?
        if read is False or not self._is_method_retryable(method):
            raise six.reraise(type(error), error, _stacktrace)
        elif read is not None:
            read -= 1

    elif response and response.get_redirect_location():
        # Redirect retry?
        if redirect is not None:
            redirect -= 1
        cause = "too many redirects"
        redirect_location = response.get_redirect_location()
        status = response.status

    else:
        # Incrementing because of a server error like a 500 in
        # status_forcelist and the given method is in the whitelist
        cause = ResponseError.GENERIC_ERROR
        if response and response.status:
            if status_count is not None:
                status_count -= 1
            cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
            status = response.status

    history = self.history + (
        RequestHistory(method, url, error, status, redirect_location),
    )

    new_retry = self.new(
        total=total,
        connect=connect,
        read=read,
        redirect=redirect,
        status=status_count,
        history=history,
    )

    if new_retry.is_exhausted():
        raise MaxRetryError(_pool, url, error or ResponseError(cause))

    log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)

    return new_retry
[ "def", "increment", "(", "self", ",", "method", "=", "None", ",", "url", "=", "None", ",", "response", "=", "None", ",", "error", "=", "None", ",", "_pool", "=", "None", ",", "_stacktrace", "=", "None", ",", ")", ":", "if", "self", ".", "total", "is", "False", "and", "error", ":", "# Disabled, indicate to re-raise the error.", "raise", "six", ".", "reraise", "(", "type", "(", "error", ")", ",", "error", ",", "_stacktrace", ")", "total", "=", "self", ".", "total", "if", "total", "is", "not", "None", ":", "total", "-=", "1", "connect", "=", "self", ".", "connect", "read", "=", "self", ".", "read", "redirect", "=", "self", ".", "redirect", "status_count", "=", "self", ".", "status", "cause", "=", "\"unknown\"", "status", "=", "None", "redirect_location", "=", "None", "if", "error", "and", "self", ".", "_is_connection_error", "(", "error", ")", ":", "# Connect retry?", "if", "connect", "is", "False", ":", "raise", "six", ".", "reraise", "(", "type", "(", "error", ")", ",", "error", ",", "_stacktrace", ")", "elif", "connect", "is", "not", "None", ":", "connect", "-=", "1", "elif", "error", "and", "self", ".", "_is_read_error", "(", "error", ")", ":", "# Read retry?", "if", "read", "is", "False", "or", "not", "self", ".", "_is_method_retryable", "(", "method", ")", ":", "raise", "six", ".", "reraise", "(", "type", "(", "error", ")", ",", "error", ",", "_stacktrace", ")", "elif", "read", "is", "not", "None", ":", "read", "-=", "1", "elif", "response", "and", "response", ".", "get_redirect_location", "(", ")", ":", "# Redirect retry?", "if", "redirect", "is", "not", "None", ":", "redirect", "-=", "1", "cause", "=", "\"too many redirects\"", "redirect_location", "=", "response", ".", "get_redirect_location", "(", ")", "status", "=", "response", ".", "status", "else", ":", "# Incrementing because of a server error like a 500 in", "# status_forcelist and a the given method is in the whitelist", "cause", "=", "ResponseError", ".", "GENERIC_ERROR", "if", "response", "and", "response", ".", "status", ":", "if", "status_count", "is", "not", "None", ":", "status_count", "-=", "1", "cause", "=", "ResponseError", ".", "SPECIFIC_ERROR", ".", "format", "(", "status_code", "=", "response", ".", "status", ")", "status", "=", "response", ".", "status", "history", "=", "self", ".", "history", "+", "(", "RequestHistory", "(", "method", ",", "url", ",", "error", ",", "status", ",", "redirect_location", ")", ",", ")", "new_retry", "=", "self", ".", "new", "(", "total", "=", "total", ",", "connect", "=", "connect", ",", "read", "=", "read", ",", "redirect", "=", "redirect", ",", "status", "=", "status_count", ",", "history", "=", "history", ",", ")", "if", "new_retry", ".", "is_exhausted", "(", ")", ":", "raise", "MaxRetryError", "(", "_pool", ",", "url", ",", "error", "or", "ResponseError", "(", "cause", ")", ")", "log", ".", "debug", "(", "\"Incremented Retry for (url='%s'): %r\"", ",", "url", ",", "new_retry", ")", "return", "new_retry" ]
https://github.com/whyliam/whyliam.workflows.youdao/blob/2dfa7f1de56419dab1c2e70c1a27e5e13ba25a5c/urllib3/util/retry.py#L355-L440
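A hedged usage sketch of the retry bookkeeping above, driving increment() by hand (in real use, the connection pool's urlopen() calls it internally). With neither a response nor an error, each call lands in the generic-server-error branch and decrements total until MaxRetryError fires:

    from urllib3.util.retry import Retry
    from urllib3.exceptions import MaxRetryError

    retry = Retry(total=2)
    try:
        while True:
            # returns a *new* Retry object; the original is immutable state
            retry = retry.increment(method='GET', url='/ping')
            print('retries left:', retry.total)
    except MaxRetryError as exc:
        print('gave up:', exc.reason)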
google/grr
8ad8a4d2c5a93c92729206b7771af19d92d4f915
grr/server/grr_response_server/flows/general/discovery.py
python
Interrogate.ClientInfo
(self, responses)
Obtain some information about the GRR client running.
Obtain some information about the GRR client running.
[ "Obtain", "some", "information", "about", "the", "GRR", "client", "running", "." ]
def ClientInfo(self, responses):
    """Obtain some information about the GRR client running."""
    if not responses.success:
        self.Log("Could not get ClientInfo.")
        return
    response = responses.First()

    if fleetspeak_utils.IsFleetspeakEnabledClient(self.client_id):
        # Fetch labels for the client from Fleetspeak. If Fleetspeak doesn't
        # have any labels for the GRR client, fall back to labels reported by
        # the client.
        fleetspeak_labels = fleetspeak_utils.GetLabelsFromFleetspeak(self.client_id)
        if fleetspeak_labels:
            response.labels = fleetspeak_labels
        else:
            FLEETSPEAK_UNLABELED_CLIENTS.Increment()
            logging.warning("Failed to get labels for Fleetspeak client %s.",
                            self.client_id)

    sanitized_labels = []
    for label in response.labels:
        try:
            self._ValidateLabel(label)
            sanitized_labels.append(label)
        except ValueError:
            self.Log("Got invalid label: %s", label)

    response.labels = sanitized_labels

    self.state.client.startup_info.client_info = response

    metadata = data_store.REL_DB.ReadClientMetadata(self.client_id)
    if metadata and metadata.last_fleetspeak_validation_info:
        self.state.client.fleetspeak_validation_info = (
            metadata.last_fleetspeak_validation_info)
[ "def", "ClientInfo", "(", "self", ",", "responses", ")", ":", "if", "not", "responses", ".", "success", ":", "self", ".", "Log", "(", "\"Could not get ClientInfo.\"", ")", "return", "response", "=", "responses", ".", "First", "(", ")", "if", "fleetspeak_utils", ".", "IsFleetspeakEnabledClient", "(", "self", ".", "client_id", ")", ":", "# Fetch labels for the client from Fleetspeak. If Fleetspeak doesn't", "# have any labels for the GRR client, fall back to labels reported by", "# the client.", "fleetspeak_labels", "=", "fleetspeak_utils", ".", "GetLabelsFromFleetspeak", "(", "self", ".", "client_id", ")", "if", "fleetspeak_labels", ":", "response", ".", "labels", "=", "fleetspeak_labels", "else", ":", "FLEETSPEAK_UNLABELED_CLIENTS", ".", "Increment", "(", ")", "logging", ".", "warning", "(", "\"Failed to get labels for Fleetspeak client %s.\"", ",", "self", ".", "client_id", ")", "sanitized_labels", "=", "[", "]", "for", "label", "in", "response", ".", "labels", ":", "try", ":", "self", ".", "_ValidateLabel", "(", "label", ")", "sanitized_labels", ".", "append", "(", "label", ")", "except", "ValueError", ":", "self", ".", "Log", "(", "\"Got invalid label: %s\"", ",", "label", ")", "response", ".", "labels", "=", "sanitized_labels", "self", ".", "state", ".", "client", ".", "startup_info", ".", "client_info", "=", "response", "metadata", "=", "data_store", ".", "REL_DB", ".", "ReadClientMetadata", "(", "self", ".", "client_id", ")", "if", "metadata", "and", "metadata", ".", "last_fleetspeak_validation_info", ":", "self", ".", "state", ".", "client", ".", "fleetspeak_validation_info", "=", "(", "metadata", ".", "last_fleetspeak_validation_info", ")" ]
https://github.com/google/grr/blob/8ad8a4d2c5a93c92729206b7771af19d92d4f915/grr/server/grr_response_server/flows/general/discovery.py#L270-L306
edfungus/Crouton
ada98b3930192938a48909072b45cb84b945f875
clients/esp8266_clients/venv/lib/python2.7/site-packages/pip/_vendor/distlib/database.py
python
_Cache.__init__
(self)
Initialise an instance. There is normally one for each DistributionPath.
Initialise an instance. There is normally one for each DistributionPath.
[ "Initialise", "an", "instance", ".", "There", "is", "normally", "one", "for", "each", "DistributionPath", "." ]
def __init__(self):
    """
    Initialise an instance. There is normally one for each DistributionPath.
    """
    self.name = {}
    self.path = {}
    self.generated = False
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "name", "=", "{", "}", "self", ".", "path", "=", "{", "}", "self", ".", "generated", "=", "False" ]
https://github.com/edfungus/Crouton/blob/ada98b3930192938a48909072b45cb84b945f875/clients/esp8266_clients/venv/lib/python2.7/site-packages/pip/_vendor/distlib/database.py#L48-L54
GoogleCloudPlatform/PerfKitBenchmarker
6e3412d7d5e414b8ca30ed5eaf970cef1d919a67
perfkitbenchmarker/linux_packages/tensorflow_serving.py
python
InstallTensorFlowServingAPI
(vm)
Installs TF Serving API on the vm. Currently this is only useful so that the clients can run python scripts that import tensorflow_serving. The server vms make no use of it. Args: vm: VM to operate on.
Installs TF Serving API on the vm.
[ "Installs", "TF", "Serving", "API", "on", "the", "vm", "." ]
def InstallTensorFlowServingAPI(vm):
    """Installs TF Serving API on the vm.

    Currently this is only useful so that the clients can run python
    scripts that import tensorflow_serving. The server vms make no use of it.

    Args:
      vm: VM to operate on.
    """
    pip_package_output_dir = posixpath.join(VM_TMP_DIR, 'tf_serving_pip_package')
    pip_package = posixpath.join(pip_package_output_dir,
                                 'tensorflow_serving_api*.whl')

    vm.Install('pip3')
    vm.RemoteCommand('sudo pip3 install --upgrade pip')

    # Build the pip package from the same source as the serving binary
    vm.RemoteCommand('sudo docker run --rm -v {0}:{0} '
                     'benchmarks/tensorflow-serving-devel '
                     'bash -c "bazel build --config=nativeopt '
                     'tensorflow_serving/tools/pip_package:build_pip_package && '
                     'bazel-bin/tensorflow_serving/tools/pip_package/'
                     'build_pip_package {0}"'.format(pip_package_output_dir))

    vm.RemoteCommand('sudo pip3 install {0}'.format(pip_package))
[ "def", "InstallTensorFlowServingAPI", "(", "vm", ")", ":", "pip_package_output_dir", "=", "posixpath", ".", "join", "(", "VM_TMP_DIR", ",", "'tf_serving_pip_package'", ")", "pip_package", "=", "posixpath", ".", "join", "(", "pip_package_output_dir", ",", "'tensorflow_serving_api*.whl'", ")", "vm", ".", "Install", "(", "'pip3'", ")", "vm", ".", "RemoteCommand", "(", "'sudo pip3 install --upgrade pip'", ")", "# Build the pip package from the same source as the serving binary", "vm", ".", "RemoteCommand", "(", "'sudo docker run --rm -v {0}:{0} '", "'benchmarks/tensorflow-serving-devel '", "'bash -c \"bazel build --config=nativeopt '", "'tensorflow_serving/tools/pip_package:build_pip_package && '", "'bazel-bin/tensorflow_serving/tools/pip_package/'", "'build_pip_package {0}\"'", ".", "format", "(", "pip_package_output_dir", ")", ")", "vm", ".", "RemoteCommand", "(", "'sudo pip3 install {0}'", ".", "format", "(", "pip_package", ")", ")" ]
https://github.com/GoogleCloudPlatform/PerfKitBenchmarker/blob/6e3412d7d5e414b8ca30ed5eaf970cef1d919a67/perfkitbenchmarker/linux_packages/tensorflow_serving.py#L32-L58
juhakivekas/multidiff
b22b5e202b1ce1a93657f6549e893242f9302f5a
multidiff/Multidiffmodel.py
python
MultidiffModel.diff_baseline
(self, baseline=0)
Diff all objects against a common baseline
Diff all objects against a common baseline
[ "Diff", "all", "objects", "against", "a", "common", "baseline" ]
def diff_baseline(self, baseline=0):
    """Diff all objects against a common baseline"""
    for i in range(len(self.objects)):
        # bug fix: the original read `if i is baseline: pass`, which compared
        # ints by identity and never actually skipped the baseline object
        if i == baseline:
            continue
        self.diff(baseline, i)
[ "def", "diff_baseline", "(", "self", ",", "baseline", "=", "0", ")", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "objects", ")", ")", ":", "if", "i", "is", "baseline", ":", "pass", "self", ".", "diff", "(", "baseline", ",", "i", ")" ]
https://github.com/juhakivekas/multidiff/blob/b22b5e202b1ce1a93657f6549e893242f9302f5a/multidiff/Multidiffmodel.py#L53-L58
lkiesow/python-feedgen
ffe3e4d752ac76e23c879c35682c310c2b1ccb86
feedgen/feed.py
python
FeedGenerator.register_extension
(self, namespace, extension_class_feed=None, extension_class_entry=None, atom=True, rss=True)
Registers an extension by class. :param namespace: namespace for the extension :param extension_class_feed: Class of the feed extension to load. :param extension_class_entry: Class of the entry extension to load :param atom: If the extension should be used for ATOM feeds. :param rss: If the extension should be used for RSS feeds.
Registers an extension by class.
[ "Registers", "an", "extension", "by", "class", "." ]
def register_extension(self, namespace, extension_class_feed=None,
                       extension_class_entry=None, atom=True, rss=True):
    '''Registers an extension by class.

    :param namespace: namespace for the extension
    :param extension_class_feed: Class of the feed extension to load.
    :param extension_class_entry: Class of the entry extension to load
    :param atom: If the extension should be used for ATOM feeds.
    :param rss: If the extension should be used for RSS feeds.
    '''
    # Check loaded extensions
    # `load_extension` ignores the "Extension" suffix.
    if not isinstance(self.__extensions, dict):
        self.__extensions = {}
    if namespace in self.__extensions.keys():
        raise ImportError('Extension already loaded')

    # Load extension
    extinst = extension_class_feed()
    setattr(self, namespace, extinst)

    # `load_extension` registry
    self.__extensions[namespace] = {
        'inst': extinst,
        'extension_class_feed': extension_class_feed,
        'extension_class_entry': extension_class_entry,
        'atom': atom,
        'rss': rss
    }

    # Try to load the extension for already existing entries:
    for entry in self.__feed_entries:
        try:
            entry.register_extension(namespace,
                                     extension_class_entry,
                                     atom,
                                     rss)
        except ImportError:
            pass
[ "def", "register_extension", "(", "self", ",", "namespace", ",", "extension_class_feed", "=", "None", ",", "extension_class_entry", "=", "None", ",", "atom", "=", "True", ",", "rss", "=", "True", ")", ":", "# Check loaded extensions", "# `load_extension` ignores the \"Extension\" suffix.", "if", "not", "isinstance", "(", "self", ".", "__extensions", ",", "dict", ")", ":", "self", ".", "__extensions", "=", "{", "}", "if", "namespace", "in", "self", ".", "__extensions", ".", "keys", "(", ")", ":", "raise", "ImportError", "(", "'Extension already loaded'", ")", "# Load extension", "extinst", "=", "extension_class_feed", "(", ")", "setattr", "(", "self", ",", "namespace", ",", "extinst", ")", "# `load_extension` registry", "self", ".", "__extensions", "[", "namespace", "]", "=", "{", "'inst'", ":", "extinst", ",", "'extension_class_feed'", ":", "extension_class_feed", ",", "'extension_class_entry'", ":", "extension_class_entry", ",", "'atom'", ":", "atom", ",", "'rss'", ":", "rss", "}", "# Try to load the extension for already existing entries:", "for", "entry", "in", "self", ".", "__feed_entries", ":", "try", ":", "entry", ".", "register_extension", "(", "namespace", ",", "extension_class_entry", ",", "atom", ",", "rss", ")", "except", "ImportError", ":", "pass" ]
https://github.com/lkiesow/python-feedgen/blob/ffe3e4d752ac76e23c879c35682c310c2b1ccb86/feedgen/feed.py#L1138-L1176
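A hedged usage sketch for register_extension, assuming feedgen's bundled Dublin Core extension module (feedgen.ext.dc) and its DcExtension / DcEntryExtension classes; load_extension('dc') would perform the same registration by name:

    from feedgen.feed import FeedGenerator
    from feedgen.ext.dc import DcExtension, DcEntryExtension

    fg = FeedGenerator()
    fg.register_extension('dc', DcExtension, DcEntryExtension)
    fg.dc.dc_creator('Jane Doe')   # the instance is exposed as fg.<namespace>
    print(fg.dc.dc_creator())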
cython/cython
9db1fc39b31b7b3b2ed574a79f5f9fd980ee3be7
Cython/Compiler/Optimize.py
python
FinalOptimizePhase.visit_PyTypeTestNode
(self, node)
return node
Remove tests for alternatively allowed None values from type tests when we know that the argument cannot be None anyway.
Remove tests for alternatively allowed None values from type tests when we know that the argument cannot be None anyway.
[ "Remove", "tests", "for", "alternatively", "allowed", "None", "values", "from", "type", "tests", "when", "we", "know", "that", "the", "argument", "cannot", "be", "None", "anyway", "." ]
def visit_PyTypeTestNode(self, node):
    """Remove tests for alternatively allowed None values from type tests
    when we know that the argument cannot be None anyway.
    """
    self.visitchildren(node)
    if not node.notnone:
        if not node.arg.may_be_none():
            node.notnone = True
    return node
[ "def", "visit_PyTypeTestNode", "(", "self", ",", "node", ")", ":", "self", ".", "visitchildren", "(", "node", ")", "if", "not", "node", ".", "notnone", ":", "if", "not", "node", ".", "arg", ".", "may_be_none", "(", ")", ":", "node", ".", "notnone", "=", "True", "return", "node" ]
https://github.com/cython/cython/blob/9db1fc39b31b7b3b2ed574a79f5f9fd980ee3be7/Cython/Compiler/Optimize.py#L5000-L5009
DonnchaC/shadowbrokers-exploits
42d8265db860b634717da4faa668b2670457cf7e
windows/fuzzbunch/log.py
python
log.socket_closed
(self, port, ip='0.0.0.0', project=None, is_tcp=None, is_udp=None, is_raw=None, **params)
Close a local socket and any open connections.
Close a local socket and any open connections.
[ "Close", "a", "local", "socket", "and", "any", "open", "connections", "." ]
def socket_closed(self, port, ip='0.0.0.0', project=None,
                  is_tcp=None, is_udp=None, is_raw=None, **params):
    ''' Close a local socket and any open connections. '''
    if is_raw or is_tcp or is_udp:
        return self(event_type='socket closed',
                    socket_port=port,
                    socket_ip=ip,
                    socket_project=project,
                    socket_is_raw=is_raw,
                    socket_is_tcp=is_tcp,
                    socket_is_udp=is_udp,
                    **params)
    else:
        self.notify_of_error('Could not close socket. No socket type specified.')
[ "def", "socket_closed", "(", "self", ",", "port", ",", "ip", "=", "'0.0.0.0'", ",", "project", "=", "None", ",", "is_tcp", "=", "None", ",", "is_udp", "=", "None", ",", "is_raw", "=", "None", ",", "*", "*", "params", ")", ":", "if", "is_raw", "or", "is_tcp", "or", "is_udp", ":", "return", "self", "(", "event_type", "=", "'socket closed'", ",", "socket_port", "=", "port", ",", "socket_ip", "=", "ip", ",", "socket_project", "=", "project", ",", "socket_is_raw", "=", "is_raw", ",", "socket_is_tcp", "=", "is_tcp", ",", "socket_is_udp", "=", "is_udp", ",", "*", "*", "params", ")", "else", ":", "self", ".", "notify_of_error", "(", "'Could not close socket. No socket type specified.'", ")" ]
https://github.com/DonnchaC/shadowbrokers-exploits/blob/42d8265db860b634717da4faa668b2670457cf7e/windows/fuzzbunch/log.py#L181-L184
giantbranch/python-hacker-code
addbc8c73e7e6fb9e4fcadcec022fa1d3da4b96d
我手敲的代码(中文注释)/chapter9/pycrypto-2.6.1/build/lib.win32-2.7/Crypto/Random/random.py
python
StrongRandom.choice
(self, seq)
return seq[self.randrange(len(seq))]
Return a random element from a (non-empty) sequence. If the sequence is empty, raises IndexError.
Return a random element from a (non-empty) sequence.
[ "Return", "a", "random", "element", "from", "a", "(", "non", "-", "empty", ")", "sequence", "." ]
def choice(self, seq):
    """Return a random element from a (non-empty) sequence.

    If the sequence is empty, raises IndexError.
    """
    if len(seq) == 0:
        raise IndexError("empty sequence")
    return seq[self.randrange(len(seq))]
[ "def", "choice", "(", "self", ",", "seq", ")", ":", "if", "len", "(", "seq", ")", "==", "0", ":", "raise", "IndexError", "(", "\"empty sequence\"", ")", "return", "seq", "[", "self", ".", "randrange", "(", "len", "(", "seq", ")", ")", "]" ]
https://github.com/giantbranch/python-hacker-code/blob/addbc8c73e7e6fb9e4fcadcec022fa1d3da4b96d/我手敲的代码(中文注释)/chapter9/pycrypto-2.6.1/build/lib.win32-2.7/Crypto/Random/random.py#L95-L102
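A short usage sketch for the module above (PyCrypto's Crypto.Random.random); StrongRandom draws from the OS-backed Crypto.Random pool instead of the stdlib Mersenne Twister:

    from Crypto.Random.random import StrongRandom

    rng = StrongRandom()
    print(rng.choice(['heads', 'tails']))
    try:
        rng.choice([])            # empty sequence
    except IndexError as err:
        print(err)                # "empty sequence"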
DiligentPanda/Tencent_Ads_Algo_2018
dc1aa2285e196e87d100b7e6769f3c97c19e394f
src/lib/lr_scheduler.py
python
adjust_learning_rate_custom
(base_lr, optimizer, cur_batch, func)
:param base_lr: base learning rate :param optimizer: optimizer whose param_groups each carry an 'lr_mult' factor :param cur_batch: current batch index :param func: callable mapping (base_lr, cur_batch) to the new learning rate :return: None
[]
def adjust_learning_rate_custom(base_lr, optimizer, cur_batch, func):
    '''
    :param base_lr: base learning rate (the docstring originally said ``lr``,
        which does not match the signature)
    :param optimizer: optimizer whose param_groups each carry an 'lr_mult' factor
    :param cur_batch: current batch index
    :param func: callable mapping (base_lr, cur_batch) to the new learning rate
    :return: None
    '''
    lr = func(base_lr, cur_batch)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr * param_group['lr_mult']
[ "def", "adjust_learning_rate_custom", "(", "base_lr", ",", "optimizer", ",", "cur_batch", ",", "func", ")", ":", "lr", "=", "func", "(", "base_lr", ",", "cur_batch", ")", "for", "param_group", "in", "optimizer", ".", "param_groups", ":", "param_group", "[", "'lr'", "]", "=", "lr", "*", "param_group", "[", "'lr_mult'", "]" ]
https://github.com/DiligentPanda/Tencent_Ads_Algo_2018/blob/dc1aa2285e196e87d100b7e6769f3c97c19e394f/src/lib/lr_scheduler.py#L21-L32
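A hedged usage sketch, assuming adjust_learning_rate_custom from the record above is in scope. The SGD optimizer and the exponential schedule are hypothetical; the only real requirement the function imposes is that every param_group carries an 'lr_mult' key:

    import torch

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(
        [{'params': model.parameters(), 'lr': 0.1, 'lr_mult': 1.0}], lr=0.1)

    def exp_decay(base_lr, cur_batch):
        return base_lr * (0.97 ** cur_batch)

    adjust_learning_rate_custom(0.1, optimizer, cur_batch=100, func=exp_decay)
    print(optimizer.param_groups[0]['lr'])   # 0.1 * 0.97**100 * lr_mult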
Asana/python-asana
9b54ab99423208bd6aa87dbfaa628c069430b127
asana/resources/gen/sections.py
python
_Sections.get_sections_for_project
(self, project_gid, params=None, **options)
return self.client.get_collection(path, params, **options)
Get sections in a project :param str project_gid: (required) Globally unique identifier for the project. :param Object params: Parameters for the request :param **options - offset {str}: Offset token. An offset to the next page returned by the API. A pagination request will return an offset token, which can be used as an input parameter to the next request. If an offset is not passed in, the API will return the first page of results. 'Note: You can only pass in an offset that was returned to you via a previously paginated request.' - limit {int}: Results per page. The number of objects to return per page. The value must be between 1 and 100. - opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options. - opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging. :return: Object
Get sections in a project :param str project_gid: (required) Globally unique identifier for the project. :param Object params: Parameters for the request :param **options - offset {str}: Offset token. An offset to the next page returned by the API. A pagination request will return an offset token, which can be used as an input parameter to the next request. If an offset is not passed in, the API will return the first page of results. 'Note: You can only pass in an offset that was returned to you via a previously paginated request.' - limit {int}: Results per page. The number of objects to return per page. The value must be between 1 and 100. - opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options. - opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging. :return: Object
[ "Get", "sections", "in", "a", "project", ":", "param", "str", "project_gid", ":", "(", "required", ")", "Globally", "unique", "identifier", "for", "the", "project", ".", ":", "param", "Object", "params", ":", "Parameters", "for", "the", "request", ":", "param", "**", "options", "-", "offset", "{", "str", "}", ":", "Offset", "token", ".", "An", "offset", "to", "the", "next", "page", "returned", "by", "the", "API", ".", "A", "pagination", "request", "will", "return", "an", "offset", "token", "which", "can", "be", "used", "as", "an", "input", "parameter", "to", "the", "next", "request", ".", "If", "an", "offset", "is", "not", "passed", "in", "the", "API", "will", "return", "the", "first", "page", "of", "results", ".", "Note", ":", "You", "can", "only", "pass", "in", "an", "offset", "that", "was", "returned", "to", "you", "via", "a", "previously", "paginated", "request", ".", "-", "limit", "{", "int", "}", ":", "Results", "per", "page", ".", "The", "number", "of", "objects", "to", "return", "per", "page", ".", "The", "value", "must", "be", "between", "1", "and", "100", ".", "-", "opt_fields", "{", "list", "[", "str", "]", "}", ":", "Defines", "fields", "to", "return", ".", "Some", "requests", "return", "*", "compact", "*", "representations", "of", "objects", "in", "order", "to", "conserve", "resources", "and", "complete", "the", "request", "more", "efficiently", ".", "Other", "times", "requests", "return", "more", "information", "than", "you", "may", "need", ".", "This", "option", "allows", "you", "to", "list", "the", "exact", "set", "of", "fields", "that", "the", "API", "should", "be", "sure", "to", "return", "for", "the", "objects", ".", "The", "field", "names", "should", "be", "provided", "as", "paths", "described", "below", ".", "The", "id", "of", "included", "objects", "will", "always", "be", "returned", "regardless", "of", "the", "field", "options", ".", "-", "opt_pretty", "{", "bool", "}", ":", "Provides", "“pretty”", "output", ".", "Provides", "the", "response", "in", "a", "“pretty”", "format", ".", "In", "the", "case", "of", "JSON", "this", "means", "doing", "proper", "line", "breaking", "and", "indentation", "to", "make", "it", "readable", ".", "This", "will", "take", "extra", "time", "and", "increase", "the", "response", "size", "so", "it", "is", "advisable", "only", "to", "use", "this", "during", "debugging", ".", ":", "return", ":", "Object" ]
def get_sections_for_project(self, project_gid, params=None, **options):
    """Get sections in a project

    :param str project_gid: (required) Globally unique identifier for the project.
    :param Object params: Parameters for the request
    :param **options
      - offset {str}: Offset token. An offset to the next page returned by the API.
        A pagination request will return an offset token, which can be used as an
        input parameter to the next request. If an offset is not passed in, the API
        will return the first page of results. 'Note: You can only pass in an offset
        that was returned to you via a previously paginated request.'
      - limit {int}: Results per page. The number of objects to return per page.
        The value must be between 1 and 100.
      - opt_fields {list[str]}: Defines fields to return. Some requests return
        *compact* representations of objects in order to conserve resources and
        complete the request more efficiently. Other times requests return more
        information than you may need. This option allows you to list the exact set
        of fields that the API should be sure to return for the objects. The field
        names should be provided as paths, described below. The id of included
        objects will always be returned, regardless of the field options.
      - opt_pretty {bool}: Provides “pretty” output. Provides the response in a
        “pretty” format. In the case of JSON this means doing proper line breaking
        and indentation to make it readable. This will take extra time and increase
        the response size so it is advisable only to use this during debugging.
    :return: Object
    """
    if params is None:
        params = {}
    path = "/projects/{project_gid}/sections".replace("{project_gid}", project_gid)
    return self.client.get_collection(path, params, **options)
[ "def", "get_sections_for_project", "(", "self", ",", "project_gid", ",", "params", "=", "None", ",", "*", "*", "options", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "path", "=", "\"/projects/{project_gid}/sections\"", ".", "replace", "(", "\"{project_gid}\"", ",", "project_gid", ")", "return", "self", ".", "client", ".", "get_collection", "(", "path", ",", "params", ",", "*", "*", "options", ")" ]
https://github.com/Asana/python-asana/blob/9b54ab99423208bd6aa87dbfaa628c069430b127/asana/resources/gen/sections.py#L63-L77
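A hedged usage sketch against the python-asana client; the access token and project gid are placeholders. get_collection returns an iterable that, by default, follows the offset tokens described in the docstring:

    import asana

    client = asana.Client.access_token('<personal-access-token>')
    sections = client.sections.get_sections_for_project(
        '<project-gid>', opt_fields=['name'], limit=50)
    for section in sections:
        print(section['gid'], section['name'])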
Staffjoy/suite
14ed49b21cf8296d2e0696a7f50f91f8e4b65072
app/limiters.py
python
BaseNotificationLimiter.allowed_to_send
(cls, user)
determines if a notification is able to be sent
determines if a notification is able to be sent
[ "determines", "if", "a", "notification", "is", "able", "to", "be", "sent" ]
def allowed_to_send(cls, user):
    """ determines if a notification is able to be sent """
    try:
        last_reminder = cls.get(user.id)
        last_reminder = iso8601.parse_date(last_reminder).replace(tzinfo=None)
    except:
        last_reminder = None

    if not last_reminder:
        return True

    if cls.COMPARE_LAST_SEEN:
        # Still allowed to send if user has been active
        # since last notification.
        if user.last_seen > last_reminder:
            cls.delete(user.id)
            return True

    # redis doesn't guarantee that the key isn't expired exactly at expiration time
    # check if it *should* have been expired
    if last_reminder + timedelta(seconds=cls.EXPIRY) < datetime.utcnow():
        cls.delete(user.id)
        return True

    if cls.WRITE_LOG:
        current_app.logger.info(
            "Not sending %s notification to user %s because it was sent recently"
            % (cls.NAME, user.id))

    return False
[ "def", "allowed_to_send", "(", "cls", ",", "user", ")", ":", "try", ":", "last_reminder", "=", "cls", ".", "get", "(", "user", ".", "id", ")", "last_reminder", "=", "iso8601", ".", "parse_date", "(", "last_reminder", ")", ".", "replace", "(", "tzinfo", "=", "None", ")", "except", ":", "last_reminder", "=", "None", "if", "not", "last_reminder", ":", "return", "True", "if", "cls", ".", "COMPARE_LAST_SEEN", ":", "# Still allowed to send if user has been active", "# since last notification.", "if", "user", ".", "last_seen", ">", "last_reminder", ":", "cls", ".", "delete", "(", "user", ".", "id", ")", "return", "True", "# redis doesn't guarantee that the key isn't expired exactly at expiration time", "# check if it *should* have been expired", "if", "last_reminder", "+", "timedelta", "(", "seconds", "=", "cls", ".", "EXPIRY", ")", "<", "datetime", ".", "utcnow", "(", ")", ":", "cls", ".", "delete", "(", "user", ".", "id", ")", "return", "True", "if", "cls", ".", "WRITE_LOG", ":", "current_app", ".", "logger", ".", "info", "(", "\"Not sending %s notification to user %s because it was sent recently\"", "%", "(", "cls", ".", "NAME", ",", "user", ".", "id", ")", ")", "return", "False" ]
https://github.com/Staffjoy/suite/blob/14ed49b21cf8296d2e0696a7f50f91f8e4b65072/app/limiters.py#L26-L58
JacquesLucke/animation_nodes
b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1
animation_nodes/base_types/nodes/base_node.py
python
AnimationNode.getBakeCode
(self)
return []
[]
def getBakeCode(self):
    return []
[ "def", "getBakeCode", "(", "self", ")", ":", "return", "[", "]" ]
https://github.com/JacquesLucke/animation_nodes/blob/b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1/animation_nodes/base_types/nodes/base_node.py#L135-L136
ansible/ansible-modules-extras
f216ba8e0616bc8ad8794c22d4b48e1ab18886cf
packaging/os/zypper.py
python
package_present
(m, name, want_latest)
return result, retvals
install and update (if want_latest) the packages in name_install, while removing the packages in name_remove
install and update (if want_latest) the packages in name_install, while removing the packages in name_remove
[ "install", "and", "update", "(", "if", "want_latest", ")", "the", "packages", "in", "name_install", "while", "removing", "the", "packages", "in", "name_remove" ]
def package_present(m, name, want_latest):
    "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove"
    retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
    name_install, name_remove, urls = get_want_state(m, name)

    # if a version string is given, pass it to zypper
    install_version = [p + name_install[p] for p in name_install if name_install[p]]
    remove_version = [p + name_remove[p] for p in name_remove if name_remove[p]]

    # add oldpackage flag when a version is given to allow downgrades
    if install_version or remove_version:
        m.params['oldpackage'] = True

    if not want_latest:
        # for state=present: filter out already installed packages
        install_and_remove = name_install.copy()
        install_and_remove.update(name_remove)
        prerun_state = get_installed_state(m, install_and_remove)
        # generate lists of packages to install or remove
        name_install = [p for p in name_install if p not in prerun_state]
        name_remove = [p for p in name_remove if p in prerun_state]

    if not any((name_install, name_remove, urls, install_version, remove_version)):
        # nothing to install/remove and nothing to update
        return None, retvals

    # zypper install also updates packages
    cmd = get_cmd(m, 'install')
    cmd.append('--')
    cmd.extend(urls)

    # pass packages with version information
    cmd.extend(install_version)
    cmd.extend(['-%s' % p for p in remove_version])

    # allow for + or - prefixes in install/remove lists
    # do this in one zypper run to allow for dependency-resolution
    # for example "-exim postfix" runs without removing packages depending on mailserver
    cmd.extend(name_install)
    cmd.extend(['-%s' % p for p in name_remove])

    retvals['cmd'] = cmd
    result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)

    return result, retvals
[ "def", "package_present", "(", "m", ",", "name", ",", "want_latest", ")", ":", "retvals", "=", "{", "'rc'", ":", "0", ",", "'stdout'", ":", "''", ",", "'stderr'", ":", "''", "}", "name_install", ",", "name_remove", ",", "urls", "=", "get_want_state", "(", "m", ",", "name", ")", "# if a version string is given, pass it to zypper", "install_version", "=", "[", "p", "+", "name_install", "[", "p", "]", "for", "p", "in", "name_install", "if", "name_install", "[", "p", "]", "]", "remove_version", "=", "[", "p", "+", "name_remove", "[", "p", "]", "for", "p", "in", "name_remove", "if", "name_remove", "[", "p", "]", "]", "# add oldpackage flag when a version is given to allow downgrades", "if", "install_version", "or", "remove_version", ":", "m", ".", "params", "[", "'oldpackage'", "]", "=", "True", "if", "not", "want_latest", ":", "# for state=present: filter out already installed packages", "install_and_remove", "=", "name_install", ".", "copy", "(", ")", "install_and_remove", ".", "update", "(", "name_remove", ")", "prerun_state", "=", "get_installed_state", "(", "m", ",", "install_and_remove", ")", "# generate lists of packages to install or remove", "name_install", "=", "[", "p", "for", "p", "in", "name_install", "if", "p", "not", "in", "prerun_state", "]", "name_remove", "=", "[", "p", "for", "p", "in", "name_remove", "if", "p", "in", "prerun_state", "]", "if", "not", "any", "(", "(", "name_install", ",", "name_remove", ",", "urls", ",", "install_version", ",", "remove_version", ")", ")", ":", "# nothing to install/remove and nothing to update", "return", "None", ",", "retvals", "# zypper install also updates packages", "cmd", "=", "get_cmd", "(", "m", ",", "'install'", ")", "cmd", ".", "append", "(", "'--'", ")", "cmd", ".", "extend", "(", "urls", ")", "# pass packages with version information", "cmd", ".", "extend", "(", "install_version", ")", "cmd", ".", "extend", "(", "[", "'-%s'", "%", "p", "for", "p", "in", "remove_version", "]", ")", "# allow for + or - prefixes in install/remove lists", "# do this in one zypper run to allow for dependency-resolution", "# for example \"-exim postfix\" runs without removing packages depending on mailserver", "cmd", ".", "extend", "(", "name_install", ")", "cmd", ".", "extend", "(", "[", "'-%s'", "%", "p", "for", "p", "in", "name_remove", "]", ")", "retvals", "[", "'cmd'", "]", "=", "cmd", "result", ",", "retvals", "[", "'rc'", "]", ",", "retvals", "[", "'stdout'", "]", ",", "retvals", "[", "'stderr'", "]", "=", "parse_zypper_xml", "(", "m", ",", "cmd", ")", "return", "result", ",", "retvals" ]
https://github.com/ansible/ansible-modules-extras/blob/f216ba8e0616bc8ad8794c22d4b48e1ab18886cf/packaging/os/zypper.py#L321-L364
avocado-framework/avocado
1f9b3192e8ba47d029c33fe21266bd113d17811f
avocado/utils/archive.py
python
ArchiveFile.close
(self)
Close archive.
Close archive.
[ "Close", "archive", "." ]
def close(self):
    """
    Close archive.
    """
    self._engine.close()
[ "def", "close", "(", "self", ")", ":", "self", ".", "_engine", ".", "close", "(", ")" ]
https://github.com/avocado-framework/avocado/blob/1f9b3192e8ba47d029c33fe21266bd113d17811f/avocado/utils/archive.py#L235-L239
bridgecrewio/checkov
f4f8caead6aa2f1824ae1cc88cd1816b12211629
checkov/terraform/module_loading/content.py
python
ModuleContent.cleanup
(self)
Clean up any temporary resources, if applicable.
Clean up any temporary resources, if applicable.
[ "Clean", "up", "any", "temporary", "resources", "if", "applicable", "." ]
def cleanup(self) -> None:
    """
    Clean up any temporary resources, if applicable.
    """
    if isinstance(self.dir, tempfile.TemporaryDirectory):
        self.dir.cleanup()
[ "def", "cleanup", "(", "self", ")", "->", "None", ":", "if", "isinstance", "(", "self", ".", "dir", ",", "tempfile", ".", "TemporaryDirectory", ")", ":", "self", ".", "dir", ".", "cleanup", "(", ")" ]
https://github.com/bridgecrewio/checkov/blob/f4f8caead6aa2f1824ae1cc88cd1816b12211629/checkov/terraform/module_loading/content.py#L32-L37
Delta-ML/delta
31dfebc8f20b7cb282b62f291ff25a87e403cc86
delta/data/feat/speech_feature.py
python
load_wav
(wavpath, sr=8000)
return sample_rate, audio
audio: np.float32, shape [None], sample in [-1, 1], using librosa.load np.int16, shape [None], sample in [-32768, 32767], using scipy.io.wavfile np.float32, shape [None, audio_channel], sample in [-1, 1], using tf.DecodeWav return sr: sample rate audio: [-1, 1], same as tf.DecodeWav
audio: np.float32, shape [None], sample in [-1, 1], using librosa.load np.int16, shape [None], sample in [-32768, 32767], using scipy.io.wavfile np.float32, shape [None, audio_channel], sample in [-1, 1], using tf.DecodeWav
[ "audio", ":", "np", ".", "float32", "shape", "[", "None", "]", "sample", "in", "[", "-", "1", "1", "]", "using", "librosa", ".", "load", "np", ".", "int16", "shape", "[", "None", "]", "sample", "in", "[", "-", "32768", "32767", "]", "using", "scipy", ".", "io", ".", "wavfile", "np", ".", "float32", "shape", "[", "None", "audio_channel", "]", "sample", "int", "[", "-", "1", "1", "]", "using", "tf", ".", "DecodeWav" ]
def load_wav(wavpath, sr=8000):
    '''
    audio:
      np.float32, shape [None], sample in [-1, 1], using librosa.load
      np.int16, shape [None], sample in [-32768, 32767], using scipy.io.wavfile
      np.float32, shape [None, audio_channel], sample in [-1, 1], using tf.DecodeWav
    return
      sr: sample rate
      audio: [-1, 1], same as tf.DecodeWav
    '''
    #from scipy.io import wavfile
    #sample_rate, audio = wavfile.read(wavpath)
    #samples, sample_rate = librosa.load(wavpath, sr=sr)

    feat_name = 'load_wav'
    graph = None
    # get session
    if feat_name not in _global_sess:
        graph = tf.Graph()
        with graph.as_default():
            params = speech_ops.speech_params(sr=sr, audio_desired_samples=-1)
            t_wavpath = tf.placeholder(dtype=tf.string, name="wavpath")
            t_audio, t_sample_rate = speech_ops.read_wav(t_wavpath, params)
            t_audio = tf.identity(t_audio, name="audio")
            t_sample_rate = tf.identity(t_sample_rate, name="sample_rate")

    sess = _get_session(feat_name, graph)

    audio, sample_rate = sess.run([
        _get_out_tensor_name('audio', 0),
        _get_out_tensor_name('sample_rate', 0)
    ], feed_dict={"wavpath:0": wavpath})

    audio = audio[:, 0]

    assert sample_rate == sr, 'sampling rate must be {}Hz, get {}Hz'.format(
        sr, sample_rate)

    return sample_rate, audio
[ "def", "load_wav", "(", "wavpath", ",", "sr", "=", "8000", ")", ":", "#from scipy.io import wavfile", "#sample_rate, audio = wavfile.read(wavpath)", "#samples, sample_rate = librosa.load(wavpath, sr=sr)", "feat_name", "=", "'load_wav'", "graph", "=", "None", "# get session", "if", "feat_name", "not", "in", "_global_sess", ":", "graph", "=", "tf", ".", "Graph", "(", ")", "with", "graph", ".", "as_default", "(", ")", ":", "params", "=", "speech_ops", ".", "speech_params", "(", "sr", "=", "sr", ",", "audio_desired_samples", "=", "-", "1", ")", "t_wavpath", "=", "tf", ".", "placeholder", "(", "dtype", "=", "tf", ".", "string", ",", "name", "=", "\"wavpath\"", ")", "t_audio", ",", "t_sample_rate", "=", "speech_ops", ".", "read_wav", "(", "t_wavpath", ",", "params", ")", "t_audio", "=", "tf", ".", "identity", "(", "t_audio", ",", "name", "=", "\"audio\"", ")", "t_sample_rate", "=", "tf", ".", "identity", "(", "t_sample_rate", ",", "name", "=", "\"sample_rate\"", ")", "sess", "=", "_get_session", "(", "feat_name", ",", "graph", ")", "audio", ",", "sample_rate", "=", "sess", ".", "run", "(", "[", "_get_out_tensor_name", "(", "'audio'", ",", "0", ")", ",", "_get_out_tensor_name", "(", "'sample_rate'", ",", "0", ")", "]", ",", "feed_dict", "=", "{", "\"wavpath:0\"", ":", "wavpath", "}", ")", "audio", "=", "audio", "[", ":", ",", "0", "]", "assert", "sample_rate", "==", "sr", ",", "'sampling rate must be {}Hz, get {}Hz'", ".", "format", "(", "sr", ",", "sample_rate", ")", "return", "sample_rate", ",", "audio" ]
https://github.com/Delta-ML/delta/blob/31dfebc8f20b7cb282b62f291ff25a87e403cc86/delta/data/feat/speech_feature.py#L154-L192
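A short usage sketch for load_wav, assuming it is in scope from the record above; 'example.wav' is a placeholder path, and the assert means the file's sample rate must already match sr:

    sample_rate, audio = load_wav('example.wav', sr=8000)
    print(sample_rate, audio.shape, audio.dtype)   # 8000, (num_samples,), float32 in [-1, 1]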
iopsgroup/imoocc
de810eb6d4c1697b7139305925a5b0ba21225f3f
scanhosts/modules/paramiko1_9/sftp_handle.py
python
SFTPHandle.stat
(self)
return SFTP_OP_UNSUPPORTED
Return an L{SFTPAttributes} object referring to this open file, or an error code. This is equivalent to L{SFTPServerInterface.stat}, except it's called on an open file instead of a path. @return: an attributes object for the given file, or an SFTP error code (like L{SFTP_PERMISSION_DENIED}). @rtype: L{SFTPAttributes} I{or error code}
Return an L{SFTPAttributes} object referring to this open file, or an error code. This is equivalent to L{SFTPServerInterface.stat}, except it's called on an open file instead of a path.
[ "Return", "an", "L", "{", "SFTPAttributes", "}", "object", "referring", "to", "this", "open", "file", "or", "an", "error", "code", ".", "This", "is", "equivalent", "to", "L", "{", "SFTPServerInterface", ".", "stat", "}", "except", "it", "s", "called", "on", "an", "open", "file", "instead", "of", "a", "path", "." ]
def stat(self):
    """
    Return an L{SFTPAttributes} object referring to this open file, or an
    error code.  This is equivalent to L{SFTPServerInterface.stat}, except
    it's called on an open file instead of a path.

    @return: an attributes object for the given file, or an SFTP error
        code (like L{SFTP_PERMISSION_DENIED}).
    @rtype: L{SFTPAttributes} I{or error code}
    """
    return SFTP_OP_UNSUPPORTED
[ "def", "stat", "(", "self", ")", ":", "return", "SFTP_OP_UNSUPPORTED" ]
https://github.com/iopsgroup/imoocc/blob/de810eb6d4c1697b7139305925a5b0ba21225f3f/scanhosts/modules/paramiko1_9/sftp_handle.py#L149-L159
FederatedAI/FATE
32540492623568ecd1afcb367360133616e02fa3
python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/splitter.py
python
Splitter.find_split_single_histogram_guest
(self, histogram, valid_features, sitename, use_missing, zero_as_missing, reshape_tuple=None)
return splitinfo
[]
def find_split_single_histogram_guest(self, histogram, valid_features, sitename,
                                      use_missing, zero_as_missing, reshape_tuple=None):
    if reshape_tuple:
        histogram = histogram.reshape(reshape_tuple)

    # default values
    best_fid = None
    best_gain = self.min_impurity_split - consts.FLOAT_ZERO
    best_bid = None
    best_sum_grad_l = None
    best_sum_hess_l = None
    missing_bin = 0
    if use_missing:
        missing_bin = 1

    # by default, missing values go to the right child
    missing_dir = 1

    for fid in range(len(histogram)):
        if valid_features[fid] is False:
            continue
        bin_num = len(histogram[fid])
        if bin_num == 0 + missing_bin:
            continue

        # last bin contains sum values (cumsum from left)
        sum_grad = histogram[fid][bin_num - 1][0]
        sum_hess = histogram[fid][bin_num - 1][1]
        node_cnt = histogram[fid][bin_num - 1][2]

        if node_cnt < self.min_sample_split:
            break

        # last bin will not participate in split find, so bin_num - 1
        for bid in range(bin_num - missing_bin - 1):
            # left gh
            sum_grad_l = histogram[fid][bid][0]
            sum_hess_l = histogram[fid][bid][1]
            node_cnt_l = histogram[fid][bid][2]
            # right gh
            sum_grad_r = sum_grad - sum_grad_l
            sum_hess_r = sum_hess - sum_hess_l
            node_cnt_r = node_cnt - node_cnt_l

            if self._check_sample_num(node_cnt_l, node_cnt_r) and self._check_min_child_weight(sum_hess_l, sum_hess_r):
                gain = self.criterion.split_gain([sum_grad, sum_hess],
                                                 [sum_grad_l, sum_hess_l],
                                                 [sum_grad_r, sum_hess_r])
                if gain > self.min_impurity_split and gain > best_gain + consts.FLOAT_ZERO:
                    best_gain = gain
                    best_fid = fid
                    best_bid = bid
                    best_sum_grad_l = sum_grad_l
                    best_sum_hess_l = sum_hess_l
                    missing_dir = 1

            # missing value handling: try dispatching missing samples to the left child
            if use_missing:
                # add sum of samples with missing features to left
                sum_grad_l += histogram[fid][-1][0] - histogram[fid][-2][0]
                sum_hess_l += histogram[fid][-1][1] - histogram[fid][-2][1]
                node_cnt_l += histogram[fid][-1][2] - histogram[fid][-2][2]

                sum_grad_r -= histogram[fid][-1][0] - histogram[fid][-2][0]
                sum_hess_r -= histogram[fid][-1][1] - histogram[fid][-2][1]
                node_cnt_r -= histogram[fid][-1][2] - histogram[fid][-2][2]

                # if this yields a better gain, missing dir is left
                if self._check_sample_num(node_cnt_l, node_cnt_r) and self._check_min_child_weight(sum_hess_l, sum_hess_r):
                    gain = self.criterion.split_gain([sum_grad, sum_hess],
                                                     [sum_grad_l, sum_hess_l],
                                                     [sum_grad_r, sum_hess_r])
                    if gain > self.min_impurity_split and gain > best_gain + consts.FLOAT_ZERO:
                        best_gain = gain
                        best_fid = fid
                        best_bid = bid
                        best_sum_grad_l = sum_grad_l
                        best_sum_hess_l = sum_hess_l
                        missing_dir = -1

    splitinfo = SplitInfo(sitename=sitename, best_fid=best_fid, best_bid=best_bid,
                          gain=best_gain, sum_grad=best_sum_grad_l,
                          sum_hess=best_sum_hess_l, missing_dir=missing_dir)

    return splitinfo
[ "def", "find_split_single_histogram_guest", "(", "self", ",", "histogram", ",", "valid_features", ",", "sitename", ",", "use_missing", ",", "zero_as_missing", ",", "reshape_tuple", "=", "None", ")", ":", "if", "reshape_tuple", ":", "histogram", "=", "histogram", ".", "reshape", "(", "reshape_tuple", ")", "# default values", "best_fid", "=", "None", "best_gain", "=", "self", ".", "min_impurity_split", "-", "consts", ".", "FLOAT_ZERO", "best_bid", "=", "None", "best_sum_grad_l", "=", "None", "best_sum_hess_l", "=", "None", "missing_bin", "=", "0", "if", "use_missing", ":", "missing_bin", "=", "1", "# in default, missing value going to right", "missing_dir", "=", "1", "for", "fid", "in", "range", "(", "len", "(", "histogram", ")", ")", ":", "if", "valid_features", "[", "fid", "]", "is", "False", ":", "continue", "bin_num", "=", "len", "(", "histogram", "[", "fid", "]", ")", "if", "bin_num", "==", "0", "+", "missing_bin", ":", "continue", "# last bin contains sum values (cumsum from left)", "sum_grad", "=", "histogram", "[", "fid", "]", "[", "bin_num", "-", "1", "]", "[", "0", "]", "sum_hess", "=", "histogram", "[", "fid", "]", "[", "bin_num", "-", "1", "]", "[", "1", "]", "node_cnt", "=", "histogram", "[", "fid", "]", "[", "bin_num", "-", "1", "]", "[", "2", "]", "if", "node_cnt", "<", "self", ".", "min_sample_split", ":", "break", "# last bin will not participate in split find, so bin_num - 1", "for", "bid", "in", "range", "(", "bin_num", "-", "missing_bin", "-", "1", ")", ":", "# left gh", "sum_grad_l", "=", "histogram", "[", "fid", "]", "[", "bid", "]", "[", "0", "]", "sum_hess_l", "=", "histogram", "[", "fid", "]", "[", "bid", "]", "[", "1", "]", "node_cnt_l", "=", "histogram", "[", "fid", "]", "[", "bid", "]", "[", "2", "]", "# right gh", "sum_grad_r", "=", "sum_grad", "-", "sum_grad_l", "sum_hess_r", "=", "sum_hess", "-", "sum_hess_l", "node_cnt_r", "=", "node_cnt", "-", "node_cnt_l", "if", "self", ".", "_check_sample_num", "(", "node_cnt_l", ",", "node_cnt_r", ")", "and", "self", ".", "_check_min_child_weight", "(", "sum_hess_l", ",", "sum_hess_r", ")", ":", "gain", "=", "self", ".", "criterion", ".", "split_gain", "(", "[", "sum_grad", ",", "sum_hess", "]", ",", "[", "sum_grad_l", ",", "sum_hess_l", "]", ",", "[", "sum_grad_r", ",", "sum_hess_r", "]", ")", "if", "gain", ">", "self", ".", "min_impurity_split", "and", "gain", ">", "best_gain", "+", "consts", ".", "FLOAT_ZERO", ":", "best_gain", "=", "gain", "best_fid", "=", "fid", "best_bid", "=", "bid", "best_sum_grad_l", "=", "sum_grad_l", "best_sum_hess_l", "=", "sum_hess_l", "missing_dir", "=", "1", "\"\"\" missing value handle: dispatch to left child\"\"\"", "if", "use_missing", ":", "# add sum of samples with missing features to left", "sum_grad_l", "+=", "histogram", "[", "fid", "]", "[", "-", "1", "]", "[", "0", "]", "-", "histogram", "[", "fid", "]", "[", "-", "2", "]", "[", "0", "]", "sum_hess_l", "+=", "histogram", "[", "fid", "]", "[", "-", "1", "]", "[", "1", "]", "-", "histogram", "[", "fid", "]", "[", "-", "2", "]", "[", "1", "]", "node_cnt_l", "+=", "histogram", "[", "fid", "]", "[", "-", "1", "]", "[", "2", "]", "-", "histogram", "[", "fid", "]", "[", "-", "2", "]", "[", "2", "]", "sum_grad_r", "-=", "histogram", "[", "fid", "]", "[", "-", "1", "]", "[", "0", "]", "-", "histogram", "[", "fid", "]", "[", "-", "2", "]", "[", "0", "]", "sum_hess_r", "-=", "histogram", "[", "fid", "]", "[", "-", "1", "]", "[", "1", "]", "-", "histogram", "[", "fid", "]", "[", "-", "2", "]", "[", "1", "]", "node_cnt_r", "-=", "histogram", 
"[", "fid", "]", "[", "-", "1", "]", "[", "2", "]", "-", "histogram", "[", "fid", "]", "[", "-", "2", "]", "[", "2", "]", "# if have a better gain value, missing dir is left", "if", "self", ".", "_check_sample_num", "(", "node_cnt_l", ",", "node_cnt_r", ")", "and", "self", ".", "_check_min_child_weight", "(", "sum_hess_l", ",", "sum_hess_r", ")", ":", "gain", "=", "self", ".", "criterion", ".", "split_gain", "(", "[", "sum_grad", ",", "sum_hess", "]", ",", "[", "sum_grad_l", ",", "sum_hess_l", "]", ",", "[", "sum_grad_r", ",", "sum_hess_r", "]", ")", "if", "gain", ">", "self", ".", "min_impurity_split", "and", "gain", ">", "best_gain", "+", "consts", ".", "FLOAT_ZERO", ":", "best_gain", "=", "gain", "best_fid", "=", "fid", "best_bid", "=", "bid", "best_sum_grad_l", "=", "sum_grad_l", "best_sum_hess_l", "=", "sum_hess_l", "missing_dir", "=", "-", "1", "splitinfo", "=", "SplitInfo", "(", "sitename", "=", "sitename", ",", "best_fid", "=", "best_fid", ",", "best_bid", "=", "best_bid", ",", "gain", "=", "best_gain", ",", "sum_grad", "=", "best_sum_grad_l", ",", "sum_hess", "=", "best_sum_hess_l", ",", "missing_dir", "=", "missing_dir", ")", "return", "splitinfo" ]
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/splitter.py#L99-L188
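The record does not show self.criterion, so here is a hedged sketch of the XGBoost-style split gain the loop above could be calling, with (g, h) the summed gradients and hessians of a node and its two candidate children:

    def xgb_split_gain(node, left, right, reg_lambda=0.1):
        """Hypothetical stand-in for criterion.split_gain: the classic
        G^2 / (H + lambda) score gained by splitting node into left/right."""
        def score(g, h):
            return g * g / (h + reg_lambda)
        (g, h), (g_l, h_l), (g_r, h_r) = node, left, right
        return score(g_l, h_l) + score(g_r, h_r) - score(g, h)

    # A partition that separates opposite-signed gradients scores high:
    print(xgb_split_gain(node=(0.0, 8.0), left=(-3.0, 4.0), right=(3.0, 4.0)))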
Nitrate/Nitrate
7eacef697a15dcbb7ae90c8a1dbf769cba6d1bb1
src/tcms/xmlrpc/serializer.py
python
XMLRPCSerializer.__init__
(self, queryset=None, model=None)
Initialize the class
Initialize the class
[ "Initialize", "the", "class" ]
def __init__(self, queryset=None, model=None):
    """Initialize the class"""
    if hasattr(queryset, "__iter__"):
        self.queryset = queryset
        return
    elif hasattr(model, "__dict__"):
        self.model = model
        return
    raise TypeError("QuerySet(list) or Models(dictionary) is required")
[ "def", "__init__", "(", "self", ",", "queryset", "=", "None", ",", "model", "=", "None", ")", ":", "if", "hasattr", "(", "queryset", ",", "\"__iter__\"", ")", ":", "self", ".", "queryset", "=", "queryset", "return", "elif", "hasattr", "(", "model", ",", "\"__dict__\"", ")", ":", "self", ".", "model", "=", "model", "return", "raise", "TypeError", "(", "\"QuerySet(list) or Models(dictionary) is required\"", ")" ]
https://github.com/Nitrate/Nitrate/blob/7eacef697a15dcbb7ae90c8a1dbf769cba6d1bb1/src/tcms/xmlrpc/serializer.py#L73-L82
zalando/spilo
72b447fac1fc9b9e6e2b519dc520a50a5e0fdd25
postgres-appliance/scripts/configure_spilo.py
python
write_clone_pgpass
(placeholders, overwrite)
[]
def write_clone_pgpass(placeholders, overwrite):
    pgpassfile = placeholders['CLONE_PGPASS']
    # pgpass is host:port:database:user:password
    r = {'host': escape_pgpass_value(placeholders['CLONE_HOST']),
         'port': placeholders['CLONE_PORT'],
         'database': '*',
         'user': escape_pgpass_value(placeholders['CLONE_USER']),
         'password': escape_pgpass_value(placeholders['CLONE_PASSWORD'])}
    pgpass_string = "{host}:{port}:{database}:{user}:{password}".format(**r)
    write_file(pgpass_string, pgpassfile, overwrite)
    os.chmod(pgpassfile, 0o600)
    adjust_owner(pgpassfile, gid=-1)
[ "def", "write_clone_pgpass", "(", "placeholders", ",", "overwrite", ")", ":", "pgpassfile", "=", "placeholders", "[", "'CLONE_PGPASS'", "]", "# pgpass is host:port:database:user:password", "r", "=", "{", "'host'", ":", "escape_pgpass_value", "(", "placeholders", "[", "'CLONE_HOST'", "]", ")", ",", "'port'", ":", "placeholders", "[", "'CLONE_PORT'", "]", ",", "'database'", ":", "'*'", ",", "'user'", ":", "escape_pgpass_value", "(", "placeholders", "[", "'CLONE_USER'", "]", ")", ",", "'password'", ":", "escape_pgpass_value", "(", "placeholders", "[", "'CLONE_PASSWORD'", "]", ")", "}", "pgpass_string", "=", "\"{host}:{port}:{database}:{user}:{password}\"", ".", "format", "(", "*", "*", "r", ")", "write_file", "(", "pgpass_string", ",", "pgpassfile", ",", "overwrite", ")", "os", ".", "chmod", "(", "pgpassfile", ",", "0o600", ")", "adjust_owner", "(", "pgpassfile", ",", "gid", "=", "-", "1", ")" ]
https://github.com/zalando/spilo/blob/72b447fac1fc9b9e6e2b519dc520a50a5e0fdd25/postgres-appliance/scripts/configure_spilo.py#L866-L877
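For illustration, the single pgpass line this produces; all values below are hypothetical placeholders for the CLONE_* settings, and escape_pgpass_value (not shown in this record) is expected to backslash-escape ':' and '\' within fields, per the standard .pgpass format:

    r = {'host': 'source-db.example.com', 'port': 5432,
         'database': '*', 'user': 'standby', 'password': 's3cret'}
    print("{host}:{port}:{database}:{user}:{password}".format(**r))
    # -> source-db.example.com:5432:*:standby:s3cret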
hoangminhle/hierarchical_IL_RL
f97263dbc646ea0f2bfbc602d130d68b01bda8ca
hybrid_imitation_reinforcement_Montezuma/replay_buffer.py
python
PrioritizedReplayBuffer.__init__
(self, size, alpha)
Create Prioritized Replay buffer. Parameters ---------- size: int Max number of transitions to store in the buffer. When the buffer overflows the old memories are dropped. alpha: float how much prioritization is used (0 - no prioritization, 1 - full prioritization) See Also -------- ReplayBuffer.__init__
Create Prioritized Replay buffer.
[ "Create", "Prioritized", "Replay", "buffer", "." ]
def __init__(self, size, alpha):
    """Create Prioritized Replay buffer.

    Parameters
    ----------
    size: int
        Max number of transitions to store in the buffer. When the buffer
        overflows the old memories are dropped.
    alpha: float
        how much prioritization is used
        (0 - no prioritization, 1 - full prioritization)

    See Also
    --------
    ReplayBuffer.__init__
    """
    super(PrioritizedReplayBuffer, self).__init__(size)
    assert alpha > 0
    self._alpha = alpha

    it_capacity = 1
    while it_capacity < size:
        it_capacity *= 2

    self._it_sum = SumSegmentTree(it_capacity)
    self._it_min = MinSegmentTree(it_capacity)
    self._max_priority = 1.0
[ "def", "__init__", "(", "self", ",", "size", ",", "alpha", ")", ":", "super", "(", "PrioritizedReplayBuffer", ",", "self", ")", ".", "__init__", "(", "size", ")", "assert", "alpha", ">", "0", "self", ".", "_alpha", "=", "alpha", "it_capacity", "=", "1", "while", "it_capacity", "<", "size", ":", "it_capacity", "*=", "2", "self", ".", "_it_sum", "=", "SumSegmentTree", "(", "it_capacity", ")", "self", ".", "_it_min", "=", "MinSegmentTree", "(", "it_capacity", ")", "self", ".", "_max_priority", "=", "1.0" ]
https://github.com/hoangminhle/hierarchical_IL_RL/blob/f97263dbc646ea0f2bfbc602d130d68b01bda8ca/hybrid_imitation_reinforcement_Montezuma/replay_buffer.py#L78-L104
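The segment trees used for prioritized sampling need a power-of-two capacity, which the constructor derives with the doubling loop; the same computation as a standalone sketch:

def next_pow2(size):
    # Smallest power of two >= size, mirroring the constructor's while loop.
    cap = 1
    while cap < size:
        cap *= 2
    return cap

assert next_pow2(10000) == 16384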
joosthoeks/jhTAlib
4931a34829d966ccc973fb29d767a359d6e94b44
jhtalib/overlap_studies/overlap_studies.py
python
WWMA
(df, n, price='Close')
return wwma_list
Welles Wilder Moving Average Returns: list of floats = jhta.WWMA(df, n, price='Close') Source: https://www.fmlabs.com/reference/default.htm?url=WellesMA.htm
Welles Wilder Moving Average Returns: list of floats = jhta.WWMA(df, n, price='Close') Source: https://www.fmlabs.com/reference/default.htm?url=WellesMA.htm
[ "Welles", "Wilder", "Moving", "Average", "Returns", ":", "list", "of", "floats", "=", "jhta", ".", "WWMA", "(", "df", "n", "price", "=", "Close", ")", "Source", ":", "https", ":", "//", "www", ".", "fmlabs", ".", "com", "/", "reference", "/", "default", ".", "htm?url", "=", "WellesMA", ".", "htm" ]
def WWMA(df, n, price='Close'):
    """
    Welles Wilder Moving Average
    Returns: list of floats = jhta.WWMA(df, n, price='Close')
    Source: https://www.fmlabs.com/reference/default.htm?url=WellesMA.htm
    """
    wwma_list = []
    for i in range(len(df[price])):
        if i + 1 < n:
            wwma = float('NaN')
            wwma_list.append(wwma)
            wwma = df[price][i]
        else:
            wwma = (wwma * (n - 1) + df[price][i]) / n
            wwma_list.append(wwma)
    return wwma_list
[ "def", "WWMA", "(", "df", ",", "n", ",", "price", "=", "'Close'", ")", ":", "wwma_list", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "df", "[", "price", "]", ")", ")", ":", "if", "i", "+", "1", "<", "n", ":", "wwma", "=", "float", "(", "'NaN'", ")", "wwma_list", ".", "append", "(", "wwma", ")", "wwma", "=", "df", "[", "price", "]", "[", "i", "]", "else", ":", "wwma", "=", "(", "wwma", "*", "(", "n", "-", "1", ")", "+", "df", "[", "price", "]", "[", "i", "]", ")", "/", "n", "wwma_list", ".", "append", "(", "wwma", ")", "return", "wwma_list" ]
https://github.com/joosthoeks/jhTAlib/blob/4931a34829d966ccc973fb29d767a359d6e94b44/jhtalib/overlap_studies/overlap_studies.py#L329-L344
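A quick usage sketch, assuming the jhtalib package is importable (the function only indexes df[price], so a plain dict works); the first n - 1 outputs are NaN and the recursion is seeded with the last warm-up price:

import jhtalib as jhta

df = {'Close': [1.0, 2.0, 3.0, 4.0, 5.0]}
out = jhta.WWMA(df, 3)
# out[0] and out[1] are NaN (warm-up)
# out[2] == (2.0 * 2 + 3.0) / 3 ~= 2.3333, since wwma was seeded with df['Close'][1]
# out[3] ~= 2.8889, out[4] ~= 3.5926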
pm4py/pm4py-core
7807b09a088b02199cd0149d724d0e28793971bf
pm4py/algo/discovery/footprints/tree/variants/bottomup.py
python
get_footprints
(node, footprints_dictio)
Gets the footprints for a node (having the history of the child nodes) Parameters -------------- node Node of the tree footprints_dictio Dictionary of footprints of the process tree Returns -------------- footprints Footprints of the node (having the history of the child nodes)
Gets the footprints for a node (having the history of the child nodes)
[ "Gets", "the", "footprints", "for", "a", "node", "(", "having", "the", "history", "of", "the", "child", "nodes", ")" ]
def get_footprints(node, footprints_dictio): """ Gets the footprints for a node (having the history of the child nodes) Parameters -------------- node Node of the tree footprints_dictio Dictionary of footprints of the process tree Returns -------------- footprints Footprints of the node (having the history of the child nodes) """ if len(node.children) == 0: return get_footprints_leaf(node, footprints_dictio) elif node.operator == Operator.XOR: return get_footprints_xor(node, footprints_dictio) elif node.operator == Operator.PARALLEL or node.operator == Operator.OR: return get_footprints_parallel(node, footprints_dictio) elif node.operator == Operator.SEQUENCE: return get_footprints_sequence(node, footprints_dictio) elif node.operator == Operator.LOOP: return get_footprints_loop(node, footprints_dictio)
[ "def", "get_footprints", "(", "node", ",", "footprints_dictio", ")", ":", "if", "len", "(", "node", ".", "children", ")", "==", "0", ":", "return", "get_footprints_leaf", "(", "node", ",", "footprints_dictio", ")", "elif", "node", ".", "operator", "==", "Operator", ".", "XOR", ":", "return", "get_footprints_xor", "(", "node", ",", "footprints_dictio", ")", "elif", "node", ".", "operator", "==", "Operator", ".", "PARALLEL", "or", "node", ".", "operator", "==", "Operator", ".", "OR", ":", "return", "get_footprints_parallel", "(", "node", ",", "footprints_dictio", ")", "elif", "node", ".", "operator", "==", "Operator", ".", "SEQUENCE", ":", "return", "get_footprints_sequence", "(", "node", ",", "footprints_dictio", ")", "elif", "node", ".", "operator", "==", "Operator", ".", "LOOP", ":", "return", "get_footprints_loop", "(", "node", ",", "footprints_dictio", ")" ]
https://github.com/pm4py/pm4py-core/blob/7807b09a088b02199cd0149d724d0e28793971bf/pm4py/algo/discovery/footprints/tree/variants/bottomup.py#L342-L367
avalonstrel/GatedConvolution
407210b6ee91c536cda7ed9cc92ba73a0bf7024b
inpaint_ops.py
python
local_patch
(x, bbox)
return x
Crop local patch according to bbox. Args: x: input bbox: (top, left, height, width) Returns: tf.Tensor: local patch
Crop local patch according to bbox.
[ "Crop", "local", "patch", "according", "to", "bbox", "." ]
def local_patch(x, bbox):
    """Crop local patch according to bbox.

    Args:
        x: input
        bbox: (top, left, height, width)

    Returns:
        tf.Tensor: local patch
    """
    x = tf.image.crop_to_bounding_box(x, bbox[0], bbox[1], bbox[2], bbox[3])
    return x
[ "def", "local_patch", "(", "x", ",", "bbox", ")", ":", "x", "=", "tf", ".", "image", ".", "crop_to_bounding_box", "(", "x", ",", "bbox", "[", "0", "]", ",", "bbox", "[", "1", "]", ",", "bbox", "[", "2", "]", ",", "bbox", "[", "3", "]", ")", "return", "x" ]
https://github.com/avalonstrel/GatedConvolution/blob/407210b6ee91c536cda7ed9cc92ba73a0bf7024b/inpaint_ops.py#L360-L372
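For intuition, the crop is equivalent to plain slicing on an NHWC batch; a numpy sketch with illustrative shapes:

import numpy as np

x = np.zeros((1, 256, 256, 3))  # NHWC batch
top, left, height, width = 64, 64, 128, 128
patch = x[:, top:top + height, left:left + width, :]
assert patch.shape == (1, 128, 128, 3)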
mchristopher/PokemonGo-DesktopMap
ec37575f2776ee7d64456e2a1f6b6b78830b4fe0
app/pylibs/win32/pyproj/__init__.py
python
transform
(p1, p2, x, y, z=None, radians=False)
x2, y2, z2 = transform(p1, p2, x1, y1, z1, radians=False) Transform points between two coordinate systems defined by the Proj instances p1 and p2. The points x1,y1,z1 in the coordinate system defined by p1 are transformed to x2,y2,z2 in the coordinate system defined by p2. z1 is optional, if it is not set it is assumed to be zero (and only x2 and y2 are returned). In addition to converting between cartographic and geographic projection coordinates, this function can take care of datum shifts (which cannot be done using the __call__ method of the Proj instances). It also allows for one of the coordinate systems to be geographic (proj = 'latlong'). If optional keyword 'radians' is True (default is False) and p1 is defined in geographic coordinate (pj.is_latlong() is True), x1,y1 is interpreted as radians instead of the default degrees. Similarly, if p2 is defined in geographic coordinates and radians=True, x2, y2 are returned in radians instead of degrees. if p1.is_latlong() and p2.is_latlong() both are False, the radians keyword has no effect. x,y and z can be numpy or regular python arrays, python lists/tuples or scalars. Arrays are fastest. For projections in geocentric coordinates, values of x and y are given in meters. z is always meters. Example usage: >>> # projection 1: UTM zone 15, grs80 ellipse, NAD83 datum >>> # (defined by epsg code 26915) >>> p1 = Proj(init='epsg:26915') >>> # projection 2: UTM zone 15, clrk66 ellipse, NAD27 datum >>> p2 = Proj(init='epsg:26715') >>> # find x,y of Jefferson City, MO. >>> x1, y1 = p1(-92.199881,38.56694) >>> # transform this point to projection 2 coordinates. >>> x2, y2 = transform(p1,p2,x1,y1) >>> '%9.3f %11.3f' % (x1,y1) '569704.566 4269024.671' >>> '%9.3f %11.3f' % (x2,y2) '569722.342 4268814.027' >>> '%8.3f %5.3f' % p2(x2,y2,inverse=True) ' -92.200 38.567' >>> # process 3 points at a time in a tuple >>> lats = (38.83,39.32,38.75) # Columbia, KC and StL Missouri >>> lons = (-92.22,-94.72,-90.37) >>> x1, y1 = p1(lons,lats) >>> x2, y2 = transform(p1,p2,x1,y1) >>> xy = x1+y1 >>> '%9.3f %9.3f %9.3f %11.3f %11.3f %11.3f' % xy '567703.344 351730.944 728553.093 4298200.739 4353698.725 4292319.005' >>> xy = x2+y2 >>> '%9.3f %9.3f %9.3f %11.3f %11.3f %11.3f' % xy '567721.149 351747.558 728569.133 4297989.112 4353489.644 4292106.305' >>> lons, lats = p2(x2,y2,inverse=True) >>> xy = lons+lats >>> '%8.3f %8.3f %8.3f %5.3f %5.3f %5.3f' % xy ' -92.220 -94.720 -90.370 38.830 39.320 38.750' >>> # test datum shifting, installation of extra datum grid files. >>> p1 = Proj(proj='latlong',datum='WGS84') >>> x1 = -111.5; y1 = 45.25919444444 >>> p2 = Proj(proj="utm",zone=10,datum='NAD27') >>> x2, y2 = transform(p1, p2, x1, y1) >>> "%s %s" % (str(x2)[:9],str(y2)[:9]) '1402285.9 5076292.4'
x2, y2, z2 = transform(p1, p2, x1, y1, z1, radians=False)
[ "x2", "y2", "z2", "=", "transform", "(", "p1", "p2", "x1", "y1", "z1", "radians", "=", "False", ")" ]
def transform(p1, p2, x, y, z=None, radians=False):
    """
    x2, y2, z2 = transform(p1, p2, x1, y1, z1, radians=False)

    Transform points between two coordinate systems defined by the
    Proj instances p1 and p2.

    The points x1,y1,z1 in the coordinate system defined by p1 are
    transformed to x2,y2,z2 in the coordinate system defined by p2.

    z1 is optional, if it is not set it is assumed to be zero (and
    only x2 and y2 are returned).

    In addition to converting between cartographic and geographic
    projection coordinates, this function can take care of datum
    shifts (which cannot be done using the __call__ method of the
    Proj instances). It also allows for one of the coordinate
    systems to be geographic (proj = 'latlong').

    If optional keyword 'radians' is True (default is False) and p1
    is defined in geographic coordinate (pj.is_latlong() is True),
    x1,y1 is interpreted as radians instead of the default degrees.
    Similarly, if p2 is defined in geographic coordinates and
    radians=True, x2, y2 are returned in radians instead of degrees.
    if p1.is_latlong() and p2.is_latlong() both are False, the
    radians keyword has no effect.

    x,y and z can be numpy or regular python arrays, python
    lists/tuples or scalars. Arrays are fastest. For projections in
    geocentric coordinates, values of x and y are given in meters.
    z is always meters.

    Example usage:

    >>> # projection 1: UTM zone 15, grs80 ellipse, NAD83 datum
    >>> # (defined by epsg code 26915)
    >>> p1 = Proj(init='epsg:26915')
    >>> # projection 2: UTM zone 15, clrk66 ellipse, NAD27 datum
    >>> p2 = Proj(init='epsg:26715')
    >>> # find x,y of Jefferson City, MO.
    >>> x1, y1 = p1(-92.199881,38.56694)
    >>> # transform this point to projection 2 coordinates.
    >>> x2, y2 = transform(p1,p2,x1,y1)
    >>> '%9.3f %11.3f' % (x1,y1)
    '569704.566 4269024.671'
    >>> '%9.3f %11.3f' % (x2,y2)
    '569722.342 4268814.027'
    >>> '%8.3f %5.3f' % p2(x2,y2,inverse=True)
    ' -92.200 38.567'
    >>> # process 3 points at a time in a tuple
    >>> lats = (38.83,39.32,38.75) # Columbia, KC and StL Missouri
    >>> lons = (-92.22,-94.72,-90.37)
    >>> x1, y1 = p1(lons,lats)
    >>> x2, y2 = transform(p1,p2,x1,y1)
    >>> xy = x1+y1
    >>> '%9.3f %9.3f %9.3f %11.3f %11.3f %11.3f' % xy
    '567703.344 351730.944 728553.093 4298200.739 4353698.725 4292319.005'
    >>> xy = x2+y2
    >>> '%9.3f %9.3f %9.3f %11.3f %11.3f %11.3f' % xy
    '567721.149 351747.558 728569.133 4297989.112 4353489.644 4292106.305'
    >>> lons, lats = p2(x2,y2,inverse=True)
    >>> xy = lons+lats
    >>> '%8.3f %8.3f %8.3f %5.3f %5.3f %5.3f' % xy
    ' -92.220 -94.720 -90.370 38.830 39.320 38.750'
    >>> # test datum shifting, installation of extra datum grid files.
    >>> p1 = Proj(proj='latlong',datum='WGS84')
    >>> x1 = -111.5; y1 = 45.25919444444
    >>> p2 = Proj(proj="utm",zone=10,datum='NAD27')
    >>> x2, y2 = transform(p1, p2, x1, y1)
    >>> "%s %s" % (str(x2)[:9],str(y2)[:9])
    '1402285.9 5076292.4'
    """
    # check that p1 and p2 are from the Proj class
    if not isinstance(p1, Proj):
        raise TypeError("p1 must be a Proj class")
    if not isinstance(p2, Proj):
        raise TypeError("p2 must be a Proj class")
    # process inputs, making copies that support buffer API.
    inx, xisfloat, xislist, xistuple = _copytobuffer(x)
    iny, yisfloat, yislist, yistuple = _copytobuffer(y)
    if z is not None:
        inz, zisfloat, zislist, zistuple = _copytobuffer(z)
    else:
        inz = None
    # call pj_transform. inx,iny,inz buffers modified in place.
    _proj._transform(p1,p2,inx,iny,inz,radians)
    # if inputs were lists, tuples or floats, convert back.
    outx = _convertback(xisfloat,xislist,xistuple,inx)
    outy = _convertback(yisfloat,yislist,yistuple,iny)
    if inz is not None:
        outz = _convertback(zisfloat,zislist,zistuple,inz)
        return outx, outy, outz
    else:
        return outx, outy
[ "def", "transform", "(", "p1", ",", "p2", ",", "x", ",", "y", ",", "z", "=", "None", ",", "radians", "=", "False", ")", ":", "# check that p1 and p2 are from the Proj class", "if", "not", "isinstance", "(", "p1", ",", "Proj", ")", ":", "raise", "TypeError", "(", "\"p1 must be a Proj class\"", ")", "if", "not", "isinstance", "(", "p2", ",", "Proj", ")", ":", "raise", "TypeError", "(", "\"p2 must be a Proj class\"", ")", "# process inputs, making copies that support buffer API.", "inx", ",", "xisfloat", ",", "xislist", ",", "xistuple", "=", "_copytobuffer", "(", "x", ")", "iny", ",", "yisfloat", ",", "yislist", ",", "yistuple", "=", "_copytobuffer", "(", "y", ")", "if", "z", "is", "not", "None", ":", "inz", ",", "zisfloat", ",", "zislist", ",", "zistuple", "=", "_copytobuffer", "(", "z", ")", "else", ":", "inz", "=", "None", "# call pj_transform. inx,iny,inz buffers modified in place.", "_proj", ".", "_transform", "(", "p1", ",", "p2", ",", "inx", ",", "iny", ",", "inz", ",", "radians", ")", "# if inputs were lists, tuples or floats, convert back.", "outx", "=", "_convertback", "(", "xisfloat", ",", "xislist", ",", "xistuple", ",", "inx", ")", "outy", "=", "_convertback", "(", "yisfloat", ",", "yislist", ",", "yistuple", ",", "iny", ")", "if", "inz", "is", "not", "None", ":", "outz", "=", "_convertback", "(", "zisfloat", ",", "zislist", ",", "zistuple", ",", "inz", ")", "return", "outx", ",", "outy", ",", "outz", "else", ":", "return", "outx", ",", "outy" ]
https://github.com/mchristopher/PokemonGo-DesktopMap/blob/ec37575f2776ee7d64456e2a1f6b6b78830b4fe0/app/pylibs/win32/pyproj/__init__.py#L418-L512
jobovy/galpy
8e6a230bbe24ce16938db10053f92eb17fe4bb52
galpy/potential/planarPotential.py
python
planarPotential.Rforce
(self,R,phi=0.,t=0.)
return self._Rforce_nodecorator(R,phi=phi,t=t)
NAME: Rforce PURPOSE: evaluate the radial force INPUT: R - Cylindrical radius (can be Quantity) phi= azimuth (optional; can be Quantity) t= time (optional; can be Quantity) OUTPUT: F_R(R,(\phi,t))) HISTORY: 2010-07-13 - Written - Bovy (NYU)
NAME:
[ "NAME", ":" ]
def Rforce(self,R,phi=0.,t=0.):
    """
    NAME:
       Rforce
    PURPOSE:
       evaluate the radial force
    INPUT:
       R - Cylindrical radius (can be Quantity)
       phi= azimuth (optional; can be Quantity)
       t= time (optional; can be Quantity)
    OUTPUT:
       F_R(R,(\phi,t)))
    HISTORY:
       2010-07-13 - Written - Bovy (NYU)
    """
    return self._Rforce_nodecorator(R,phi=phi,t=t)
[ "def", "Rforce", "(", "self", ",", "R", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "return", "self", ".", "_Rforce_nodecorator", "(", "R", ",", "phi", "=", "phi", ",", "t", "=", "t", ")" ]
https://github.com/jobovy/galpy/blob/8e6a230bbe24ce16938db10053f92eb17fe4bb52/galpy/potential/planarPotential.py#L235-L262
openstack/tacker
a60993fc3b2d4fc0e93ab13a874fe3c314fe48de
tacker/wsgi.py
python
DefaultMethodController.options
(self, request, **kwargs)
Return a response that includes the 'Allow' header. Return a response that includes the 'Allow' header listing the methods that are implemented. A 204 status code is used for this response.
Return a response that includes the 'Allow' header.
[ "Return", "a", "response", "that", "includes", "the", "Allow", "header", "." ]
def options(self, request, **kwargs):
    """Return a response that includes the 'Allow' header.

    Return a response that includes the 'Allow' header listing the methods
    that are implemented. A 204 status code is used for this response.
    """
    headers = [('Allow', kwargs.get('allowed_methods'))]
    raise webob.exc.HTTPNoContent(headers=headers)
[ "def", "options", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "[", "(", "'Allow'", ",", "kwargs", ".", "get", "(", "'allowed_methods'", ")", ")", "]", "raise", "webob", ".", "exc", ".", "HTTPNoContent", "(", "headers", "=", "headers", ")" ]
https://github.com/openstack/tacker/blob/a60993fc3b2d4fc0e93ab13a874fe3c314fe48de/tacker/wsgi.py#L718-L725
sharppy/SHARPpy
19175269ab11fe06c917b5d10376862a4716e1db
sharppy/sharptab/params.py
python
convective_temp
(prof, **kwargs)
return tmpc
Computes the convective temperature, assuming no change in the moisture profile. Parcels are iteratively lifted until only mincinh is left as a cap. The first guess is the observed surface temperature. Parameters ---------- prof : profile object Profile Object mincinh : parcel object (optional; default -1) Amount of CINH left at CI pres : number (optional) Pressure of parcel to lift (hPa) tmpc : number (optional) Temperature of parcel to lift (C) dwpc : number (optional) Dew Point of parcel to lift (C) Returns ------- Convective Temperature (C) : number
Computes the convective temperature, assuming no change in the moisture profile. Parcels are iteratively lifted until only mincinh is left as a cap. The first guess is the observed surface temperature. Parameters ---------- prof : profile object Profile Object mincinh : parcel object (optional; default -1) Amount of CINH left at CI pres : number (optional) Pressure of parcel to lift (hPa) tmpc : number (optional) Temperature of parcel to lift (C) dwpc : number (optional) Dew Point of parcel to lift (C) Returns ------- Convective Temperature (C) : number
[ "Computes", "the", "convective", "temperature", "assuming", "no", "change", "in", "the", "moisture", "profile", ".", "Parcels", "are", "iteratively", "lifted", "until", "only", "mincinh", "is", "left", "as", "a", "cap", ".", "The", "first", "guess", "is", "the", "observed", "surface", "temperature", ".", "Parameters", "----------", "prof", ":", "profile", "object", "Profile", "Object", "mincinh", ":", "parcel", "object", "(", "optional", ";", "default", "-", "1", ")", "Amount", "of", "CINH", "left", "at", "CI", "pres", ":", "number", "(", "optional", ")", "Pressure", "of", "parcel", "to", "lift", "(", "hPa", ")", "tmpc", ":", "number", "(", "optional", ")", "Temperature", "of", "parcel", "to", "lift", "(", "C", ")", "dwpc", ":", "number", "(", "optional", ")", "Dew", "Point", "of", "parcel", "to", "lift", "(", "C", ")", "Returns", "-------", "Convective", "Temperature", "(", "C", ")", ":", "number" ]
def convective_temp(prof, **kwargs):
    '''
    Computes the convective temperature, assuming no change in the moisture
    profile. Parcels are iteratively lifted until only mincinh is left as a
    cap. The first guess is the observed surface temperature.

    Parameters
    ----------
    prof : profile object
        Profile Object
    mincinh : parcel object (optional; default -1)
        Amount of CINH left at CI
    pres : number (optional)
        Pressure of parcel to lift (hPa)
    tmpc : number (optional)
        Temperature of parcel to lift (C)
    dwpc : number (optional)
        Dew Point of parcel to lift (C)

    Returns
    -------
    Convective Temperature (C) : number
    '''
    mincinh = kwargs.get('mincinh', 0.)
    mmr = mean_mixratio(prof)
    pres = kwargs.get('pres', prof.pres[prof.sfc])
    tmpc = kwargs.get('tmpc', prof.tmpc[prof.sfc])
    dwpc = kwargs.get('dwpc', thermo.temp_at_mixrat(mmr, pres))

    # Do a quick search to fine whether to continue. If you need to heat
    # up more than 25C, don't compute.
    pcl = cape(prof, flag=5, pres=pres, tmpc=tmpc+25., dwpc=dwpc, trunc=True)
    if pcl.bplus == 0. or not utils.QC(pcl.bminus) or pcl.bminus < mincinh:
        return ma.masked

    excess = dwpc - tmpc
    if excess > 0:
        tmpc = tmpc + excess + 4.

    pcl = cape(prof, flag=5, pres=pres, tmpc=tmpc, dwpc=dwpc, trunc=True)
    if pcl.bplus == 0. or not utils.QC(pcl.bminus):
        pcl.bminus = ma.masked

    while not utils.QC(pcl.bminus) or pcl.bminus < mincinh:
        if pcl.bminus < -100:
            tmpc += 2.
        else:
            tmpc += 0.5
        pcl = cape(prof, flag=5, pres=pres, tmpc=tmpc, dwpc=dwpc, trunc=True)
        if pcl.bplus == 0.:
            pcl.bminus = ma.masked
    return tmpc
[ "def", "convective_temp", "(", "prof", ",", "*", "*", "kwargs", ")", ":", "mincinh", "=", "kwargs", ".", "get", "(", "'mincinh'", ",", "0.", ")", "mmr", "=", "mean_mixratio", "(", "prof", ")", "pres", "=", "kwargs", ".", "get", "(", "'pres'", ",", "prof", ".", "pres", "[", "prof", ".", "sfc", "]", ")", "tmpc", "=", "kwargs", ".", "get", "(", "'tmpc'", ",", "prof", ".", "tmpc", "[", "prof", ".", "sfc", "]", ")", "dwpc", "=", "kwargs", ".", "get", "(", "'dwpc'", ",", "thermo", ".", "temp_at_mixrat", "(", "mmr", ",", "pres", ")", ")", "# Do a quick search to fine whether to continue. If you need to heat", "# up more than 25C, don't compute.", "pcl", "=", "cape", "(", "prof", ",", "flag", "=", "5", ",", "pres", "=", "pres", ",", "tmpc", "=", "tmpc", "+", "25.", ",", "dwpc", "=", "dwpc", ",", "trunc", "=", "True", ")", "if", "pcl", ".", "bplus", "==", "0.", "or", "not", "utils", ".", "QC", "(", "pcl", ".", "bminus", ")", "or", "pcl", ".", "bminus", "<", "mincinh", ":", "return", "ma", ".", "masked", "excess", "=", "dwpc", "-", "tmpc", "if", "excess", ">", "0", ":", "tmpc", "=", "tmpc", "+", "excess", "+", "4.", "pcl", "=", "cape", "(", "prof", ",", "flag", "=", "5", ",", "pres", "=", "pres", ",", "tmpc", "=", "tmpc", ",", "dwpc", "=", "dwpc", ",", "trunc", "=", "True", ")", "if", "pcl", ".", "bplus", "==", "0.", "or", "not", "utils", ".", "QC", "(", "pcl", ".", "bminus", ")", ":", "pcl", ".", "bminus", "=", "ma", ".", "masked", "while", "not", "utils", ".", "QC", "(", "pcl", ".", "bminus", ")", "or", "pcl", ".", "bminus", "<", "mincinh", ":", "if", "pcl", ".", "bminus", "<", "-", "100", ":", "tmpc", "+=", "2.", "else", ":", "tmpc", "+=", "0.5", "pcl", "=", "cape", "(", "prof", ",", "flag", "=", "5", ",", "pres", "=", "pres", ",", "tmpc", "=", "tmpc", ",", "dwpc", "=", "dwpc", ",", "trunc", "=", "True", ")", "if", "pcl", ".", "bplus", "==", "0.", ":", "pcl", ".", "bminus", "=", "ma", ".", "masked", "return", "tmpc" ]
https://github.com/sharppy/SHARPpy/blob/19175269ab11fe06c917b5d10376862a4716e1db/sharppy/sharptab/params.py#L2470-L2513
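The warm-up schedule above (2 C steps while the cap is below -100 J/kg, then 0.5 C steps) can be isolated in a toy model; toy_cinh below is an invented stand-in for the parcel lift, not SHARPpy code:

def toy_cinh(tmpc):
    # Pretend the cap erodes linearly as the surface warms (illustrative only).
    return min(0.0, 10.0 * (tmpc - 32.0))

tmpc, mincinh = 20.0, 0.0
while toy_cinh(tmpc) < mincinh:
    tmpc += 2.0 if toy_cinh(tmpc) < -100 else 0.5
# tmpc is now the first temperature at which the toy cap is gone (32.0 here)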
sklearn-theano/sklearn-theano
3eba566d8b624885b75759de47e52f903c015e40
sklearn_theano/externals/google/protobuf/message.py
python
Message.IsInitialized
(self)
Checks if the message is initialized. Returns: The method returns True if the message is initialized (i.e. all of its required fields are set).
Checks if the message is initialized.
[ "Checks", "if", "the", "message", "is", "initialized", "." ]
def IsInitialized(self):
  """Checks if the message is initialized.

  Returns:
    The method returns True if the message is initialized (i.e. all of its
    required fields are set).
  """
  raise NotImplementedError
[ "def", "IsInitialized", "(", "self", ")", ":", "raise", "NotImplementedError" ]
https://github.com/sklearn-theano/sklearn-theano/blob/3eba566d8b624885b75759de47e52f903c015e40/sklearn_theano/externals/google/protobuf/message.py#L133-L140
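The base class only fixes the contract; a toy class (not protobuf-generated, with invented field names) shows what a concrete IsInitialized typically checks:

class Point:
    # A toy message with two required fields (illustrative, not protobuf).
    def __init__(self, x=None, y=None):
        self.x, self.y = x, y

    def IsInitialized(self):
        # Initialized iff every required field is set.
        return self.x is not None and self.y is not None

assert not Point(x=1).IsInitialized()
assert Point(x=1, y=2).IsInitialized()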
globaleaks/GlobaLeaks
4624ca937728adb8e21c4733a8aecec6a41cb3db
backend/globaleaks/handlers/admin/submission_statuses.py
python
db_update_substatus_model_from_request
(model_obj, request, language)
Populates the model from the request, as well as setting default values :param model_obj: The object model :param request: The request data :param language: The language of the request
Populates the model from the request, as well as setting default values
[ "Populates", "the", "model", "from", "the", "request", "as", "well", "as", "setting", "default", "values" ]
def db_update_substatus_model_from_request(model_obj, request, language): """ Populates the model from the request, as well as setting default values :param model_obj: The object model :param request: The request data :param language: The language of the request """ fill_localized_keys(request, models.SubmissionSubStatus.localized_keys, language) model_obj.update(request)
[ "def", "db_update_substatus_model_from_request", "(", "model_obj", ",", "request", ",", "language", ")", ":", "fill_localized_keys", "(", "request", ",", "models", ".", "SubmissionSubStatus", ".", "localized_keys", ",", "language", ")", "model_obj", ".", "update", "(", "request", ")" ]
https://github.com/globaleaks/GlobaLeaks/blob/4624ca937728adb8e21c4733a8aecec6a41cb3db/backend/globaleaks/handlers/admin/submission_statuses.py#L66-L75
LowinLi/fushare
5aa02159e2c3a5e47eab5149c7f14d5c5b5e3c17
fushare/requests_fun.py
python
urllib_request_link
(url,encoding='utf-8')
Fetch the content of a web page; if the link fails, retry up to 20 times Parameters ------ url: website URL string encoding: encoding type string: 'utf-8', 'gbk', etc. Return ------- r: the fetched content response:
Fetch the content of a web page; if the link fails, retry up to 20 times Parameters ------ url: website URL string encoding: encoding type string: 'utf-8', 'gbk', etc. Return ------- r: the fetched content response:
[ "Fetch", "the", "content", "of", "a", "web", "page", ";", "if", "the", "link", "fails", ",", "retry", "up", "to", "20", "times", "Parameters", "------", "url", ":", "website", "URL", "string", "encoding", ":", "encoding", "type", "string", ":", "'utf-8'", ",", "'gbk'", ",", "etc", ".", "Return", "-------", "r", ":", "the", "fetched", "content", "response", ":" ]
def urllib_request_link(url,encoding='utf-8'):
    """
    Fetch the content of a web page; if the link fails, retry up to 20 times

    Parameters
    ------
        url: website URL
            string
        encoding: encoding type
            string: 'utf-8', 'gbk', etc.

    Return
    -------
        r: the fetched content
            response:
    """
    i = 0
    while True:
        try:
            texts = urllib.request.urlopen(url).readlines()
            return texts
        except:
            i += 1
            print('link attempt %s failed (max 20 retries)' % str(i))
            time.sleep(5)
            if i > 20:
                return None
[ "def", "urllib_request_link", "(", "url", ",", "encoding", "=", "'utf-8'", ")", ":", "i", "=", "0", "while", "True", ":", "try", ":", "texts", "=", "urllib", ".", "request", ".", "urlopen", "(", "url", ")", ".", "readlines", "(", ")", "return", "texts", "except", ":", "i", "+=", "1", "print", "(", "'link attempt %s failed (max 20 retries)'", "%", "str", "(", "i", ")", ")", "time", ".", "sleep", "(", "5", ")", "if", "i", ">", "20", ":", "return", "None" ]
https://github.com/LowinLi/fushare/blob/5aa02159e2c3a5e47eab5149c7f14d5c5b5e3c17/fushare/requests_fun.py#L64-L86
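The bare except and fixed five-second sleep are easy to tighten; a sketch of the same retry loop with a bounded attempt count and exponential backoff (a generic pattern, not part of fushare):

import time
import urllib.error
import urllib.request

def fetch_with_retry(url, max_tries=20, base_delay=1.0):
    for attempt in range(1, max_tries + 1):
        try:
            return urllib.request.urlopen(url).readlines()
        except urllib.error.URLError:
            # Back off exponentially, capped at 60 seconds per wait.
            time.sleep(min(base_delay * 2 ** (attempt - 1), 60))
    return None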
agronholm/pythonfutures
acba785c8a5c331edce05dfa760540b8ea8a81f1
concurrent/futures/_base.py
python
Future.exception
(self, timeout=None)
return self.exception_info(timeout)[0]
Return the exception raised by the call that the future represents. Args: timeout: The number of seconds to wait for the exception if the future isn't done. If None, then there is no limit on the wait time. Returns: The exception raised by the call that the future represents or None if the call completed without raising. Raises: CancelledError: If the future was cancelled. TimeoutError: If the future didn't finish executing before the given timeout.
Return the exception raised by the call that the future represents.
[ "Return", "the", "exception", "raised", "by", "the", "call", "that", "the", "future", "represents", "." ]
def exception(self, timeout=None):
    """Return the exception raised by the call that the future represents.

    Args:
        timeout: The number of seconds to wait for the exception if the
            future isn't done. If None, then there is no limit on the wait
            time.

    Returns:
        The exception raised by the call that the future represents or None
        if the call completed without raising.

    Raises:
        CancelledError: If the future was cancelled.
        TimeoutError: If the future didn't finish executing before the given
            timeout.
    """
    return self.exception_info(timeout)[0]
[ "def", "exception", "(", "self", ",", "timeout", "=", "None", ")", ":", "return", "self", ".", "exception_info", "(", "timeout", ")", "[", "0", "]" ]
https://github.com/agronholm/pythonfutures/blob/acba785c8a5c331edce05dfa760540b8ea8a81f1/concurrent/futures/_base.py#L499-L516
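A minimal usage sketch against the same API in the standard library's concurrent.futures (which this backport mirrors):

from concurrent.futures import ThreadPoolExecutor

def boom():
    raise ValueError("bad input")

with ThreadPoolExecutor(max_workers=1) as pool:
    future = pool.submit(boom)
    exc = future.exception(timeout=5)  # returns the exception, does not raise it
    assert isinstance(exc, ValueError)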
deepchem/deepchem
054eb4b2b082e3df8e1a8e77f36a52137ae6e375
deepchem/trans/transformers.py
python
CoulombFitTransformer.normalize
(self, X: np.ndarray)
return (X - self.mean) / self.std
Normalize features. Parameters ---------- X: np.ndarray Features Returns ------- X: np.ndarray Normalized features
Normalize features.
[ "Normalize", "features", "." ]
def normalize(self, X: np.ndarray) -> np.ndarray:
    """Normalize features.

    Parameters
    ----------
    X: np.ndarray
      Features

    Returns
    -------
    X: np.ndarray
      Normalized features
    """
    return (X - self.mean) / self.std
[ "def", "normalize", "(", "self", ",", "X", ":", "np", ".", "ndarray", ")", "->", "np", ".", "ndarray", ":", "return", "(", "X", "-", "self", ".", "mean", ")", "/", "self", ".", "std" ]
https://github.com/deepchem/deepchem/blob/054eb4b2b082e3df8e1a8e77f36a52137ae6e375/deepchem/trans/transformers.py#L1453-L1466
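The transform is plain z-score standardization; a standalone numpy sketch (the zero-variance guard is an addition for the sketch, not deepchem's code):

import numpy as np

X = np.array([[1.0, 10.0], [3.0, 30.0], [5.0, 50.0]])
mean, std = X.mean(axis=0), X.std(axis=0)
X_norm = (X - mean) / np.where(std == 0, 1.0, std)
assert np.allclose(X_norm.mean(axis=0), 0.0)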
CLUEbenchmark/CLUEPretrainedModels
b384fd41665a8261f9c689c940cf750b3bc21fce
baselines/models/bert/run_squad.py
python
create_model
(bert_config, is_training, input_ids, input_mask, segment_ids, use_one_hot_embeddings)
return (start_logits, end_logits)
Creates a classification model.
Creates a classification model.
[ "Creates", "a", "classification", "model", "." ]
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 use_one_hot_embeddings):
  """Creates a classification model."""
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)

  final_hidden = model.get_sequence_output()

  final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
  batch_size = final_hidden_shape[0]
  seq_length = final_hidden_shape[1]
  hidden_size = final_hidden_shape[2]

  output_weights = tf.get_variable(
      "cls/squad/output_weights", [2, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.get_variable(
      "cls/squad/output_bias", [2], initializer=tf.zeros_initializer())

  final_hidden_matrix = tf.reshape(final_hidden,
                                   [batch_size * seq_length, hidden_size])
  logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
  logits = tf.nn.bias_add(logits, output_bias)

  logits = tf.reshape(logits, [batch_size, seq_length, 2])
  logits = tf.transpose(logits, [2, 0, 1])

  unstacked_logits = tf.unstack(logits, axis=0)

  (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])

  return (start_logits, end_logits)
[ "def", "create_model", "(", "bert_config", ",", "is_training", ",", "input_ids", ",", "input_mask", ",", "segment_ids", ",", "use_one_hot_embeddings", ")", ":", "model", "=", "modeling", ".", "BertModel", "(", "config", "=", "bert_config", ",", "is_training", "=", "is_training", ",", "input_ids", "=", "input_ids", ",", "input_mask", "=", "input_mask", ",", "token_type_ids", "=", "segment_ids", ",", "use_one_hot_embeddings", "=", "use_one_hot_embeddings", ")", "final_hidden", "=", "model", ".", "get_sequence_output", "(", ")", "final_hidden_shape", "=", "modeling", ".", "get_shape_list", "(", "final_hidden", ",", "expected_rank", "=", "3", ")", "batch_size", "=", "final_hidden_shape", "[", "0", "]", "seq_length", "=", "final_hidden_shape", "[", "1", "]", "hidden_size", "=", "final_hidden_shape", "[", "2", "]", "output_weights", "=", "tf", ".", "get_variable", "(", "\"cls/squad/output_weights\"", ",", "[", "2", ",", "hidden_size", "]", ",", "initializer", "=", "tf", ".", "truncated_normal_initializer", "(", "stddev", "=", "0.02", ")", ")", "output_bias", "=", "tf", ".", "get_variable", "(", "\"cls/squad/output_bias\"", ",", "[", "2", "]", ",", "initializer", "=", "tf", ".", "zeros_initializer", "(", ")", ")", "final_hidden_matrix", "=", "tf", ".", "reshape", "(", "final_hidden", ",", "[", "batch_size", "*", "seq_length", ",", "hidden_size", "]", ")", "logits", "=", "tf", ".", "matmul", "(", "final_hidden_matrix", ",", "output_weights", ",", "transpose_b", "=", "True", ")", "logits", "=", "tf", ".", "nn", ".", "bias_add", "(", "logits", ",", "output_bias", ")", "logits", "=", "tf", ".", "reshape", "(", "logits", ",", "[", "batch_size", ",", "seq_length", ",", "2", "]", ")", "logits", "=", "tf", ".", "transpose", "(", "logits", ",", "[", "2", ",", "0", ",", "1", "]", ")", "unstacked_logits", "=", "tf", ".", "unstack", "(", "logits", ",", "axis", "=", "0", ")", "(", "start_logits", ",", "end_logits", ")", "=", "(", "unstacked_logits", "[", "0", "]", ",", "unstacked_logits", "[", "1", "]", ")", "return", "(", "start_logits", ",", "end_logits", ")" ]
https://github.com/CLUEbenchmark/CLUEPretrainedModels/blob/b384fd41665a8261f9c689c940cf750b3bc21fce/baselines/models/bert/run_squad.py#L550-L587
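The span head is a single [2, hidden] projection applied to every token; the shape bookkeeping in isolation, as a numpy sketch with arbitrary sizes:

import numpy as np

batch, seq, hidden = 2, 5, 8
final_hidden = np.random.randn(batch, seq, hidden)
W, b = np.random.randn(2, hidden), np.zeros(2)

logits = final_hidden.reshape(batch * seq, hidden) @ W.T + b  # (batch*seq, 2)
logits = logits.reshape(batch, seq, 2).transpose(2, 0, 1)     # (2, batch, seq)
start_logits, end_logits = logits[0], logits[1]               # each (batch, seq)
assert start_logits.shape == (batch, seq)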
filerock/FileRock-Client
37214f701666e76e723595f8f9ed238a42f6eb06
filerockclient/ui/wxGui/MainWindow.py
python
MainWindow.OnUpdateClientInfo
(self, event)
event.infos contains: username: string client_id: number client_hostname: string client_platform: string client_version: string basis: string last_commit_timestamp: number or None used_space: number or None user_quota: number or None
event.infos contains: username: string client_id: number client_hostname: string client_platform: string client_version: string basis: string last_commit_timestamp: number or None used_space: number or None user_quota: number or None
[ "event", ".", "infos", "contains", ":", "username", ":", "string", "client_id", ":", "number", "client_hostname", ":", "string", "client_platform", ":", "string", "client_version", ":", "string", "basis", ":", "string", "last_commit_timestamp", ":", "number", "or", "None", "used_space", ":", "number", "or", "None", "user_quota", ":", "number", "or", "None" ]
def OnUpdateClientInfo(self, event):
    '''
    event.infos contains:
        username: string
        client_id: number
        client_hostname: string
        client_platform: string
        client_version: string
        basis: string
        last_commit_timestamp: number or None
        used_space: number or None
        user_quota: number or None
    '''
    client_id = event.infos['client_id']
    hostname = event.infos['client_hostname']
    username = event.infos["username"]
    self.panel_1.version_ctrl.SetValue(event.infos['client_version'], True)
    if client_id is not None:
        self.panel_1.client_ctrl.SetValue(client_id, True)
    if hostname is not None:
        self.panel_1.host_ctrl.SetValue(hostname, True)
    if username is not None:
        self.panel_1.user_ctrl.SetValue(username, True)
    self.OnUpdateSessionInfo(event)
    self.Layout()
    self.panel_1.Layout()
    self.Fit()
[ "def", "OnUpdateClientInfo", "(", "self", ",", "event", ")", ":", "client_id", "=", "event", ".", "infos", "[", "'client_id'", "]", "hostname", "=", "event", ".", "infos", "[", "'client_hostname'", "]", "username", "=", "event", ".", "infos", "[", "\"username\"", "]", "self", ".", "panel_1", ".", "version_ctrl", ".", "SetValue", "(", "event", ".", "infos", "[", "'client_version'", "]", ",", "True", ")", "if", "client_id", "is", "not", "None", ":", "self", ".", "panel_1", ".", "client_ctrl", ".", "SetValue", "(", "client_id", ",", "True", ")", "if", "hostname", "is", "not", "None", ":", "self", ".", "panel_1", ".", "host_ctrl", ".", "SetValue", "(", "hostname", ",", "True", ")", "if", "username", "is", "not", "None", ":", "self", ".", "panel_1", ".", "user_ctrl", ".", "SetValue", "(", "username", ",", "True", ")", "self", ".", "OnUpdateSessionInfo", "(", "event", ")", "self", ".", "Layout", "(", ")", "self", ".", "panel_1", ".", "Layout", "(", ")", "self", ".", "Fit", "(", ")" ]
https://github.com/filerock/FileRock-Client/blob/37214f701666e76e723595f8f9ed238a42f6eb06/filerockclient/ui/wxGui/MainWindow.py#L249-L277
richshaw2015/oh-my-rss
68b9284e0acaf44ea389d675b71949177f9f3256
web/views/views_html.py
python
get_site_update_view
(request)
return HttpResponseNotFound("No Feeds Subscribed")
Get the global view of updated sites: 100 sites for visitors, 200 for logged-in users
Get the global view of updated sites: 100 sites for visitors, 200 for logged-in users
[ "Get", "the", "global", "view", "of", "updated", "sites", ":", "100", "sites", "for", "visitors", ",", "200", "for", "logged-in", "users" ]
def get_site_update_view(request):
    """
    Get the global view of updated sites: 100 sites for visitors, 200 for logged-in users
    """
    sub_feeds = json.loads(request.POST.get('sub_feeds') or '[]')
    unsub_feeds = json.loads(request.POST.get('unsub_feeds') or '[]')
    page_size = int(request.POST.get('page_size', 10))
    page = int(request.POST.get('page', 1))
    onlyunread = request.POST.get('onlyunread', 'no') == 'yes'

    user = get_login_user(request)
    if user is None:
        my_feeds = get_visitor_subscribe_feeds(tuple(sub_feeds), tuple(unsub_feeds))
    else:
        my_feeds = get_user_subscribe_feeds(user.oauth_id, user_level=user.level)

    # keep only feeds with content updates
    if user and onlyunread:
        my_feeds = get_user_unread_sites(user.oauth_id, my_feeds)

    my_feeds = sorted(my_feeds, key=lambda t: get_site_last_id(t), reverse=True)

    if my_feeds:
        # pagination
        try:
            paginator_obj = Paginator(my_feeds, page_size)
        except:
            logger.warning(f"bad pagination parameters: `{page}`{page_size}`{sub_feeds}`{unsub_feeds}")
            return HttpResponseNotFound("Page Number Error")

        pg = paginator_obj.page(page)
        num_pages = paginator_obj.num_pages

        sites = Site.objects.filter(pk__in=pg.object_list, status='active').order_by('-star')[:50]

        for site in sites:
            recent_articles = get_recent_site_articles(site.pk)
            site.update_count = len(recent_articles)
            site.update_ids = json.dumps(list(recent_articles))
            site.update_time = get_site_last_id(site.pk)
            if user:
                site.unread_count = get_user_unread_count(user.oauth_id, recent_articles)

        context = dict()
        context['pg'] = pg
        context['sites'] = sites
        context['num_pages'] = num_pages
        context['user'] = user

        return render(request, 'left/site_view.html', context=context)
    return HttpResponseNotFound("No Feeds Subscribed")
[ "def", "get_site_update_view", "(", "request", ")", ":", "sub_feeds", "=", "json", ".", "loads", "(", "request", ".", "POST", ".", "get", "(", "'sub_feeds'", ")", "or", "'[]'", ")", "unsub_feeds", "=", "json", ".", "loads", "(", "request", ".", "POST", ".", "get", "(", "'unsub_feeds'", ")", "or", "'[]'", ")", "page_size", "=", "int", "(", "request", ".", "POST", ".", "get", "(", "'page_size'", ",", "10", ")", ")", "page", "=", "int", "(", "request", ".", "POST", ".", "get", "(", "'page'", ",", "1", ")", ")", "onlyunread", "=", "request", ".", "POST", ".", "get", "(", "'onlyunread'", ",", "'no'", ")", "==", "'yes'", "user", "=", "get_login_user", "(", "request", ")", "if", "user", "is", "None", ":", "my_feeds", "=", "get_visitor_subscribe_feeds", "(", "tuple", "(", "sub_feeds", ")", ",", "tuple", "(", "unsub_feeds", ")", ")", "else", ":", "my_feeds", "=", "get_user_subscribe_feeds", "(", "user", ".", "oauth_id", ",", "user_level", "=", "user", ".", "level", ")", "# keep only feeds with content updates", "if", "user", "and", "onlyunread", ":", "my_feeds", "=", "get_user_unread_sites", "(", "user", ".", "oauth_id", ",", "my_feeds", ")", "my_feeds", "=", "sorted", "(", "my_feeds", ",", "key", "=", "lambda", "t", ":", "get_site_last_id", "(", "t", ")", ",", "reverse", "=", "True", ")", "if", "my_feeds", ":", "# pagination", "try", ":", "paginator_obj", "=", "Paginator", "(", "my_feeds", ",", "page_size", ")", "except", ":", "logger", ".", "warning", "(", "f\"bad pagination parameters: `{page}`{page_size}`{sub_feeds}`{unsub_feeds}\"", ")", "return", "HttpResponseNotFound", "(", "\"Page Number Error\"", ")", "pg", "=", "paginator_obj", ".", "page", "(", "page", ")", "num_pages", "=", "paginator_obj", ".", "num_pages", "sites", "=", "Site", ".", "objects", ".", "filter", "(", "pk__in", "=", "pg", ".", "object_list", ",", "status", "=", "'active'", ")", ".", "order_by", "(", "'-star'", ")", "[", ":", "50", "]", "for", "site", "in", "sites", ":", "recent_articles", "=", "get_recent_site_articles", "(", "site", ".", "pk", ")", "site", ".", "update_count", "=", "len", "(", "recent_articles", ")", "site", ".", "update_ids", "=", "json", ".", "dumps", "(", "list", "(", "recent_articles", ")", ")", "site", ".", "update_time", "=", "get_site_last_id", "(", "site", ".", "pk", ")", "if", "user", ":", "site", ".", "unread_count", "=", "get_user_unread_count", "(", "user", ".", "oauth_id", ",", "recent_articles", ")", "context", "=", "dict", "(", ")", "context", "[", "'pg'", "]", "=", "pg", "context", "[", "'sites'", "]", "=", "sites", "context", "[", "'num_pages'", "]", "=", "num_pages", "context", "[", "'user'", "]", "=", "user", "return", "render", "(", "request", ",", "'left/site_view.html'", ",", "context", "=", "context", ")", "return", "HttpResponseNotFound", "(", "\"No Feeds Subscribed\"", ")" ]
https://github.com/richshaw2015/oh-my-rss/blob/68b9284e0acaf44ea389d675b71949177f9f3256/web/views/views_html.py#L247-L300
uclanlp/visualbert
2a9e8bd58a20af9ed32c0cb5d3a293f5bf5df019
visualbert/utils/pytorch_misc.py
python
find_latest_checkpoint_step
(serialization_dir, epoch_to_load = None)
return model_path, training_state_path
Return the location of the latest model and training state files. If there isn't a valid checkpoint then return None.
Return the location of the latest model and training state files. If there isn't a valid checkpoint then return None.
[ "Return", "the", "location", "of", "the", "latest", "model", "and", "training", "state", "files", ".", "If", "there", "isn", "t", "a", "valid", "checkpoint", "then", "return", "None", "." ]
def find_latest_checkpoint_step(serialization_dir, epoch_to_load = None):
    """
    Return the location of the latest model and training state files.
    If there isn't a valid checkpoint then return None.
    """
    have_checkpoint = (serialization_dir is not None and
                       any("model_step_" in x for x in os.listdir(serialization_dir)))
    if not have_checkpoint:
        return None

    serialization_files = os.listdir(serialization_dir)
    model_checkpoints = [x for x in serialization_files if "model_step_" in x]
    # Get the last checkpoint file. Epochs are specified as either an
    # int (for end of epoch files) or with epoch and timestamp for
    # within epoch checkpoints, e.g. 5.2018-02-02-15-33-42
    info = [(x, int(x.split('_')[2]), int(x.split('_')[4].split('.')[0])) for x in model_checkpoints]

    max_epoch = -1
    max_step = -1
    max_index = -1
    for index, i in enumerate(info):
        if i[2] > max_epoch:
            max_epoch = i[2]
            max_step = i[1]
            max_index = index
        elif i[2] == max_epoch:
            if i[1] > max_step:
                max_step = i[1]
                max_index = index

    model_path = os.path.join(serialization_dir,
                              "model_step_{}_epoch_{}.th".format(max_step, max_epoch))
    training_state_path = os.path.join(serialization_dir,
                                       "training_step_{}_epoch_{}.th".format(max_step, max_epoch))
    return model_path, training_state_path
[ "def", "find_latest_checkpoint_step", "(", "serialization_dir", ",", "epoch_to_load", "=", "None", ")", ":", "have_checkpoint", "=", "(", "serialization_dir", "is", "not", "None", "and", "any", "(", "\"model_step_\"", "in", "x", "for", "x", "in", "os", ".", "listdir", "(", "serialization_dir", ")", ")", ")", "if", "not", "have_checkpoint", ":", "return", "None", "serialization_files", "=", "os", ".", "listdir", "(", "serialization_dir", ")", "model_checkpoints", "=", "[", "x", "for", "x", "in", "serialization_files", "if", "\"model_step_\"", "in", "x", "]", "# Get the last checkpoint file. Epochs are specified as either an", "# int (for end of epoch files) or with epoch and timestamp for", "# within epoch checkpoints, e.g. 5.2018-02-02-15-33-42", "info", "=", "[", "(", "x", ",", "int", "(", "x", ".", "split", "(", "'_'", ")", "[", "2", "]", ")", ",", "int", "(", "x", ".", "split", "(", "'_'", ")", "[", "4", "]", ".", "split", "(", "'.'", ")", "[", "0", "]", ")", ")", "for", "x", "in", "model_checkpoints", "]", "max_epoch", "=", "-", "1", "max_step", "=", "-", "1", "max_index", "=", "-", "1", "for", "index", ",", "i", "in", "enumerate", "(", "info", ")", ":", "if", "i", "[", "2", "]", ">", "max_epoch", ":", "max_epoch", "=", "i", "[", "2", "]", "max_step", "=", "i", "[", "1", "]", "max_index", "=", "index", "elif", "i", "[", "2", "]", "==", "max_epoch", ":", "if", "i", "[", "1", "]", ">", "max_step", ":", "max_step", "=", "i", "[", "1", "]", "max_index", "=", "index", "model_path", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "\"model_step_{}_epoch_{}.th\"", ".", "format", "(", "max_step", ",", "max_epoch", ")", ")", "training_state_path", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "\"training_step_{}_epoch_{}.th\"", ".", "format", "(", "max_step", ",", "max_epoch", ")", ")", "return", "model_path", ",", "training_state_path" ]
https://github.com/uclanlp/visualbert/blob/2a9e8bd58a20af9ed32c0cb5d3a293f5bf5df019/visualbert/utils/pytorch_misc.py#L155-L191
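The epoch-then-step scan is equivalent to a single max() over (epoch, step) keys; the same selection as a standalone sketch:

import re

names = ["model_step_100_epoch_1.th", "model_step_900_epoch_1.th", "model_step_50_epoch_2.th"]

def key(name):
    step, epoch = map(int, re.match(r"model_step_(\d+)_epoch_(\d+)", name).groups())
    return (epoch, step)  # epoch dominates, step breaks ties

assert max(names, key=key) == "model_step_50_epoch_2.th"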
celery/kombu
853b13f1d018ebfe7ad2d064a3111cac9fcf5383
kombu/transport/qpid.py
python
Channel.queue_delete
(self, queue, if_unused=False, if_empty=False, **kwargs)
Delete a queue by name. Delete a queue specified by name. Using the if_unused keyword argument, the delete can only occur if there are 0 consumers bound to it. Using the if_empty keyword argument, the delete can only occur if there are 0 messages in the queue. :param queue: The name of the queue to be deleted. :type queue: str :keyword if_unused: If True, delete only if the queue has 0 consumers. If False, delete a queue even with consumers bound to it. :type if_unused: bool :keyword if_empty: If True, only delete the queue if it is empty. If False, delete the queue if it is empty or not. :type if_empty: bool
Delete a queue by name.
[ "Delete", "a", "queue", "by", "name", "." ]
def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs):
    """Delete a queue by name.

    Delete a queue specified by name. Using the if_unused keyword
    argument, the delete can only occur if there are 0 consumers bound
    to it. Using the if_empty keyword argument, the delete can only
    occur if there are 0 messages in the queue.

    :param queue: The name of the queue to be deleted.
    :type queue: str
    :keyword if_unused: If True, delete only if the queue has 0
        consumers. If False, delete a queue even with consumers bound
        to it.
    :type if_unused: bool
    :keyword if_empty: If True, only delete the queue if it is empty. If
        False, delete the queue if it is empty or not.
    :type if_empty: bool
    """
    if self._has_queue(queue):
        if if_empty and self._size(queue):
            return
        queue_obj = self._broker.getQueue(queue)
        consumer_count = queue_obj.getAttributes()['consumerCount']
        if if_unused and consumer_count > 0:
            return
        self._delete(queue)
[ "def", "queue_delete", "(", "self", ",", "queue", ",", "if_unused", "=", "False", ",", "if_empty", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_has_queue", "(", "queue", ")", ":", "if", "if_empty", "and", "self", ".", "_size", "(", "queue", ")", ":", "return", "queue_obj", "=", "self", ".", "_broker", ".", "getQueue", "(", "queue", ")", "consumer_count", "=", "queue_obj", ".", "getAttributes", "(", ")", "[", "'consumerCount'", "]", "if", "if_unused", "and", "consumer_count", ">", "0", ":", "return", "self", ".", "_delete", "(", "queue", ")" ]
https://github.com/celery/kombu/blob/853b13f1d018ebfe7ad2d064a3111cac9fcf5383/kombu/transport/qpid.py#L682-L708
FSecureLABS/Jandroid
e31d0dab58a2bfd6ed8e0a387172b8bd7c893436
libs/platform-tools/platform-tools_darwin/systrace/catapult/devil/devil/utils/lazy/weak_constant.py
python
WeakConstant.read
(self)
return self._val
Get the object, creating it if necessary.
Get the object, creating it if necessary.
[ "Get", "the", "object", "creating", "it", "if", "necessary", "." ]
def read(self):
  """Get the object, creating it if necessary."""
  if self._initialized.is_set():
    return self._val
  with self._lock:
    if not self._initialized.is_set():
      # We initialize the value on a separate thread to protect
      # from holding self._lock indefinitely in the event that
      # self._initializer hangs.
      initializer_thread = reraiser_thread.ReraiserThread(
          self._initializer)
      initializer_thread.start()
      timeout_retry.WaitFor(
          lambda: initializer_thread.join(1) or not initializer_thread.isAlive(),
          wait_period=0)
      self._val = initializer_thread.GetReturnValue()
      self._initialized.set()
  return self._val
[ "def", "read", "(", "self", ")", ":", "if", "self", ".", "_initialized", ".", "is_set", "(", ")", ":", "return", "self", ".", "_val", "with", "self", ".", "_lock", ":", "if", "not", "self", ".", "_initialized", ".", "is_set", "(", ")", ":", "# We initialize the value on a separate thread to protect", "# from holding self._lock indefinitely in the event that", "# self._initializer hangs.", "initializer_thread", "=", "reraiser_thread", ".", "ReraiserThread", "(", "self", ".", "_initializer", ")", "initializer_thread", ".", "start", "(", ")", "timeout_retry", ".", "WaitFor", "(", "lambda", ":", "initializer_thread", ".", "join", "(", "1", ")", "or", "not", "initializer_thread", ".", "isAlive", "(", ")", ",", "wait_period", "=", "0", ")", "self", ".", "_val", "=", "initializer_thread", ".", "GetReturnValue", "(", ")", "self", ".", "_initialized", ".", "set", "(", ")", "return", "self", ".", "_val" ]
https://github.com/FSecureLABS/Jandroid/blob/e31d0dab58a2bfd6ed8e0a387172b8bd7c893436/libs/platform-tools/platform-tools_darwin/systrace/catapult/devil/devil/utils/lazy/weak_constant.py#L24-L42
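The check / lock / re-check shape is the classic double-checked lazy initializer; a minimal standard-library sketch without the watchdog thread used above:

import threading

class Lazy:
    def __init__(self, factory):
        self._factory = factory
        self._lock = threading.Lock()
        self._ready = threading.Event()
        self._val = None

    def read(self):
        if self._ready.is_set():          # fast path, no lock taken
            return self._val
        with self._lock:
            if not self._ready.is_set():  # re-check under the lock
                self._val = self._factory()
                self._ready.set()
        return self._val

lazy = Lazy(lambda: 42)
assert lazy.read() == 42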
007gzs/dingtalk-sdk
7979da2e259fdbc571728cae2425a04dbc65850a
dingtalk/client/api/taobao.py
python
TbDMP.taobao_dmp_analysis_coverage
( self, select_tag_option_set_d_t_o )
return self._top_request( "taobao.dmp.analysis.coverage", { "select_tag_option_set_d_t_o": select_tag_option_set_d_t_o } )
Get the audience size covered by a tag combination Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=29408 :param select_tag_option_set_d_t_o: tag combination rule
Get the audience size covered by a tag combination Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=29408
[ "Get", "the", "audience", "size", "covered", "by", "a", "tag", "combination", "Documentation", ":", "https", ":", "//", "open", "-", "doc", ".", "dingtalk", ".", "com", "/", "docs", "/", "api", ".", "htm?apiId", "=", "29408" ]
def taobao_dmp_analysis_coverage(
        self,
        select_tag_option_set_d_t_o
):
    """
    Get the audience size covered by a tag combination
    Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=29408

    :param select_tag_option_set_d_t_o: tag combination rule
    """
    return self._top_request(
        "taobao.dmp.analysis.coverage",
        {
            "select_tag_option_set_d_t_o": select_tag_option_set_d_t_o
        }
    )
[ "def", "taobao_dmp_analysis_coverage", "(", "self", ",", "select_tag_option_set_d_t_o", ")", ":", "return", "self", ".", "_top_request", "(", "\"taobao.dmp.analysis.coverage\"", ",", "{", "\"select_tag_option_set_d_t_o\"", ":", "select_tag_option_set_d_t_o", "}", ")" ]
https://github.com/007gzs/dingtalk-sdk/blob/7979da2e259fdbc571728cae2425a04dbc65850a/dingtalk/client/api/taobao.py#L45255-L45270
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/pip/_vendor/distro.py
python
LinuxDistribution.minor_version
(self, best=False)
return self.version_parts(best)[1]
Return the minor version number of the current distribution. For details, see :func:`distro.minor_version`.
Return the minor version number of the current distribution.
[ "Return", "the", "minor", "version", "number", "of", "the", "current", "distribution", "." ]
def minor_version(self, best=False):
    """
    Return the minor version number of the current distribution.

    For details, see :func:`distro.minor_version`.
    """
    return self.version_parts(best)[1]
[ "def", "minor_version", "(", "self", ",", "best", "=", "False", ")", ":", "return", "self", ".", "version_parts", "(", "best", ")", "[", "1", "]" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/pip/_vendor/distro.py#L787-L793
weldr/lorax
d692ce366287ae468c52bc8becde2fef113661a3
src/pylorax/monitor.py
python
LogRequestHandler.iserror
(self, line)
Check a line to see if it contains an error indicating installation failure :param str line: log line to check for failure If the line contains IGNORED it will be skipped.
Check a line to see if it contains an error indicating installation failure
[ "Check", "a", "line", "to", "see", "if", "it", "contains", "an", "error", "indicating", "installation", "failure" ]
def iserror(self, line):
    """
    Check a line to see if it contains an error indicating installation failure

    :param str line: log line to check for failure

    If the line contains IGNORED it will be skipped.
    """
    if "IGNORED" in line:
        return

    for t in self.simple_tests:
        if t in line:
            self.server.log_error = True
            self.server.error_line = line
            return
    for t in self.re_tests:
        if re.search(t, line):
            self.server.log_error = True
            self.server.error_line = line
            return
[ "def", "iserror", "(", "self", ",", "line", ")", ":", "if", "\"IGNORED\"", "in", "line", ":", "return", "for", "t", "in", "self", ".", "simple_tests", ":", "if", "t", "in", "line", ":", "self", ".", "server", ".", "log_error", "=", "True", "self", ".", "server", ".", "error_line", "=", "line", "return", "for", "t", "in", "self", ".", "re_tests", ":", "if", "re", ".", "search", "(", "t", ",", "line", ")", ":", "self", ".", "server", ".", "log_error", "=", "True", "self", ".", "server", ".", "error_line", "=", "line", "return" ]
https://github.com/weldr/lorax/blob/d692ce366287ae468c52bc8becde2fef113661a3/src/pylorax/monitor.py#L107-L127
lightforever/mlcomp
c78fdb77ec9c4ec8ff11beea50b90cab20903ad9
mlcomp/db/providers/dag.py
python
DagProvider.remove_all
(self, ids: List[int])
[]
def remove_all(self, ids: List[int]):
    self.query(Dag).filter(Dag.id.in_(ids)).delete(
        synchronize_session=False)
    self.commit()
[ "def", "remove_all", "(", "self", ",", "ids", ":", "List", "[", "int", "]", ")", ":", "self", ".", "query", "(", "Dag", ")", ".", "filter", "(", "Dag", ".", "id", ".", "in_", "(", "ids", ")", ")", ".", "delete", "(", "synchronize_session", "=", "False", ")", "self", ".", "commit", "(", ")" ]
https://github.com/lightforever/mlcomp/blob/c78fdb77ec9c4ec8ff11beea50b90cab20903ad9/mlcomp/db/providers/dag.py#L228-L231
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/tarfile.py
python
TarInfo._create_gnu_long_header
(cls, name, type, encoding, errors)
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ cls._create_payload(name)
Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence for name.
Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence for name.
[ "Return", "a", "GNUTYPE_LONGNAME", "or", "GNUTYPE_LONGLINK", "sequence", "for", "name", "." ]
def _create_gnu_long_header(cls, name, type, encoding, errors):
    """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
       for name.
    """
    name = name.encode(encoding, errors) + NUL

    info = {}
    info["name"] = "././@LongLink"
    info["type"] = type
    info["size"] = len(name)
    info["magic"] = GNU_MAGIC

    # create extended header + name blocks.
    return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
           cls._create_payload(name)
[ "def", "_create_gnu_long_header", "(", "cls", ",", "name", ",", "type", ",", "encoding", ",", "errors", ")", ":", "name", "=", "name", ".", "encode", "(", "encoding", ",", "errors", ")", "+", "NUL", "info", "=", "{", "}", "info", "[", "\"name\"", "]", "=", "\"././@LongLink\"", "info", "[", "\"type\"", "]", "=", "type", "info", "[", "\"size\"", "]", "=", "len", "(", "name", ")", "info", "[", "\"magic\"", "]", "=", "GNU_MAGIC", "# create extended header + name blocks.", "return", "cls", ".", "_create_header", "(", "info", ",", "USTAR_FORMAT", ",", "encoding", ",", "errors", ")", "+", "cls", ".", "_create_payload", "(", "name", ")" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/tarfile.py#L954-L968
cortex-lab/phy
9a330b9437a3d0b40a37a201d147224e6e7fb462
plugins/matplotlib_view.py
python
FeatureDensityView.__init__
(self, features=None)
features is a function (cluster_id => Bunch(data, ...)) where data is a 3D array.
features is a function (cluster_id => Bunch(data, ...)) where data is a 3D array.
[ "features", "is", "a", "function", "(", "cluster_id", "=", ">", "Bunch", "(", "data", "...", "))", "where", "data", "is", "a", "3D", "array", "." ]
def __init__(self, features=None):
    """features is a function (cluster_id => Bunch(data, ...)) where data is a 3D array."""
    super(FeatureDensityView, self).__init__()
    self.features = features
[ "def", "__init__", "(", "self", ",", "features", "=", "None", ")", ":", "super", "(", "FeatureDensityView", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "features", "=", "features" ]
https://github.com/cortex-lab/phy/blob/9a330b9437a3d0b40a37a201d147224e6e7fb462/plugins/matplotlib_view.py#L11-L14
IntelAI/models
1d7a53ccfad3e6f0e7378c9e3c8840895d63df8c
models/language_translation/tensorflow/transformer_mlperf/inference/int8/transformer/data_download.py
python
download_from_url
(path, url)
Download content from a url.

Args:
  path: string directory where file will be downloaded
  url: string url

Returns:
  Full path to downloaded file
Download content from a url.
[ "Download", "content", "from", "a", "url", "." ]
def download_from_url(path, url):
  """Download content from a url.

  Args:
    path: string directory where file will be downloaded
    url: string url

  Returns:
    Full path to downloaded file
  """
  filename = url.split("/")[-1]
  found_file = find_file(path, filename, max_depth=0)
  if found_file is None:
    filename = os.path.join(path, filename)
    tf.compat.v1.logging.info("Downloading from %s to %s." % (url, filename))
    inprogress_filepath = filename + ".incomplete"
    inprogress_filepath, _ = urllib.request.urlretrieve(
        url, inprogress_filepath, reporthook=download_report_hook)
    # Print newline to clear the carriage return from the download progress.
    print()
    tf.io.gfile.rename(inprogress_filepath, filename)
    return filename
  else:
    tf.compat.v1.logging.info("Already downloaded: %s (at %s)." % (url, found_file))
    return found_file
[ "def", "download_from_url", "(", "path", ",", "url", ")", ":", "filename", "=", "url", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "found_file", "=", "find_file", "(", "path", ",", "filename", ",", "max_depth", "=", "0", ")", "if", "found_file", "is", "None", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", "tf", ".", "compat", ".", "v1", ".", "logging", ".", "info", "(", "\"Downloading from %s to %s.\"", "%", "(", "url", ",", "filename", ")", ")", "inprogress_filepath", "=", "filename", "+", "\".incomplete\"", "inprogress_filepath", ",", "_", "=", "urllib", ".", "request", ".", "urlretrieve", "(", "url", ",", "inprogress_filepath", ",", "reporthook", "=", "download_report_hook", ")", "# Print newline to clear the carriage return from the download progress.", "print", "(", ")", "tf", ".", "io", ".", "gfile", ".", "rename", "(", "inprogress_filepath", ",", "filename", ")", "return", "filename", "else", ":", "tf", ".", "compat", ".", "v1", ".", "logging", ".", "info", "(", "\"Already downloaded: %s (at %s).\"", "%", "(", "url", ",", "found_file", ")", ")", "return", "found_file" ]
https://github.com/IntelAI/models/blob/1d7a53ccfad3e6f0e7378c9e3c8840895d63df8c/models/language_translation/tensorflow/transformer_mlperf/inference/int8/transformer/data_download.py#L142-L166
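A hedged usage sketch for the downloader above; the directory and URL are made up, and it assumes the module's find_file and download_report_hook helpers plus TensorFlow are importable as in the source file. The .incomplete suffix plus the final rename means an interrupted download never leaves behind a file that looks finished:

data_dir = "/tmp/transformer_data"             # illustrative directory
url = "https://example.com/news-corpus.tgz"    # illustrative URL
local_path = download_from_url(data_dir, url)  # downloads, or reuses a cached copy
print("corpus available at", local_path)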
riptideio/pymodbus
c5772b35ae3f29d1947f3ab453d8d00df846459f
pymodbus/repl/client/helper.py
python
Result.__init__
(self, result)
:param result: Response of a modbus command.
:param result: Response of a modbus command.
[ ":", "param", "result", ":", "Response", "of", "a", "modbus", "command", "." ]
def __init__(self, result):
    """
    :param result: Response of a modbus command.
    """
    if isinstance(result, dict):  # Modbus response
        self.function_code = result.pop('function_code', None)
        self.data = dict(result)
    else:
        self.data = result
[ "def", "__init__", "(", "self", ",", "result", ")", ":", "if", "isinstance", "(", "result", ",", "dict", ")", ":", "# Modbus response", "self", ".", "function_code", "=", "result", ".", "pop", "(", "'function_code'", ",", "None", ")", "self", ".", "data", "=", "dict", "(", "result", ")", "else", ":", "self", ".", "data", "=", "result" ]
https://github.com/riptideio/pymodbus/blob/c5772b35ae3f29d1947f3ab453d8d00df846459f/pymodbus/repl/client/helper.py#L243-L251
hakril/PythonForWindows
61e027a678d5b87aa64fcf8a37a6661a86236589
windows/winobject/process.py
python
WinProcess.low_read_memory
(self, addr, buffer_addr, size)
return winproxy.ReadProcessMemory(self.handle, addr, lpBuffer=buffer_addr, nSize=size)
[]
def low_read_memory(self, addr, buffer_addr, size):
    if windows.current_process.bitness == 32 and self.bitness == 64:
        # OptionalExport can be None (see winproxy.py)
        if not winproxy.is_implemented(winproxy.NtWow64ReadVirtualMemory64):
            raise ValueError("NtWow64ReadVirtualMemory64 non available in ntdll: cannot read into 64bits processus")
        return winproxy.NtWow64ReadVirtualMemory64(self.handle, addr, buffer_addr, size)
    #if self.is_wow_64 and addr > 0xffffffff:
    #    return winproxy.NtWow64ReadVirtualMemory64(self.handle, addr, buffer_addr, size)
    return winproxy.ReadProcessMemory(self.handle, addr, lpBuffer=buffer_addr, nSize=size)
[ "def", "low_read_memory", "(", "self", ",", "addr", ",", "buffer_addr", ",", "size", ")", ":", "if", "windows", ".", "current_process", ".", "bitness", "==", "32", "and", "self", ".", "bitness", "==", "64", ":", "# OptionalExport can be None (see winproxy.py)", "if", "not", "winproxy", ".", "is_implemented", "(", "winproxy", ".", "NtWow64ReadVirtualMemory64", ")", ":", "raise", "ValueError", "(", "\"NtWow64ReadVirtualMemory64 non available in ntdll: cannot read into 64bits processus\"", ")", "return", "winproxy", ".", "NtWow64ReadVirtualMemory64", "(", "self", ".", "handle", ",", "addr", ",", "buffer_addr", ",", "size", ")", "#if self.is_wow_64 and addr > 0xffffffff:", "# return winproxy.NtWow64ReadVirtualMemory64(self.handle, addr, buffer_addr, size)", "return", "winproxy", ".", "ReadProcessMemory", "(", "self", ".", "handle", ",", "addr", ",", "lpBuffer", "=", "buffer_addr", ",", "nSize", "=", "size", ")" ]
https://github.com/hakril/PythonForWindows/blob/61e027a678d5b87aa64fcf8a37a6661a86236589/windows/winobject/process.py#L1038-L1046
landlab/landlab
a5dd80b8ebfd03d1ba87ef6c4368c409485f222c
landlab/components/lake_fill/lake_fill_barnes.py
python
LakeMapperBarnes._track_original_surface
(self)
return orig_surf
This helper method ensures that if flow is to be redirected, the
_redirect_flowdirs() method can still get access to this information when
it needs it. The idea here is that the operation is essentially free when
surface and fill_surface were different to start with, which should make
us faster.

Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.components import LakeMapperBarnes, FlowAccumulator
>>> mg = RasterModelGrid((5, 6), xy_spacing=2.)
>>> z = mg.add_zeros("topographic__elevation", at="node", dtype=float)
>>> z_new = mg.add_zeros("topographic__fill", at="node", dtype=float)
>>> fa = FlowAccumulator(mg)
>>> lmb = LakeMapperBarnes(mg, method='D8',
...                        surface='topographic__elevation',
...                        fill_surface='topographic__fill',
...                        redirect_flow_steepest_descent=False,
...                        track_lakes=False)
>>> orig_surf = lmb._track_original_surface()
>>> z is orig_surf
True
>>> lmb = LakeMapperBarnes(mg, method='D8',
...                        surface='topographic__elevation',
...                        fill_surface='topographic__elevation',
...                        redirect_flow_steepest_descent=False,
...                        track_lakes=False)
>>> orig_surf = lmb._track_original_surface()
>>> z is orig_surf
False
This helper method ensures that if flow is to be redirected, the _redirect_flowdirs() method can still get access to this information when it needs it. The idea here is that the operation is essentially free when surface and fill_surface were different to start with, which should make us faster.
[ "This", "helper", "method", "ensures", "that", "if", "flow", "is", "to", "be", "redirected", "the", "_redirect_flowdirs", "()", "method", "can", "still", "get", "access", "to", "this", "information", "when", "it", "needs", "it", ".", "The", "idea", "here", "is", "that", "the", "operation", "is", "essentially", "free", "when", "surface", "and", "fill_surface", "were", "different", "to", "start", "with", "which", "should", "make", "us", "faster", "." ]
def _track_original_surface(self):
    """This helper method ensures that if flow is to be redirected, the
    _redirect_flowdirs() method can still get access to this information
    when it needs it. The idea here is that the operation is essentially
    free when surface and fill_surface were different to start with,
    which should make us faster.

    Examples
    --------
    >>> from landlab import RasterModelGrid
    >>> from landlab.components import LakeMapperBarnes, FlowAccumulator
    >>> mg = RasterModelGrid((5, 6), xy_spacing=2.)
    >>> z = mg.add_zeros("topographic__elevation", at="node", dtype=float)
    >>> z_new = mg.add_zeros("topographic__fill", at="node", dtype=float)
    >>> fa = FlowAccumulator(mg)
    >>> lmb = LakeMapperBarnes(mg, method='D8',
    ...                        surface='topographic__elevation',
    ...                        fill_surface='topographic__fill',
    ...                        redirect_flow_steepest_descent=False,
    ...                        track_lakes=False)
    >>> orig_surf = lmb._track_original_surface()
    >>> z is orig_surf
    True
    >>> lmb = LakeMapperBarnes(mg, method='D8',
    ...                        surface='topographic__elevation',
    ...                        fill_surface='topographic__elevation',
    ...                        redirect_flow_steepest_descent=False,
    ...                        track_lakes=False)
    >>> orig_surf = lmb._track_original_surface()
    >>> z is orig_surf
    False
    """
    if self._inplace:
        orig_surf = self._surface.copy()
    else:
        orig_surf = self._surface
    return orig_surf
[ "def", "_track_original_surface", "(", "self", ")", ":", "if", "self", ".", "_inplace", ":", "orig_surf", "=", "self", ".", "_surface", ".", "copy", "(", ")", "else", ":", "orig_surf", "=", "self", ".", "_surface", "return", "orig_surf" ]
https://github.com/landlab/landlab/blob/a5dd80b8ebfd03d1ba87ef6c4368c409485f222c/landlab/components/lake_fill/lake_fill_barnes.py#L961-L997
jupyter/jupyter_core
66e16cada28a15c1e0dd14e00f48c5bb7578db09
jupyter_core/troubleshoot.py
python
main
()
print out useful info
print out useful info
[ "print", "out", "useful", "info" ]
def main():
    """
    print out useful info
    """
    #pylint: disable=superfluous-parens
    # args = get_args()
    environment_data = get_data()

    print('$PATH:')
    for directory in environment_data['path'].split(os.pathsep):
        print('\t' + directory)

    print('\n' + 'sys.path:')
    for directory in environment_data['sys_path']:
        print('\t' + directory)

    print('\n' + 'sys.executable:')
    print('\t' + environment_data['sys_exe'])

    print('\n' + 'sys.version:')
    if '\n' in environment_data['sys_version']:
        for data in environment_data['sys_version'].split('\n'):
            print('\t' + data)
    else:
        print('\t' + environment_data['sys_version'])

    print('\n' + 'platform.platform():')
    print('\t' + environment_data['platform'])

    if environment_data['which']:
        print('\n' + 'which -a jupyter:')
        for line in environment_data['which'].split('\n'):
            print('\t' + line)

    if environment_data['where']:
        print('\n' + 'where jupyter:')
        for line in environment_data['where'].split('\n'):
            print('\t' + line)

    if environment_data['pip']:
        print('\n' + 'pip list:')
        for package in environment_data['pip'].split('\n'):
            print('\t' + package)

    if environment_data['conda']:
        print('\n' + 'conda list:')
        for package in environment_data['conda'].split('\n'):
            print('\t' + package)

    if environment_data['conda-env']:
        print('\n' + 'conda env:')
        for package in environment_data['conda-env'].split('\n'):
            print('\t' + package)
[ "def", "main", "(", ")", ":", "#pylint: disable=superfluous-parens", "# args = get_args()", "environment_data", "=", "get_data", "(", ")", "print", "(", "'$PATH:'", ")", "for", "directory", "in", "environment_data", "[", "'path'", "]", ".", "split", "(", "os", ".", "pathsep", ")", ":", "print", "(", "'\\t'", "+", "directory", ")", "print", "(", "'\\n'", "+", "'sys.path:'", ")", "for", "directory", "in", "environment_data", "[", "'sys_path'", "]", ":", "print", "(", "'\\t'", "+", "directory", ")", "print", "(", "'\\n'", "+", "'sys.executable:'", ")", "print", "(", "'\\t'", "+", "environment_data", "[", "'sys_exe'", "]", ")", "print", "(", "'\\n'", "+", "'sys.version:'", ")", "if", "'\\n'", "in", "environment_data", "[", "'sys_version'", "]", ":", "for", "data", "in", "environment_data", "[", "'sys_version'", "]", ".", "split", "(", "'\\n'", ")", ":", "print", "(", "'\\t'", "+", "data", ")", "else", ":", "print", "(", "'\\t'", "+", "environment_data", "[", "'sys_version'", "]", ")", "print", "(", "'\\n'", "+", "'platform.platform():'", ")", "print", "(", "'\\t'", "+", "environment_data", "[", "'platform'", "]", ")", "if", "environment_data", "[", "'which'", "]", ":", "print", "(", "'\\n'", "+", "'which -a jupyter:'", ")", "for", "line", "in", "environment_data", "[", "'which'", "]", ".", "split", "(", "'\\n'", ")", ":", "print", "(", "'\\t'", "+", "line", ")", "if", "environment_data", "[", "'where'", "]", ":", "print", "(", "'\\n'", "+", "'where jupyter:'", ")", "for", "line", "in", "environment_data", "[", "'where'", "]", ".", "split", "(", "'\\n'", ")", ":", "print", "(", "'\\t'", "+", "line", ")", "if", "environment_data", "[", "'pip'", "]", ":", "print", "(", "'\\n'", "+", "'pip list:'", ")", "for", "package", "in", "environment_data", "[", "'pip'", "]", ".", "split", "(", "'\\n'", ")", ":", "print", "(", "'\\t'", "+", "package", ")", "if", "environment_data", "[", "'conda'", "]", ":", "print", "(", "'\\n'", "+", "'conda list:'", ")", "for", "package", "in", "environment_data", "[", "'conda'", "]", ".", "split", "(", "'\\n'", ")", ":", "print", "(", "'\\t'", "+", "package", ")", "if", "environment_data", "[", "'conda-env'", "]", ":", "print", "(", "'\\n'", "+", "'conda env:'", ")", "for", "package", "in", "environment_data", "[", "'conda-env'", "]", ".", "split", "(", "'\\n'", ")", ":", "print", "(", "'\\t'", "+", "package", ")" ]
https://github.com/jupyter/jupyter_core/blob/66e16cada28a15c1e0dd14e00f48c5bb7578db09/jupyter_core/troubleshoot.py#L46-L98
coursera/dataduct
83aea17c1b1abd376270bc8fd4a180ce09181cc5
examples/resources/scripts/s3_profiler.py
python
recurse_directory
(directory_path)
return result
Recursively walk directories and output basic stats on files

Args:
    directory_path(str): Path to the directory which is read

Returns:
    result(list of tuples): (filename, count of lines in file, size of file)
Recursively walk directories and output basic stats on files
[ "Recursively", "walk", "directories", "and", "output", "basic", "stats", "on", "files" ]
def recurse_directory(directory_path):
    """Recursively walk directories and output basic stats on files

    Args:
        directory_path(str): Path to the directory which is read

    Returns:
        result(list of tuples): (filename, count of lines in file, size of file)
    """
    result = []
    for root, _, files in walk(directory_path):
        for f in files:
            filename = join(root, f)
            result.append((
                filename,
                run_command(['wc', '-l', filename]).split(' ').pop(0),
                str(stat(filename).st_size),
            ))
    return result
[ "def", "recurse_directory", "(", "directory_path", ")", ":", "result", "=", "[", "]", "for", "root", ",", "_", ",", "files", "in", "walk", "(", "directory_path", ")", ":", "for", "f", "in", "files", ":", "filename", "=", "join", "(", "root", ",", "f", ")", "result", ".", "append", "(", "(", "filename", ",", "run_command", "(", "[", "'wc'", ",", "'-l'", ",", "filename", "]", ")", ".", "split", "(", "' '", ")", ".", "pop", "(", "0", ")", ",", "str", "(", "stat", "(", "filename", ")", ".", "st_size", ")", ",", ")", ")", "return", "result" ]
https://github.com/coursera/dataduct/blob/83aea17c1b1abd376270bc8fd4a180ce09181cc5/examples/resources/scripts/s3_profiler.py#L26-L44
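A minimal, hypothetical call to the profiler helper above (the directory is illustrative). Because line counts come from shelling out to wc -l, this only works on POSIX systems:

for filename, line_count, size_bytes in recurse_directory('/tmp/logs'):
    print('%s: %s lines, %s bytes' % (filename, line_count, size_bytes))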
yt-project/yt
dc7b24f9b266703db4c843e329c6c8644d47b824
yt/frontends/gadget_fof/data_structures.py
python
GadgetFOFHaloParticleIndex._get_halo_values
(self, ptype, identifiers, fields, f=None)
return data
Get field values for halos. IDs are likely to be
sequential (or at least monotonic), but not necessarily
all within the same file.

This does not do much to minimize file i/o, but with
halos randomly distributed across files, there's not
much more we can do.
Get field values for halos. IDs are likely to be sequential (or at least monotonic), but not necessarily all within the same file.
[ "Get", "field", "values", "for", "halos", ".", "IDs", "are", "likely", "to", "be", "sequential", "(", "or", "at", "least", "monotonic", ")", "but", "not", "necessarily", "all", "within", "the", "same", "file", "." ]
def _get_halo_values(self, ptype, identifiers, fields, f=None):
    """
    Get field values for halos. IDs are likely to be
    sequential (or at least monotonic), but not necessarily
    all within the same file.

    This does not do much to minimize file i/o, but with
    halos randomly distributed across files, there's not
    much more we can do.
    """
    # if a file is already open, don't open it again
    filename = None if f is None else f.filename
    data = defaultdict(lambda: np.empty(identifiers.size))
    i_scalars = self._get_halo_file_indices(ptype, identifiers)
    for i_scalar in np.unique(i_scalars):
        target = i_scalars == i_scalar
        scalar_indices = identifiers - self._halo_index_start[ptype][i_scalar]

        # only open file if it's not already open
        my_f = (
            f
            if self.data_files[i_scalar].filename == filename
            else h5py.File(self.data_files[i_scalar].filename, mode="r")
        )

        for field in fields:
            data[field][target] = my_f[os.path.join(ptype, field)][()][
                scalar_indices[target]
            ]

        if self.data_files[i_scalar].filename != filename:
            my_f.close()

    return data
[ "def", "_get_halo_values", "(", "self", ",", "ptype", ",", "identifiers", ",", "fields", ",", "f", "=", "None", ")", ":", "# if a file is already open, don't open it again", "filename", "=", "None", "if", "f", "is", "None", "else", "f", ".", "filename", "data", "=", "defaultdict", "(", "lambda", ":", "np", ".", "empty", "(", "identifiers", ".", "size", ")", ")", "i_scalars", "=", "self", ".", "_get_halo_file_indices", "(", "ptype", ",", "identifiers", ")", "for", "i_scalar", "in", "np", ".", "unique", "(", "i_scalars", ")", ":", "target", "=", "i_scalars", "==", "i_scalar", "scalar_indices", "=", "identifiers", "-", "self", ".", "_halo_index_start", "[", "ptype", "]", "[", "i_scalar", "]", "# only open file if it's not already open", "my_f", "=", "(", "f", "if", "self", ".", "data_files", "[", "i_scalar", "]", ".", "filename", "==", "filename", "else", "h5py", ".", "File", "(", "self", ".", "data_files", "[", "i_scalar", "]", ".", "filename", ",", "mode", "=", "\"r\"", ")", ")", "for", "field", "in", "fields", ":", "data", "[", "field", "]", "[", "target", "]", "=", "my_f", "[", "os", ".", "path", ".", "join", "(", "ptype", ",", "field", ")", "]", "[", "(", ")", "]", "[", "scalar_indices", "[", "target", "]", "]", "if", "self", ".", "data_files", "[", "i_scalar", "]", ".", "filename", "!=", "filename", ":", "my_f", ".", "close", "(", ")", "return", "data" ]
https://github.com/yt-project/yt/blob/dc7b24f9b266703db4c843e329c6c8644d47b824/yt/frontends/gadget_fof/data_structures.py#L386-L421
ganeti/ganeti
d340a9ddd12f501bef57da421b5f9b969a4ba905
lib/client/gnt_cluster.py
python
_ReadIntentToUpgrade
()
return (contents[0], contents[1])
Read the file documenting the intent to upgrade the cluster.

@rtype: (string, string) or (None, None)
@return: (old version, version to upgrade to), if the file exists,
    and (None, None) otherwise.
Read the file documenting the intent to upgrade the cluster.
[ "Read", "the", "file", "documenting", "the", "intent", "to", "upgrade", "the", "cluster", "." ]
def _ReadIntentToUpgrade():
  """Read the file documenting the intent to upgrade the cluster.

  @rtype: (string, string) or (None, None)
  @return: (old version, version to upgrade to), if the file exists,
      and (None, None) otherwise.

  """
  if not os.path.isfile(pathutils.INTENT_TO_UPGRADE):
    return (None, None)

  contentstring = utils.ReadFile(pathutils.INTENT_TO_UPGRADE)
  contents = utils.UnescapeAndSplit(contentstring)
  if len(contents) != 3:
    # file syntactically mal-formed
    return (None, None)
  return (contents[0], contents[1])
[ "def", "_ReadIntentToUpgrade", "(", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "pathutils", ".", "INTENT_TO_UPGRADE", ")", ":", "return", "(", "None", ",", "None", ")", "contentstring", "=", "utils", ".", "ReadFile", "(", "pathutils", ".", "INTENT_TO_UPGRADE", ")", "contents", "=", "utils", ".", "UnescapeAndSplit", "(", "contentstring", ")", "if", "len", "(", "contents", ")", "!=", "3", ":", "# file syntactically mal-formed", "return", "(", "None", ",", "None", ")", "return", "(", "contents", "[", "0", "]", ",", "contents", "[", "1", "]", ")" ]
https://github.com/ganeti/ganeti/blob/d340a9ddd12f501bef57da421b5f9b969a4ba905/lib/client/gnt_cluster.py#L2144-L2160
PacktPublishing/Mastering-OpenCV-4-with-Python
ea5372c6d8758ebc56ef5c775f9785d4427f81e6
Chapter08/01-chapter-content/contours_shape_recognition.py
python
array_to_tuple
(arr)
return tuple(arr.reshape(1, -1)[0])
Converts array to tuple
Converts array to tuple
[ "Converts", "array", "to", "tuple" ]
def array_to_tuple(arr):
    """Converts array to tuple"""

    return tuple(arr.reshape(1, -1)[0])
[ "def", "array_to_tuple", "(", "arr", ")", ":", "return", "tuple", "(", "arr", ".", "reshape", "(", "1", ",", "-", "1", ")", "[", "0", "]", ")" ]
https://github.com/PacktPublishing/Mastering-OpenCV-4-with-Python/blob/ea5372c6d8758ebc56ef5c775f9785d4427f81e6/Chapter08/01-chapter-content/contours_shape_recognition.py#L64-L67
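A tiny demonstration of the conversion, using a point in the nested shape OpenCV contours use (the values are arbitrary):

import numpy as np

pt = np.array([[140, 60]])   # a single contour point as OpenCV returns it
print(array_to_tuple(pt))    # (140, 60) -- a form cv2 drawing calls accept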
exodrifter/unity-python
bef6e4e9ddfbbf1eaf7acbbb973e9aa3dd64a20d
Lib/mailbox.py
python
_create_temporary
(path)
return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()), socket.gethostname(), os.getpid()))
Create a temp file based on path and open for reading and writing.
Create a temp file based on path and open for reading and writing.
[ "Create", "a", "temp", "file", "based", "on", "path", "and", "open", "for", "reading", "and", "writing", "." ]
def _create_temporary(path):
    """Create a temp file based on path and open for reading and writing."""
    return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()),
                                              socket.gethostname(),
                                              os.getpid()))
[ "def", "_create_temporary", "(", "path", ")", ":", "return", "_create_carefully", "(", "'%s.%s.%s.%s'", "%", "(", "path", ",", "int", "(", "time", ".", "time", "(", ")", ")", ",", "socket", ".", "gethostname", "(", ")", ",", "os", ".", "getpid", "(", ")", ")", ")" ]
https://github.com/exodrifter/unity-python/blob/bef6e4e9ddfbbf1eaf7acbbb973e9aa3dd64a20d/Lib/mailbox.py#L2031-L2035
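The uniqueness of the generated name comes from combining the current time, host name and process id, so two processes (or two calls in different seconds) cannot collide. A sketch of the pattern it produces (values are illustrative):

import os
import socket
import time

path = '/var/mail/inbox'
print('%s.%s.%s.%s' % (path, int(time.time()), socket.gethostname(), os.getpid()))
# e.g. /var/mail/inbox.1700000000.myhost.4242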
selfboot/LeetCode
473c0c5451651140d75cbd143309c51cd8fe1cf1
String/67_AddBinary.py
python
Solution_2.addBinary
(self, a, b)
return result
Iterative way.
Iterative way.
[ "Iterative", "way", "." ]
def addBinary(self, a, b):
    """Iterative way.
    """
    carry_in, index = '0', 0
    result = ""
    while index < max(len(a), len(b)) or carry_in == '1':
        num_a = a[-1 - index] if index < len(a) else '0'
        num_b = b[-1 - index] if index < len(b) else '0'
        val = int(num_a) + int(num_b) + int(carry_in)
        result = str(val % 2) + result
        carry_in = '1' if val > 1 else '0'
        index += 1
    return result
[ "def", "addBinary", "(", "self", ",", "a", ",", "b", ")", ":", "carry_in", ",", "index", "=", "'0'", ",", "0", "result", "=", "\"\"", "while", "index", "<", "max", "(", "len", "(", "a", ")", ",", "len", "(", "b", ")", ")", "or", "carry_in", "==", "'1'", ":", "num_a", "=", "a", "[", "-", "1", "-", "index", "]", "if", "index", "<", "len", "(", "a", ")", "else", "'0'", "num_b", "=", "b", "[", "-", "1", "-", "index", "]", "if", "index", "<", "len", "(", "b", ")", "else", "'0'", "val", "=", "int", "(", "num_a", ")", "+", "int", "(", "num_b", ")", "+", "int", "(", "carry_in", ")", "result", "=", "str", "(", "val", "%", "2", ")", "+", "result", "carry_in", "=", "'1'", "if", "val", ">", "1", "else", "'0'", "index", "+=", "1", "return", "result" ]
https://github.com/selfboot/LeetCode/blob/473c0c5451651140d75cbd143309c51cd8fe1cf1/String/67_AddBinary.py#L23-L37
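A worked example of the carry loop: 1011 (11) plus 110 (6) is 10001 (17). The final pass runs past both strings purely to flush the remaining carry, which is why the loop condition also checks carry_in == '1':

s = Solution_2()
print(s.addBinary('1011', '110'))  # '10001'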
pyparallel/pyparallel
11e8c6072d48c8f13641925d17b147bf36ee0ba3
Lib/site-packages/pandas-0.17.0-py3.3-win-amd64.egg/pandas/compat/chainmap_impl.py
python
ChainMap.parents
(self)
return self.__class__(*self.maps[1:])
New ChainMap from maps[1:].
New ChainMap from maps[1:].
[ "New", "ChainMap", "from", "maps", "[", "1", ":", "]", "." ]
def parents(self):  # like Django's Context.pop()
    'New ChainMap from maps[1:].'
    return self.__class__(*self.maps[1:])
[ "def", "parents", "(", "self", ")", ":", "# like Django's Context.pop()", "return", "self", ".", "__class__", "(", "*", "self", ".", "maps", "[", "1", ":", "]", ")" ]
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/site-packages/pandas-0.17.0-py3.3-win-amd64.egg/pandas/compat/chainmap_impl.py#L107-L109
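The standard-library collections.ChainMap exposes the same property, so its behavior is easy to check directly:

from collections import ChainMap

child = {'a': 1}
parent = {'a': 0, 'b': 2}
cm = ChainMap(child, parent)
print(cm['a'])           # 1 -- the child map shadows the parent
print(cm.parents['a'])   # 0 -- parents drops maps[0], like Django's Context.pop()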
mozilla/zamboni
14b1a44658e47b9f048962fa52dbf00a3beaaf30
mkt/reviewers/models.py
python
ReviewerScore.all_users_by_score
(cls)
return scores
Returns reviewers ordered by highest total points first.
Returns reviewers ordered by highest total points first.
[ "Returns", "reviewers", "ordered", "by", "highest", "total", "points", "first", "." ]
def all_users_by_score(cls):
    """
    Returns reviewers ordered by highest total points first.
    """
    query = cls._leaderboard_query()
    scores = []

    for row in query:
        user_id, name, total = row
        user_level = len(mkt.REVIEWED_LEVELS) - 1
        for i, level in enumerate(mkt.REVIEWED_LEVELS):
            if total < level['points']:
                user_level = i - 1
                break

        # Only show level if it changes.
        if user_level < 0:
            level = ''
        else:
            level = mkt.REVIEWED_LEVELS[user_level]['name']

        scores.append({
            'user_id': user_id,
            'name': name,
            'total': int(total),
            'level': level,
        })

    prev = None
    for score in reversed(scores):
        if score['level'] == prev:
            score['level'] = ''
        else:
            prev = score['level']

    return scores
[ "def", "all_users_by_score", "(", "cls", ")", ":", "query", "=", "cls", ".", "_leaderboard_query", "(", ")", "scores", "=", "[", "]", "for", "row", "in", "query", ":", "user_id", ",", "name", ",", "total", "=", "row", "user_level", "=", "len", "(", "mkt", ".", "REVIEWED_LEVELS", ")", "-", "1", "for", "i", ",", "level", "in", "enumerate", "(", "mkt", ".", "REVIEWED_LEVELS", ")", ":", "if", "total", "<", "level", "[", "'points'", "]", ":", "user_level", "=", "i", "-", "1", "break", "# Only show level if it changes.", "if", "user_level", "<", "0", ":", "level", "=", "''", "else", ":", "level", "=", "mkt", ".", "REVIEWED_LEVELS", "[", "user_level", "]", "[", "'name'", "]", "scores", ".", "append", "(", "{", "'user_id'", ":", "user_id", ",", "'name'", ":", "name", ",", "'total'", ":", "int", "(", "total", ")", ",", "'level'", ":", "level", ",", "}", ")", "prev", "=", "None", "for", "score", "in", "reversed", "(", "scores", ")", ":", "if", "score", "[", "'level'", "]", "==", "prev", ":", "score", "[", "'level'", "]", "=", "''", "else", ":", "prev", "=", "score", "[", "'level'", "]", "return", "scores" ]
https://github.com/mozilla/zamboni/blob/14b1a44658e47b9f048962fa52dbf00a3beaaf30/mkt/reviewers/models.py#L327-L362
mihaip/readerisdead
0e35cf26e88f27e0a07432182757c1ce230f6936
third_party/web/wsgiserver/__init__.py
python
WSGIGateway_u0.get_environ
(self)
return env
Return a new environ dict targeting the given wsgi.version
Return a new environ dict targeting the given wsgi.version
[ "Return", "a", "new", "environ", "dict", "targeting", "the", "given", "wsgi", ".", "version" ]
def get_environ(self):
    """Return a new environ dict targeting the given wsgi.version"""
    req = self.req
    env_10 = WSGIGateway_10.get_environ(self)
    env = dict([(k.decode('ISO-8859-1'), v) for k, v in env_10.iteritems()])
    env[u'wsgi.version'] = ('u', 0)

    # Request-URI
    env.setdefault(u'wsgi.url_encoding', u'utf-8')
    try:
        for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
            env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
    except UnicodeDecodeError:
        # Fall back to latin 1 so apps can transcode if needed.
        env[u'wsgi.url_encoding'] = u'ISO-8859-1'
        for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
            env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])

    for k, v in sorted(env.items()):
        if isinstance(v, str) and k not in ('REQUEST_URI', 'wsgi.input'):
            env[k] = v.decode('ISO-8859-1')

    return env
[ "def", "get_environ", "(", "self", ")", ":", "req", "=", "self", ".", "req", "env_10", "=", "WSGIGateway_10", ".", "get_environ", "(", "self", ")", "env", "=", "dict", "(", "[", "(", "k", ".", "decode", "(", "'ISO-8859-1'", ")", ",", "v", ")", "for", "k", ",", "v", "in", "env_10", ".", "iteritems", "(", ")", "]", ")", "env", "[", "u'wsgi.version'", "]", "=", "(", "'u'", ",", "0", ")", "# Request-URI", "env", ".", "setdefault", "(", "u'wsgi.url_encoding'", ",", "u'utf-8'", ")", "try", ":", "for", "key", "in", "[", "u\"PATH_INFO\"", ",", "u\"SCRIPT_NAME\"", ",", "u\"QUERY_STRING\"", "]", ":", "env", "[", "key", "]", "=", "env_10", "[", "str", "(", "key", ")", "]", ".", "decode", "(", "env", "[", "u'wsgi.url_encoding'", "]", ")", "except", "UnicodeDecodeError", ":", "# Fall back to latin 1 so apps can transcode if needed.", "env", "[", "u'wsgi.url_encoding'", "]", "=", "u'ISO-8859-1'", "for", "key", "in", "[", "u\"PATH_INFO\"", ",", "u\"SCRIPT_NAME\"", ",", "u\"QUERY_STRING\"", "]", ":", "env", "[", "key", "]", "=", "env_10", "[", "str", "(", "key", ")", "]", ".", "decode", "(", "env", "[", "u'wsgi.url_encoding'", "]", ")", "for", "k", ",", "v", "in", "sorted", "(", "env", ".", "items", "(", ")", ")", ":", "if", "isinstance", "(", "v", ",", "str", ")", "and", "k", "not", "in", "(", "'REQUEST_URI'", ",", "'wsgi.input'", ")", ":", "env", "[", "k", "]", "=", "v", ".", "decode", "(", "'ISO-8859-1'", ")", "return", "env" ]
https://github.com/mihaip/readerisdead/blob/0e35cf26e88f27e0a07432182757c1ce230f6936/third_party/web/wsgiserver/__init__.py#L2157-L2179
deepgram/kur
fd0c120e50815c1e5be64e5dde964dcd47234556
kur/__main__.py
python
evaluate
(args)
Evaluates a model.
Evaluates a model.
[ "Evaluates", "a", "model", "." ]
def evaluate(args):
    """ Evaluates a model.
    """
    spec = parse_kurfile(args.kurfile, args.engine)
    func = spec.get_evaluation_function()
    func(step=args.step)
[ "def", "evaluate", "(", "args", ")", ":", "spec", "=", "parse_kurfile", "(", "args", ".", "kurfile", ",", "args", ".", "engine", ")", "func", "=", "spec", ".", "get_evaluation_function", "(", ")", "func", "(", "step", "=", "args", ".", "step", ")" ]
https://github.com/deepgram/kur/blob/fd0c120e50815c1e5be64e5dde964dcd47234556/kur/__main__.py#L76-L81
IronLanguages/ironpython3
7a7bb2a872eeab0d1009fc8a6e24dca43f65b693
Src/StdLib/Lib/calendar.py
python
LocaleHTMLCalendar.__init__
(self, firstweekday=0, locale=None)
[]
def __init__(self, firstweekday=0, locale=None):
    HTMLCalendar.__init__(self, firstweekday)
    if locale is None:
        locale = _locale.getdefaultlocale()
    self.locale = locale
[ "def", "__init__", "(", "self", ",", "firstweekday", "=", "0", ",", "locale", "=", "None", ")", ":", "HTMLCalendar", ".", "__init__", "(", "self", ",", "firstweekday", ")", "if", "locale", "is", "None", ":", "locale", "=", "_locale", ".", "getdefaultlocale", "(", ")", "self", ".", "locale", "=", "locale" ]
https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/StdLib/Lib/calendar.py#L538-L542
googleapis/python-ndb
e780c81cde1016651afbfcad8180d9912722cf1b
google/cloud/ndb/tasklets.py
python
tasklet
(wrapped)
return tasklet_wrapper
A decorator to turn a function or method into a tasklet.

Calling a tasklet will return a :class:`~Future` instance which can be
used to get the eventual return value of the tasklet.

For more information on tasklets and cooperative multitasking, see the
main documentation.

Args:
    wrapped (Callable): The wrapped function.
A decorator to turn a function or method into a tasklet.
[ "A", "decorator", "to", "turn", "a", "function", "or", "method", "into", "a", "tasklet", "." ]
def tasklet(wrapped):
    """
    A decorator to turn a function or method into a tasklet.

    Calling a tasklet will return a :class:`~Future` instance which can be
    used to get the eventual return value of the tasklet.

    For more information on tasklets and cooperative multitasking, see the
    main documentation.

    Args:
        wrapped (Callable): The wrapped function.
    """

    @functools.wraps(wrapped)
    def tasklet_wrapper(*args, **kwargs):
        # Avoid Python 2.7 circular import
        from google.cloud.ndb import context as context_module

        # The normal case is that the wrapped function is a generator function
        # that returns a generator when called. We also support the case that
        # the user has wrapped a regular function with the tasklet decorator.
        # In this case, we fail to realize an actual tasklet, but we go ahead
        # and create a future object and set the result to the function's
        # return value so that from the user perspective there is no problem.
        # This permissive behavior is inherited from legacy NDB.
        context = context_module.get_context()

        try:
            returned = wrapped(*args, **kwargs)
        except Return as stop:
            # If wrapped is a regular function and the function uses "raise
            # Return(result)" pattern rather than just returning the result,
            # then we'll extract the result from the StopIteration exception.
            returned = _get_return_value(stop)

        if isinstance(returned, types.GeneratorType):
            # We have a tasklet, start it
            future = _TaskletFuture(returned, context, info=wrapped.__name__)
            future._advance_tasklet()
        else:
            # We don't have a tasklet, but we fake it anyway
            future = Future(info=wrapped.__name__)
            future.set_result(returned)

        return future

    return tasklet_wrapper
[ "def", "tasklet", "(", "wrapped", ")", ":", "@", "functools", ".", "wraps", "(", "wrapped", ")", "def", "tasklet_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Avoid Python 2.7 circular import", "from", "google", ".", "cloud", ".", "ndb", "import", "context", "as", "context_module", "# The normal case is that the wrapped function is a generator function", "# that returns a generator when called. We also support the case that", "# the user has wrapped a regular function with the tasklet decorator.", "# In this case, we fail to realize an actual tasklet, but we go ahead", "# and create a future object and set the result to the function's", "# return value so that from the user perspective there is no problem.", "# This permissive behavior is inherited from legacy NDB.", "context", "=", "context_module", ".", "get_context", "(", ")", "try", ":", "returned", "=", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "Return", "as", "stop", ":", "# If wrapped is a regular function and the function uses \"raise", "# Return(result)\" pattern rather than just returning the result,", "# then we'll extract the result from the StopIteration exception.", "returned", "=", "_get_return_value", "(", "stop", ")", "if", "isinstance", "(", "returned", ",", "types", ".", "GeneratorType", ")", ":", "# We have a tasklet, start it", "future", "=", "_TaskletFuture", "(", "returned", ",", "context", ",", "info", "=", "wrapped", ".", "__name__", ")", "future", ".", "_advance_tasklet", "(", ")", "else", ":", "# We don't have a tasklet, but we fake it anyway", "future", "=", "Future", "(", "info", "=", "wrapped", ".", "__name__", ")", "future", ".", "set_result", "(", "returned", ")", "return", "future", "return", "tasklet_wrapper" ]
https://github.com/googleapis/python-ndb/blob/e780c81cde1016651afbfcad8180d9912722cf1b/google/cloud/ndb/tasklets.py#L478-L526
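A hedged usage sketch mirroring the "raise Return(result)" pattern the decorator supports; fetch_pair and the two keys are illustrative, not from the source, and it assumes google-cloud-ndb is installed with an active context:

from google.cloud.ndb import tasklets

@tasklets.tasklet
def fetch_pair(user_key, account_key):
    # Each yield suspends this tasklet until the future resolves,
    # letting other tasklets run in the meantime.
    user = yield user_key.get_async()
    account = yield account_key.get_async()
    raise tasklets.Return((user, account))

future = fetch_pair(user_key, account_key)  # returns a Future immediately
user, account = future.result()             # blocks until the tasklet finishes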
Yelp/paasta
6c08c04a577359509575c794b973ea84d72accf9
paasta_tools/contrib/graceful_container_drain.py
python
_timeout
(process)
Helper function for _run. It terminates the process. Doesn't raise OSError, if we try to terminate a non-existing process as there can be a very small window between poll() and kill()
Helper function for _run. It terminates the process. Doesn't raise OSError, if we try to terminate a non-existing process as there can be a very small window between poll() and kill()
[ "Helper", "function", "for", "_run", ".", "It", "terminates", "the", "process", ".", "Doesn", "t", "raise", "OSError", "if", "we", "try", "to", "terminate", "a", "non", "-", "existing", "process", "as", "there", "can", "be", "a", "very", "small", "window", "between", "poll", "()", "and", "kill", "()" ]
def _timeout(process):
    """Helper function for _run.
    It terminates the process.
    Doesn't raise OSError, if we try to terminate a non-existing
    process as there can be a very small window between poll() and kill()
    """
    if process.poll() is None:
        try:
            # sending SIGKILL to the process
            process.kill()
        except OSError as e:
            # No such process error
            # The process could have been terminated meanwhile
            if e.errno != errno.ESRCH:
                raise
[ "def", "_timeout", "(", "process", ")", ":", "if", "process", ".", "poll", "(", ")", "is", "None", ":", "try", ":", "# sending SIGKILL to the process", "process", ".", "kill", "(", ")", "except", "OSError", "as", "e", ":", "# No such process error", "# The process could have been terminated meanwhile", "if", "e", ".", "errno", "!=", "errno", ".", "ESRCH", ":", "raise" ]
https://github.com/Yelp/paasta/blob/6c08c04a577359509575c794b973ea84d72accf9/paasta_tools/contrib/graceful_container_drain.py#L16-L29
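The usual pattern around a helper like this is a threading.Timer watchdog that kills the child if it outlives a deadline; a sketch using only the standard library (the command and the 30-second limit are illustrative):

import subprocess
from threading import Timer

proc = subprocess.Popen(['sleep', '300'])
watchdog = Timer(30, _timeout, [proc])  # calls _timeout(proc) after 30s
watchdog.start()
try:
    proc.wait()
finally:
    watchdog.cancel()  # harmless if the timer already fired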
SmallVagetable/machine_learning_python
a9cc8074adf91567407a5cb70a4c17b64f299fbc
decision_tree/tree_id3.py
python
DTreeID3.train
(self, datasets, node)
[]
def train(self, datasets, node):
    labely = datasets.columns[-1]
    # If all samples belong to the same output class Di, return a
    # single-node tree T labelled with class Di.
    if len(datasets[labely].value_counts()) == 1:
        node.data = datasets[labely]
        node.y = datasets[labely][0]
        return
    # If the feature set is empty, return a single-node tree T labelled
    # with the majority output class in D.
    if len(datasets.columns[:-1]) == 0:
        node.data = datasets[labely]
        node.y = datasets[labely].value_counts().index[0]
        return
    # Compute the information gain of each of the n features in A with
    # respect to the output D, and pick the feature Ag with the largest gain.
    gainmaxi, gainmax = self.info_gain_train(datasets, datasets.columns)
    # If the information gain of Ag is below the threshold epsilon, return a
    # single-node tree T labelled with the majority output class in D.
    if gainmax <= self.epsilon:
        node.data = datasets[labely]
        node.y = datasets[labely].value_counts().index[0]
        return
    # Split the samples D into subsets Di by the distinct values Agi of
    # feature Ag; each subset becomes a child node for value Agi. Return
    # the tree T with the added nodes.
    vc = datasets[datasets.columns[gainmaxi]].value_counts()
    for Di in vc.index:
        node.label = gainmaxi
        child = Node(Di)
        node.append(child)
        new_datasets = pd.DataFrame([list(i) for i in datasets.values if i[gainmaxi]==Di],
                                    columns=datasets.columns)
        self.train(new_datasets, child)
[ "def", "train", "(", "self", ",", "datasets", ",", "node", ")", ":", "labely", "=", "datasets", ".", "columns", "[", "-", "1", "]", "# 判断样本是否为同一类输出Di,如果是则返回单节点树T。标记类别为Di", "if", "len", "(", "datasets", "[", "labely", "]", ".", "value_counts", "(", ")", ")", "==", "1", ":", "node", ".", "data", "=", "datasets", "[", "labely", "]", "node", ".", "y", "=", "datasets", "[", "labely", "]", "[", "0", "]", "return", "# 判断特征是否为空,如果是则返回单节点树T,标记类别为样本中输出类别D实例数最多的类别", "if", "len", "(", "datasets", ".", "columns", "[", ":", "-", "1", "]", ")", "==", "0", ":", "node", ".", "data", "=", "datasets", "[", "labely", "]", "node", ".", "y", "=", "datasets", "[", "labely", "]", ".", "value_counts", "(", ")", ".", "index", "[", "0", "]", "return", "# 计算A中的各个特征(一共n个)对输出D的信息增益,选择信息增益最大的特征Ag。", "gainmaxi", ",", "gainmax", "=", "self", ".", "info_gain_train", "(", "datasets", ",", "datasets", ".", "columns", ")", "# 如果Ag的信息增益小于阈值ε,则返回单节点树T,标记类别为样本中输出类别D实例数最多的类别。", "if", "gainmax", "<=", "self", ".", "epsilon", ":", "node", ".", "data", "=", "datasets", "[", "labely", "]", "node", ".", "y", "=", "datasets", "[", "labely", "]", ".", "value_counts", "(", ")", ".", "index", "[", "0", "]", "return", "# 按特征Ag的不同取值Agi将对应的样本输出D分成不同的类别Di。每个类别产生一个子节点。对应特征值为Agi。返回增加了节点的数T。", "vc", "=", "datasets", "[", "datasets", ".", "columns", "[", "gainmaxi", "]", "]", ".", "value_counts", "(", ")", "for", "Di", "in", "vc", ".", "index", ":", "node", ".", "label", "=", "gainmaxi", "child", "=", "Node", "(", "Di", ")", "node", ".", "append", "(", "child", ")", "new_datasets", "=", "pd", ".", "DataFrame", "(", "[", "list", "(", "i", ")", "for", "i", "in", "datasets", ".", "values", "if", "i", "[", "gainmaxi", "]", "==", "Di", "]", ",", "columns", "=", "datasets", ".", "columns", ")", "self", ".", "train", "(", "new_datasets", ",", "child", ")" ]
https://github.com/SmallVagetable/machine_learning_python/blob/a9cc8074adf91567407a5cb70a4c17b64f299fbc/decision_tree/tree_id3.py#L79-L105
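For reference, the quantity the split selects on is information gain, g(D, A) = H(D) - H(D|A). The following is a generic reimplementation for illustration, not the repo's info_gain_train:

import numpy as np
import pandas as pd

def entropy(series):
    p = series.value_counts(normalize=True)
    return float(-(p * np.log2(p)).sum())

def info_gain(df, feature, target):
    h_d = entropy(df[target])                           # H(D)
    cond = df.groupby(feature)[target].apply(entropy)   # H(D | A = a)
    weights = df[feature].value_counts(normalize=True)  # P(A = a)
    return h_d - float((weights * cond).sum())          # g(D, A)

df = pd.DataFrame({'outlook': ['sunny', 'sunny', 'rain', 'rain'],
                   'play':    ['no',    'no',    'yes',  'yes']})
print(info_gain(df, 'outlook', 'play'))  # 1.0 -- outlook fully determines play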
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pip/_vendor/html5lib/_trie/datrie.py
python
Trie.__init__
(self, data)
[]
def __init__(self, data):
    chars = set()
    for key in data.keys():
        if not isinstance(key, text_type):
            raise TypeError("All keys must be strings")
        for char in key:
            chars.add(char)

    self._data = DATrie("".join(chars))
    for key, value in data.items():
        self._data[key] = value
[ "def", "__init__", "(", "self", ",", "data", ")", ":", "chars", "=", "set", "(", ")", "for", "key", "in", "data", ".", "keys", "(", ")", ":", "if", "not", "isinstance", "(", "key", ",", "text_type", ")", ":", "raise", "TypeError", "(", "\"All keys must be strings\"", ")", "for", "char", "in", "key", ":", "chars", ".", "add", "(", "char", ")", "self", ".", "_data", "=", "DATrie", "(", "\"\"", ".", "join", "(", "chars", ")", ")", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "self", ".", "_data", "[", "key", "]", "=", "value" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pip/_vendor/html5lib/_trie/datrie.py#L10-L20
rhinstaller/anaconda
63edc8680f1b05cbfe11bef28703acba808c5174
pyanaconda/core/configuration/storage_constraints.py
python
StorageConstraints._convert_device_types
(self, value)
return set(map(DeviceType.from_name, value.split()))
Convert the given value into a set of device types.
Convert the given value into a set of device types.
[ "Convert", "the", "given", "value", "into", "a", "set", "of", "device", "types", "." ]
def _convert_device_types(self, value):
    """Convert the given value into a set of device types."""
    return set(map(DeviceType.from_name, value.split()))
[ "def", "_convert_device_types", "(", "self", ",", "value", ")", ":", "return", "set", "(", "map", "(", "DeviceType", ".", "from_name", ",", "value", ".", "split", "(", ")", ")", ")" ]
https://github.com/rhinstaller/anaconda/blob/63edc8680f1b05cbfe11bef28703acba808c5174/pyanaconda/core/configuration/storage_constraints.py#L110-L112
rougier/from-python-to-numpy
eb21651fc84d132414603e5a16d93f54ef45ec99
code/gpudata.py
python
GPUData.stride
(self)
Item stride in the base array.
Item stride in the base array.
[ "Item", "stride", "in", "the", "base", "array", "." ]
def stride(self):
    """ Item stride in the base array. """
    if self.base is None:
        return self.ravel().strides[0]
    else:
        return self.base.ravel().strides[0]
[ "def", "stride", "(", "self", ")", ":", "if", "self", ".", "base", "is", "None", ":", "return", "self", ".", "ravel", "(", ")", ".", "strides", "[", "0", "]", "else", ":", "return", "self", ".", "base", ".", "ravel", "(", ")", ".", "strides", "[", "0", "]" ]
https://github.com/rougier/from-python-to-numpy/blob/eb21651fc84d132414603e5a16d93f54ef45ec99/code/gpudata.py#L65-L71
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/twisted/twisted/python/usage.py
python
Options.parseArgs
(self)
I am called with any leftover arguments which were not options.

Override me to do something with the remaining arguments on
the command line, those which were not flags or options. e.g.
interpret them as a list of files to operate on.

Note that if there are more arguments on the command line
than this method accepts, parseArgs will blow up with
a getopt.error.  This means if you don't override me,
parseArgs will blow up if I am passed any arguments at
all!
I am called with any leftover arguments which were not options.
[ "I", "am", "called", "with", "any", "leftover", "arguments", "which", "were", "not", "options", "." ]
def parseArgs(self):
    """
    I am called with any leftover arguments which were not options.

    Override me to do something with the remaining arguments on
    the command line, those which were not flags or options. e.g.
    interpret them as a list of files to operate on.

    Note that if there are more arguments on the command line
    than this method accepts, parseArgs will blow up with
    a getopt.error.  This means if you don't override me,
    parseArgs will blow up if I am passed any arguments at
    all!
    """
[ "def", "parseArgs", "(", "self", ")", ":" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/twisted/twisted/python/usage.py#L252-L265
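The documented way to use this hook is to override it in an Options subclass; a sketch of that pattern (MyOptions and the 'files' key are illustrative, not from the source):

from twisted.python import usage

class MyOptions(usage.Options):
    def parseArgs(self, *files):
        # A *args signature accepts any number of leftover arguments;
        # a fixed signature would instead cap how many are allowed.
        self['files'] = list(files)

config = MyOptions()
config.parseOptions(['a.txt', 'b.txt'])  # no flags, two leftover arguments
print(config['files'])                   # ['a.txt', 'b.txt']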
tomplus/kubernetes_asyncio
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
kubernetes_asyncio/client/models/v1beta1_custom_resource_definition.py
python
V1beta1CustomResourceDefinition.spec
(self, spec)
Sets the spec of this V1beta1CustomResourceDefinition.

:param spec: The spec of this V1beta1CustomResourceDefinition.  # noqa: E501
:type: V1beta1CustomResourceDefinitionSpec
Sets the spec of this V1beta1CustomResourceDefinition.
[ "Sets", "the", "spec", "of", "this", "V1beta1CustomResourceDefinition", "." ]
def spec(self, spec):
    """Sets the spec of this V1beta1CustomResourceDefinition.

    :param spec: The spec of this V1beta1CustomResourceDefinition.  # noqa: E501
    :type: V1beta1CustomResourceDefinitionSpec
    """
    if self.local_vars_configuration.client_side_validation and spec is None:  # noqa: E501
        raise ValueError("Invalid value for `spec`, must not be `None`")  # noqa: E501

    self._spec = spec
[ "def", "spec", "(", "self", ",", "spec", ")", ":", "if", "self", ".", "local_vars_configuration", ".", "client_side_validation", "and", "spec", "is", "None", ":", "# noqa: E501", "raise", "ValueError", "(", "\"Invalid value for `spec`, must not be `None`\"", ")", "# noqa: E501", "self", ".", "_spec", "=", "spec" ]
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v1beta1_custom_resource_definition.py#L152-L162
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
ansible/roles/lib_oa_openshift/library/oc_label.py
python
OpenShiftCLI._evacuate
(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False)
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
perform oadm manage-node evacuate
perform oadm manage-node evacuate
[ "perform", "oadm", "manage", "-", "node", "evacuate" ]
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
    ''' perform oadm manage-node evacuate '''
    cmd = ['manage-node']
    if node:
        cmd.extend(node)
    else:
        cmd.append('--selector={}'.format(selector))

    if dry_run:
        cmd.append('--dry-run')

    if pod_selector:
        cmd.append('--pod-selector={}'.format(pod_selector))

    if grace_period:
        cmd.append('--grace-period={}'.format(int(grace_period)))

    if force:
        cmd.append('--force')

    cmd.append('--evacuate')

    return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
[ "def", "_evacuate", "(", "self", ",", "node", "=", "None", ",", "selector", "=", "None", ",", "pod_selector", "=", "None", ",", "dry_run", "=", "False", ",", "grace_period", "=", "None", ",", "force", "=", "False", ")", ":", "cmd", "=", "[", "'manage-node'", "]", "if", "node", ":", "cmd", ".", "extend", "(", "node", ")", "else", ":", "cmd", ".", "append", "(", "'--selector={}'", ".", "format", "(", "selector", ")", ")", "if", "dry_run", ":", "cmd", ".", "append", "(", "'--dry-run'", ")", "if", "pod_selector", ":", "cmd", ".", "append", "(", "'--pod-selector={}'", ".", "format", "(", "pod_selector", ")", ")", "if", "grace_period", ":", "cmd", ".", "append", "(", "'--grace-period={}'", ".", "format", "(", "int", "(", "grace_period", ")", ")", ")", "if", "force", ":", "cmd", ".", "append", "(", "'--force'", ")", "cmd", ".", "append", "(", "'--evacuate'", ")", "return", "self", ".", "openshift_cmd", "(", "cmd", ",", "oadm", "=", "True", ",", "output", "=", "True", ",", "output_type", "=", "'raw'", ")" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/ansible/roles/lib_oa_openshift/library/oc_label.py#L1071-L1093
chribsen/simple-machine-learning-examples
dc94e52a4cebdc8bb959ff88b81ff8cfeca25022
venv/lib/python2.7/site-packages/pandas/core/reshape.py
python
make_axis_dummies
(frame, axis='minor', transform=None)
return DataFrame(values, columns=items, index=frame.index)
Construct 1-0 dummy variables corresponding to designated axis
labels

Parameters
----------
frame : DataFrame
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
    Function to apply to axis labels first. For example, to
    get "day of week" dummies in a time series regression
    you might call::

        make_axis_dummies(panel, axis='major',
                          transform=lambda d: d.weekday())

Returns
-------
dummies : DataFrame
    Column names taken from chosen axis
Construct 1-0 dummy variables corresponding to designated axis labels
[ "Construct", "1", "-", "0", "dummy", "variables", "corresponding", "to", "designated", "axis", "labels" ]
def make_axis_dummies(frame, axis='minor', transform=None):
    """
    Construct 1-0 dummy variables corresponding to designated axis
    labels

    Parameters
    ----------
    frame : DataFrame
    axis : {'major', 'minor'}, default 'minor'
    transform : function, default None
        Function to apply to axis labels first. For example, to
        get "day of week" dummies in a time series regression
        you might call::

            make_axis_dummies(panel, axis='major',
                              transform=lambda d: d.weekday())

    Returns
    -------
    dummies : DataFrame
        Column names taken from chosen axis
    """
    numbers = {'major': 0, 'minor': 1}
    num = numbers.get(axis, axis)

    items = frame.index.levels[num]
    labels = frame.index.labels[num]
    if transform is not None:
        mapped_items = items.map(transform)
        labels, items = _factorize_from_iterable(mapped_items.take(labels))

    values = np.eye(len(items), dtype=float)
    values = values.take(labels, axis=0)

    return DataFrame(values, columns=items, index=frame.index)
[ "def", "make_axis_dummies", "(", "frame", ",", "axis", "=", "'minor'", ",", "transform", "=", "None", ")", ":", "numbers", "=", "{", "'major'", ":", "0", ",", "'minor'", ":", "1", "}", "num", "=", "numbers", ".", "get", "(", "axis", ",", "axis", ")", "items", "=", "frame", ".", "index", ".", "levels", "[", "num", "]", "labels", "=", "frame", ".", "index", ".", "labels", "[", "num", "]", "if", "transform", "is", "not", "None", ":", "mapped_items", "=", "items", ".", "map", "(", "transform", ")", "labels", ",", "items", "=", "_factorize_from_iterable", "(", "mapped_items", ".", "take", "(", "labels", ")", ")", "values", "=", "np", ".", "eye", "(", "len", "(", "items", ")", ",", "dtype", "=", "float", ")", "values", "=", "values", ".", "take", "(", "labels", ",", "axis", "=", "0", ")", "return", "DataFrame", "(", "values", ",", "columns", "=", "items", ",", "index", "=", "frame", ".", "index", ")" ]
https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/pandas/core/reshape.py#L1185-L1218
Esri/ArcREST
ab240fde2b0200f61d4a5f6df033516e53f2f416
src/arcrest/manageorg/_content.py
python
UserItem.languages
(self)
return self._languages
gets the property value for languages
gets the property value for languages
[ "gets", "the", "property", "value", "for", "languages" ]
def languages(self):
    '''gets the property value for languages'''
    if self._languages is None:
        self.__init()
    return self._languages
[ "def", "languages", "(", "self", ")", ":", "if", "self", ".", "_languages", "is", "None", ":", "self", ".", "__init", "(", ")", "return", "self", ".", "_languages" ]
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_content.py#L1279-L1283
google/clusterfuzz
f358af24f414daa17a3649b143e71ea71871ef59
src/clusterfuzz/_internal/datastore/data_types.py
python
Testcase.get_metadata
(self, key=None, default=None)
Get metadata for a test case. Slow on first access.
Get metadata for a test case. Slow on first access.
[ "Get", "metadata", "for", "a", "test", "case", ".", "Slow", "on", "first", "access", "." ]
def get_metadata(self, key=None, default=None):
    """Get metadata for a test case. Slow on first access."""
    self._ensure_metadata_is_cached()

    # If no key is specified, return all metadata.
    if not key:
        return self.metadata_cache

    try:
        return self.metadata_cache[key]
    except KeyError:
        return default
[ "def", "get_metadata", "(", "self", ",", "key", "=", "None", ",", "default", "=", "None", ")", ":", "self", ".", "_ensure_metadata_is_cached", "(", ")", "# If no key is specified, return all metadata.", "if", "not", "key", ":", "return", "self", ".", "metadata_cache", "try", ":", "return", "self", ".", "metadata_cache", "[", "key", "]", "except", "KeyError", ":", "return", "default" ]
https://github.com/google/clusterfuzz/blob/f358af24f414daa17a3649b143e71ea71871ef59/src/clusterfuzz/_internal/datastore/data_types.py#L667-L678
relekang/python-semantic-release
09af5f11a6134c8711b59a5bcd57c917c0c91b5e
semantic_release/vcs_helpers.py
python
get_current_head_hash
()
return repo.head.commit.name_rev.split(" ")[0]
Get the commit hash of the current HEAD.

:return: The commit hash.
Get the commit hash of the current HEAD.
[ "Get", "the", "commit", "hash", "of", "the", "current", "HEAD", "." ]
def get_current_head_hash() -> str:
    """
    Get the commit hash of the current HEAD.

    :return: The commit hash.
    """
    return repo.head.commit.name_rev.split(" ")[0]
[ "def", "get_current_head_hash", "(", ")", "->", "str", ":", "return", "repo", ".", "head", ".", "commit", ".", "name_rev", ".", "split", "(", "\" \"", ")", "[", "0", "]" ]
https://github.com/relekang/python-semantic-release/blob/09af5f11a6134c8711b59a5bcd57c917c0c91b5e/semantic_release/vcs_helpers.py#L131-L137
jeffknupp/sandman
253ea4d15cbccd9f0016d66fedd7478614cc0b2f
sandman/sandman.py
python
_single_attribute_html_response
(resource, name, value)
return make_response(render_template( 'attribute.html', resource=resource, name=name, value=value))
Return the HTML representation of a single attribute of a resource.

:param :class:`sandman.model.Model` resource: resource for attribute
:param string name: name of the attribute
:param string value: string value of the attribute
:rtype: :class:`flask.Response`
Return the HTML representation of a single attribute of a resource.
[ "Return", "the", "HTML", "representation", "of", "a", "single", "attribute", "of", "a", "resource", "." ]
def _single_attribute_html_response(resource, name, value):
    """Return the HTML representation of a single attribute of a resource.

    :param :class:`sandman.model.Model` resource: resource for attribute
    :param string name: name of the attribute
    :param string value: string value of the attribute
    :rtype: :class:`flask.Response`

    """
    return make_response(render_template(
        'attribute.html',
        resource=resource,
        name=name,
        value=value))
[ "def", "_single_attribute_html_response", "(", "resource", ",", "name", ",", "value", ")", ":", "return", "make_response", "(", "render_template", "(", "'attribute.html'", ",", "resource", "=", "resource", ",", "name", "=", "name", ",", "value", "=", "value", ")", ")" ]
https://github.com/jeffknupp/sandman/blob/253ea4d15cbccd9f0016d66fedd7478614cc0b2f/sandman/sandman.py#L106-L118
yaohungt/Gated-Spatio-Temporal-Energy-Graph
bc8f44b3d95cbfe3032bb3612daa07b4d9cd4298
models/layers/AsyncTFCriterion.py
python
axb
(a, x, b)
return (a * xb.squeeze()).sum(1)
[]
def axb(a, x, b):
    # a and b are batched vectors, X is batched matrix
    # returns a^t * X * b
    xb = torch.bmm(x, b[:, :, None])
    return (a * xb.squeeze()).sum(1)
[ "def", "axb", "(", "a", ",", "x", ",", "b", ")", ":", "# a and b are batched vectors, X is batched matrix", "# returns a^t * X * b", "xb", "=", "torch", ".", "bmm", "(", "x", ",", "b", "[", ":", ",", ":", ",", "None", "]", ")", "return", "(", "a", "*", "xb", ".", "squeeze", "(", ")", ")", ".", "sum", "(", "1", ")" ]
https://github.com/yaohungt/Gated-Spatio-Temporal-Energy-Graph/blob/bc8f44b3d95cbfe3032bb3612daa07b4d9cd4298/models/layers/AsyncTFCriterion.py#L31-L35
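A quick sanity check for the batched bilinear form: it should agree with an explicit einsum of a^T X b over the batch dimension:

import torch

a = torch.randn(4, 3)
x = torch.randn(4, 3, 3)
b = torch.randn(4, 3)
print(torch.allclose(axb(a, x, b),
                     torch.einsum('bi,bij,bj->b', a, x, b),
                     atol=1e-6))  # True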
wistbean/fxxkpython
88e16d79d8dd37236ba6ecd0d0ff11d63143968c
vip/qyxuan/projects/Snake/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/pkg_resources/__init__.py
python
Environment.__iter__
(self)
Yield the unique project names of the available distributions
Yield the unique project names of the available distributions
[ "Yield", "the", "unique", "project", "names", "of", "the", "available", "distributions" ]
def __iter__(self):
    """Yield the unique project names of the available distributions"""
    for key in self._distmap.keys():
        if self[key]:
            yield key
[ "def", "__iter__", "(", "self", ")", ":", "for", "key", "in", "self", ".", "_distmap", ".", "keys", "(", ")", ":", "if", "self", "[", "key", "]", ":", "yield", "key" ]
https://github.com/wistbean/fxxkpython/blob/88e16d79d8dd37236ba6ecd0d0ff11d63143968c/vip/qyxuan/projects/Snake/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/pkg_resources/__init__.py#L1077-L1081
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/lib2to3/fixes/fix_imports.py
python
alternates
(members)
return "(" + "|".join(map(repr, members)) + ")"
[]
def alternates(members):
    return "(" + "|".join(map(repr, members)) + ")"
[ "def", "alternates", "(", "members", ")", ":", "return", "\"(\"", "+", "\"|\"", ".", "join", "(", "map", "(", "repr", ",", "members", ")", ")", "+", "\")\"" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/lib2to3/fixes/fix_imports.py#L61-L62
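alternates builds a pattern-alternation string out of repr'd members, as used when composing lib2to3 fixer patterns. A quick illustration of its output:

def alternates(members):
    return "(" + "|".join(map(repr, members)) + ")"

print(alternates(["urllib", "urllib2"]))   # -> ('urllib'|'urllib2')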
kamalgill/flask-appengine-template
11760f83faccbb0d0afe416fc58e67ecfb4643c2
src/lib/flask_cache/__init__.py
python
Cache._memoize_kwargs_to_args
(self, f, *args, **kwargs)
return tuple(new_args), {}
[]
def _memoize_kwargs_to_args(self, f, *args, **kwargs): #: Inspect the arguments to the function #: This allows the memoization to be the same #: whether the function was called with #: 1, b=2 is equivalent to a=1, b=2, etc. new_args = [] arg_num = 0 argspec = inspect.getargspec(f) args_len = len(argspec.args) for i in range(args_len): if i == 0 and argspec.args[i] in ('self', 'cls'): #: use the repr of the class instance #: this supports instance methods for #: the memoized functions, giving more #: flexibility to developers arg = repr(args[0]) arg_num += 1 elif argspec.args[i] in kwargs: arg = kwargs[argspec.args[i]] elif arg_num < len(args): arg = args[arg_num] arg_num += 1 elif abs(i-args_len) <= len(argspec.defaults): arg = argspec.defaults[i-args_len] arg_num += 1 else: arg = None arg_num += 1 #: Attempt to convert all arguments to a #: hash/id or a representation? #: Not sure if this is necessary, since #: using objects as keys gets tricky quickly. # if hasattr(arg, '__class__'): # try: # arg = hash(arg) # except: # arg = repr(arg) #: Or what about a special __cacherepr__ function #: on an object, this allows objects to act normal #: upon inspection, yet they can define a representation #: that can be used to make the object unique in the #: cache key. Given that a case comes across that #: an object "must" be used as a cache key # if hasattr(arg, '__cacherepr__'): # arg = arg.__cacherepr__ new_args.append(arg) return tuple(new_args), {}
[ "def", "_memoize_kwargs_to_args", "(", "self", ",", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "#: Inspect the arguments to the function", "#: This allows the memoization to be the same", "#: whether the function was called with", "#: 1, b=2 is equivalent to a=1, b=2, etc.", "new_args", "=", "[", "]", "arg_num", "=", "0", "argspec", "=", "inspect", ".", "getargspec", "(", "f", ")", "args_len", "=", "len", "(", "argspec", ".", "args", ")", "for", "i", "in", "range", "(", "args_len", ")", ":", "if", "i", "==", "0", "and", "argspec", ".", "args", "[", "i", "]", "in", "(", "'self'", ",", "'cls'", ")", ":", "#: use the repr of the class instance", "#: this supports instance methods for", "#: the memoized functions, giving more", "#: flexibility to developers", "arg", "=", "repr", "(", "args", "[", "0", "]", ")", "arg_num", "+=", "1", "elif", "argspec", ".", "args", "[", "i", "]", "in", "kwargs", ":", "arg", "=", "kwargs", "[", "argspec", ".", "args", "[", "i", "]", "]", "elif", "arg_num", "<", "len", "(", "args", ")", ":", "arg", "=", "args", "[", "arg_num", "]", "arg_num", "+=", "1", "elif", "abs", "(", "i", "-", "args_len", ")", "<=", "len", "(", "argspec", ".", "defaults", ")", ":", "arg", "=", "argspec", ".", "defaults", "[", "i", "-", "args_len", "]", "arg_num", "+=", "1", "else", ":", "arg", "=", "None", "arg_num", "+=", "1", "#: Attempt to convert all arguments to a", "#: hash/id or a representation?", "#: Not sure if this is necessary, since", "#: using objects as keys gets tricky quickly.", "# if hasattr(arg, '__class__'):", "# try:", "# arg = hash(arg)", "# except:", "# arg = repr(arg)", "#: Or what about a special __cacherepr__ function", "#: on an object, this allows objects to act normal", "#: upon inspection, yet they can define a representation", "#: that can be used to make the object unique in the", "#: cache key. Given that a case comes across that", "#: an object \"must\" be used as a cache key", "# if hasattr(arg, '__cacherepr__'):", "# arg = arg.__cacherepr__", "new_args", ".", "append", "(", "arg", ")", "return", "tuple", "(", "new_args", ")", ",", "{", "}" ]
https://github.com/kamalgill/flask-appengine-template/blob/11760f83faccbb0d0afe416fc58e67ecfb4643c2/src/lib/flask_cache/__init__.py#L411-L462
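The normalization above is what lets Flask-Cache treat f(1, b=2) and f(a=1, b=2) as the same cache entry. A stripped-down sketch of the same idea; the names here are illustrative, not the library's API, and it uses inspect.getfullargspec since the inspect.getargspec the record relies on has been removed from modern Python:

import inspect

def normalize_call(f, *args, **kwargs):
    # Map every declared parameter to one positional slot, falling back
    # to defaults, so equivalent call spellings produce the same key.
    spec = inspect.getfullargspec(f)
    defaults = dict(zip(spec.args[-len(spec.defaults):], spec.defaults)) if spec.defaults else {}
    bound = []
    for i, name in enumerate(spec.args):
        if name in kwargs:
            bound.append(kwargs[name])
        elif i < len(args):
            bound.append(args[i])
        else:
            bound.append(defaults.get(name))
    return tuple(bound)

def f(a, b=2, c=3):
    return a + b + c

assert normalize_call(f, 1, b=2) == normalize_call(f, a=1, b=2) == (1, 2, 3)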
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/tornado-6.0.1-py3.7-macosx-12.1-iPad6,7.egg/tornado/locks.py
python
Event.set
(self)
Set the internal flag to ``True``. All waiters are awakened. Calling `.wait` once the flag is set will not block.
Set the internal flag to ``True``. All waiters are awakened.
[ "Set", "the", "internal", "flag", "to", "True", ".", "All", "waiters", "are", "awakened", "." ]
def set(self) -> None: """Set the internal flag to ``True``. All waiters are awakened. Calling `.wait` once the flag is set will not block. """ if not self._value: self._value = True for fut in self._waiters: if not fut.done(): fut.set_result(None)
[ "def", "set", "(", "self", ")", "->", "None", ":", "if", "not", "self", ".", "_value", ":", "self", ".", "_value", "=", "True", "for", "fut", "in", "self", ".", "_waiters", ":", "if", "not", "fut", ".", "done", "(", ")", ":", "fut", ".", "set_result", "(", "None", ")" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/tornado-6.0.1-py3.7-macosx-12.1-iPad6,7.egg/tornado/locks.py#L215-L225
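tornado's Event behaves like asyncio.Event: set() flips the flag and resolves every pending waiter future, and any later wait() returns immediately. A minimal usage sketch, assuming tornado 5+ running on an asyncio event loop:

import asyncio
from tornado.locks import Event

async def main():
    event = Event()

    async def waiter(n):
        await event.wait()              # blocks until set() is called
        print("waiter", n, "woke up")

    tasks = [asyncio.ensure_future(waiter(i)) for i in range(3)]
    await asyncio.sleep(0)              # let the waiters reach their await
    event.set()                         # wakes all three at once
    await asyncio.gather(*tasks)
    await event.wait()                  # flag already set: no blocking

asyncio.run(main())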
descarteslabs/descarteslabs-python
ace8a1a89d58b75df1bcaa613a4b3544d7bdc4be
descarteslabs/tables/client.py
python
Tables._normalize_features
(self, obj)
:param object obj: Python object representing GeoJSON-like features. This can be an object with __geo_interface__ method (e.g. GeoDataFrame), a GeoJSON-like FeatureCollection mapping, or a single GeoJSON-like Feature mapping, or an iterable of GeoJSON-like Feature mappings :return: Iterable of GeoJSON-like Feature mappings
:param object obj: Python object representing GeoJSON-like features. This can be an object with __geo_interface__ method (e.g. GeoDataFrame), a GeoJSON-like FeatureCollection mapping, or a single GeoJSON-like Feature mapping, or an iterable of GeoJSON-like Feature mappings
[ ":", "param", "object", "obj", ":", "Python", "object", "representing", "GeoJSON", "-", "like", "features", ".", "This", "can", "be", "an", "object", "with", "__geo_interface__", "method", "(", "e", ".", "g", ".", "GeoDataFrame", ")", "a", "GeoJSON", "-", "like", "FeatureCollection", "mapping", "or", "a", "single", "GeoJSON", "-", "like", "Feature", "mapping", "or", "an", "iterable", "of", "GeoJSON", "-", "like", "Feature", "mappings" ]
def _normalize_features(self, obj): """ :param object obj: Python object representing GeoJSON-like features. This can be an object with __geo_interface__ method (e.g. GeoDataFrame), a GeoJSON-like FeatureCollection mapping, or a single GeoJSON-like Feature mapping, or an iterable of GeoJSON-like Feature mappings :return: Iterable of GeoJSON-like Feature mappings """ if hasattr(obj, "__geo_interface__"): features = obj.__geo_interface__["features"] elif isinstance(obj, pd.DataFrame): features = ( {"type": "Feature", "properties": r, "geometry": None} for r in obj.to_dict("records") ) elif "features" in obj: features = obj["features"] elif "properties" in obj and "geometry" in obj: features = [obj] elif isinstance(obj, Iterable): # TODO we have to trust that the contents are GeoJSON-like features # to avoid consuming any stateful iterators features = obj else: raise BadRequestError("Could not find any GeoJSON-like features") yield from features
[ "def", "_normalize_features", "(", "self", ",", "obj", ")", ":", "if", "hasattr", "(", "obj", ",", "\"__geo_interface__\"", ")", ":", "features", "=", "obj", ".", "__geo_interface__", "[", "\"features\"", "]", "elif", "isinstance", "(", "obj", ",", "pd", ".", "DataFrame", ")", ":", "features", "=", "(", "{", "\"type\"", ":", "\"Feature\"", ",", "\"properties\"", ":", "r", ",", "\"geometry\"", ":", "None", "}", "for", "r", "in", "obj", ".", "to_dict", "(", "\"records\"", ")", ")", "elif", "\"features\"", "in", "obj", ":", "features", "=", "obj", "[", "\"features\"", "]", "elif", "\"properties\"", "in", "obj", "and", "\"geometry\"", "in", "obj", ":", "features", "=", "[", "obj", "]", "elif", "isinstance", "(", "obj", ",", "Iterable", ")", ":", "# TODO we have to trust that the contents are GeoJSON-like features", "# to avoid consuming any stateful iterators", "features", "=", "obj", "else", ":", "raise", "BadRequestError", "(", "\"Could not find any GeoJSON-like features\"", ")", "yield", "from", "features" ]
https://github.com/descarteslabs/descarteslabs-python/blob/ace8a1a89d58b75df1bcaa613a4b3544d7bdc4be/descarteslabs/tables/client.py#L378-L406
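Each accepted input shape funnels into a plain iterable of Feature mappings. A rough illustration of that duck-typing using bare dicts, not tied to the descarteslabs client (the pandas and error-handling branches are omitted here):

feature = {"type": "Feature", "properties": {"name": "a"}, "geometry": None}
collection = {"type": "FeatureCollection", "features": [feature]}

def normalize(obj):
    if hasattr(obj, "__geo_interface__"):
        return obj.__geo_interface__["features"]
    if "features" in obj:                          # FeatureCollection mapping
        return obj["features"]
    if "properties" in obj and "geometry" in obj:  # single Feature mapping
        return [obj]
    return obj                                     # trust any other iterable

assert list(normalize(collection)) == [feature]
assert list(normalize(feature)) == [feature]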
erikrose/parsimonious
3da7e804c07d4e495873be208701b5c955247c58
parsimonious/grammar.py
python
RuleVisitor.visit_sequence
(self, node, sequence)
return Sequence(term, *other_terms)
A parsed Sequence looks like [term node, OneOrMore node of ``another_term``s]. Flatten it out.
A parsed Sequence looks like [term node, OneOrMore node of ``another_term``s]. Flatten it out.
[ "A", "parsed", "Sequence", "looks", "like", "[", "term", "node", "OneOrMore", "node", "of", "another_term", "s", "]", ".", "Flatten", "it", "out", "." ]
def visit_sequence(self, node, sequence): """A parsed Sequence looks like [term node, OneOrMore node of ``another_term``s]. Flatten it out.""" term, other_terms = sequence return Sequence(term, *other_terms)
[ "def", "visit_sequence", "(", "self", ",", "node", ",", "sequence", ")", ":", "term", ",", "other_terms", "=", "sequence", "return", "Sequence", "(", "term", ",", "*", "other_terms", ")" ]
https://github.com/erikrose/parsimonious/blob/3da7e804c07d4e495873be208701b5c955247c58/parsimonious/grammar.py#L325-L329
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/lib/django-0.96/django/utils/simplejson/decoder.py
python
JSONDecoder.__init__
(self, encoding=None, object_hook=None)
``encoding`` determines the encoding used to interpret any ``str`` objects decoded by this instance (utf-8 by default). It has no effect when decoding ``unicode`` objects. Note that currently only encodings that are a superset of ASCII work, strings of other encodings should be passed in as ``unicode``. ``object_hook``, if specified, will be called with the result of every JSON object decoded and its return value will be used in place of the given ``dict``. This can be used to provide custom deserializations (e.g. to support JSON-RPC class hinting).
``encoding`` determines the encoding used to interpret any ``str`` objects decoded by this instance (utf-8 by default). It has no effect when decoding ``unicode`` objects. Note that currently only encodings that are a superset of ASCII work, strings of other encodings should be passed in as ``unicode``.
[ "encoding", "determines", "the", "encoding", "used", "to", "interpret", "any", "str", "objects", "decoded", "by", "this", "instance", "(", "utf", "-", "8", "by", "default", ")", ".", "It", "has", "no", "effect", "when", "decoding", "unicode", "objects", ".", "Note", "that", "currently", "only", "encodings", "that", "are", "a", "superset", "of", "ASCII", "work", "strings", "of", "other", "encodings", "should", "be", "passed", "in", "as", "unicode", "." ]
def __init__(self, encoding=None, object_hook=None): """ ``encoding`` determines the encoding used to interpret any ``str`` objects decoded by this instance (utf-8 by default). It has no effect when decoding ``unicode`` objects. Note that currently only encodings that are a superset of ASCII work, strings of other encodings should be passed in as ``unicode``. ``object_hook``, if specified, will be called with the result of every JSON object decoded and its return value will be used in place of the given ``dict``. This can be used to provide custom deserializations (e.g. to support JSON-RPC class hinting). """ self.encoding = encoding self.object_hook = object_hook
[ "def", "__init__", "(", "self", ",", "encoding", "=", "None", ",", "object_hook", "=", "None", ")", ":", "self", ".", "encoding", "=", "encoding", "self", ".", "object_hook", "=", "object_hook" ]
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/lib/django-0.96/django/utils/simplejson/decoder.py#L229-L244
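The object_hook contract described in this docstring is the same one the stdlib json module still exposes: every decoded JSON object passes through the hook, and the hook's return value replaces the dict. A small sketch using the stdlib rather than the bundled django simplejson:

import json

def hinted(d):
    # Replace {"__complex__": true, ...} objects with complex numbers.
    if d.get("__complex__"):
        return complex(d["real"], d["imag"])
    return d

value = json.loads('{"__complex__": true, "real": 1, "imag": 2}',
                   object_hook=hinted)
assert value == (1 + 2j)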
hubblestack/hubble
763142474edcecdec5fd25591dc29c3536e8f969
hubblestack/audit/misc.py
python
_check_password_fields_not_empty
(block_id, block_dict, extra_args)
return True if result == "" else result
Ensure password fields are not empty
Ensure password fields are not empty
[ "Ensure", "password", "fields", "are", "not", "empty" ]
def _check_password_fields_not_empty(block_id, block_dict, extra_args): """ Ensure password fields are not empty """ result = _execute_shell_command( 'cat /etc/shadow | awk -F: \'($2 == "" ) { print $1 " does not have a password "}\'', python_shell=True ) return True if result == "" else result
[ "def", "_check_password_fields_not_empty", "(", "block_id", ",", "block_dict", ",", "extra_args", ")", ":", "result", "=", "_execute_shell_command", "(", "'cat /etc/shadow | awk -F: \\'($2 == \"\" ) { print $1 \" does not have a password \"}\\''", ",", "python_shell", "=", "True", ")", "return", "True", "if", "result", "==", "\"\"", "else", "result" ]
https://github.com/hubblestack/hubble/blob/763142474edcecdec5fd25591dc29c3536e8f969/hubblestack/audit/misc.py#L447-L454
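The shell pipeline above flags any /etc/shadow entry whose second colon-separated field (the password hash) is empty. The same check in pure Python, without shelling out to awk; this is a sketch, not hubble's implementation:

def empty_password_accounts(shadow_path="/etc/shadow"):
    # Reading /etc/shadow normally requires root privileges.
    flagged = []
    with open(shadow_path) as fh:
        for line in fh:
            fields = line.rstrip("\n").split(":")
            if len(fields) > 1 and fields[1] == "":
                flagged.append(fields[0] + " does not have a password ")
    return flagged   # an empty list means the check passes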
CenterForOpenScience/osf.io
cc02691be017e61e2cd64f19b848b2f4c18dcc84
api/base/requests.py
python
EmbeddedRequest.method
(self)
return 'GET'
Overrides method to be 'GET'
Overrides method to be 'GET'
[ "Overrides", "method", "to", "be", "GET" ]
def method(self): """ Overrides method to be 'GET' """ return 'GET'
[ "def", "method", "(", "self", ")", ":", "return", "'GET'" ]
https://github.com/CenterForOpenScience/osf.io/blob/cc02691be017e61e2cd64f19b848b2f4c18dcc84/api/base/requests.py#L28-L32
Crypto-toolbox/bitex
56d46ea3db6de5219a72dad9b052fbabc921232f
bitex/api/WSS/bitfinex.py
python
BitfinexWSS.process
(self)
Processes the Client queue, and passes the data to the respective methods. :return:
Processes the Client queue, and passes the data to the respective methods. :return:
[ "Processes", "the", "Client", "queue", "and", "passes", "the", "data", "to", "the", "respective", "methods", ".", ":", "return", ":" ]
def process(self): """ Processes the Client queue, and passes the data to the respective methods. :return: """ while self.running: if self._processor_lock.acquire(blocking=False): if self.ping_timer: try: self._check_ping() except TimeoutError: log.exception("BitfinexWSS.ping(): TimedOut! (%ss)" % self.ping_timer) except (WebSocketConnectionClosedException, ConnectionResetError): log.exception("BitfinexWSS.ping(): Connection Error!") self.conn = None if not self.conn: # The connection was killed - initiate restart self._controller_q.put('restart') skip_processing = False try: ts, data = self.receiver_q.get(timeout=0.1) except queue.Empty: skip_processing = True ts = time.time() data = None if not skip_processing: log.debug("Processing Data: %s", data) if isinstance(data, list): self.handle_data(ts, data) else: # Not a list, hence it could be a response try: self.handle_response(ts, data) except UnknownEventError: # We don't know what event this is- Raise an # error & log data! log.exception("main() - UnknownEventError: %s", data) log.info("main() - Shutting Down due to " "Unknown Error!") self._controller_q.put('stop') except ConnectionResetError: log.info("processor Thread: Connection Was reset, " "initiating restart") self._controller_q.put('restart') self._check_heartbeats(ts) self._processor_lock.release() else: time.sleep(0.5)
[ "def", "process", "(", "self", ")", ":", "while", "self", ".", "running", ":", "if", "self", ".", "_processor_lock", ".", "acquire", "(", "blocking", "=", "False", ")", ":", "if", "self", ".", "ping_timer", ":", "try", ":", "self", ".", "_check_ping", "(", ")", "except", "TimeoutError", ":", "log", ".", "exception", "(", "\"BitfinexWSS.ping(): TimedOut! (%ss)\"", "%", "self", ".", "ping_timer", ")", "except", "(", "WebSocketConnectionClosedException", ",", "ConnectionResetError", ")", ":", "log", ".", "exception", "(", "\"BitfinexWSS.ping(): Connection Error!\"", ")", "self", ".", "conn", "=", "None", "if", "not", "self", ".", "conn", ":", "# The connection was killed - initiate restart", "self", ".", "_controller_q", ".", "put", "(", "'restart'", ")", "skip_processing", "=", "False", "try", ":", "ts", ",", "data", "=", "self", ".", "receiver_q", ".", "get", "(", "timeout", "=", "0.1", ")", "except", "queue", ".", "Empty", ":", "skip_processing", "=", "True", "ts", "=", "time", ".", "time", "(", ")", "data", "=", "None", "if", "not", "skip_processing", ":", "log", ".", "debug", "(", "\"Processing Data: %s\"", ",", "data", ")", "if", "isinstance", "(", "data", ",", "list", ")", ":", "self", ".", "handle_data", "(", "ts", ",", "data", ")", "else", ":", "# Not a list, hence it could be a response", "try", ":", "self", ".", "handle_response", "(", "ts", ",", "data", ")", "except", "UnknownEventError", ":", "# We don't know what event this is- Raise an", "# error & log data!", "log", ".", "exception", "(", "\"main() - UnknownEventError: %s\"", ",", "data", ")", "log", ".", "info", "(", "\"main() - Shutting Down due to \"", "\"Unknown Error!\"", ")", "self", ".", "_controller_q", ".", "put", "(", "'stop'", ")", "except", "ConnectionResetError", ":", "log", ".", "info", "(", "\"processor Thread: Connection Was reset, \"", "\"initiating restart\"", ")", "self", ".", "_controller_q", ".", "put", "(", "'restart'", ")", "self", ".", "_check_heartbeats", "(", "ts", ")", "self", ".", "_processor_lock", ".", "release", "(", ")", "else", ":", "time", ".", "sleep", "(", "0.5", ")" ]
https://github.com/Crypto-toolbox/bitex/blob/56d46ea3db6de5219a72dad9b052fbabc921232f/bitex/api/WSS/bitfinex.py#L307-L364
securityclippy/elasticintel
aa08d3e9f5ab1c000128e95161139ce97ff0e334
ingest_feed_lambda/numpy/core/fromnumeric.py
python
sometrue
(a, axis=None, out=None, keepdims=np._NoValue)
return arr.any(axis=axis, out=out, **kwargs)
Check whether some values are true. Refer to `any` for full documentation. See Also -------- any : equivalent function
Check whether some values are true.
[ "Check", "whether", "some", "values", "are", "true", "." ]
def sometrue(a, axis=None, out=None, keepdims=np._NoValue): """ Check whether some values are true. Refer to `any` for full documentation. See Also -------- any : equivalent function """ arr = asanyarray(a) kwargs = {} if keepdims is not np._NoValue: kwargs['keepdims'] = keepdims return arr.any(axis=axis, out=out, **kwargs)
[ "def", "sometrue", "(", "a", ",", "axis", "=", "None", ",", "out", "=", "None", ",", "keepdims", "=", "np", ".", "_NoValue", ")", ":", "arr", "=", "asanyarray", "(", "a", ")", "kwargs", "=", "{", "}", "if", "keepdims", "is", "not", "np", ".", "_NoValue", ":", "kwargs", "[", "'keepdims'", "]", "=", "keepdims", "return", "arr", ".", "any", "(", "axis", "=", "axis", ",", "out", "=", "out", ",", "*", "*", "kwargs", ")" ]
https://github.com/securityclippy/elasticintel/blob/aa08d3e9f5ab1c000128e95161139ce97ff0e334/ingest_feed_lambda/numpy/core/fromnumeric.py#L1852-L1867
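sometrue is the long-standing alias for numpy.any, forwarding to ndarray.any as the record shows; newer numpy releases deprecated and eventually dropped the alias. Equivalent calls via the canonical spelling:

import numpy as np

a = np.array([[False, False], [True, False]])

assert np.any(a) == True
assert np.array_equal(np.any(a, axis=0), np.array([True, False]))
# np.sometrue(a) produced the same results before its deprecation/removal.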
SanPen/GridCal
d3f4566d2d72c11c7e910c9d162538ef0e60df31
src/GridCal/Gui/GridEditorWidget/vsc_graphics.py
python
VscGraphicItem.remove
(self, ask=True)
Remove this object in the diagram and the API @return:
Remove this object in the diagram and the API
[ "Remove", "this", "object", "in", "the", "diagram", "and", "the", "API" ]
def remove(self, ask=True): """ Remove this object in the diagram and the API @return: """ if ask: ok = yes_no_question('Do you want to remove this VSC?', 'Remove VSC') else: ok = True if ok: self.diagramScene.circuit.delete_vsc_converter(self.api_object) self.diagramScene.removeItem(self)
[ "def", "remove", "(", "self", ",", "ask", "=", "True", ")", ":", "if", "ask", ":", "ok", "=", "yes_no_question", "(", "'Do you want to remove this VSC?'", ",", "'Remove VSC'", ")", "else", ":", "ok", "=", "True", "if", "ok", ":", "self", ".", "diagramScene", ".", "circuit", ".", "delete_vsc_converter", "(", "self", ".", "api_object", ")", "self", ".", "diagramScene", ".", "removeItem", "(", "self", ")" ]
https://github.com/SanPen/GridCal/blob/d3f4566d2d72c11c7e910c9d162538ef0e60df31/src/GridCal/Gui/GridEditorWidget/vsc_graphics.py#L380-L392
larryhastings/gilectomy
4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a
Lib/_pydecimal.py
python
Decimal._iseven
(self)
return self._int[-1+self._exp] in '02468'
Returns True if self is even. Assumes self is an integer.
Returns True if self is even. Assumes self is an integer.
[ "Returns", "True", "if", "self", "is", "even", ".", "Assumes", "self", "is", "an", "integer", "." ]
def _iseven(self): """Returns True if self is even. Assumes self is an integer.""" if not self or self._exp > 0: return True return self._int[-1+self._exp] in '02468'
[ "def", "_iseven", "(", "self", ")", ":", "if", "not", "self", "or", "self", ".", "_exp", ">", "0", ":", "return", "True", "return", "self", ".", "_int", "[", "-", "1", "+", "self", ".", "_exp", "]", "in", "'02468'" ]
https://github.com/larryhastings/gilectomy/blob/4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a/Lib/_pydecimal.py#L2939-L2943
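For an integer Decimal, the coefficient string _int and exponent _exp place the last significant digit at index -1+_exp, and any positive exponent means trailing zeros, hence even. A self-contained sketch of the same parity test on (digits, exponent) pairs (the zero check stands in for the original's "not self"):

def iseven(digits, exp):
    # The represented value is int(digits) * 10**exp, assumed integral.
    if int(digits) == 0 or exp > 0:
        return True                       # zero or trailing zeros: even
    return digits[-1 + exp] in "02468"    # last significant digit decides

assert iseven("124", 0)        # 124
assert not iseven("250", -1)   # 250 * 10**-1 == 25
assert iseven("123", 1)        # 123 * 10**1 == 1230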
spyder-ide/spyder
55da47c032dfcf519600f67f8b30eab467f965e7
spyder/utils/qthelpers.py
python
get_origin_filename
()
return f.f_code.co_filename
Return the filename at the top of the stack
Return the filename at the top of the stack
[ "Return", "the", "filename", "at", "the", "top", "of", "the", "stack" ]
def get_origin_filename(): """Return the filename at the top of the stack""" # Get top frame f = sys._getframe() while f.f_back is not None: f = f.f_back return f.f_code.co_filename
[ "def", "get_origin_filename", "(", ")", ":", "# Get top frame", "f", "=", "sys", ".", "_getframe", "(", ")", "while", "f", ".", "f_back", "is", "not", "None", ":", "f", "=", "f", ".", "f_back", "return", "f", ".", "f_code", ".", "co_filename" ]
https://github.com/spyder-ide/spyder/blob/55da47c032dfcf519600f67f8b30eab467f965e7/spyder/utils/qthelpers.py#L90-L96
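Walking f_back until it is None lands on the outermost frame, i.e. the file that started the process, no matter how deep the call site sits. A standalone sketch of the same walk (sys._getframe is a CPython implementation detail):

import sys

def origin_filename():
    f = sys._getframe()
    while f.f_back is not None:   # climb to the top of the call stack
        f = f.f_back
    return f.f_code.co_filename

def inner():
    return origin_filename()

def outer():
    return inner()

# However deep the call site, the same top-of-stack filename comes back.
assert outer() == origin_filename()
print(outer())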
implus/GFocal
db48199ff91466f071519cefd068fb05359dcbf2
tools/upgrade_model_version.py
python
convert
(in_file, out_file)
Convert keys in checkpoints. There can be some breaking changes during the development of mmdetection, and this tool is used for upgrading checkpoints trained with old versions to the latest one.
Convert keys in checkpoints.
[ "Convert", "keys", "in", "checkpoints", "." ]
def convert(in_file, out_file): """Convert keys in checkpoints. There can be some breaking changes during the development of mmdetection, and this tool is used for upgrading checkpoints trained with old versions to the latest one. """ checkpoint = torch.load(in_file) in_state_dict = checkpoint.pop('state_dict') out_state_dict = OrderedDict() for key, val in in_state_dict.items(): # Use ConvModule instead of nn.Conv2d in RetinaNet # cls_convs.0.weight -> cls_convs.0.conv.weight m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key) if m is not None: param = m.groups()[1] new_key = key.replace(param, 'conv.{}'.format(param)) out_state_dict[new_key] = val continue out_state_dict[key] = val checkpoint['state_dict'] = out_state_dict torch.save(checkpoint, out_file)
[ "def", "convert", "(", "in_file", ",", "out_file", ")", ":", "checkpoint", "=", "torch", ".", "load", "(", "in_file", ")", "in_state_dict", "=", "checkpoint", ".", "pop", "(", "'state_dict'", ")", "out_state_dict", "=", "OrderedDict", "(", ")", "for", "key", ",", "val", "in", "in_state_dict", ".", "items", "(", ")", ":", "# Use ConvModule instead of nn.Conv2d in RetinaNet", "# cls_convs.0.weight -> cls_convs.0.conv.weight", "m", "=", "re", ".", "search", "(", "r'(cls_convs|reg_convs).\\d.(weight|bias)'", ",", "key", ")", "if", "m", "is", "not", "None", ":", "param", "=", "m", ".", "groups", "(", ")", "[", "1", "]", "new_key", "=", "key", ".", "replace", "(", "param", ",", "'conv.{}'", ".", "format", "(", "param", ")", ")", "out_state_dict", "[", "new_key", "]", "=", "val", "continue", "out_state_dict", "[", "key", "]", "=", "val", "checkpoint", "[", "'state_dict'", "]", "=", "out_state_dict", "torch", ".", "save", "(", "checkpoint", ",", "out_file", ")" ]
https://github.com/implus/GFocal/blob/db48199ff91466f071519cefd068fb05359dcbf2/tools/upgrade_model_version.py#L8-L30
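The upgrade walks every state-dict key and rewrites RetinaNet head parameters such as cls_convs.0.weight to cls_convs.0.conv.weight. The key-rewriting regex in isolation, with the checkpoint I/O omitted (the sample keys are illustrative):

import re

def upgrade_key(key):
    m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key)
    if m is not None:
        param = m.groups()[1]
        return key.replace(param, 'conv.{}'.format(param))
    return key

assert upgrade_key('bbox_head.cls_convs.0.weight') == 'bbox_head.cls_convs.0.conv.weight'
assert upgrade_key('backbone.layer1.0.weight') == 'backbone.layer1.0.weight'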
Chaffelson/nipyapi
d3b186fd701ce308c2812746d98af9120955e810
nipyapi/nifi/models/prioritizer_types_entity.py
python
PrioritizerTypesEntity.__init__
(self, prioritizer_types=None)
PrioritizerTypesEntity - a model defined in Swagger
PrioritizerTypesEntity - a model defined in Swagger
[ "PrioritizerTypesEntity", "-", "a", "model", "defined", "in", "Swagger" ]
def __init__(self, prioritizer_types=None): """ PrioritizerTypesEntity - a model defined in Swagger """ self._prioritizer_types = None if prioritizer_types is not None: self.prioritizer_types = prioritizer_types
[ "def", "__init__", "(", "self", ",", "prioritizer_types", "=", "None", ")", ":", "self", ".", "_prioritizer_types", "=", "None", "if", "prioritizer_types", "is", "not", "None", ":", "self", ".", "prioritizer_types", "=", "prioritizer_types" ]
https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/nifi/models/prioritizer_types_entity.py#L41-L49
fossasia/knittingpattern
e440429884182d6f2684b0ac051c0605ba31ae75
knittingpattern/Loader.py
python
ContentLoader.url
(self, url, encoding="UTF-8")
return self.string(webpage_content)
load and process the content behind a url :return: the processed result of the :paramref:`url's <url>` content :param str url: the url to retrieve the content from :param str encoding: the encoding of the retrieved content. The default encoding is UTF-8.
load and process the content behind a url
[ "load", "and", "process", "the", "content", "behind", "a", "url" ]
def url(self, url, encoding="UTF-8"): """load and process the content behind a url :return: the processed result of the :paramref:`url's <url>` content :param str url: the url to retrieve the content from :param str encoding: the encoding of the retrieved content. The default encoding is UTF-8. """ import urllib.request with urllib.request.urlopen(url) as file: webpage_content = file.read() webpage_content = webpage_content.decode(encoding) return self.string(webpage_content)
[ "def", "url", "(", "self", ",", "url", ",", "encoding", "=", "\"UTF-8\"", ")", ":", "import", "urllib", ".", "request", "with", "urllib", ".", "request", ".", "urlopen", "(", "url", ")", "as", "file", ":", "webpage_content", "=", "file", ".", "read", "(", ")", "webpage_content", "=", "webpage_content", ".", "decode", "(", "encoding", ")", "return", "self", ".", "string", "(", "webpage_content", ")" ]
https://github.com/fossasia/knittingpattern/blob/e440429884182d6f2684b0ac051c0605ba31ae75/knittingpattern/Loader.py#L198-L211
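The loader fetches the raw bytes, decodes them with the given encoding, and defers to its string pipeline. The fetch-and-decode half on its own, stdlib only; the URL below is a placeholder, not one the project uses:

import urllib.request

def fetch_text(url, encoding="UTF-8"):
    # Retrieve a URL's content and decode it to text.
    with urllib.request.urlopen(url) as response:
        return response.read().decode(encoding)

# e.g. text = fetch_text("https://example.com/pattern.json")
# ...then hand `text` to whatever string processor applies.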
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/pip/utils/glibc.py
python
libc_ver
()
[]
def libc_ver(): glibc_version = glibc_version_string() if glibc_version is None: # For non-glibc platforms, fall back on platform.libc_ver return platform.libc_ver() else: return ("glibc", glibc_version)
[ "def", "libc_ver", "(", ")", ":", "glibc_version", "=", "glibc_version_string", "(", ")", "if", "glibc_version", "is", "None", ":", "# For non-glibc platforms, fall back on platform.libc_ver", "return", "platform", ".", "libc_ver", "(", ")", "else", ":", "return", "(", "\"glibc\"", ",", "glibc_version", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/pip/utils/glibc.py#L75-L81
etetoolkit/ete
2b207357dc2a40ccad7bfd8f54964472c72e4726
ete3/nexml/_nexml.py
python
DNAMatrixSeqRow.export
(self, outfile, level, namespace_='', name_='DNAMatrixSeqRow', namespacedef_='')
[]
def export(self, outfile, level, namespace_='', name_='DNAMatrixSeqRow', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = [] self.exportAttributes(outfile, level, already_processed, namespace_, name_='DNAMatrixSeqRow') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write('/>\n')
[ "def", "export", "(", "self", ",", "outfile", ",", "level", ",", "namespace_", "=", "''", ",", "name_", "=", "'DNAMatrixSeqRow'", ",", "namespacedef_", "=", "''", ")", ":", "showIndent", "(", "outfile", ",", "level", ")", "outfile", ".", "write", "(", "'<%s%s%s'", "%", "(", "namespace_", ",", "name_", ",", "namespacedef_", "and", "' '", "+", "namespacedef_", "or", "''", ",", ")", ")", "already_processed", "=", "[", "]", "self", ".", "exportAttributes", "(", "outfile", ",", "level", ",", "already_processed", ",", "namespace_", ",", "name_", "=", "'DNAMatrixSeqRow'", ")", "if", "self", ".", "hasContent_", "(", ")", ":", "outfile", ".", "write", "(", "'>\\n'", ")", "self", ".", "exportChildren", "(", "outfile", ",", "level", "+", "1", ",", "namespace_", ",", "name_", ")", "showIndent", "(", "outfile", ",", "level", ")", "outfile", ".", "write", "(", "'</%s%s>\\n'", "%", "(", "namespace_", ",", "name_", ")", ")", "else", ":", "outfile", ".", "write", "(", "'/>\\n'", ")" ]
https://github.com/etetoolkit/ete/blob/2b207357dc2a40ccad7bfd8f54964472c72e4726/ete3/nexml/_nexml.py#L13463-L13474
LinkedInAttic/indextank-service
880c6295ce8e7a3a55bf9b3777cc35c7680e0d7e
storefront/boto/sdb/connection.py
python
SDBConnection.query_with_attributes
(self, domain_or_name, query='', attr_names=None, max_items=None, next_token=None)
return self.get_list('QueryWithAttributes', params, [('Item', self.item_cls)], parent=domain)
Returns a set of Attributes for item names within domain_name that match the query. :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. :param domain_or_name: Either the name of a domain or a Domain object :type query: string :param query: The SimpleDB query to be performed. :type attr_names: list :param attr_names: The name of the attributes to be returned. If no attributes are specified, all attributes will be returned. :type max_items: int :param max_items: The maximum number of items to return. If not supplied, the default is None which returns all items matching the query. :rtype: ResultSet :return: An iterator containing the results.
Returns a set of Attributes for item names within domain_name that match the query. :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. :param domain_or_name: Either the name of a domain or a Domain object
[ "Returns", "a", "set", "of", "Attributes", "for", "item", "names", "within", "domain_name", "that", "match", "the", "query", ".", ":", "type", "domain_or_name", ":", "string", "or", ":", "class", ":", "boto", ".", "sdb", ".", "domain", ".", "Domain", "object", ".", ":", "param", "domain_or_name", ":", "Either", "the", "name", "of", "a", "domain", "or", "a", "Domain", "object" ]
def query_with_attributes(self, domain_or_name, query='', attr_names=None, max_items=None, next_token=None): """ Returns a set of Attributes for item names within domain_name that match the query. :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. :param domain_or_name: Either the name of a domain or a Domain object :type query: string :param query: The SimpleDB query to be performed. :type attr_names: list :param attr_names: The name of the attributes to be returned. If no attributes are specified, all attributes will be returned. :type max_items: int :param max_items: The maximum number of items to return. If not supplied, the default is None which returns all items matching the query. :rtype: ResultSet :return: An iterator containing the results. """ warnings.warn('Query interface is deprecated', DeprecationWarning) domain, domain_name = self.get_domain_and_name(domain_or_name) params = {'DomainName':domain_name, 'QueryExpression' : query} if max_items: params['MaxNumberOfItems'] = max_items if next_token: params['NextToken'] = next_token if attr_names: self.build_list_params(params, attr_names, 'AttributeName') return self.get_list('QueryWithAttributes', params, [('Item', self.item_cls)], parent=domain)
[ "def", "query_with_attributes", "(", "self", ",", "domain_or_name", ",", "query", "=", "''", ",", "attr_names", "=", "None", ",", "max_items", "=", "None", ",", "next_token", "=", "None", ")", ":", "warnings", ".", "warn", "(", "'Query interface is deprecated'", ",", "DeprecationWarning", ")", "domain", ",", "domain_name", "=", "self", ".", "get_domain_and_name", "(", "domain_or_name", ")", "params", "=", "{", "'DomainName'", ":", "domain_name", ",", "'QueryExpression'", ":", "query", "}", "if", "max_items", ":", "params", "[", "'MaxNumberOfItems'", "]", "=", "max_items", "if", "next_token", ":", "params", "[", "'NextToken'", "]", "=", "next_token", "if", "attr_names", ":", "self", ".", "build_list_params", "(", "params", ",", "attr_names", ",", "'AttributeName'", ")", "return", "self", ".", "get_list", "(", "'QueryWithAttributes'", ",", "params", ",", "[", "(", "'Item'", ",", "self", ".", "item_cls", ")", "]", ",", "parent", "=", "domain", ")" ]
https://github.com/LinkedInAttic/indextank-service/blob/880c6295ce8e7a3a55bf9b3777cc35c7680e0d7e/storefront/boto/sdb/connection.py#L383-L417
OpenMDAO/OpenMDAO-Framework
f2e37b7de3edeaaeb2d251b375917adec059db9b
openmdao.main/src/openmdao/main/container.py
python
Container.save_to_egg
(self, name, version, py_dir=None, src_dir=None, src_files=None, child_objs=None, dst_dir=None, observer=None, need_requirements=True)
Save state and other files to an egg. Typically used to copy all or part of a simulation to another user or machine. By specifying child containers in `child_objs`, it will be possible to create instances of just those containers from the installed egg. Child container names should be specified relative to this container. name: string Name for egg; must be an alphanumeric string. version: string Version for egg; must be an alphanumeric string. py_dir: string The (root) directory for local Python files. It defaults to the current directory. src_dir: string The root of all (relative) `src_files`. src_files: list List of paths to files to be included in the egg. child_objs: list List of child objects for additional entry points. dst_dir: string The directory to write the egg in. observer: callable Will be called via an :class:`EggObserver`. need_requirements: bool Passed to :meth:`eggsaver.save_to_egg`. After collecting entry point information, calls :meth:`eggsaver.save_to_egg`. Returns ``(egg_filename, required_distributions, orphan_modules)``.
Save state and other files to an egg. Typically used to copy all or part of a simulation to another user or machine. By specifying child containers in `child_objs`, it will be possible to create instances of just those containers from the installed egg. Child container names should be specified relative to this container.
[ "Save", "state", "and", "other", "files", "to", "an", "egg", ".", "Typically", "used", "to", "copy", "all", "or", "part", "of", "a", "simulation", "to", "another", "user", "or", "machine", ".", "By", "specifying", "child", "containers", "in", "child_objs", "it", "will", "be", "possible", "to", "create", "instances", "of", "just", "those", "containers", "from", "the", "installed", "egg", ".", "Child", "container", "names", "should", "be", "specified", "relative", "to", "this", "container", "." ]
def save_to_egg(self, name, version, py_dir=None, src_dir=None, src_files=None, child_objs=None, dst_dir=None, observer=None, need_requirements=True): """Save state and other files to an egg. Typically used to copy all or part of a simulation to another user or machine. By specifying child containers in `child_objs`, it will be possible to create instances of just those containers from the installed egg. Child container names should be specified relative to this container. name: string Name for egg; must be an alphanumeric string. version: string Version for egg; must be an alphanumeric string. py_dir: string The (root) directory for local Python files. It defaults to the current directory. src_dir: string The root of all (relative) `src_files`. src_files: list List of paths to files to be included in the egg. child_objs: list List of child objects for additional entry points. dst_dir: string The directory to write the egg in. observer: callable Will be called via an :class:`EggObserver`. need_requirements: bool Passed to :meth:`eggsaver.save_to_egg`. After collecting entry point information, calls :meth:`eggsaver.save_to_egg`. Returns ``(egg_filename, required_distributions, orphan_modules)``. """ assert name and isinstance(name, basestring) assert version and isinstance(version, basestring) if not version.endswith('.'): version += '.' now = datetime.datetime.now() # Could consider using utcnow(). tstamp = '%d.%02d.%02d.%02d.%02d' % \ (now.year, now.month, now.day, now.hour, now.minute) version += tstamp observer = eggobserver.EggObserver(observer, self._logger) # Child entry point names are the pathname, starting at self. entry_pts = [(self, name, _get_entry_group(self))] if child_objs is not None: root_pathname = self.get_pathname() root_start = root_pathname.rfind('.') root_start = root_start+1 if root_start >= 0 else 0 root_pathname += '.' for child in child_objs: pathname = child.get_pathname() if not pathname.startswith(root_pathname): msg = '%s is not a child of %s' % (pathname, root_pathname) observer.exception(msg) self.raise_exception(msg, RuntimeError) entry_pts.append((child, pathname[root_start:], _get_entry_group(child))) parent = self.parent self.parent = None # Don't want to save stuff above us. try: return eggsaver.save_to_egg(entry_pts, version, py_dir, src_dir, src_files, dst_dir, self._logger, observer.observer, need_requirements) except Exception: self.reraise_exception(info=sys.exc_info()) # Just to get a pathname. finally: self.parent = parent
[ "def", "save_to_egg", "(", "self", ",", "name", ",", "version", ",", "py_dir", "=", "None", ",", "src_dir", "=", "None", ",", "src_files", "=", "None", ",", "child_objs", "=", "None", ",", "dst_dir", "=", "None", ",", "observer", "=", "None", ",", "need_requirements", "=", "True", ")", ":", "assert", "name", "and", "isinstance", "(", "name", ",", "basestring", ")", "assert", "version", "and", "isinstance", "(", "version", ",", "basestring", ")", "if", "not", "version", ".", "endswith", "(", "'.'", ")", ":", "version", "+=", "'.'", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "# Could consider using utcnow().", "tstamp", "=", "'%d.%02d.%02d.%02d.%02d'", "%", "(", "now", ".", "year", ",", "now", ".", "month", ",", "now", ".", "day", ",", "now", ".", "hour", ",", "now", ".", "minute", ")", "version", "+=", "tstamp", "observer", "=", "eggobserver", ".", "EggObserver", "(", "observer", ",", "self", ".", "_logger", ")", "# Child entry point names are the pathname, starting at self.", "entry_pts", "=", "[", "(", "self", ",", "name", ",", "_get_entry_group", "(", "self", ")", ")", "]", "if", "child_objs", "is", "not", "None", ":", "root_pathname", "=", "self", ".", "get_pathname", "(", ")", "root_start", "=", "root_pathname", ".", "rfind", "(", "'.'", ")", "root_start", "=", "root_start", "+", "1", "if", "root_start", ">=", "0", "else", "0", "root_pathname", "+=", "'.'", "for", "child", "in", "child_objs", ":", "pathname", "=", "child", ".", "get_pathname", "(", ")", "if", "not", "pathname", ".", "startswith", "(", "root_pathname", ")", ":", "msg", "=", "'%s is not a child of %s'", "%", "(", "pathname", ",", "root_pathname", ")", "observer", ".", "exception", "(", "msg", ")", "self", ".", "raise_exception", "(", "msg", ",", "RuntimeError", ")", "entry_pts", ".", "append", "(", "(", "child", ",", "pathname", "[", "root_start", ":", "]", ",", "_get_entry_group", "(", "child", ")", ")", ")", "parent", "=", "self", ".", "parent", "self", ".", "parent", "=", "None", "# Don't want to save stuff above us.", "try", ":", "return", "eggsaver", ".", "save_to_egg", "(", "entry_pts", ",", "version", ",", "py_dir", ",", "src_dir", ",", "src_files", ",", "dst_dir", ",", "self", ".", "_logger", ",", "observer", ".", "observer", ",", "need_requirements", ")", "except", "Exception", ":", "self", ".", "reraise_exception", "(", "info", "=", "sys", ".", "exc_info", "(", ")", ")", "# Just to get a pathname.", "finally", ":", "self", ".", "parent", "=", "parent" ]
https://github.com/OpenMDAO/OpenMDAO-Framework/blob/f2e37b7de3edeaaeb2d251b375917adec059db9b/openmdao.main/src/openmdao/main/container.py#L1030-L1108
replit-archive/empythoned
977ec10ced29a3541a4973dc2b59910805695752
cpython/Lib/lib-tk/Tkinter.py
python
Misc.tk_menuBar
(self, *args)
Do not use. Needed in Tk 3.6 and earlier.
Do not use. Needed in Tk 3.6 and earlier.
[ "Do", "not", "use", ".", "Needed", "in", "Tk", "3", ".", "6", "and", "earlier", "." ]
def tk_menuBar(self, *args): """Do not use. Needed in Tk 3.6 and earlier.""" pass # obsolete since Tk 4.0
[ "def", "tk_menuBar", "(", "self", ",", "*", "args", ")", ":", "pass", "# obsolete since Tk 4.0" ]
https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/cpython/Lib/lib-tk/Tkinter.py#L388-L390