code stringlengths 70 11.9k | docstring stringlengths 4 7.08k | text stringlengths 128 15k |
|---|---|---|
def read (path, tabwidth=8, **kwargs):
datamode = False
fixedcols = {}
for text in _trimmedlines (path, **kwargs):
text = text.expandtabs (tabwidth)
if datamode:
h = Holder ()
h.set (**fixedcols)
for name, cslice, parser in info:
try:
v = parser (text[cslice].strip ())
except:
reraise_context (, text[cslice].strip ())
h.set_one (name, v)
yield h
elif text[0] != :
padnamekind, padval = text.split (, 1)
name, parser = _getparser (padnamekind.strip ())
fixedcols[name] = parser (padval.strip ())
else:
n = len (text)
assert n > 1
start = 0
info = []
while start < n:
end = start + 1
while end < n and (not text[end].isspace ()):
end += 1
if start == 0:
namekind = text[start+1:end]
else:
namekind = text[start:end]
while end < n and text[end].isspace ():
end += 1
name, parser = _getparser (namekind)
if parser is None:
skippedlast = True
else:
skippedlast = False
info.append ((name, slice (start, end), parser))
start = end
datamode = True
if not skippedlast:
lname, lslice, lparser = info[-1]
info[-1] = lname, slice (lslice.start, None), lparser | Read a typed tabular text file into a stream of Holders.
Arguments:
path
The path of the file to read.
tabwidth=8
The tab width to assume. Please don't monkey with it.
mode='rt'
The file open mode (passed to io.open()).
noexistok=False
If True and the file is missing, treat it as empty.
``**kwargs``
Passed to io.open ().
Returns a generator for a stream of `pwkit.Holder`s, each of which will
contain ints, strings, or some kind of measurement (cf `pwkit.msmt`). | ### Input:
Read a typed tabular text file into a stream of Holders.
Arguments:
path
The path of the file to read.
tabwidth=8
The tab width to assume. Please don't monkey with it.
mode='rt'
The file open mode (passed to io.open()).
noexistok=False
If True and the file is missing, treat it as empty.
``**kwargs``
Passed to io.open ().
Returns a generator for a stream of `pwkit.Holder`s, each of which will
contain ints, strings, or some kind of measurement (cf `pwkit.msmt`).
### Response:
def read (path, tabwidth=8, **kwargs):
datamode = False
fixedcols = {}
for text in _trimmedlines (path, **kwargs):
text = text.expandtabs (tabwidth)
if datamode:
h = Holder ()
h.set (**fixedcols)
for name, cslice, parser in info:
try:
v = parser (text[cslice].strip ())
except:
reraise_context (, text[cslice].strip ())
h.set_one (name, v)
yield h
elif text[0] != :
padnamekind, padval = text.split (, 1)
name, parser = _getparser (padnamekind.strip ())
fixedcols[name] = parser (padval.strip ())
else:
n = len (text)
assert n > 1
start = 0
info = []
while start < n:
end = start + 1
while end < n and (not text[end].isspace ()):
end += 1
if start == 0:
namekind = text[start+1:end]
else:
namekind = text[start:end]
while end < n and text[end].isspace ():
end += 1
name, parser = _getparser (namekind)
if parser is None:
skippedlast = True
else:
skippedlast = False
info.append ((name, slice (start, end), parser))
start = end
datamode = True
if not skippedlast:
lname, lslice, lparser = info[-1]
info[-1] = lname, slice (lslice.start, None), lparser |
def coherence(self, other, fftlength=None, overlap=None,
window=, **kwargs):
from matplotlib import mlab
from ..frequencyseries import FrequencySeries
if self.sample_rate.to() != other.sample_rate.to():
sampling = min(self.sample_rate.value, other.sample_rate.value)
if self.sample_rate.value == sampling:
other = other.resample(sampling)
self_ = self
else:
self_ = self.resample(sampling)
else:
sampling = self.sample_rate.value
self_ = self
if overlap is None:
overlap = 0
else:
overlap = int((overlap * self_.sample_rate).decompose().value)
if fftlength is None:
fftlength = int(self_.size/2. + overlap/2.)
else:
fftlength = int((fftlength * self_.sample_rate).decompose().value)
if window is not None:
kwargs[] = signal.get_window(window, fftlength)
coh, freqs = mlab.cohere(self_.value, other.value, NFFT=fftlength,
Fs=sampling, noverlap=overlap, **kwargs)
out = coh.view(FrequencySeries)
out.xindex = freqs
out.epoch = self.epoch
out.name = % (self.name, other.name)
out.unit =
return out | Calculate the frequency-coherence between this `TimeSeries`
and another.
Parameters
----------
other : `TimeSeries`
`TimeSeries` signal to calculate coherence with
fftlength : `float`, optional
number of seconds in single FFT, defaults to a single FFT
covering the full duration
overlap : `float`, optional
number of seconds of overlap between FFTs, defaults to the
recommended overlap for the given window (if given), or 0
window : `str`, `numpy.ndarray`, optional
window function to apply to timeseries prior to FFT,
see :func:`scipy.signal.get_window` for details on acceptable
formats
**kwargs
any other keyword arguments accepted by
:func:`matplotlib.mlab.cohere` except ``NFFT``, ``window``,
and ``noverlap`` which are superceded by the above keyword
arguments
Returns
-------
coherence : `~gwpy.frequencyseries.FrequencySeries`
the coherence `FrequencySeries` of this `TimeSeries`
with the other
Notes
-----
If `self` and `other` have difference
:attr:`TimeSeries.sample_rate` values, the higher sampled
`TimeSeries` will be down-sampled to match the lower.
See Also
--------
:func:`matplotlib.mlab.cohere`
for details of the coherence calculator | ### Input:
Calculate the frequency-coherence between this `TimeSeries`
and another.
Parameters
----------
other : `TimeSeries`
`TimeSeries` signal to calculate coherence with
fftlength : `float`, optional
number of seconds in single FFT, defaults to a single FFT
covering the full duration
overlap : `float`, optional
number of seconds of overlap between FFTs, defaults to the
recommended overlap for the given window (if given), or 0
window : `str`, `numpy.ndarray`, optional
window function to apply to timeseries prior to FFT,
see :func:`scipy.signal.get_window` for details on acceptable
formats
**kwargs
any other keyword arguments accepted by
:func:`matplotlib.mlab.cohere` except ``NFFT``, ``window``,
and ``noverlap`` which are superceded by the above keyword
arguments
Returns
-------
coherence : `~gwpy.frequencyseries.FrequencySeries`
the coherence `FrequencySeries` of this `TimeSeries`
with the other
Notes
-----
If `self` and `other` have difference
:attr:`TimeSeries.sample_rate` values, the higher sampled
`TimeSeries` will be down-sampled to match the lower.
See Also
--------
:func:`matplotlib.mlab.cohere`
for details of the coherence calculator
### Response:
def coherence(self, other, fftlength=None, overlap=None,
window=, **kwargs):
from matplotlib import mlab
from ..frequencyseries import FrequencySeries
if self.sample_rate.to() != other.sample_rate.to():
sampling = min(self.sample_rate.value, other.sample_rate.value)
if self.sample_rate.value == sampling:
other = other.resample(sampling)
self_ = self
else:
self_ = self.resample(sampling)
else:
sampling = self.sample_rate.value
self_ = self
if overlap is None:
overlap = 0
else:
overlap = int((overlap * self_.sample_rate).decompose().value)
if fftlength is None:
fftlength = int(self_.size/2. + overlap/2.)
else:
fftlength = int((fftlength * self_.sample_rate).decompose().value)
if window is not None:
kwargs[] = signal.get_window(window, fftlength)
coh, freqs = mlab.cohere(self_.value, other.value, NFFT=fftlength,
Fs=sampling, noverlap=overlap, **kwargs)
out = coh.view(FrequencySeries)
out.xindex = freqs
out.epoch = self.epoch
out.name = % (self.name, other.name)
out.unit =
return out |
def main():
print("ERROR: Use the command instead.", file=sys.stderr)
print("> slcli %s" % .join(sys.argv[1:]), file=sys.stderr)
exit(-1) | Main function for the deprecated 'sl' command. | ### Input:
Main function for the deprecated 'sl' command.
### Response:
def main():
print("ERROR: Use the command instead.", file=sys.stderr)
print("> slcli %s" % .join(sys.argv[1:]), file=sys.stderr)
exit(-1) |
def universal_exception(coro):
@functools.wraps(coro)
async def wrapper(*args, **kwargs):
try:
return await coro(*args, **kwargs)
except (asyncio.CancelledError, NotImplementedError,
StopAsyncIteration):
raise
except Exception:
raise errors.PathIOError(reason=sys.exc_info())
return wrapper | Decorator. Reraising any exception (except `CancelledError` and
`NotImplementedError`) with universal exception
:py:class:`aioftp.PathIOError` | ### Input:
Decorator. Reraising any exception (except `CancelledError` and
`NotImplementedError`) with universal exception
:py:class:`aioftp.PathIOError`
### Response:
def universal_exception(coro):
@functools.wraps(coro)
async def wrapper(*args, **kwargs):
try:
return await coro(*args, **kwargs)
except (asyncio.CancelledError, NotImplementedError,
StopAsyncIteration):
raise
except Exception:
raise errors.PathIOError(reason=sys.exc_info())
return wrapper |
def get_logs_multipart(
w3,
startBlock,
stopBlock,
address,
topics,
max_blocks):
_block_ranges = block_ranges(startBlock, stopBlock, max_blocks)
for from_block, to_block in _block_ranges:
params = {
: from_block,
: to_block,
: address,
: topics
}
yield w3.eth.getLogs(
drop_items_with_none_value(params)) | Used to break up requests to ``eth_getLogs``
The getLog request is partitioned into multiple calls of the max number of blocks
``max_blocks``. | ### Input:
Used to break up requests to ``eth_getLogs``
The getLog request is partitioned into multiple calls of the max number of blocks
``max_blocks``.
### Response:
def get_logs_multipart(
w3,
startBlock,
stopBlock,
address,
topics,
max_blocks):
_block_ranges = block_ranges(startBlock, stopBlock, max_blocks)
for from_block, to_block in _block_ranges:
params = {
: from_block,
: to_block,
: address,
: topics
}
yield w3.eth.getLogs(
drop_items_with_none_value(params)) |
def create(options, timer=None, use_deque=True):
if options is None:
cache_cls = DequeOutTTLCache if use_deque else cachetools.TTLCache
return LockedObject(
cache_cls(
options.num_entries,
ttl=ttl.total_seconds(),
timer=to_cache_timer(timer)
))
cache_cls = DequeOutLRUCache if use_deque else cachetools.LRUCache
return LockedObject(cache_cls(options.num_entries)) | Create a cache specified by ``options``
``options`` is an instance of either
:class:`endpoints_management.control.caches.CheckOptions` or
:class:`endpoints_management.control.caches.ReportOptions`
The returned cache is wrapped in a :class:`LockedObject`, requiring it to
be accessed in a with statement that gives synchronized access
Example:
>>> options = CheckOptions()
>>> synced_cache = make_cache(options)
>>> with synced_cache as cache: # acquire the lock
... cache['a_key'] = 'a_value'
Args:
options (object): an instance of either of the options classes
Returns:
:class:`cachetools.Cache`: the cache implementation specified by options
or None: if options is ``None`` or if options.num_entries < 0
Raises:
ValueError: if options is not a support type | ### Input:
Create a cache specified by ``options``
``options`` is an instance of either
:class:`endpoints_management.control.caches.CheckOptions` or
:class:`endpoints_management.control.caches.ReportOptions`
The returned cache is wrapped in a :class:`LockedObject`, requiring it to
be accessed in a with statement that gives synchronized access
Example:
>>> options = CheckOptions()
>>> synced_cache = make_cache(options)
>>> with synced_cache as cache: # acquire the lock
... cache['a_key'] = 'a_value'
Args:
options (object): an instance of either of the options classes
Returns:
:class:`cachetools.Cache`: the cache implementation specified by options
or None: if options is ``None`` or if options.num_entries < 0
Raises:
ValueError: if options is not a support type
### Response:
def create(options, timer=None, use_deque=True):
if options is None:
cache_cls = DequeOutTTLCache if use_deque else cachetools.TTLCache
return LockedObject(
cache_cls(
options.num_entries,
ttl=ttl.total_seconds(),
timer=to_cache_timer(timer)
))
cache_cls = DequeOutLRUCache if use_deque else cachetools.LRUCache
return LockedObject(cache_cls(options.num_entries)) |
def nameValue(name, value, valueType=str, quotes=False):
if valueType == bool:
if value:
return "--%s" % name
return ""
if value is None:
return ""
if quotes:
return "--%s " % (name, valueType(value))
return "--%s %s" % (name, valueType(value)) | Little function to make it easier to make name value strings for commands. | ### Input:
Little function to make it easier to make name value strings for commands.
### Response:
def nameValue(name, value, valueType=str, quotes=False):
if valueType == bool:
if value:
return "--%s" % name
return ""
if value is None:
return ""
if quotes:
return "--%s " % (name, valueType(value))
return "--%s %s" % (name, valueType(value)) |
def add_make_function_rule(self, rule, opname, attr, customize):
if self.version >= 3.3:
new_rule = rule % (() * 1)
else:
new_rule = rule % (() * 0)
self.add_unique_rule(new_rule, opname, attr, customize) | Python 3.3 added a an addtional LOAD_CONST before MAKE_FUNCTION and
this has an effect on many rules. | ### Input:
Python 3.3 added a an addtional LOAD_CONST before MAKE_FUNCTION and
this has an effect on many rules.
### Response:
def add_make_function_rule(self, rule, opname, attr, customize):
if self.version >= 3.3:
new_rule = rule % (() * 1)
else:
new_rule = rule % (() * 0)
self.add_unique_rule(new_rule, opname, attr, customize) |
def filter(self, networks):
cues = self.stimuli + self.inhibitors
active_cues = set()
active_readouts = set()
for clause, var in networks.mappings:
active_cues = active_cues.union((l for (l, s) in clause if l in cues))
if var in self.readouts:
active_readouts.add(var)
return Setup(active_cues.intersection(self.stimuli), active_cues.intersection(self.inhibitors), active_readouts) | Returns a new experimental setup restricted to species present in the given list of networks
Parameters
----------
networks : :class:`caspo.core.logicalnetwork.LogicalNetworkList`
List of logical networks
Returns
-------
caspo.core.setup.Setup
The restricted experimental setup | ### Input:
Returns a new experimental setup restricted to species present in the given list of networks
Parameters
----------
networks : :class:`caspo.core.logicalnetwork.LogicalNetworkList`
List of logical networks
Returns
-------
caspo.core.setup.Setup
The restricted experimental setup
### Response:
def filter(self, networks):
cues = self.stimuli + self.inhibitors
active_cues = set()
active_readouts = set()
for clause, var in networks.mappings:
active_cues = active_cues.union((l for (l, s) in clause if l in cues))
if var in self.readouts:
active_readouts.add(var)
return Setup(active_cues.intersection(self.stimuli), active_cues.intersection(self.inhibitors), active_readouts) |
def read_windows_environ():
res = winapi.GetEnvironmentStringsW()
if not res:
raise ctypes.WinError()
res = ctypes.cast(res, ctypes.POINTER(ctypes.c_wchar))
done = []
current = u""
i = 0
while 1:
c = res[i]
i += 1
if c == u"\x00":
if not current:
break
done.append(current)
current = u""
continue
current += c
dict_ = {}
for entry in done:
try:
key, value = entry.split(u"=", 1)
except ValueError:
continue
key = _norm_key(key)
dict_[key] = value
status = winapi.FreeEnvironmentStringsW(res)
if status == 0:
raise ctypes.WinError()
return dict_ | Returns a unicode dict of the Windows environment.
Raises:
WindowsEnvironError | ### Input:
Returns a unicode dict of the Windows environment.
Raises:
WindowsEnvironError
### Response:
def read_windows_environ():
res = winapi.GetEnvironmentStringsW()
if not res:
raise ctypes.WinError()
res = ctypes.cast(res, ctypes.POINTER(ctypes.c_wchar))
done = []
current = u""
i = 0
while 1:
c = res[i]
i += 1
if c == u"\x00":
if not current:
break
done.append(current)
current = u""
continue
current += c
dict_ = {}
for entry in done:
try:
key, value = entry.split(u"=", 1)
except ValueError:
continue
key = _norm_key(key)
dict_[key] = value
status = winapi.FreeEnvironmentStringsW(res)
if status == 0:
raise ctypes.WinError()
return dict_ |
def is_ambiguous(self, dt, idx=None):
if idx is None:
idx = self._find_last_transition(dt)
timestamp = _datetime_to_timestamp(dt)
tti = self._get_ttinfo(idx)
if idx is None or idx <= 0:
return False
od = self._get_ttinfo(idx - 1).offset - tti.offset
tt = self._trans_list[idx]
return timestamp < tt + od | Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0 | ### Input:
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
### Response:
def is_ambiguous(self, dt, idx=None):
if idx is None:
idx = self._find_last_transition(dt)
timestamp = _datetime_to_timestamp(dt)
tti = self._get_ttinfo(idx)
if idx is None or idx <= 0:
return False
od = self._get_ttinfo(idx - 1).offset - tti.offset
tt = self._trans_list[idx]
return timestamp < tt + od |
def setUp(self, item):
assert isinstance(item, WSDLTools.Binding), \
portType = item.getPortType()
self._kwargs = KW.copy()
self._kwargs[] = NC_to_CN(item.name)
self.operations = []
self.rProp = portType.getResourceProperties()
soap_binding = item.findBinding(WSDLTools.SoapBinding)
if soap_binding is None:
raise Wsdl2PythonError,\
%item.name
for bop in item.operations:
soap_bop = bop.findBinding(WSDLTools.SoapOperationBinding)
if soap_bop is None:
self.logger.warning(\
\
%(item.name, bop.name),
)
continue
if bop.input is not None:
soapBodyBind = bop.input.findBinding(WSDLTools.SoapBodyBinding)
if soapBodyBind is None:
self.logger.warning(\
\
%(item.name, bop.name, bop.extensions)
)
continue
op = portType.operations.get(bop.name)
if op is None:
raise Wsdl2PythonError,\
% bop.name
c = self.operationclass(useWSA=self.useWSA,
do_extended=self.do_extended)
c.setUp(bop)
self.operations.append(c) | This method finds all SOAP Binding Operations, it will skip
all bindings that are not SOAP.
item -- WSDL.Binding instance | ### Input:
This method finds all SOAP Binding Operations, it will skip
all bindings that are not SOAP.
item -- WSDL.Binding instance
### Response:
def setUp(self, item):
assert isinstance(item, WSDLTools.Binding), \
portType = item.getPortType()
self._kwargs = KW.copy()
self._kwargs[] = NC_to_CN(item.name)
self.operations = []
self.rProp = portType.getResourceProperties()
soap_binding = item.findBinding(WSDLTools.SoapBinding)
if soap_binding is None:
raise Wsdl2PythonError,\
%item.name
for bop in item.operations:
soap_bop = bop.findBinding(WSDLTools.SoapOperationBinding)
if soap_bop is None:
self.logger.warning(\
\
%(item.name, bop.name),
)
continue
if bop.input is not None:
soapBodyBind = bop.input.findBinding(WSDLTools.SoapBodyBinding)
if soapBodyBind is None:
self.logger.warning(\
\
%(item.name, bop.name, bop.extensions)
)
continue
op = portType.operations.get(bop.name)
if op is None:
raise Wsdl2PythonError,\
% bop.name
c = self.operationclass(useWSA=self.useWSA,
do_extended=self.do_extended)
c.setUp(bop)
self.operations.append(c) |
def __recieve_chunk(self):
if self.__response == const.CMD_DATA:
if self.tcp:
if self.verbose: print ("_rc_DATA! is {} bytes, tcp length is {}".format(len(self.__data), self.__tcp_length))
if len(self.__data) < (self.__tcp_length - 8):
need = (self.__tcp_length - 8) - len(self.__data)
if self.verbose: print ("need more data: {}".format(need))
more_data = self.__recieve_raw_data(need)
return b.join([self.__data, more_data])
else:
if self.verbose: print ("Enough data")
return self.__data
else:
if self.verbose: print ("_rc len is {}".format(len(self.__data)))
return self.__data
elif self.__response == const.CMD_PREPARE_DATA:
data = []
size = self.__get_data_size()
if self.verbose: print ("recieve chunk: prepare data size is {}".format(size))
if self.tcp:
if len(self.__data) >= (8 + size):
data_recv = self.__data[8:]
else:
data_recv = self.__data[8:] + self.__sock.recv(size + 32)
resp, broken_header = self.__recieve_tcp_data(data_recv, size)
data.append(resp)
if len(broken_header) < 16:
data_recv = broken_header + self.__sock.recv(16)
else:
data_recv = broken_header
if len(data_recv) < 16:
print ("trying to complete broken ACK %s /16" % len(data_recv))
if self.verbose: print (data_recv.encode())
data_recv += self.__sock.recv(16 - len(data_recv))
if not self.__test_tcp_top(data_recv):
if self.verbose: print ("invalid chunk tcp ACK OK")
return None
response = unpack(, data_recv[8:16])[0]
if response == const.CMD_ACK_OK:
if self.verbose: print ("chunk tcp ACK OK!")
return b.join(data)
if self.verbose: print("bad response %s" % data_recv)
if self.verbose: print (codecs.encode(data,))
return None
return resp
while True:
data_recv = self.__sock.recv(1024+8)
response = unpack(, data_recv[:8])[0]
if self.verbose: print ("
if response == const.CMD_DATA:
data.append(data_recv[8:])
size -= 1024
elif response == const.CMD_ACK_OK:
break
else:
if self.verbose: print ("broken!")
break
if self.verbose: print ("still needs %s" % size)
return b.join(data)
else:
if self.verbose: print ("invalid response %s" % self.__response)
return None | recieve a chunk | ### Input:
recieve a chunk
### Response:
def __recieve_chunk(self):
if self.__response == const.CMD_DATA:
if self.tcp:
if self.verbose: print ("_rc_DATA! is {} bytes, tcp length is {}".format(len(self.__data), self.__tcp_length))
if len(self.__data) < (self.__tcp_length - 8):
need = (self.__tcp_length - 8) - len(self.__data)
if self.verbose: print ("need more data: {}".format(need))
more_data = self.__recieve_raw_data(need)
return b.join([self.__data, more_data])
else:
if self.verbose: print ("Enough data")
return self.__data
else:
if self.verbose: print ("_rc len is {}".format(len(self.__data)))
return self.__data
elif self.__response == const.CMD_PREPARE_DATA:
data = []
size = self.__get_data_size()
if self.verbose: print ("recieve chunk: prepare data size is {}".format(size))
if self.tcp:
if len(self.__data) >= (8 + size):
data_recv = self.__data[8:]
else:
data_recv = self.__data[8:] + self.__sock.recv(size + 32)
resp, broken_header = self.__recieve_tcp_data(data_recv, size)
data.append(resp)
if len(broken_header) < 16:
data_recv = broken_header + self.__sock.recv(16)
else:
data_recv = broken_header
if len(data_recv) < 16:
print ("trying to complete broken ACK %s /16" % len(data_recv))
if self.verbose: print (data_recv.encode())
data_recv += self.__sock.recv(16 - len(data_recv))
if not self.__test_tcp_top(data_recv):
if self.verbose: print ("invalid chunk tcp ACK OK")
return None
response = unpack(, data_recv[8:16])[0]
if response == const.CMD_ACK_OK:
if self.verbose: print ("chunk tcp ACK OK!")
return b.join(data)
if self.verbose: print("bad response %s" % data_recv)
if self.verbose: print (codecs.encode(data,))
return None
return resp
while True:
data_recv = self.__sock.recv(1024+8)
response = unpack(, data_recv[:8])[0]
if self.verbose: print ("
if response == const.CMD_DATA:
data.append(data_recv[8:])
size -= 1024
elif response == const.CMD_ACK_OK:
break
else:
if self.verbose: print ("broken!")
break
if self.verbose: print ("still needs %s" % size)
return b.join(data)
else:
if self.verbose: print ("invalid response %s" % self.__response)
return None |
def generateRecursive(self, component, all_components, builddir=None, modbuilddir=None, processed_components=None, application=None):
assert(self.configured)
if builddir is None:
builddir = self.buildroot
if modbuilddir is None:
modbuilddir = os.path.join(builddir, )
if processed_components is None:
processed_components = dict()
if not self.target:
yield % self.target
toplevel = not len(processed_components)
logger.debug( % (component, self.target))
recursive_deps = component.getDependenciesRecursive(
available_components = all_components,
target = self.target,
available_only = True,
test = True
)
dependencies = component.getDependencies(
all_components,
target = self.target,
available_only = True,
test = True
)
for name, dep in dependencies.items():
if not dep:
if dep.isTestDependency():
logger.debug( % (name, component))
else:
yield % (name, component)
processed_components[component.getName()] = component
new_dependencies = OrderedDict([(name,c) for name,c in dependencies.items() if c and not name in processed_components])
self.generate(builddir, modbuilddir, component, new_dependencies, dependencies, recursive_deps, application, toplevel)
logger.debug( % component)
for d in recursive_deps.values():
logger.debug( % d)
processed_components.update(new_dependencies)
for name, c in new_dependencies.items():
for error in self.generateRecursive(
c, all_components, os.path.join(modbuilddir, name), modbuilddir, processed_components, application=application
):
yield error | generate top-level CMakeLists for this component and its
dependencies: the CMakeLists are all generated in self.buildroot,
which MUST be out-of-source
!!! NOTE: experimenting with a slightly different way of doing
things here, this function is a generator that yields any errors
produced, so the correct use is:
for error in gen.generateRecursive(...):
print(error) | ### Input:
generate top-level CMakeLists for this component and its
dependencies: the CMakeLists are all generated in self.buildroot,
which MUST be out-of-source
!!! NOTE: experimenting with a slightly different way of doing
things here, this function is a generator that yields any errors
produced, so the correct use is:
for error in gen.generateRecursive(...):
print(error)
### Response:
def generateRecursive(self, component, all_components, builddir=None, modbuilddir=None, processed_components=None, application=None):
assert(self.configured)
if builddir is None:
builddir = self.buildroot
if modbuilddir is None:
modbuilddir = os.path.join(builddir, )
if processed_components is None:
processed_components = dict()
if not self.target:
yield % self.target
toplevel = not len(processed_components)
logger.debug( % (component, self.target))
recursive_deps = component.getDependenciesRecursive(
available_components = all_components,
target = self.target,
available_only = True,
test = True
)
dependencies = component.getDependencies(
all_components,
target = self.target,
available_only = True,
test = True
)
for name, dep in dependencies.items():
if not dep:
if dep.isTestDependency():
logger.debug( % (name, component))
else:
yield % (name, component)
processed_components[component.getName()] = component
new_dependencies = OrderedDict([(name,c) for name,c in dependencies.items() if c and not name in processed_components])
self.generate(builddir, modbuilddir, component, new_dependencies, dependencies, recursive_deps, application, toplevel)
logger.debug( % component)
for d in recursive_deps.values():
logger.debug( % d)
processed_components.update(new_dependencies)
for name, c in new_dependencies.items():
for error in self.generateRecursive(
c, all_components, os.path.join(modbuilddir, name), modbuilddir, processed_components, application=application
):
yield error |
def compiled_model(self):
return ALCHEMY_TEMPLATES.model.safe_substitute(class_name=self.class_name,
table_name=self.table_name,
column_definitions=self.compiled_columns,
init_function=self.compiled_init_func,
update_function=self.compiled_update_func,
hash_function=self.compiled_hash_func,
eq_function=self.compiled_eq_func,
neq_function=self.compiled_neq_func,
str_function=self.compiled_str_func,
unicode_function=self.compiled_unicode_func,
repr_function=self.compiled_repr_func,
types=", ".join(self.basic_types),
username=self.username,
foreign_keys=self.compiled_foreign_keys,
relationships=self.compiled_relationships,
named_imports=self.compiled_named_imports,
orm_imports=self.compiled_orm_imports,
get_proxy_cls_function=self.compiled_proxy_cls_func,
add_function=ALCHEMY_TEMPLATES.add_function.template,
delete_function=ALCHEMY_TEMPLATES.delete_function.template,
to_dict_function=ALCHEMY_TEMPLATES.to_dict_function.template,
to_proxy_function=ALCHEMY_TEMPLATES.to_proxy_function.template,
from_proxy_function=ALCHEMY_TEMPLATES.from_proxy_function.template) | Returns compile ORM class for the user supplied model | ### Input:
Returns compile ORM class for the user supplied model
### Response:
def compiled_model(self):
return ALCHEMY_TEMPLATES.model.safe_substitute(class_name=self.class_name,
table_name=self.table_name,
column_definitions=self.compiled_columns,
init_function=self.compiled_init_func,
update_function=self.compiled_update_func,
hash_function=self.compiled_hash_func,
eq_function=self.compiled_eq_func,
neq_function=self.compiled_neq_func,
str_function=self.compiled_str_func,
unicode_function=self.compiled_unicode_func,
repr_function=self.compiled_repr_func,
types=", ".join(self.basic_types),
username=self.username,
foreign_keys=self.compiled_foreign_keys,
relationships=self.compiled_relationships,
named_imports=self.compiled_named_imports,
orm_imports=self.compiled_orm_imports,
get_proxy_cls_function=self.compiled_proxy_cls_func,
add_function=ALCHEMY_TEMPLATES.add_function.template,
delete_function=ALCHEMY_TEMPLATES.delete_function.template,
to_dict_function=ALCHEMY_TEMPLATES.to_dict_function.template,
to_proxy_function=ALCHEMY_TEMPLATES.to_proxy_function.template,
from_proxy_function=ALCHEMY_TEMPLATES.from_proxy_function.template) |
def transform(self, offset, newseqid=None):
for feature in self:
feature._range.transform(offset)
if newseqid is not None:
feature.seqid = newseqid | Transform the feature's coordinates by the given offset. | ### Input:
Transform the feature's coordinates by the given offset.
### Response:
def transform(self, offset, newseqid=None):
for feature in self:
feature._range.transform(offset)
if newseqid is not None:
feature.seqid = newseqid |
def _raw_input_contains_national_prefix(raw_input, national_prefix, region_code):
nnn = normalize_digits_only(raw_input)
if nnn.startswith(national_prefix):
try:
return is_valid_number(parse(nnn[len(national_prefix):], region_code))
except NumberParseException:
return False
return False | Check if raw_input, which is assumed to be in the national format, has a
national prefix. The national prefix is assumed to be in digits-only
form. | ### Input:
Check if raw_input, which is assumed to be in the national format, has a
national prefix. The national prefix is assumed to be in digits-only
form.
### Response:
def _raw_input_contains_national_prefix(raw_input, national_prefix, region_code):
nnn = normalize_digits_only(raw_input)
if nnn.startswith(national_prefix):
try:
return is_valid_number(parse(nnn[len(national_prefix):], region_code))
except NumberParseException:
return False
return False |
def makedirs(path):
if not os.path.isdir(path):
os.makedirs(path)
return path | Create directories if they do not exist, otherwise do nothing.
Return path for convenience | ### Input:
Create directories if they do not exist, otherwise do nothing.
Return path for convenience
### Response:
def makedirs(path):
if not os.path.isdir(path):
os.makedirs(path)
return path |
def propertyContainer(self, ulBuffer):
fn = self.function_table.propertyContainer
result = fn(ulBuffer)
return result | retrieves the property container of an buffer. | ### Input:
retrieves the property container of an buffer.
### Response:
def propertyContainer(self, ulBuffer):
fn = self.function_table.propertyContainer
result = fn(ulBuffer)
return result |
def max(self, axis=None, skipna=True):
nv.validate_minmax_axis(axis)
return nanops.nanmax(self._values, skipna=skipna) | Return the maximum value of the Index.
Parameters
----------
axis : int, optional
For compatibility with NumPy. Only 0 or None are allowed.
skipna : bool, default True
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2) | ### Input:
Return the maximum value of the Index.
Parameters
----------
axis : int, optional
For compatibility with NumPy. Only 0 or None are allowed.
skipna : bool, default True
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2)
### Response:
def max(self, axis=None, skipna=True):
nv.validate_minmax_axis(axis)
return nanops.nanmax(self._values, skipna=skipna) |
def set_body_s(self, stream):
if self.argstreams[2].state == StreamState.init:
self.argstreams[2] = stream
else:
raise TChannelError(
"Unable to change the body since the streaming has started") | Set customized body stream.
Note: the body stream can only be changed before the stream
is consumed.
:param stream: InMemStream/PipeStream for body
:except TChannelError:
Raise TChannelError if the stream is being sent when you try
to change the stream. | ### Input:
Set customized body stream.
Note: the body stream can only be changed before the stream
is consumed.
:param stream: InMemStream/PipeStream for body
:except TChannelError:
Raise TChannelError if the stream is being sent when you try
to change the stream.
### Response:
def set_body_s(self, stream):
if self.argstreams[2].state == StreamState.init:
self.argstreams[2] = stream
else:
raise TChannelError(
"Unable to change the body since the streaming has started") |
def render(self, rect, data):
x = rect.x
y = rect.y
w = size.x
h = size.y
extra_width = rect.w - w
extra_height = rect.h - h
if self.horizontal_align == AlignLM.ALIGN_CENTER:
x += extra_width * 0.5
elif self.horizontal_align == AlignLM.ALIGN_RIGHT:
x += extra_width
elif self.horizontal_align == AlignLM.GROW_X:
w = rect.w
if self.vertical_align == AlignLM.ALIGN_MIDDLE:
y += extra_height * 0.5
elif self.vertical_align == AlignLM.ALIGN_TOP:
y += extra_height
elif self.vertical_align == AlignLM.GROW_Y:
h = rect.h
self.element.render(datatypes.Rectangle(x, y, w, h), data) | Draws the managed element in the correct alignment. | ### Input:
Draws the managed element in the correct alignment.
### Response:
def render(self, rect, data):
x = rect.x
y = rect.y
w = size.x
h = size.y
extra_width = rect.w - w
extra_height = rect.h - h
if self.horizontal_align == AlignLM.ALIGN_CENTER:
x += extra_width * 0.5
elif self.horizontal_align == AlignLM.ALIGN_RIGHT:
x += extra_width
elif self.horizontal_align == AlignLM.GROW_X:
w = rect.w
if self.vertical_align == AlignLM.ALIGN_MIDDLE:
y += extra_height * 0.5
elif self.vertical_align == AlignLM.ALIGN_TOP:
y += extra_height
elif self.vertical_align == AlignLM.GROW_Y:
h = rect.h
self.element.render(datatypes.Rectangle(x, y, w, h), data) |
def convert_supplementary_material_elements(self):
for supplementary in self.main.getroot().findall():
suppl_div = etree.Element()
if in supplementary.attrib:
suppl_div.attrib[] = supplementary.attrib[]
insert_before(supplementary, suppl_div)
label = supplementary.find()
caption = supplementary.find()
ns_xlink_href = ns_format(supplementary, )
xlink_href = supplementary.attrib[ns_xlink_href]
resource_url = self.fetch_single_representation(xlink_href)
if label is not None:
label.tag =
label.attrib[] = resource_url
append_new_text(label, , join_str=)
suppl_div.append(label)
if caption is not None:
title = caption.find()
paragraphs = caption.findall()
if title is not None:
title.tag =
suppl_div.append(title)
for paragraph in paragraphs:
suppl_div.append(paragraph)
suppl_div.append(paragraph)
remove(supplementary) | Supplementary material are not, nor are they generally expected to be,
packaged into the epub file. Though this is a technical possibility,
and certain epub reading systems (such as those run on a PC) might be
reasonably capable of the external handling of diverse file formats
I presume that supplementary material will remain separate from the
document. So special cases aside, external links to supplementary
material will be employed; this will require internet connection for
access.
As for content in <supplementary-material>, they appear to strictly
contain 1 <label> element, followed by a <caption><title><p></caption>
substructure. | ### Input:
Supplementary material are not, nor are they generally expected to be,
packaged into the epub file. Though this is a technical possibility,
and certain epub reading systems (such as those run on a PC) might be
reasonably capable of the external handling of diverse file formats
I presume that supplementary material will remain separate from the
document. So special cases aside, external links to supplementary
material will be employed; this will require internet connection for
access.
As for content in <supplementary-material>, they appear to strictly
contain 1 <label> element, followed by a <caption><title><p></caption>
substructure.
### Response:
def convert_supplementary_material_elements(self):
for supplementary in self.main.getroot().findall():
suppl_div = etree.Element()
if in supplementary.attrib:
suppl_div.attrib[] = supplementary.attrib[]
insert_before(supplementary, suppl_div)
label = supplementary.find()
caption = supplementary.find()
ns_xlink_href = ns_format(supplementary, )
xlink_href = supplementary.attrib[ns_xlink_href]
resource_url = self.fetch_single_representation(xlink_href)
if label is not None:
label.tag =
label.attrib[] = resource_url
append_new_text(label, , join_str=)
suppl_div.append(label)
if caption is not None:
title = caption.find()
paragraphs = caption.findall()
if title is not None:
title.tag =
suppl_div.append(title)
for paragraph in paragraphs:
suppl_div.append(paragraph)
suppl_div.append(paragraph)
remove(supplementary) |
def retrieveVals(self):
opcinfo = OPCinfo(self._host, self._port, self._user, self._password,
self._monpath, self._ssl)
stats = opcinfo.getAllStats()
if self.hasGraph() and stats:
mem = stats[]
keys = (, , )
map(lambda k:self.setGraphVal(,k,mem[k]), keys)
if self.hasGraph() and stats:
st = stats[]
self.setGraphVal(, ,
st[])
self.setGraphVal(, ,
st[])
if self.hasGraph() and stats:
st = stats[]
self.setGraphVal(, ,
st[])
if self.hasGraph() and stats:
st = stats[]
wasted = st[] - st[]
free = st[] - st[]
self.setGraphVal(, , st[])
self.setGraphVal(, , wasted)
self.setGraphVal(, , free) | Retrieve values for graphs. | ### Input:
Retrieve values for graphs.
### Response:
def retrieveVals(self):
opcinfo = OPCinfo(self._host, self._port, self._user, self._password,
self._monpath, self._ssl)
stats = opcinfo.getAllStats()
if self.hasGraph() and stats:
mem = stats[]
keys = (, , )
map(lambda k:self.setGraphVal(,k,mem[k]), keys)
if self.hasGraph() and stats:
st = stats[]
self.setGraphVal(, ,
st[])
self.setGraphVal(, ,
st[])
if self.hasGraph() and stats:
st = stats[]
self.setGraphVal(, ,
st[])
if self.hasGraph() and stats:
st = stats[]
wasted = st[] - st[]
free = st[] - st[]
self.setGraphVal(, , st[])
self.setGraphVal(, , wasted)
self.setGraphVal(, , free) |
def setup_blueprint(self):
self.blueprint.add_url_rule("/", "status", self.status)
self.blueprint.add_url_rule("/healthy", "health", self.healthy)
self.blueprint.add_url_rule("/ready", "ready", self.ready)
self.blueprint.add_url_rule("/threads", "threads", self.threads_bt) | Initialize the blueprint. | ### Input:
Initialize the blueprint.
### Response:
def setup_blueprint(self):
self.blueprint.add_url_rule("/", "status", self.status)
self.blueprint.add_url_rule("/healthy", "health", self.healthy)
self.blueprint.add_url_rule("/ready", "ready", self.ready)
self.blueprint.add_url_rule("/threads", "threads", self.threads_bt) |
def list_all_order_line_items(cls, **kwargs):
kwargs[] = True
if kwargs.get():
return cls._list_all_order_line_items_with_http_info(**kwargs)
else:
(data) = cls._list_all_order_line_items_with_http_info(**kwargs)
return data | List OrderLineItems
Return a list of OrderLineItems
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_order_line_items(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[OrderLineItem]
If the method is called asynchronously,
returns the request thread. | ### Input:
List OrderLineItems
Return a list of OrderLineItems
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_order_line_items(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[OrderLineItem]
If the method is called asynchronously,
returns the request thread.
### Response:
def list_all_order_line_items(cls, **kwargs):
kwargs[] = True
if kwargs.get():
return cls._list_all_order_line_items_with_http_info(**kwargs)
else:
(data) = cls._list_all_order_line_items_with_http_info(**kwargs)
return data |
def transform(geom, to_sref):
if not geom_sref.IsSame(to_sref):
geom = geom.Clone()
geom.TransformTo(to_sref)
return geom | Returns a transformed Geometry.
Arguments:
geom -- any coercible Geometry value or Envelope
to_sref -- SpatialReference or EPSG ID as int | ### Input:
Returns a transformed Geometry.
Arguments:
geom -- any coercible Geometry value or Envelope
to_sref -- SpatialReference or EPSG ID as int
### Response:
def transform(geom, to_sref):
if not geom_sref.IsSame(to_sref):
geom = geom.Clone()
geom.TransformTo(to_sref)
return geom |
def limit_step(self, step):
if self.qmax is None:
return step
else:
return np.clip(step, -self.qmax, self.qmax) | Clip the a step within the maximum allowed range | ### Input:
Clip the a step within the maximum allowed range
### Response:
def limit_step(self, step):
if self.qmax is None:
return step
else:
return np.clip(step, -self.qmax, self.qmax) |
def init_word_db(cls, name, text):
text = text.replace(, ).replace(, )
words = [w.strip() for w in text.split() if w.strip()]
assert len(words) > 2, \
freqs = {}
for i in range(len(words) - 2):
w1 = words[i]
w2 = words[i + 1]
w3 = words[i + 2]
key = (w1, w2)
if key in freqs:
freqs[key].append(w3)
else:
freqs[key] = [w3]
cls._dbs[name] = {
: freqs,
: words,
: len(words) - 2
} | Initialize a database of words for the maker with the given name | ### Input:
Initialize a database of words for the maker with the given name
### Response:
def init_word_db(cls, name, text):
text = text.replace(, ).replace(, )
words = [w.strip() for w in text.split() if w.strip()]
assert len(words) > 2, \
freqs = {}
for i in range(len(words) - 2):
w1 = words[i]
w2 = words[i + 1]
w3 = words[i + 2]
key = (w1, w2)
if key in freqs:
freqs[key].append(w3)
else:
freqs[key] = [w3]
cls._dbs[name] = {
: freqs,
: words,
: len(words) - 2
} |
def get_or(self, cache_key, callback, *args, **kwargs):
cached = self.cache.get(cache_key)
if cached is not None:
return cached
else:
try:
output = callback(*args, **kwargs)
except MyCapytain.errors.UnknownCollection as E:
raise UnknownCollection(str(E))
except Exception as E:
raise E
self.cache.set(cache_key, output, self.TIMEOUT)
return output | Get or set the cache using callback and arguments
:param cache_key: Cache key for given resource
:param callback: Callback if object does not exist
:param args: Ordered Argument for the callback
:param kwargs: Keyword argument for the callback
:return: Output of the callback | ### Input:
Get or set the cache using callback and arguments
:param cache_key: Cache key for given resource
:param callback: Callback if object does not exist
:param args: Ordered Argument for the callback
:param kwargs: Keyword argument for the callback
:return: Output of the callback
### Response:
def get_or(self, cache_key, callback, *args, **kwargs):
cached = self.cache.get(cache_key)
if cached is not None:
return cached
else:
try:
output = callback(*args, **kwargs)
except MyCapytain.errors.UnknownCollection as E:
raise UnknownCollection(str(E))
except Exception as E:
raise E
self.cache.set(cache_key, output, self.TIMEOUT)
return output |
def moys_dict(self):
moy_dict = {}
for val, dt in zip(self.values, self.datetimes):
moy_dict[dt.moy] = val
return moy_dict | Return a dictionary of this collection's values where the keys are the moys.
This is useful for aligning the values with another list of datetimes. | ### Input:
Return a dictionary of this collection's values where the keys are the moys.
This is useful for aligning the values with another list of datetimes.
### Response:
def moys_dict(self):
moy_dict = {}
for val, dt in zip(self.values, self.datetimes):
moy_dict[dt.moy] = val
return moy_dict |
def data(offset, bytes):
assert 0 <= offset < 65536
assert 0 < len(bytes) < 256
b = [len(bytes), (offset>>8)&0x0FF, offset&0x0FF, 0x00] + bytes
return Record._from_bytes(b) | Return Data record. This constructs the full record, including
the length information, the record type (0x00), the
checksum, and the offset.
@param offset load offset of first byte.
@param bytes list of byte values to pack into record.
@return String representation of one HEX record | ### Input:
Return Data record. This constructs the full record, including
the length information, the record type (0x00), the
checksum, and the offset.
@param offset load offset of first byte.
@param bytes list of byte values to pack into record.
@return String representation of one HEX record
### Response:
def data(offset, bytes):
assert 0 <= offset < 65536
assert 0 < len(bytes) < 256
b = [len(bytes), (offset>>8)&0x0FF, offset&0x0FF, 0x00] + bytes
return Record._from_bytes(b) |
def _chooseSmartIndenter(self, syntax):
if syntax.indenter is not None:
try:
return _getSmartIndenter(syntax.indenter, self._qpart, self)
except KeyError:
logger.error("Indenter is not finished yet. But you can do it!" % syntax.indenter)
try:
return _getSmartIndenter(syntax.name, self._qpart, self)
except KeyError:
pass
return _getSmartIndenter(, self._qpart, self) | Get indenter for syntax | ### Input:
Get indenter for syntax
### Response:
def _chooseSmartIndenter(self, syntax):
if syntax.indenter is not None:
try:
return _getSmartIndenter(syntax.indenter, self._qpart, self)
except KeyError:
logger.error("Indenter is not finished yet. But you can do it!" % syntax.indenter)
try:
return _getSmartIndenter(syntax.name, self._qpart, self)
except KeyError:
pass
return _getSmartIndenter(, self._qpart, self) |
def launch_app(self, app_id, force_launch=False, callback_function=False):
if not force_launch and self.app_id is None:
self.update_status(lambda response:
self._send_launch_message(app_id, force_launch,
callback_function))
else:
self._send_launch_message(app_id, force_launch, callback_function) | Launches an app on the Chromecast.
Will only launch if it is not currently running unless
force_launch=True. | ### Input:
Launches an app on the Chromecast.
Will only launch if it is not currently running unless
force_launch=True.
### Response:
def launch_app(self, app_id, force_launch=False, callback_function=False):
if not force_launch and self.app_id is None:
self.update_status(lambda response:
self._send_launch_message(app_id, force_launch,
callback_function))
else:
self._send_launch_message(app_id, force_launch, callback_function) |
def addSRNLayers(self, inc, hidc, outc):
self.addThreeLayers(inc, hidc, outc) | Wraps SRN.addThreeLayers() for compatibility. | ### Input:
Wraps SRN.addThreeLayers() for compatibility.
### Response:
def addSRNLayers(self, inc, hidc, outc):
self.addThreeLayers(inc, hidc, outc) |
def change_password(self, username, newpassword, raise_on_error=False):
response = self._put(self.rest_url + "/user/password",
data=json.dumps({"value": newpassword}),
params={"username": username})
if response.ok:
return True
if raise_on_error:
raise RuntimeError(response.json()[])
return False | Change new password for a user
Args:
username: The account username.
newpassword: The account new password.
raise_on_error: optional (default: False)
Returns:
True: Succeeded
False: If unsuccessful | ### Input:
Change new password for a user
Args:
username: The account username.
newpassword: The account new password.
raise_on_error: optional (default: False)
Returns:
True: Succeeded
False: If unsuccessful
### Response:
def change_password(self, username, newpassword, raise_on_error=False):
response = self._put(self.rest_url + "/user/password",
data=json.dumps({"value": newpassword}),
params={"username": username})
if response.ok:
return True
if raise_on_error:
raise RuntimeError(response.json()[])
return False |
def _clean(self, n):
import sqlite3
num_delete = int(self.num_entries / 100.0 * n)
logger.debug("removing %i entries from db" % num_delete)
lru_dbs = self._database.execute("select hash, lru_db from traj_info").fetchall()
lru_dbs.sort(key=itemgetter(1))
hashs_by_db = {}
age_by_hash = []
for k, v in itertools.groupby(lru_dbs, key=itemgetter(1)):
hashs_by_db[k] = list(x[0] for x in v)
len_by_db = {os.path.basename(db): len(hashs_by_db[db]) for db in hashs_by_db.keys()}
logger.debug("distribution of lru: %s", str(len_by_db))
for db in hashs_by_db.keys():
with sqlite3.connect(db, timeout=self.lru_timeout) as conn:
rows = conn.execute("select hash, last_read from usage").fetchall()
for r in rows:
age_by_hash.append((r[0], float(r[1]), db))
age_by_hash.sort(key=itemgetter(1))
if len(age_by_hash)>=2:
assert[age_by_hash[-1] > age_by_hash[-2]]
ids = map(itemgetter(0), age_by_hash[:num_delete])
ids = tuple(map(str, ids))
sql_compatible_ids = SqliteDB._format_tuple_for_sql(ids)
with self._database as c:
c.execute("DELETE FROM traj_info WHERE hash in (%s)" % sql_compatible_ids)
age_by_hash.sort(key=itemgetter(2))
for db, values in itertools.groupby(age_by_hash, key=itemgetter(2)):
values = tuple(v[0] for v in values)
with sqlite3.connect(db, timeout=self.lru_timeout) as conn:
stmnt = "DELETE FROM usage WHERE hash IN (%s)" \
% SqliteDB._format_tuple_for_sql(values)
curr = conn.execute(stmnt)
assert curr.rowcount == len(values), curr.rowcount | obtain n% oldest entries by looking into the usage databases. Then these entries
are deleted first from the traj_info db and afterwards from the associated LRU dbs.
:param n: delete n% entries in traj_info db [and associated LRU (usage) dbs]. | ### Input:
obtain n% oldest entries by looking into the usage databases. Then these entries
are deleted first from the traj_info db and afterwards from the associated LRU dbs.
:param n: delete n% entries in traj_info db [and associated LRU (usage) dbs].
### Response:
def _clean(self, n):
import sqlite3
num_delete = int(self.num_entries / 100.0 * n)
logger.debug("removing %i entries from db" % num_delete)
lru_dbs = self._database.execute("select hash, lru_db from traj_info").fetchall()
lru_dbs.sort(key=itemgetter(1))
hashs_by_db = {}
age_by_hash = []
for k, v in itertools.groupby(lru_dbs, key=itemgetter(1)):
hashs_by_db[k] = list(x[0] for x in v)
len_by_db = {os.path.basename(db): len(hashs_by_db[db]) for db in hashs_by_db.keys()}
logger.debug("distribution of lru: %s", str(len_by_db))
for db in hashs_by_db.keys():
with sqlite3.connect(db, timeout=self.lru_timeout) as conn:
rows = conn.execute("select hash, last_read from usage").fetchall()
for r in rows:
age_by_hash.append((r[0], float(r[1]), db))
age_by_hash.sort(key=itemgetter(1))
if len(age_by_hash)>=2:
assert[age_by_hash[-1] > age_by_hash[-2]]
ids = map(itemgetter(0), age_by_hash[:num_delete])
ids = tuple(map(str, ids))
sql_compatible_ids = SqliteDB._format_tuple_for_sql(ids)
with self._database as c:
c.execute("DELETE FROM traj_info WHERE hash in (%s)" % sql_compatible_ids)
age_by_hash.sort(key=itemgetter(2))
for db, values in itertools.groupby(age_by_hash, key=itemgetter(2)):
values = tuple(v[0] for v in values)
with sqlite3.connect(db, timeout=self.lru_timeout) as conn:
stmnt = "DELETE FROM usage WHERE hash IN (%s)" \
% SqliteDB._format_tuple_for_sql(values)
curr = conn.execute(stmnt)
assert curr.rowcount == len(values), curr.rowcount |
def _get_ids_from_name_public(self, name):
results = self.list_public_images(name=name)
return [result[] for result in results] | Get public images which match the given name. | ### Input:
Get public images which match the given name.
### Response:
def _get_ids_from_name_public(self, name):
results = self.list_public_images(name=name)
return [result[] for result in results] |
def mask(cls, dt, **options):
return dt.replace(hour=0, minute=0, second=0, microsecond=0) | Return a datetime with the same value as ``dt``, to a
resolution of days. | ### Input:
Return a datetime with the same value as ``dt``, to a
resolution of days.
### Response:
def mask(cls, dt, **options):
return dt.replace(hour=0, minute=0, second=0, microsecond=0) |
def model_train(self):
assert self.runner is not None, (
)
hparams = self.hparams
batch_size = hparams.batch_size
nb_epochs = hparams.nb_epochs
train_dir = hparams.save_dir
filename =
X_train = self.X_train
Y_train = self.Y_train
sess = self.sess
with sess.as_default():
X_batch = X_train[:batch_size]
Y_batch = Y_train[:batch_size]
self._init_tf(X_batch, Y_batch)
for epoch in six.moves.xrange(nb_epochs):
logging.info("Epoch " + str(epoch))
nb_batches = int(math.ceil(float(len(X_train)) / batch_size))
assert nb_batches * batch_size >= len(X_train)
index_shuf = list(range(len(X_train)))
self.rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
start, end = batch_indices(
batch, len(X_train), batch_size)
self._update_learning_params()
X_batch = X_train[index_shuf[start:end]]
Y_batch = Y_train[index_shuf[start:end]]
self._run({: X_batch, : Y_batch})
self._sync_params()
while not self.runner.is_finished():
self._run()
self._sync_params(forced=True)
assert end >= len(X_train), (
)
cur = time.time()
logging.info("\tEpoch took " + str(cur - prev) + " seconds")
prev = cur
self.eval()
cond = ((epoch+1) % hparams.save_steps == 0
or epoch == nb_epochs)
if hparams.save and cond:
save_path = os.path.join(train_dir, filename)
saver = tf.train.Saver()
saver.save(sess, save_path)
logging.info("Model saved at: " + str(save_path))
logging.info("Completed model training.") | Train a TF graph
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param hparams.save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy). | ### Input:
Train a TF graph
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param hparams.save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
### Response:
def model_train(self):
assert self.runner is not None, (
)
hparams = self.hparams
batch_size = hparams.batch_size
nb_epochs = hparams.nb_epochs
train_dir = hparams.save_dir
filename =
X_train = self.X_train
Y_train = self.Y_train
sess = self.sess
with sess.as_default():
X_batch = X_train[:batch_size]
Y_batch = Y_train[:batch_size]
self._init_tf(X_batch, Y_batch)
for epoch in six.moves.xrange(nb_epochs):
logging.info("Epoch " + str(epoch))
nb_batches = int(math.ceil(float(len(X_train)) / batch_size))
assert nb_batches * batch_size >= len(X_train)
index_shuf = list(range(len(X_train)))
self.rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
start, end = batch_indices(
batch, len(X_train), batch_size)
self._update_learning_params()
X_batch = X_train[index_shuf[start:end]]
Y_batch = Y_train[index_shuf[start:end]]
self._run({: X_batch, : Y_batch})
self._sync_params()
while not self.runner.is_finished():
self._run()
self._sync_params(forced=True)
assert end >= len(X_train), (
)
cur = time.time()
logging.info("\tEpoch took " + str(cur - prev) + " seconds")
prev = cur
self.eval()
cond = ((epoch+1) % hparams.save_steps == 0
or epoch == nb_epochs)
if hparams.save and cond:
save_path = os.path.join(train_dir, filename)
saver = tf.train.Saver()
saver.save(sess, save_path)
logging.info("Model saved at: " + str(save_path))
logging.info("Completed model training.") |
def _get_function_ptr(self, name):
func = _make_function_ptr_instance
self._function_ptrs.setdefault(name, func(self, name))
return self._function_ptrs[name] | Get or create a function pointer of the given name. | ### Input:
Get or create a function pointer of the given name.
### Response:
def _get_function_ptr(self, name):
func = _make_function_ptr_instance
self._function_ptrs.setdefault(name, func(self, name))
return self._function_ptrs[name] |
def standardize_genes(self, inplace=False):
matrix = self.center_genes(inplace=inplace)
matrix.X[:,:] = matrix.X / \
np.tile(np.std(matrix.X, axis=1, ddof=1), (matrix.n, 1)).T
return matrix | Standardize the expression of each gene (row). | ### Input:
Standardize the expression of each gene (row).
### Response:
def standardize_genes(self, inplace=False):
matrix = self.center_genes(inplace=inplace)
matrix.X[:,:] = matrix.X / \
np.tile(np.std(matrix.X, axis=1, ddof=1), (matrix.n, 1)).T
return matrix |
def decrypt(text, key=None):
if key is None:
key = ENCRYPT_KEY
bits = len(key)
text = base64.b64decode(text)
iv = text[:16]
cipher = AES.new(key, AES.MODE_CBC, iv)
return unpad(cipher.decrypt(text[16:])) | Decrypts the inputted text using the inputted key.
:param text | <str>
key | <str>
:return <str> | ### Input:
Decrypts the inputted text using the inputted key.
:param text | <str>
key | <str>
:return <str>
### Response:
def decrypt(text, key=None):
if key is None:
key = ENCRYPT_KEY
bits = len(key)
text = base64.b64decode(text)
iv = text[:16]
cipher = AES.new(key, AES.MODE_CBC, iv)
return unpad(cipher.decrypt(text[16:])) |
def subscribe(self, topics):
return (yield from self._handler.mqtt_subscribe(topics, self.session.next_packet_id)) | Subscribe to some topics.
Send a MQTT `SUBSCRIBE <http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718063>`_ message and wait for broker acknowledgment.
This method is a *coroutine*.
:param topics: array of topics pattern to subscribe with associated QoS.
:return: `SUBACK <http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718068>`_ message return code.
Example of ``topics`` argument expected structure:
::
[
('$SYS/broker/uptime', QOS_1),
('$SYS/broker/load/#', QOS_2),
] | ### Input:
Subscribe to some topics.
Send a MQTT `SUBSCRIBE <http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718063>`_ message and wait for broker acknowledgment.
This method is a *coroutine*.
:param topics: array of topics pattern to subscribe with associated QoS.
:return: `SUBACK <http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718068>`_ message return code.
Example of ``topics`` argument expected structure:
::
[
('$SYS/broker/uptime', QOS_1),
('$SYS/broker/load/#', QOS_2),
]
### Response:
def subscribe(self, topics):
return (yield from self._handler.mqtt_subscribe(topics, self.session.next_packet_id)) |
def getRawReportDescriptor(self):
descriptor = _hidraw_report_descriptor()
size = ctypes.c_uint()
self._ioctl(_HIDIOCGRDESCSIZE, size, True)
descriptor.size = size
self._ioctl(_HIDIOCGRDESC, descriptor, True)
return .join(chr(x) for x in descriptor.value[:size.value]) | Return a binary string containing the raw HID report descriptor. | ### Input:
Return a binary string containing the raw HID report descriptor.
### Response:
def getRawReportDescriptor(self):
descriptor = _hidraw_report_descriptor()
size = ctypes.c_uint()
self._ioctl(_HIDIOCGRDESCSIZE, size, True)
descriptor.size = size
self._ioctl(_HIDIOCGRDESC, descriptor, True)
return .join(chr(x) for x in descriptor.value[:size.value]) |
def plot_clicked(self, mouse_event):
if type(self.peak_vals) is list:
self.peak_vals.append([mouse_event.xdata, mouse_event.ydata])
axes = self.matplotlibwidget.axes
self.matplotlibwidget.draw() | When a click is registered on the plot, appends the click location to self.peak_vals and plots a red dot in the
click location
:param mouse_event: a mouse event object for the click | ### Input:
When a click is registered on the plot, appends the click location to self.peak_vals and plots a red dot in the
click location
:param mouse_event: a mouse event object for the click
### Response:
def plot_clicked(self, mouse_event):
if type(self.peak_vals) is list:
self.peak_vals.append([mouse_event.xdata, mouse_event.ydata])
axes = self.matplotlibwidget.axes
self.matplotlibwidget.draw() |
def install():
from .core.import_hooks import ExtensionImporter
importer = ExtensionImporter()
sys.meta_path.append(importer) | Install the import hook to load extensions from the app Lib folder.
Like imports but leaves it in the meta_path, thus it is slower. | ### Input:
Install the import hook to load extensions from the app Lib folder.
Like imports but leaves it in the meta_path, thus it is slower.
### Response:
def install():
from .core.import_hooks import ExtensionImporter
importer = ExtensionImporter()
sys.meta_path.append(importer) |
def load_val_from_git_cfg(cfg_key_suffix):
cfg_key = f
cmd = "git", "config", "--local", "--get", cfg_key
try:
return (
subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
.strip()
.decode("utf-8")
)
except subprocess.CalledProcessError:
return None | Retrieve one option from Git config. | ### Input:
Retrieve one option from Git config.
### Response:
def load_val_from_git_cfg(cfg_key_suffix):
cfg_key = f
cmd = "git", "config", "--local", "--get", cfg_key
try:
return (
subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
.strip()
.decode("utf-8")
)
except subprocess.CalledProcessError:
return None |
def replace_braintree_gateway_by_id(cls, braintree_gateway_id, braintree_gateway, **kwargs):
kwargs[] = True
if kwargs.get():
return cls._replace_braintree_gateway_by_id_with_http_info(braintree_gateway_id, braintree_gateway, **kwargs)
else:
(data) = cls._replace_braintree_gateway_by_id_with_http_info(braintree_gateway_id, braintree_gateway, **kwargs)
return data | Replace BraintreeGateway
Replace all attributes of BraintreeGateway
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_braintree_gateway_by_id(braintree_gateway_id, braintree_gateway, async=True)
>>> result = thread.get()
:param async bool
:param str braintree_gateway_id: ID of braintreeGateway to replace (required)
:param BraintreeGateway braintree_gateway: Attributes of braintreeGateway to replace (required)
:return: BraintreeGateway
If the method is called asynchronously,
returns the request thread. | ### Input:
Replace BraintreeGateway
Replace all attributes of BraintreeGateway
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_braintree_gateway_by_id(braintree_gateway_id, braintree_gateway, async=True)
>>> result = thread.get()
:param async bool
:param str braintree_gateway_id: ID of braintreeGateway to replace (required)
:param BraintreeGateway braintree_gateway: Attributes of braintreeGateway to replace (required)
:return: BraintreeGateway
If the method is called asynchronously,
returns the request thread.
### Response:
def replace_braintree_gateway_by_id(cls, braintree_gateway_id, braintree_gateway, **kwargs):
kwargs[] = True
if kwargs.get():
return cls._replace_braintree_gateway_by_id_with_http_info(braintree_gateway_id, braintree_gateway, **kwargs)
else:
(data) = cls._replace_braintree_gateway_by_id_with_http_info(braintree_gateway_id, braintree_gateway, **kwargs)
return data |
def decompress(data, format=FORMAT_AUTO, memlimit=None, filters=None):
results = []
while True:
decomp = LZMADecompressor(format, memlimit, filters)
try:
res = decomp.decompress(data)
except LZMAError:
if results:
break
else:
raise
results.append(res)
if not decomp.eof:
raise LZMAError("Compressed data ended before the "
"end-of-stream marker was reached")
data = decomp.unused_data
if not data:
break
return b"".join(results) | Decompress a block of data.
Refer to LZMADecompressor's docstring for a description of the
optional arguments *format*, *check* and *filters*.
For incremental decompression, use a LZMADecompressor object instead. | ### Input:
Decompress a block of data.
Refer to LZMADecompressor's docstring for a description of the
optional arguments *format*, *check* and *filters*.
For incremental decompression, use a LZMADecompressor object instead.
### Response:
def decompress(data, format=FORMAT_AUTO, memlimit=None, filters=None):
results = []
while True:
decomp = LZMADecompressor(format, memlimit, filters)
try:
res = decomp.decompress(data)
except LZMAError:
if results:
break
else:
raise
results.append(res)
if not decomp.eof:
raise LZMAError("Compressed data ended before the "
"end-of-stream marker was reached")
data = decomp.unused_data
if not data:
break
return b"".join(results) |
def classify(self, dataset, verbose=True, batch_size=64):
prob_vector = self.predict(dataset, output_type=,
verbose=verbose, batch_size=batch_size)
id_to_label = self._id_to_class_label
return _tc.SFrame({
: prob_vector.apply(lambda v: id_to_label[_np.argmax(v)]),
: prob_vector.apply(_np.max)
}) | Return the classification for each examples in the ``dataset``.
The output SFrame contains predicted class labels and its probability.
Parameters
----------
dataset : SFrame | SArray | dict
The audio data to be classified.
If dataset is an SFrame, it must have a column with the same name as
the feature used for model training, but does not require a target
column. Additional columns are ignored.
verbose : bool, optional
If True, prints progress updates and model details.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve performance.
Returns
-------
out : SFrame
An SFrame with model predictions, both class labels and probabilities.
See Also
----------
create, evaluate, predict
Examples
----------
>>> classes = model.classify(data) | ### Input:
Return the classification for each examples in the ``dataset``.
The output SFrame contains predicted class labels and its probability.
Parameters
----------
dataset : SFrame | SArray | dict
The audio data to be classified.
If dataset is an SFrame, it must have a column with the same name as
the feature used for model training, but does not require a target
column. Additional columns are ignored.
verbose : bool, optional
If True, prints progress updates and model details.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve performance.
Returns
-------
out : SFrame
An SFrame with model predictions, both class labels and probabilities.
See Also
----------
create, evaluate, predict
Examples
----------
>>> classes = model.classify(data)
### Response:
def classify(self, dataset, verbose=True, batch_size=64):
prob_vector = self.predict(dataset, output_type=,
verbose=verbose, batch_size=batch_size)
id_to_label = self._id_to_class_label
return _tc.SFrame({
: prob_vector.apply(lambda v: id_to_label[_np.argmax(v)]),
: prob_vector.apply(_np.max)
}) |
def drop_columns(cr, column_spec):
for (table, column) in column_spec:
logger.info("table %s: drop column %s",
table, column)
if column_exists(cr, table, column):
cr.execute( %
(table, column))
else:
logger.warn("table %s: column %s did not exist",
table, column) | Drop columns but perform an additional check if a column exists.
This covers the case of function fields that may or may not be stored.
Consider that this may not be obvious: an additional module can govern
a function fields' store properties.
:param column_spec: a list of (table, column) tuples | ### Input:
Drop columns but perform an additional check if a column exists.
This covers the case of function fields that may or may not be stored.
Consider that this may not be obvious: an additional module can govern
a function fields' store properties.
:param column_spec: a list of (table, column) tuples
### Response:
def drop_columns(cr, column_spec):
for (table, column) in column_spec:
logger.info("table %s: drop column %s",
table, column)
if column_exists(cr, table, column):
cr.execute( %
(table, column))
else:
logger.warn("table %s: column %s did not exist",
table, column) |
def eci2ecef (x, y, z, gmst=None):
if gmst is None:
gmst = dmc.toGMST()
X = (x * math.cos(gmst)) + (y * math.sin(gmst))
Y = (x * (-math.sin(gmst))) + (y * math.cos(gmst))
Z = z
return X, Y, Z | Converts the given ECI coordinates to ECEF at the given Greenwich
Mean Sidereal Time (GMST) (defaults to now).
This code was adapted from
`shashwatak/satellite-js <https://github.com/shashwatak/satellite-js/blob/master/src/coordinate-transforms.js>`_
and http://ccar.colorado.edu/ASEN5070/handouts/coordsys.doc | ### Input:
Converts the given ECI coordinates to ECEF at the given Greenwich
Mean Sidereal Time (GMST) (defaults to now).
This code was adapted from
`shashwatak/satellite-js <https://github.com/shashwatak/satellite-js/blob/master/src/coordinate-transforms.js>`_
and http://ccar.colorado.edu/ASEN5070/handouts/coordsys.doc
### Response:
def eci2ecef (x, y, z, gmst=None):
if gmst is None:
gmst = dmc.toGMST()
X = (x * math.cos(gmst)) + (y * math.sin(gmst))
Y = (x * (-math.sin(gmst))) + (y * math.cos(gmst))
Z = z
return X, Y, Z |
def _route(self, attr, args, kwargs, **fkwargs):
return self.cluster.hosts.keys() | Perform routing and return db_nums | ### Input:
Perform routing and return db_nums
### Response:
def _route(self, attr, args, kwargs, **fkwargs):
return self.cluster.hosts.keys() |
def output_compressed_dinf(dinfflowang, compdinffile, weightfile):
dinf_r = RasterUtilClass.read_raster(dinfflowang)
data = dinf_r.data
xsize = dinf_r.nCols
ysize = dinf_r.nRows
nodata_value = dinf_r.noDataValue
cal_dir_code = frompyfunc(DinfUtil.compress_dinf, 2, 3)
updated_angle, dir_code, weight = cal_dir_code(data, nodata_value)
RasterUtilClass.write_gtiff_file(dinfflowang, ysize, xsize, updated_angle,
dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Float32)
RasterUtilClass.write_gtiff_file(compdinffile, ysize, xsize, dir_code,
dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Int16)
RasterUtilClass.write_gtiff_file(weightfile, ysize, xsize, weight,
dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Float32) | Output compressed Dinf flow direction and weight to raster file
Args:
dinfflowang: Dinf flow direction raster file
compdinffile: Compressed D8 flow code
weightfile: The corresponding weight | ### Input:
Output compressed Dinf flow direction and weight to raster file
Args:
dinfflowang: Dinf flow direction raster file
compdinffile: Compressed D8 flow code
weightfile: The corresponding weight
### Response:
def output_compressed_dinf(dinfflowang, compdinffile, weightfile):
dinf_r = RasterUtilClass.read_raster(dinfflowang)
data = dinf_r.data
xsize = dinf_r.nCols
ysize = dinf_r.nRows
nodata_value = dinf_r.noDataValue
cal_dir_code = frompyfunc(DinfUtil.compress_dinf, 2, 3)
updated_angle, dir_code, weight = cal_dir_code(data, nodata_value)
RasterUtilClass.write_gtiff_file(dinfflowang, ysize, xsize, updated_angle,
dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Float32)
RasterUtilClass.write_gtiff_file(compdinffile, ysize, xsize, dir_code,
dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Int16)
RasterUtilClass.write_gtiff_file(weightfile, ysize, xsize, weight,
dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Float32) |
def remove_nonspeech_fragments(self, zero_length_only=False):
self.log(u"Removing nonspeech fragments...")
nonspeech = list(self.nonspeech_fragments)
if zero_length_only:
nonspeech = [(i, f) for i, f in nonspeech if f.has_zero_length]
nonspeech_indices = [i for i, f in nonspeech]
self.remove(nonspeech_indices)
if zero_length_only:
for i, f in list(self.nonspeech_fragments):
f.fragment_type = SyncMapFragment.REGULAR
self.log(u"Removing nonspeech fragments... done") | Remove ``NONSPEECH`` fragments from the list.
If ``zero_length_only`` is ``True``, remove only
those fragments with zero length,
and make all the others ``REGULAR``.
:param bool zero_length_only: remove only zero length NONSPEECH fragments | ### Input:
Remove ``NONSPEECH`` fragments from the list.
If ``zero_length_only`` is ``True``, remove only
those fragments with zero length,
and make all the others ``REGULAR``.
:param bool zero_length_only: remove only zero length NONSPEECH fragments
### Response:
def remove_nonspeech_fragments(self, zero_length_only=False):
self.log(u"Removing nonspeech fragments...")
nonspeech = list(self.nonspeech_fragments)
if zero_length_only:
nonspeech = [(i, f) for i, f in nonspeech if f.has_zero_length]
nonspeech_indices = [i for i, f in nonspeech]
self.remove(nonspeech_indices)
if zero_length_only:
for i, f in list(self.nonspeech_fragments):
f.fragment_type = SyncMapFragment.REGULAR
self.log(u"Removing nonspeech fragments... done") |
def rm_(name, force=False, volumes=False, **kwargs):
kwargs = __utils__[](**kwargs)
stop_ = kwargs.pop(, False)
timeout = kwargs.pop(, None)
auto_remove = False
if kwargs:
__utils__[](kwargs)
if state(name) == and not (force or stop_):
raise CommandExecutionError(
{0}\
.format(name)
)
if stop_ and not force:
inspect_results = inspect_container(name)
try:
auto_remove = inspect_results[][]
except KeyError:
log.error(
, inspect_results
)
stop(name, timeout=timeout)
pre = ps_(all=True)
if not auto_remove:
_client_wrapper(, name, v=volumes, force=force)
_clear_context()
return [x for x in pre if x not in ps_(all=True)] | Removes a container
name
Container name or ID
force : False
If ``True``, the container will be killed first before removal, as the
Docker API will not permit a running container to be removed. This
option is set to ``False`` by default to prevent accidental removal of
a running container.
stop : False
If ``True``, the container will be stopped first before removal, as the
Docker API will not permit a running container to be removed. This
option is set to ``False`` by default to prevent accidental removal of
a running container.
.. versionadded:: 2017.7.0
timeout
Optional timeout to be passed to :py:func:`docker.stop
<salt.modules.dockermod.stop>` if stopping the container.
.. versionadded:: 2018.3.0
volumes : False
Also remove volumes associated with container
**RETURN DATA**
A list of the IDs of containers which were removed
CLI Example:
.. code-block:: bash
salt myminion docker.rm mycontainer
salt myminion docker.rm mycontainer force=True | ### Input:
Removes a container
name
Container name or ID
force : False
If ``True``, the container will be killed first before removal, as the
Docker API will not permit a running container to be removed. This
option is set to ``False`` by default to prevent accidental removal of
a running container.
stop : False
If ``True``, the container will be stopped first before removal, as the
Docker API will not permit a running container to be removed. This
option is set to ``False`` by default to prevent accidental removal of
a running container.
.. versionadded:: 2017.7.0
timeout
Optional timeout to be passed to :py:func:`docker.stop
<salt.modules.dockermod.stop>` if stopping the container.
.. versionadded:: 2018.3.0
volumes : False
Also remove volumes associated with container
**RETURN DATA**
A list of the IDs of containers which were removed
CLI Example:
.. code-block:: bash
salt myminion docker.rm mycontainer
salt myminion docker.rm mycontainer force=True
### Response:
def rm_(name, force=False, volumes=False, **kwargs):
kwargs = __utils__[](**kwargs)
stop_ = kwargs.pop(, False)
timeout = kwargs.pop(, None)
auto_remove = False
if kwargs:
__utils__[](kwargs)
if state(name) == and not (force or stop_):
raise CommandExecutionError(
{0}\
.format(name)
)
if stop_ and not force:
inspect_results = inspect_container(name)
try:
auto_remove = inspect_results[][]
except KeyError:
log.error(
, inspect_results
)
stop(name, timeout=timeout)
pre = ps_(all=True)
if not auto_remove:
_client_wrapper(, name, v=volumes, force=force)
_clear_context()
return [x for x in pre if x not in ps_(all=True)] |
def cli(family_file, family_type, to_json, to_madeline, to_ped, to_dict,
outfile, logfile, loglevel):
from pprint import pprint as pp
my_parser = FamilyParser(family_file, family_type)
if to_json:
if outfile:
outfile.write(my_parser.to_json())
else:
print(my_parser.to_json())
elif to_madeline:
for line in my_parser.to_madeline():
if outfile:
outfile.write(line + )
else:
print(line)
elif to_ped:
for line in my_parser.to_ped():
if outfile:
outfile.write(line + )
else:
print(line)
elif to_dict:
pp(my_parser.to_dict()) | Cli for testing the ped parser. | ### Input:
Cli for testing the ped parser.
### Response:
def cli(family_file, family_type, to_json, to_madeline, to_ped, to_dict,
outfile, logfile, loglevel):
from pprint import pprint as pp
my_parser = FamilyParser(family_file, family_type)
if to_json:
if outfile:
outfile.write(my_parser.to_json())
else:
print(my_parser.to_json())
elif to_madeline:
for line in my_parser.to_madeline():
if outfile:
outfile.write(line + )
else:
print(line)
elif to_ped:
for line in my_parser.to_ped():
if outfile:
outfile.write(line + )
else:
print(line)
elif to_dict:
pp(my_parser.to_dict()) |
def paginate_update(update):
from happenings.models import Update
time = update.pub_time
event = update.event
try:
next = Update.objects.filter(
event=event,
pub_time__gt=time
).order_by().only()[0]
except:
next = None
try:
previous = Update.objects.filter(
event=event,
pub_time__lt=time
).order_by().only()[0]
except:
previous = None
return {: next, : previous, : event} | attempts to get next and previous on updates | ### Input:
attempts to get next and previous on updates
### Response:
def paginate_update(update):
from happenings.models import Update
time = update.pub_time
event = update.event
try:
next = Update.objects.filter(
event=event,
pub_time__gt=time
).order_by().only()[0]
except:
next = None
try:
previous = Update.objects.filter(
event=event,
pub_time__lt=time
).order_by().only()[0]
except:
previous = None
return {: next, : previous, : event} |
def get_setting(context, key, default_val="", as_key=None):
if ("%s" % default_val).startswith():
default_val = getattr(settings, default_val[2:])
val = getattr(settings, key, default_val)
if not as_key:
return val
context[as_key] = val
    return | get val from settings and set to context
{% load lbutils %}
{% get_setting "key" default_val "as_key" %}
{{ as_key }}
if as_key is None, this tag will return val | ### Input:
get val from settings and set to context
{% load lbutils %}
{% get_setting "key" default_val "as_key" %}
{{ as_key }}
if as_key is None, this tag will return val
### Response:
def get_setting(context, key, default_val="", as_key=None):
if ("%s" % default_val).startswith():
default_val = getattr(settings, default_val[2:])
val = getattr(settings, key, default_val)
if not as_key:
return val
context[as_key] = val
return |
def get_slide_number(self):
a, slide_number, b = self.get_id_parts()
if slide_number > 5000:
slide_number = 5000 - slide_number
return slide_number | Return the slide-number for this trigger | ### Input:
Return the slide-number for this trigger
### Response:
def get_slide_number(self):
a, slide_number, b = self.get_id_parts()
if slide_number > 5000:
slide_number = 5000 - slide_number
return slide_number |
def convert_to_namespace(file, output, keyword):
resource = parse_bel_resource(file)
write_namespace(
namespace_keyword=(keyword or resource[][]),
namespace_name=resource[][],
namespace_description=resource[][],
author_name=,
namespace_domain=NAMESPACE_DOMAIN_OTHER,
values=resource[],
citation_name=resource[][],
file=output
) | Convert an annotation file to a namespace file. | ### Input:
Convert an annotation file to a namespace file.
### Response:
def convert_to_namespace(file, output, keyword):
resource = parse_bel_resource(file)
write_namespace(
namespace_keyword=(keyword or resource[][]),
namespace_name=resource[][],
namespace_description=resource[][],
author_name=,
namespace_domain=NAMESPACE_DOMAIN_OTHER,
values=resource[],
citation_name=resource[][],
file=output
) |
def infer_missing_backwards_edge(graph, u, v, k):
if u in graph[v]:
for attr_dict in graph[v][u].values():
if attr_dict == graph[u][v][k]:
return
graph.add_edge(v, u, key=k, **graph[u][v][k]) | Add the same edge, but in the opposite direction if not already present.
:type graph: pybel.BELGraph
:type u: tuple
:type v: tuple
:type k: int | ### Input:
Add the same edge, but in the opposite direction if not already present.
:type graph: pybel.BELGraph
:type u: tuple
:type v: tuple
:type k: int
### Response:
def infer_missing_backwards_edge(graph, u, v, k):
if u in graph[v]:
for attr_dict in graph[v][u].values():
if attr_dict == graph[u][v][k]:
return
graph.add_edge(v, u, key=k, **graph[u][v][k]) |
def set_id(self, identifier):
self._id = identifier
refobj = self.get_refobj()
if refobj:
self.get_refobjinter().set_id(refobj, identifier) | Set the id of the given reftrack
This will set the id on the refobject
:param identifier: the identifier number
:type identifier: int
:returns: None
:rtype: None
:raises: None | ### Input:
Set the id of the given reftrack
This will set the id on the refobject
:param identifier: the identifier number
:type identifier: int
:returns: None
:rtype: None
:raises: None
### Response:
def set_id(self, identifier):
self._id = identifier
refobj = self.get_refobj()
if refobj:
self.get_refobjinter().set_id(refobj, identifier) |
def set_absent(name, family=, **kwargs):
ret = {: name,
: {},
: None,
: }
set_check = __salt__[](name, family)
if not set_check:
ret[] = True
ret[] = (
.format(name, family))
return ret
if __opts__[]:
ret[] = .format(
name,
family)
return ret
flush_set = __salt__[](name, family)
if flush_set:
command = __salt__[](name, family)
if command is True:
ret[] = {: name}
ret[] = True
ret[] = (
.format(name, family))
else:
ret[] = False
ret[] = (
.format(name, command.strip(), family))
else:
ret[] = False
ret[] = .format(
name,
flush_set.strip(),
family
)
return ret | .. versionadded:: 2014.7.0
Verify the set is absent.
family
Networking family, either ipv4 or ipv6 | ### Input:
.. versionadded:: 2014.7.0
Verify the set is absent.
family
Networking family, either ipv4 or ipv6
### Response:
def set_absent(name, family=, **kwargs):
ret = {: name,
: {},
: None,
: }
set_check = __salt__[](name, family)
if not set_check:
ret[] = True
ret[] = (
.format(name, family))
return ret
if __opts__[]:
ret[] = .format(
name,
family)
return ret
flush_set = __salt__[](name, family)
if flush_set:
command = __salt__[](name, family)
if command is True:
ret[] = {: name}
ret[] = True
ret[] = (
.format(name, family))
else:
ret[] = False
ret[] = (
.format(name, command.strip(), family))
else:
ret[] = False
ret[] = .format(
name,
flush_set.strip(),
family
)
return ret |
def tonativefunc(enc=):
native
if sys.version_info >= (3,0,0):
return lambda x: x.decode(enc) if isinstance(x, bytes) else str(x)
return lambda x: x.encode(enc) if isinstance(x, unicode) else str(x) | Returns a function that turns everything into 'native' strings using enc | ### Input:
Returns a function that turns everything into 'native' strings using enc
### Response:
def tonativefunc(enc=):
native
if sys.version_info >= (3,0,0):
return lambda x: x.decode(enc) if isinstance(x, bytes) else str(x)
return lambda x: x.encode(enc) if isinstance(x, unicode) else str(x) |
def _note_remote_option(self, option, state):
if not self.telnet_opt_dict.has_key(option):
self.telnet_opt_dict[option] = TelnetOption()
self.telnet_opt_dict[option].remote_option = state | Record the status of local negotiated Telnet options. | ### Input:
Record the status of local negotiated Telnet options.
### Response:
def _note_remote_option(self, option, state):
if not self.telnet_opt_dict.has_key(option):
self.telnet_opt_dict[option] = TelnetOption()
self.telnet_opt_dict[option].remote_option = state |
def get_person_from_legacy_format(profile_record):
if not is_profile_in_legacy_format(profile_record):
raise ValueError("Not a legacy profile")
profile = profile_record
try:
profile = json.loads(json.dumps(profile))
except ValueError:
pass
images = []
accounts = []
profile_data = {
"@type": "Person"
}
if profile.has_key("name") and type(profile["name"]) == dict \
and profile["name"].has_key("formatted"):
profile_data["name"] = profile["name"]["formatted"]
if profile.has_key("bio"):
profile_data["description"] = profile["bio"]
if profile.has_key("location") and type(profile["location"]) == dict \
and profile["location"].has_key("formatted"):
profile_data["address"] = {
"@type": "PostalAddress",
"addressLocality": profile["location"]["formatted"]
}
if profile.has_key("avatar") and type(profile["avatar"]) == dict and \
profile["avatar"].has_key("url"):
images.append({
"@type": "ImageObject",
"name": "avatar",
"contentUrl": profile["avatar"]["url"]
})
if profile.has_key("cover") and type(profile["cover"]) == dict and \
profile["cover"].has_key("url"):
images.append({
"@type": "ImageObject",
"name": "cover",
"contentUrl": profile["cover"]["url"]
})
if len(images) > 0:
profile_data["image"] = images
if profile.has_key("website") and type(profile["website"]) in [str, unicode]:
profile_data["website"] = [{
"@type": "WebSite",
"url": profile["website"]
}]
for service_name in ["twitter", "facebook", "github"]:
if profile.has_key(service_name):
accounts.append(
format_account(service_name, profile[service_name])
)
if profile.has_key("bitcoin") and type(profile["bitcoin"]) == dict and \
profile["bitcoin"].has_key("address"):
accounts.append({
"@type": "Account",
"role": "payment",
"service": "bitcoin",
"identifier": profile["bitcoin"]["address"]
})
if profile.has_key("auth"):
if len(profile["auth"]) > 0 and type(profile["auth"]) == dict:
if profile["auth"][0].has_key("publicKeychain"):
accounts.append({
"@type": "Account",
"role": "key",
"service": "bip32",
"identifier": profile["auth"][0]["publicKeychain"]
})
if profile.has_key("pgp") and type(profile["pgp"]) == dict \
and profile["pgp"].has_key("url") \
and profile["pgp"].has_key("fingerprint"):
accounts.append({
"@type": "Account",
"role": "key",
"service": "pgp",
"identifier": profile["pgp"]["fingerprint"],
"contentUrl": profile["pgp"]["url"]
})
profile_data["account"] = accounts
return profile_data | Given a whole profile, convert it into
zone-file format. In the full profile JSON,
this method operates on the 'data_record' object.
@profile is a dict that contains the legacy profile data
Return a dict with the zone-file formatting. | ### Input:
Given a whole profile, convert it into
zone-file format. In the full profile JSON,
this method operates on the 'data_record' object.
@profile is a dict that contains the legacy profile data
Return a dict with the zone-file formatting.
### Response:
def get_person_from_legacy_format(profile_record):
if not is_profile_in_legacy_format(profile_record):
raise ValueError("Not a legacy profile")
profile = profile_record
try:
profile = json.loads(json.dumps(profile))
except ValueError:
pass
images = []
accounts = []
profile_data = {
"@type": "Person"
}
if profile.has_key("name") and type(profile["name"]) == dict \
and profile["name"].has_key("formatted"):
profile_data["name"] = profile["name"]["formatted"]
if profile.has_key("bio"):
profile_data["description"] = profile["bio"]
if profile.has_key("location") and type(profile["location"]) == dict \
and profile["location"].has_key("formatted"):
profile_data["address"] = {
"@type": "PostalAddress",
"addressLocality": profile["location"]["formatted"]
}
if profile.has_key("avatar") and type(profile["avatar"]) == dict and \
profile["avatar"].has_key("url"):
images.append({
"@type": "ImageObject",
"name": "avatar",
"contentUrl": profile["avatar"]["url"]
})
if profile.has_key("cover") and type(profile["cover"]) == dict and \
profile["cover"].has_key("url"):
images.append({
"@type": "ImageObject",
"name": "cover",
"contentUrl": profile["cover"]["url"]
})
if len(images) > 0:
profile_data["image"] = images
if profile.has_key("website") and type(profile["website"]) in [str, unicode]:
profile_data["website"] = [{
"@type": "WebSite",
"url": profile["website"]
}]
for service_name in ["twitter", "facebook", "github"]:
if profile.has_key(service_name):
accounts.append(
format_account(service_name, profile[service_name])
)
if profile.has_key("bitcoin") and type(profile["bitcoin"]) == dict and \
profile["bitcoin"].has_key("address"):
accounts.append({
"@type": "Account",
"role": "payment",
"service": "bitcoin",
"identifier": profile["bitcoin"]["address"]
})
if profile.has_key("auth"):
if len(profile["auth"]) > 0 and type(profile["auth"]) == dict:
if profile["auth"][0].has_key("publicKeychain"):
accounts.append({
"@type": "Account",
"role": "key",
"service": "bip32",
"identifier": profile["auth"][0]["publicKeychain"]
})
if profile.has_key("pgp") and type(profile["pgp"]) == dict \
and profile["pgp"].has_key("url") \
and profile["pgp"].has_key("fingerprint"):
accounts.append({
"@type": "Account",
"role": "key",
"service": "pgp",
"identifier": profile["pgp"]["fingerprint"],
"contentUrl": profile["pgp"]["url"]
})
profile_data["account"] = accounts
return profile_data |
def get_or_create_iobject(identifier_uid,
identifier_namespace_uri,
iobject_type_name,
iobject_type_namespace_uri,
iobject_type_revision_name,
iobject_family_name,
iobject_family_revision_name="",
identifier_namespace_name="",
timestamp=None,
create_timestamp=None,
overwrite=False,
dingos_class_map=dingos_class_map):
if not timestamp:
raise StandardError("You must supply a timestamp.")
id_namespace, created = dingos_class_map[].objects.get_or_create(uri=identifier_namespace_uri)
if created and identifier_namespace_name:
id_namespace.name = identifier_namespace_name
id_namespace.save()
identifier, created = dingos_class_map[].objects.get_or_create(uid=identifier_uid,
namespace=id_namespace,
defaults={: None})
iobject_type_namespace, created = dingos_class_map[].objects.get_or_create(uri=iobject_type_namespace_uri)
iobject_family, created = dingos_class_map[].objects.get_or_create(name=iobject_family_name)
iobject_family_revision, created = dingos_class_map[].objects.get_or_create(
name=iobject_family_revision_name)
iobject_type, created = dingos_class_map[].objects.get_or_create(name=iobject_type_name,
iobject_family=iobject_family,
namespace=iobject_type_namespace)
iobject_type_revision, created = dingos_class_map[].objects.get_or_create(name=iobject_type_revision_name)
if not create_timestamp:
create_timestamp = timezone.now()
iobject, created = dingos_class_map["InfoObject"].objects.get_or_create(identifier=identifier,
timestamp=timestamp,
defaults={: iobject_family,
: iobject_family_revision,
: iobject_type,
: iobject_type_revision,
: create_timestamp})
if created:
iobject.set_name()
iobject.save()
identifier.latest = iobject
identifier.save()
elif overwrite:
iobject.timestamp = timestamp
iobject.create_timestamp = create_timestamp
iobject.iobject_family = iobject_family
iobject.iobject_family_revision = iobject_family_revision
iobject.iobject_type = iobject_type
iobject.iobject_type_revision = iobject_type_revision
iobject.set_name()
iobject.save()
logger.debug(
"Created iobject id with %s , ts %s (created was %s) and overwrite as %s" % (iobject.identifier, timestamp, created, overwrite))
return iobject, created | Get or create an information object. | ### Input:
Get or create an information object.
### Response:
def get_or_create_iobject(identifier_uid,
identifier_namespace_uri,
iobject_type_name,
iobject_type_namespace_uri,
iobject_type_revision_name,
iobject_family_name,
iobject_family_revision_name="",
identifier_namespace_name="",
timestamp=None,
create_timestamp=None,
overwrite=False,
dingos_class_map=dingos_class_map):
if not timestamp:
raise StandardError("You must supply a timestamp.")
id_namespace, created = dingos_class_map[].objects.get_or_create(uri=identifier_namespace_uri)
if created and identifier_namespace_name:
id_namespace.name = identifier_namespace_name
id_namespace.save()
identifier, created = dingos_class_map[].objects.get_or_create(uid=identifier_uid,
namespace=id_namespace,
defaults={: None})
iobject_type_namespace, created = dingos_class_map[].objects.get_or_create(uri=iobject_type_namespace_uri)
iobject_family, created = dingos_class_map[].objects.get_or_create(name=iobject_family_name)
iobject_family_revision, created = dingos_class_map[].objects.get_or_create(
name=iobject_family_revision_name)
iobject_type, created = dingos_class_map[].objects.get_or_create(name=iobject_type_name,
iobject_family=iobject_family,
namespace=iobject_type_namespace)
iobject_type_revision, created = dingos_class_map[].objects.get_or_create(name=iobject_type_revision_name)
if not create_timestamp:
create_timestamp = timezone.now()
iobject, created = dingos_class_map["InfoObject"].objects.get_or_create(identifier=identifier,
timestamp=timestamp,
defaults={: iobject_family,
: iobject_family_revision,
: iobject_type,
: iobject_type_revision,
: create_timestamp})
if created:
iobject.set_name()
iobject.save()
identifier.latest = iobject
identifier.save()
elif overwrite:
iobject.timestamp = timestamp
iobject.create_timestamp = create_timestamp
iobject.iobject_family = iobject_family
iobject.iobject_family_revision = iobject_family_revision
iobject.iobject_type = iobject_type
iobject.iobject_type_revision = iobject_type_revision
iobject.set_name()
iobject.save()
logger.debug(
"Created iobject id with %s , ts %s (created was %s) and overwrite as %s" % (iobject.identifier, timestamp, created, overwrite))
return iobject, created |
def handle_api_exception(f):
def wraps(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except SupersetSecurityException as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e),
status=e.status,
stacktrace=traceback.format_exc(),
link=e.link)
except SupersetException as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc(),
status=e.status)
except Exception as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc())
return functools.update_wrapper(wraps, f) | A decorator to catch superset exceptions. Use it after the @api decorator above
so superset exception handler is triggered before the handler for generic exceptions. | ### Input:
A decorator to catch superset exceptions. Use it after the @api decorator above
so superset exception handler is triggered before the handler for generic exceptions.
### Response:
def handle_api_exception(f):
def wraps(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except SupersetSecurityException as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e),
status=e.status,
stacktrace=traceback.format_exc(),
link=e.link)
except SupersetException as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc(),
status=e.status)
except Exception as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc())
return functools.update_wrapper(wraps, f) |
def _transform(self, df):
output_col = self.getOutputCol()
label_col = self.getLabelCol()
new_schema = copy.deepcopy(df.schema)
new_schema.add(StructField(output_col, StringType(), True))
rdd = df.rdd.coalesce(1)
features = np.asarray(
rdd.map(lambda x: from_vector(x.features)).collect())
model = model_from_yaml(self.get_keras_model_config())
model.set_weights(self.weights.value)
predictions = rdd.ctx.parallelize(
model.predict_classes(features)).coalesce(1)
predictions = predictions.map(lambda x: tuple(str(x)))
results_rdd = rdd.zip(predictions).map(lambda x: x[0] + x[1])
results_df = df.sql_ctx.createDataFrame(results_rdd, new_schema)
results_df = results_df.withColumn(
output_col, results_df[output_col].cast(DoubleType()))
results_df = results_df.withColumn(
label_col, results_df[label_col].cast(DoubleType()))
return results_df | Private transform method of a Transformer. This serves as batch-prediction method for our purposes. | ### Input:
Private transform method of a Transformer. This serves as batch-prediction method for our purposes.
### Response:
def _transform(self, df):
output_col = self.getOutputCol()
label_col = self.getLabelCol()
new_schema = copy.deepcopy(df.schema)
new_schema.add(StructField(output_col, StringType(), True))
rdd = df.rdd.coalesce(1)
features = np.asarray(
rdd.map(lambda x: from_vector(x.features)).collect())
model = model_from_yaml(self.get_keras_model_config())
model.set_weights(self.weights.value)
predictions = rdd.ctx.parallelize(
model.predict_classes(features)).coalesce(1)
predictions = predictions.map(lambda x: tuple(str(x)))
results_rdd = rdd.zip(predictions).map(lambda x: x[0] + x[1])
results_df = df.sql_ctx.createDataFrame(results_rdd, new_schema)
results_df = results_df.withColumn(
output_col, results_df[output_col].cast(DoubleType()))
results_df = results_df.withColumn(
label_col, results_df[label_col].cast(DoubleType()))
return results_df |
def redirect_stds(self):
if not self.debug:
sys.stdout = self.stdout_write
sys.stderr = self.stderr_write
sys.stdin = self.stdin_read | Redirects stds | ### Input:
Redirects stds
### Response:
def redirect_stds(self):
if not self.debug:
sys.stdout = self.stdout_write
sys.stderr = self.stderr_write
sys.stdin = self.stdin_read |
def import_plugin(name, superclasses=None):
plugin_fqname = "sos.plugins.%s" % name
if not superclasses:
superclasses = (Plugin,)
return import_module(plugin_fqname, superclasses) | Import name as a module and return a list of all classes defined in that
module. superclasses should be a tuple of valid superclasses to import,
this defaults to (Plugin,). | ### Input:
Import name as a module and return a list of all classes defined in that
module. superclasses should be a tuple of valid superclasses to import,
this defaults to (Plugin,).
### Response:
def import_plugin(name, superclasses=None):
plugin_fqname = "sos.plugins.%s" % name
if not superclasses:
superclasses = (Plugin,)
return import_module(plugin_fqname, superclasses) |
def successors(self, state, addr=None, jumpkind=None, default_engine=False, procedure_engine=False,
engines=None, **kwargs):
if addr is not None or jumpkind is not None:
state = state.copy()
if addr is not None:
state.ip = addr
if jumpkind is not None:
state.history.jumpkind = jumpkind
if default_engine and self.has_default_engine():
engines = [self.default_engine]
elif procedure_engine and self.has_procedure_engine():
engines = [self.procedure_engine]
elif engines is None:
engines = (self.get_plugin(name) for name in self.order)
else:
engines = (self.get_plugin(e) if isinstance(e, str) else e for e in engines)
for engine in engines:
if engine.check(state, **kwargs):
r = engine.process(state, **kwargs)
if r.processed:
return r
raise AngrExitError("All engines failed to execute!") | Perform execution using any applicable engine. Enumerate the current engines and use the
first one that works. Engines are enumerated in order, specified by the ``order`` attribute.
:param state: The state to analyze
:param addr: optional, an address to execute at instead of the state's ip
:param jumpkind: optional, the jumpkind of the previous exit
:param default_engine: Whether we should only attempt to use the default engine (usually VEX)
:param procedure_engine: Whether we should only attempt to use the procedure engine
:param engines: A list of engines to try to use, instead of the default.
This list is expected to contain engine names or engine instances.
Additional keyword arguments will be passed directly into each engine's process method.
:return SimSuccessors: A SimSuccessors object classifying the results of the run. | ### Input:
Perform execution using any applicable engine. Enumerate the current engines and use the
first one that works. Engines are enumerated in order, specified by the ``order`` attribute.
:param state: The state to analyze
:param addr: optional, an address to execute at instead of the state's ip
:param jumpkind: optional, the jumpkind of the previous exit
:param default_engine: Whether we should only attempt to use the default engine (usually VEX)
:param procedure_engine: Whether we should only attempt to use the procedure engine
:param engines: A list of engines to try to use, instead of the default.
This list is expected to contain engine names or engine instances.
Additional keyword arguments will be passed directly into each engine's process method.
:return SimSuccessors: A SimSuccessors object classifying the results of the run.
### Response:
def successors(self, state, addr=None, jumpkind=None, default_engine=False, procedure_engine=False,
engines=None, **kwargs):
if addr is not None or jumpkind is not None:
state = state.copy()
if addr is not None:
state.ip = addr
if jumpkind is not None:
state.history.jumpkind = jumpkind
if default_engine and self.has_default_engine():
engines = [self.default_engine]
elif procedure_engine and self.has_procedure_engine():
engines = [self.procedure_engine]
elif engines is None:
engines = (self.get_plugin(name) for name in self.order)
else:
engines = (self.get_plugin(e) if isinstance(e, str) else e for e in engines)
for engine in engines:
if engine.check(state, **kwargs):
r = engine.process(state, **kwargs)
if r.processed:
return r
raise AngrExitError("All engines failed to execute!") |
def get_block_by_height(self, height: int, is_full: bool = False) -> dict:
payload = self.generate_json_rpc_payload(RpcMethod.GET_BLOCK, [height, 1])
response = self.__post(self.__url, payload)
if is_full:
return response
return response[] | This interface is used to get the block information by block height in current network.
Return:
the decimal total number of blocks in current network. | ### Input:
This interface is used to get the block information by block height in current network.
Return:
the decimal total number of blocks in current network.
### Response:
def get_block_by_height(self, height: int, is_full: bool = False) -> dict:
payload = self.generate_json_rpc_payload(RpcMethod.GET_BLOCK, [height, 1])
response = self.__post(self.__url, payload)
if is_full:
return response
return response[] |
def to_er7(self, encoding_chars=None, trailing_children=False):
if encoding_chars is None:
encoding_chars = self.encoding_chars
separator = encoding_chars.get()
repetition = encoding_chars.get()
s = [self.name]
for child in self._get_children(trailing_children):
if child is not None:
s.append(repetition.join(item.to_er7(encoding_chars, trailing_children) for item in child))
else:
try:
s.append(self._handle_empty_children(encoding_chars))
except NotImplementedError:
pass
if self.name == and len(s) > 1:
s.pop(1)
return separator.join(s) | Return the ER7-encoded string
:type encoding_chars: ``dict``
:param encoding_chars: a dictionary containing the encoding chars or None to use the default
(see :func:`get_default_encoding_chars <hl7apy.get_default_encoding_chars>`)
:type trailing_children: ``bool``
:param trailing_children: if ``True``, trailing children will be added even if their value is None
:return: the ER7-encoded string
>>> pid = Segment("PID")
>>> pid.pid_1 = '1'
>>> pid.pid_5 = "EVERYMAN^ADAM"
>>> print(pid.to_er7())
PID|1||||EVERYMAN^ADAM | ### Input:
Return the ER7-encoded string
:type encoding_chars: ``dict``
:param encoding_chars: a dictionary containing the encoding chars or None to use the default
(see :func:`get_default_encoding_chars <hl7apy.get_default_encoding_chars>`)
:type trailing_children: ``bool``
:param trailing_children: if ``True``, trailing children will be added even if their value is None
:return: the ER7-encoded string
>>> pid = Segment("PID")
>>> pid.pid_1 = '1'
>>> pid.pid_5 = "EVERYMAN^ADAM"
>>> print(pid.to_er7())
PID|1||||EVERYMAN^ADAM
### Response:
def to_er7(self, encoding_chars=None, trailing_children=False):
if encoding_chars is None:
encoding_chars = self.encoding_chars
separator = encoding_chars.get()
repetition = encoding_chars.get()
s = [self.name]
for child in self._get_children(trailing_children):
if child is not None:
s.append(repetition.join(item.to_er7(encoding_chars, trailing_children) for item in child))
else:
try:
s.append(self._handle_empty_children(encoding_chars))
except NotImplementedError:
pass
if self.name == and len(s) > 1:
s.pop(1)
return separator.join(s) |
def magic_contract(*args, **kwargs):
def inner_decorator(f):
for name, val in f.__globals__.items():
if not name.startswith() and isinstance(val, type):
safe_new_contract(name, val)
return contract(*args, **kwargs)(f)
return inner_decorator | Drop-in replacement for ``pycontracts.contract`` decorator, except that it supports locally-visible types
:param args: Arguments to pass to the ``contract`` decorator
:param kwargs: Keyword arguments to pass to the ``contract`` decorator
:return: The contracted function | ### Input:
Drop-in replacement for ``pycontracts.contract`` decorator, except that it supports locally-visible types
:param args: Arguments to pass to the ``contract`` decorator
:param kwargs: Keyword arguments to pass to the ``contract`` decorator
:return: The contracted function
### Response:
def magic_contract(*args, **kwargs):
def inner_decorator(f):
for name, val in f.__globals__.items():
if not name.startswith() and isinstance(val, type):
safe_new_contract(name, val)
return contract(*args, **kwargs)(f)
return inner_decorator |
def conflicts_with(self, other):
if isinstance(other, Requirement):
if (self.name_ != other.name_) or (self.range is None) \
or (other.range is None):
return False
elif self.conflict:
return False if other.conflict \
else self.range_.issuperset(other.range_)
elif other.conflict:
return other.range_.issuperset(self.range_)
else:
return not self.range_.intersects(other.range_)
else:
if (self.name_ != other.name_) or (self.range is None):
return False
if self.conflict:
return (other.version_ in self.range_)
else:
return (other.version_ not in self.range_) | Returns True if this requirement conflicts with another `Requirement`
or `VersionedObject`. | ### Input:
Returns True if this requirement conflicts with another `Requirement`
or `VersionedObject`.
### Response:
def conflicts_with(self, other):
if isinstance(other, Requirement):
if (self.name_ != other.name_) or (self.range is None) \
or (other.range is None):
return False
elif self.conflict:
return False if other.conflict \
else self.range_.issuperset(other.range_)
elif other.conflict:
return other.range_.issuperset(self.range_)
else:
return not self.range_.intersects(other.range_)
else:
if (self.name_ != other.name_) or (self.range is None):
return False
if self.conflict:
return (other.version_ in self.range_)
else:
return (other.version_ not in self.range_) |
def remove_script(zap_helper, script_name):
with zap_error_handler():
console.debug(.format(script_name))
result = zap_helper.zap.script.remove(script_name)
if result != :
raise ZAPError(.format(result))
console.info(.format(script_name)) | Remove a script. | ### Input:
Remove a script.
### Response:
def remove_script(zap_helper, script_name):
with zap_error_handler():
console.debug(.format(script_name))
result = zap_helper.zap.script.remove(script_name)
if result != :
raise ZAPError(.format(result))
console.info(.format(script_name)) |
def get_integrated_channels(self, options):
channel_classes = self.get_channel_classes(options.get())
filter_kwargs = {
: True,
: True,
}
enterprise_customer = self.get_enterprise_customer(options.get())
if enterprise_customer:
filter_kwargs[] = enterprise_customer
for channel_class in channel_classes:
for integrated_channel in channel_class.objects.filter(**filter_kwargs):
yield integrated_channel | Generates a list of active integrated channels for active customers, filtered from the given options.
Raises errors when invalid options are encountered.
See ``add_arguments`` for the accepted options. | ### Input:
Generates a list of active integrated channels for active customers, filtered from the given options.
Raises errors when invalid options are encountered.
See ``add_arguments`` for the accepted options.
### Response:
def get_integrated_channels(self, options):
channel_classes = self.get_channel_classes(options.get())
filter_kwargs = {
: True,
: True,
}
enterprise_customer = self.get_enterprise_customer(options.get())
if enterprise_customer:
filter_kwargs[] = enterprise_customer
for channel_class in channel_classes:
for integrated_channel in channel_class.objects.filter(**filter_kwargs):
yield integrated_channel |
def _unascii(s):
chunks = []
pos = 0
while m:
start = m.start()
end = m.end()
g = m.group(1)
if g is None:
chunks.append(s[pos:end])
else:
| Unpack `\\uNNNN` escapes in 's' and encode the result as UTF-8
This method takes the output of the JSONEncoder and expands any \\uNNNN
escapes it finds (except for \\u0000 to \\u001F, which are converted to
\\xNN escapes).
For performance, it assumes that the input is valid JSON, and performs few
sanity checks. | ### Input:
Unpack `\\uNNNN` escapes in 's' and encode the result as UTF-8
This method takes the output of the JSONEncoder and expands any \\uNNNN
escapes it finds (except for \\u0000 to \\u001F, which are converted to
\\xNN escapes).
For performance, it assumes that the input is valid JSON, and performs few
sanity checks.
### Response:
def _unascii(s):
chunks = []
pos = 0
while m:
start = m.start()
end = m.end()
g = m.group(1)
if g is None:
chunks.append(s[pos:end])
else:
|
def ot_tnrs_match_names(name_list,
context_name=None,
do_approximate_matching=True,
include_dubious=False,
include_deprecated=True,
tnrs_wrapper=None):
if tnrs_wrapper is None:
from peyotl.sugar import tnrs
tnrs_wrapper = tnrs
match_obj = tnrs_wrapper.match_names(name_list,
context_name=context_name,
do_approximate_matching=do_approximate_matching,
include_deprecated=include_deprecated,
include_dubious=include_dubious,
wrap_response=True)
return match_obj | Uses a peyotl wrapper around an Open Tree web service to get a list of OTT IDs matching
the `name_list`.
The tnrs_wrapper can be None (in which case the default wrapper from peyotl.sugar will be used.
All other arguments correspond to the arguments of the web-service call.
A ValueError will be raised if the `context_name` does not match one of the valid names for a
taxonomic context.
This uses the wrap_response option to create and return a TNRSRespose object around the response. | ### Input:
Uses a peyotl wrapper around an Open Tree web service to get a list of OTT IDs matching
the `name_list`.
The tnrs_wrapper can be None (in which case the default wrapper from peyotl.sugar will be used.
All other arguments correspond to the arguments of the web-service call.
A ValueError will be raised if the `context_name` does not match one of the valid names for a
taxonomic context.
This uses the wrap_response option to create and return a TNRSRespose object around the response.
### Response:
def ot_tnrs_match_names(name_list,
context_name=None,
do_approximate_matching=True,
include_dubious=False,
include_deprecated=True,
tnrs_wrapper=None):
if tnrs_wrapper is None:
from peyotl.sugar import tnrs
tnrs_wrapper = tnrs
match_obj = tnrs_wrapper.match_names(name_list,
context_name=context_name,
do_approximate_matching=do_approximate_matching,
include_deprecated=include_deprecated,
include_dubious=include_dubious,
wrap_response=True)
return match_obj |
def create_table(self, table, fields):
table = table.get_soap_object(self.client)
return self.call(, table, fields) | Responsys.createTable call
Accepts:
InteractObject table
list fields
Returns True on success | ### Input:
Responsys.createTable call
Accepts:
InteractObject table
list fields
Returns True on success
### Response:
def create_table(self, table, fields):
table = table.get_soap_object(self.client)
return self.call(, table, fields) |
def decorate_class(self, klass, *decorator_args, **decorator_kwargs):
class ChildClass(klass):
def __init__(slf, *args, **kwargs):
super(ChildClass, slf).__init__(*args, **kwargs)
self.decorate(
slf, *decorator_args, **decorator_kwargs
)
decorate_klass = ChildClass
decorate_klass.__name__ = klass.__name__
decorate_klass.__module__ = klass.__module__
return decorate_klass | where the magic happens, this wraps a class to call our decorate method
in the init of the class | ### Input:
where the magic happens, this wraps a class to call our decorate method
in the init of the class
### Response:
def decorate_class(self, klass, *decorator_args, **decorator_kwargs):
class ChildClass(klass):
def __init__(slf, *args, **kwargs):
super(ChildClass, slf).__init__(*args, **kwargs)
self.decorate(
slf, *decorator_args, **decorator_kwargs
)
decorate_klass = ChildClass
decorate_klass.__name__ = klass.__name__
decorate_klass.__module__ = klass.__module__
return decorate_klass |
def validateDayOfWeek(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, dayNames=ENGLISH_DAYS_OF_WEEK, excMsg=None):
try:
return validateMonth(value, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes, monthNames=ENGLISH_DAYS_OF_WEEK)
except:
_raiseValidationException(_() % (_errstr(value)), excMsg) | Raises ValidationException if value is not a day of the week, such as 'Mon' or 'Friday'.
Returns the titlecased day of the week.
* value (str): The value being validated as a day of the week.
* blank (bool): If True, a blank string will be accepted. Defaults to False.
* strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
* allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
* blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
* dayNames (Mapping): A mapping of uppercase day abbreviations to day names, i.e. {'SUN': 'Sunday', ...} The default provides English day names.
* excMsg (str): A custom message to use in the raised ValidationException.
>>> import pysimplevalidate as pysv
>>> pysv.validateDayOfWeek('mon')
'Monday'
>>> pysv.validateDayOfWeek('THURSday')
'Thursday' | ### Input:
Raises ValidationException if value is not a day of the week, such as 'Mon' or 'Friday'.
Returns the titlecased day of the week.
* value (str): The value being validated as a day of the week.
* blank (bool): If True, a blank string will be accepted. Defaults to False.
* strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
* allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
* blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
* dayNames (Mapping): A mapping of uppercase day abbreviations to day names, i.e. {'SUN': 'Sunday', ...} The default provides English day names.
* excMsg (str): A custom message to use in the raised ValidationException.
>>> import pysimplevalidate as pysv
>>> pysv.validateDayOfWeek('mon')
'Monday'
>>> pysv.validateDayOfWeek('THURSday')
'Thursday'
### Response:
def validateDayOfWeek(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, dayNames=ENGLISH_DAYS_OF_WEEK, excMsg=None):
try:
return validateMonth(value, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes, monthNames=ENGLISH_DAYS_OF_WEEK)
except:
_raiseValidationException(_() % (_errstr(value)), excMsg) |
def encrypt_batch_items(table_name, aws_cmk_id):
index_keys = [
{"partition_attribute": {"S": "is this"}, "sort_attribute": {"N": "55"}},
{"partition_attribute": {"S": "is this"}, "sort_attribute": {"N": "56"}},
{"partition_attribute": {"S": "is this"}, "sort_attribute": {"N": "57"}},
{"partition_attribute": {"S": "another"}, "sort_attribute": {"N": "55"}},
]
plaintext_additional_attributes = {
"example": {"S": "data"},
"some numbers": {"N": "99"},
"and some binary": {"B": b"\x00\x01\x02"},
"leave me": {"S": "alone"},
}
plaintext_items = []
for key in index_keys:
_attributes = key.copy()
_attributes.update(plaintext_additional_attributes)
plaintext_items.append(_attributes)
encrypted_attributes = set(plaintext_additional_attributes.keys())
encrypted_attributes.remove("leave me")
unencrypted_attributes = set(index_keys[0].keys())
unencrypted_attributes.add("leave me")
client = boto3.client("dynamodb")
aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=aws_cmk_id)
actions = AttributeActions(
default_action=CryptoAction.ENCRYPT_AND_SIGN, attribute_actions={"leave me": CryptoAction.DO_NOTHING}
)
encrypted_client = EncryptedClient(client=client, materials_provider=aws_kms_cmp, attribute_actions=actions)
encrypted_client.batch_write_item(
RequestItems={table_name: [{"PutRequest": {"Item": item}} for item in plaintext_items]}
)
encrypted_items = client.batch_get_item(RequestItems={table_name: {"Keys": index_keys}})["Responses"][table_name]
decrypted_items = encrypted_client.batch_get_item(RequestItems={table_name: {"Keys": index_keys}})["Responses"][
table_name
]
def _select_index_from_item(item):
for index in index_keys:
if all([item[key] == value for key, value in index.items()]):
return index
raise Exception("Index key not found in item.")
def _select_item_from_index(index, all_items):
for item in all_items:
if all([item[key] == value for key, value in index.items()]):
return item
raise Exception("Index key not found in item.")
for encrypted_item in encrypted_items:
key = _select_index_from_item(encrypted_item)
plaintext_item = _select_item_from_index(key, plaintext_items)
decrypted_item = _select_item_from_index(key, decrypted_items)
for name in encrypted_attributes:
assert encrypted_item[name] != plaintext_item[name]
assert decrypted_item[name] == plaintext_item[name]
for name in unencrypted_attributes:
assert decrypted_item[name] == encrypted_item[name] == plaintext_item[name]
encrypted_client.batch_write_item(
RequestItems={table_name: [{"DeleteRequest": {"Key": key}} for key in index_keys]}
) | Demonstrate use of EncryptedClient to transparently encrypt multiple items in a batch request. | ### Input:
Demonstrate use of EncryptedClient to transparently encrypt multiple items in a batch request.
### Response:
def encrypt_batch_items(table_name, aws_cmk_id):
index_keys = [
{"partition_attribute": {"S": "is this"}, "sort_attribute": {"N": "55"}},
{"partition_attribute": {"S": "is this"}, "sort_attribute": {"N": "56"}},
{"partition_attribute": {"S": "is this"}, "sort_attribute": {"N": "57"}},
{"partition_attribute": {"S": "another"}, "sort_attribute": {"N": "55"}},
]
plaintext_additional_attributes = {
"example": {"S": "data"},
"some numbers": {"N": "99"},
"and some binary": {"B": b"\x00\x01\x02"},
"leave me": {"S": "alone"},
}
plaintext_items = []
for key in index_keys:
_attributes = key.copy()
_attributes.update(plaintext_additional_attributes)
plaintext_items.append(_attributes)
encrypted_attributes = set(plaintext_additional_attributes.keys())
encrypted_attributes.remove("leave me")
unencrypted_attributes = set(index_keys[0].keys())
unencrypted_attributes.add("leave me")
client = boto3.client("dynamodb")
aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=aws_cmk_id)
actions = AttributeActions(
default_action=CryptoAction.ENCRYPT_AND_SIGN, attribute_actions={"leave me": CryptoAction.DO_NOTHING}
)
encrypted_client = EncryptedClient(client=client, materials_provider=aws_kms_cmp, attribute_actions=actions)
encrypted_client.batch_write_item(
RequestItems={table_name: [{"PutRequest": {"Item": item}} for item in plaintext_items]}
)
encrypted_items = client.batch_get_item(RequestItems={table_name: {"Keys": index_keys}})["Responses"][table_name]
decrypted_items = encrypted_client.batch_get_item(RequestItems={table_name: {"Keys": index_keys}})["Responses"][
table_name
]
def _select_index_from_item(item):
for index in index_keys:
if all([item[key] == value for key, value in index.items()]):
return index
raise Exception("Index key not found in item.")
def _select_item_from_index(index, all_items):
for item in all_items:
if all([item[key] == value for key, value in index.items()]):
return item
raise Exception("Index key not found in item.")
for encrypted_item in encrypted_items:
key = _select_index_from_item(encrypted_item)
plaintext_item = _select_item_from_index(key, plaintext_items)
decrypted_item = _select_item_from_index(key, decrypted_items)
for name in encrypted_attributes:
assert encrypted_item[name] != plaintext_item[name]
assert decrypted_item[name] == plaintext_item[name]
for name in unencrypted_attributes:
assert decrypted_item[name] == encrypted_item[name] == plaintext_item[name]
encrypted_client.batch_write_item(
RequestItems={table_name: [{"DeleteRequest": {"Key": key}} for key in index_keys]}
) |
def tap_key(self, character=, n=1, interval=0):
for i in range(n):
self.press_key(character)
self.release_key(character)
time.sleep(interval) | Press and release a given character key n times. | ### Input:
Press and release a given character key n times.
### Response:
def tap_key(self, character=, n=1, interval=0):
for i in range(n):
self.press_key(character)
self.release_key(character)
time.sleep(interval) |
def check_signing_file(self, keyid, signing_file):
if not signing_file or not os.path.exists(signing_file):
return False
if not self.check_permissions(signing_file):
log.warning(, signing_file)
return False
mtime = os.path.getmtime(signing_file)
if self.signing_files.get(signing_file, {}).get() != mtime:
self.signing_files.setdefault(signing_file, {})[] = mtime
with salt.utils.files.fopen(signing_file, ) as fp_:
self.signing_files[signing_file][] = [
entry for entry in [line.strip() for line in fp_] if not entry.strip().startswith()
]
return any(salt.utils.stringutils.expr_match(keyid, line) for line
in self.signing_files[signing_file].get(, [])) | Check a keyid for membership in a signing file | ### Input:
Check a keyid for membership in a signing file
### Response:
def check_signing_file(self, keyid, signing_file):
if not signing_file or not os.path.exists(signing_file):
return False
if not self.check_permissions(signing_file):
log.warning(, signing_file)
return False
mtime = os.path.getmtime(signing_file)
if self.signing_files.get(signing_file, {}).get() != mtime:
self.signing_files.setdefault(signing_file, {})[] = mtime
with salt.utils.files.fopen(signing_file, ) as fp_:
self.signing_files[signing_file][] = [
entry for entry in [line.strip() for line in fp_] if not entry.strip().startswith()
]
return any(salt.utils.stringutils.expr_match(keyid, line) for line
in self.signing_files[signing_file].get(, [])) |
def slice_shift(self, periods=1, axis=0):
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self) | Equivalent to `shift` without copying data. The shifted data will
not include the dropped periods and the shifted axis will be smaller
than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : same type as caller
Notes
-----
While the `slice_shift` is faster than `shift`, you may pay for it
later during alignment. | ### Input:
Equivalent to `shift` without copying data. The shifted data will
not include the dropped periods and the shifted axis will be smaller
than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : same type as caller
Notes
-----
While the `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
### Response:
def slice_shift(self, periods=1, axis=0):
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self) |
def ensure_tuple(arg, len_=None):
if not is_tuple(arg, len_=len_):
len_part = "" if len_ is None else " of length %s" % len_
raise TypeError(
"expected tuple%s, got %s" % (len_part, _describe_type(arg)))
return arg | Checks whether argument is a tuple.
:param len_: Optional expected length of the tuple
:return: Argument, if it's a tuple (of given length, if any)
:raise TypeError: When argument is not a tuple (of given length, if any) | ### Input:
Checks whether argument is a tuple.
:param len_: Optional expected length of the tuple
:return: Argument, if it's a tuple (of given length, if any)
:raise TypeError: When argument is not a tuple (of given length, if any)
### Response:
def ensure_tuple(arg, len_=None):
if not is_tuple(arg, len_=len_):
len_part = "" if len_ is None else " of length %s" % len_
raise TypeError(
"expected tuple%s, got %s" % (len_part, _describe_type(arg)))
return arg |
def _evictStaleDevices(self):
while self.running:
expiredDeviceIds = [key for key, value in self.devices.items() if value.hasExpired()]
for key in expiredDeviceIds:
logger.warning("Device timeout, removing " + key)
del self.devices[key]
time.sleep(1)
logger.warning("DeviceCaretaker is now shutdown") | A housekeeping function which runs in a worker thread and which evicts devices that haven't sent an update for a
while. | ### Input:
A housekeeping function which runs in a worker thread and which evicts devices that haven't sent an update for a
while.
### Response:
def _evictStaleDevices(self):
while self.running:
expiredDeviceIds = [key for key, value in self.devices.items() if value.hasExpired()]
for key in expiredDeviceIds:
logger.warning("Device timeout, removing " + key)
del self.devices[key]
time.sleep(1)
logger.warning("DeviceCaretaker is now shutdown") |
def reject_sender(self, link_handle, pn_condition=None):
link = self._sender_links.get(link_handle)
if not link:
raise Exception("Invalid link_handle: %s" % link_handle)
link.reject(pn_condition)
link.destroy() | Rejects the SenderLink, and destroys the handle. | ### Input:
Rejects the SenderLink, and destroys the handle.
### Response:
def reject_sender(self, link_handle, pn_condition=None):
link = self._sender_links.get(link_handle)
if not link:
raise Exception("Invalid link_handle: %s" % link_handle)
link.reject(pn_condition)
link.destroy() |
def download_previews(self, savedir=None):
for obsid in self.obsids:
pm = io.PathManager(obsid.img_id, savedir=savedir)
pm.basepath.mkdir(exist_ok=True)
basename = Path(obsid.medium_img_url).name
print("Downloading", basename)
urlretrieve(obsid.medium_img_url, str(pm.basepath / basename)) | Download preview files for the previously found and stored Opus obsids.
Parameters
==========
savedir: str or pathlib.Path, optional
If the database root folder as defined by the config.ini should not be used,
provide a different savedir here. It will be handed to PathManager. | ### Input:
Download preview files for the previously found and stored Opus obsids.
Parameters
==========
savedir: str or pathlib.Path, optional
If the database root folder as defined by the config.ini should not be used,
provide a different savedir here. It will be handed to PathManager.
### Response:
def download_previews(self, savedir=None):
for obsid in self.obsids:
pm = io.PathManager(obsid.img_id, savedir=savedir)
pm.basepath.mkdir(exist_ok=True)
basename = Path(obsid.medium_img_url).name
print("Downloading", basename)
urlretrieve(obsid.medium_img_url, str(pm.basepath / basename)) |
def to_html(doc, output="/tmp", style="dep"):
file_name = "-".join([w.text for w in doc[:6] if not w.is_punct]) + ".html"
html = displacy.render(doc, style=style, page=True)
if output is not None:
output_path = Path(output)
if not output_path.exists():
output_path.mkdir()
output_file = Path(output) / file_name
output_file.open("w", encoding="utf-8").write(html)
print("Saved HTML to {}".format(output_file))
else:
print(html) | Doc method extension for saving the current state as a displaCy
visualization. | ### Input:
Doc method extension for saving the current state as a displaCy
visualization.
### Response:
def to_html(doc, output="/tmp", style="dep"):
file_name = "-".join([w.text for w in doc[:6] if not w.is_punct]) + ".html"
html = displacy.render(doc, style=style, page=True)
if output is not None:
output_path = Path(output)
if not output_path.exists():
output_path.mkdir()
output_file = Path(output) / file_name
output_file.open("w", encoding="utf-8").write(html)
print("Saved HTML to {}".format(output_file))
else:
print(html) |
def _longer_than(segments, min_dur):
if min_dur <= 0.:
return segments
long_enough = []
for seg in segments:
if sum([t[1] - t[0] for t in seg[]]) >= min_dur:
long_enough.append(seg)
return long_enough | Remove segments longer than min_dur. | ### Input:
Remove segments longer than min_dur.
### Response:
def _longer_than(segments, min_dur):
if min_dur <= 0.:
return segments
long_enough = []
for seg in segments:
if sum([t[1] - t[0] for t in seg[]]) >= min_dur:
long_enough.append(seg)
return long_enough |
def parse(value, to_np=None):
if ((isinstance(value, list) or isinstance(value, tuple)) and
not (isinstance(value[0], str))):
print()
return None
elif ((not (isinstance(value, list))) and
(not (isinstance(value, tuple))) and
(not (isinstance(value, str)))):
print()
return None
else:
if (isinstance(value, list) or isinstance(value, tuple)):
num = len(value)
epochs = []
for x in range(0, num):
epochs.append(CDFepoch._parse_epoch(value[x]))
if (to_np == None):
return epochs
else:
return np.array(epochs)
else:
if (to_np == None):
return CDFepoch._parse_epoch(value)
else:
return np.array(CDFepoch._parse_epoch(value)) | Parses the provided date/time string(s) into CDF epoch value(s).
For CDF_EPOCH:
The string has to be in the form of 'dd-mmm-yyyy hh:mm:ss.xxx' or
'yyyy-mm-ddThh:mm:ss.xxx' (in iso_8601). The string is the output
from encode function.
For CDF_EPOCH16:
The string has to be in the form of
'dd-mmm-yyyy hh:mm:ss.mmm.uuu.nnn.ppp' or
'yyyy-mm-ddThh:mm:ss.mmmuuunnnppp' (in iso_8601). The string is
the output from encode function.
For TT2000:
The string has to be in the form of
'dd-mmm-yyyy hh:mm:ss.mmm.uuu.nnn' or
'yyyy-mm-ddThh:mm:ss.mmmuuunnn' (in iso_8601). The string is
the output from encode function.
Specify to_np to True, if the result should be in numpy class. | ### Input:
Parses the provided date/time string(s) into CDF epoch value(s).
For CDF_EPOCH:
The string has to be in the form of 'dd-mmm-yyyy hh:mm:ss.xxx' or
'yyyy-mm-ddThh:mm:ss.xxx' (in iso_8601). The string is the output
from encode function.
For CDF_EPOCH16:
The string has to be in the form of
'dd-mmm-yyyy hh:mm:ss.mmm.uuu.nnn.ppp' or
'yyyy-mm-ddThh:mm:ss.mmmuuunnnppp' (in iso_8601). The string is
the output from encode function.
For TT2000:
The string has to be in the form of
'dd-mmm-yyyy hh:mm:ss.mmm.uuu.nnn' or
'yyyy-mm-ddThh:mm:ss.mmmuuunnn' (in iso_8601). The string is
the output from encode function.
Specify to_np to True, if the result should be in numpy class.
### Response:
def parse(value, to_np=None):
if ((isinstance(value, list) or isinstance(value, tuple)) and
not (isinstance(value[0], str))):
print()
return None
elif ((not (isinstance(value, list))) and
(not (isinstance(value, tuple))) and
(not (isinstance(value, str)))):
print()
return None
else:
if (isinstance(value, list) or isinstance(value, tuple)):
num = len(value)
epochs = []
for x in range(0, num):
epochs.append(CDFepoch._parse_epoch(value[x]))
if (to_np == None):
return epochs
else:
return np.array(epochs)
else:
if (to_np == None):
return CDFepoch._parse_epoch(value)
else:
return np.array(CDFepoch._parse_epoch(value)) |
def chart_plot(self, ax=None, cmap=,
xlabel=, ylabel=, grid_on=True, colorbar=True):
from matplotlib.mlab import griddata
from numpy import linspace, meshgrid
import matplotlib.pyplot as plt
x = self.dropna().N
y = self.dropna().Z
z = self.dropna().values
xi = linspace(min(x), max(x), max(x) - min(x) + 1)
yi = linspace(min(y), max(y), max(y) - min(y) + 1)
Z = griddata(x, y, z, xi, yi)
X, Y = meshgrid(xi, yi)
if ax is None:
ax = plt.gca()
chart = ax.pcolormesh(X, Y, Z, cmap=cmap)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(grid_on)
ax.set_aspect()
if colorbar:
plt.colorbar(chart)
return ax | Plot a nuclear chart with (N,Z) as axis and the values
of the Table as a color scale
Parameters
----------
ax: optional matplotlib axes
defaults to current axes
cmap: a matplotlib colormap
default: 'RdBu'
xlabel: string representing the label of the x axis
default: 'N'
ylabel: string, default: 'Z'
the label of the x axis
grid_on: boolean, default: True,
whether to draw the axes grid or not
colorbar: boolean, default: True
whether to draw a colorbar or not
Returns
-------
ax: a matplotlib axes object
Example
-------
Plot the theoretical deviation for the Möller's model::
>>> Table('FRDM95').error().chart_plot() | ### Input:
Plot a nuclear chart with (N,Z) as axis and the values
of the Table as a color scale
Parameters
----------
ax: optional matplotlib axes
defaults to current axes
cmap: a matplotlib colormap
default: 'RdBu'
xlabel: string representing the label of the x axis
default: 'N'
ylabel: string, default: 'Z'
the label of the x axis
grid_on: boolean, default: True,
whether to draw the axes grid or not
colorbar: boolean, default: True
whether to draw a colorbar or not
Returns
-------
ax: a matplotlib axes object
Example
-------
Plot the theoretical deviation for the Möller's model::
>>> Table('FRDM95').error().chart_plot()
### Response:
def chart_plot(self, ax=None, cmap=,
xlabel=, ylabel=, grid_on=True, colorbar=True):
from matplotlib.mlab import griddata
from numpy import linspace, meshgrid
import matplotlib.pyplot as plt
x = self.dropna().N
y = self.dropna().Z
z = self.dropna().values
xi = linspace(min(x), max(x), max(x) - min(x) + 1)
yi = linspace(min(y), max(y), max(y) - min(y) + 1)
Z = griddata(x, y, z, xi, yi)
X, Y = meshgrid(xi, yi)
if ax is None:
ax = plt.gca()
chart = ax.pcolormesh(X, Y, Z, cmap=cmap)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(grid_on)
ax.set_aspect()
if colorbar:
plt.colorbar(chart)
return ax |
def _is_univariate_marginal(self, index_points):
num_index_points = tf.compat.dimension_value(
index_points.shape[-(self.kernel.feature_ndims + 1)])
if num_index_points is None:
warnings.warn(
)
return num_index_points == 1 | True if the given index_points would yield a univariate marginal.
Args:
index_points: the set of index set locations at which to compute the
marginal Gaussian distribution. If this set is of size 1, the marginal is
univariate.
Returns:
is_univariate: Boolean indicating whether the marginal is univariate or
multivariate. In the case of dynamic shape in the number of index points,
defaults to "multivariate" since that's the best we can do. | ### Input:
True if the given index_points would yield a univariate marginal.
Args:
index_points: the set of index set locations at which to compute the
marginal Gaussian distribution. If this set is of size 1, the marginal is
univariate.
Returns:
is_univariate: Boolean indicating whether the marginal is univariate or
multivariate. In the case of dynamic shape in the number of index points,
defaults to "multivariate" since that's the best we can do.
### Response:
def _is_univariate_marginal(self, index_points):
num_index_points = tf.compat.dimension_value(
index_points.shape[-(self.kernel.feature_ndims + 1)])
if num_index_points is None:
warnings.warn(
)
return num_index_points == 1 |
def get_predictor(self, input_names, output_names, device=0):
assert self.tower_func is not None, "Must set tower_func on the trainer to use get_predictor()!"
tower_name = .format(device) if device >= 0 else
device_id = device
device = .format(device_id) if device_id >= 0 else
try:
tower = self.tower_func.towers[tower_name]
assert tower is not None, "This is a bug!"
except KeyError:
tower = None
if tower is None:
input = PlaceholderInput()
input.setup(self.input_signature)
vs_name = self._vs_name_for_predictor(device_id)
with tfv1.variable_scope(tfv1.get_variable_scope(), reuse=True), \
tf.device(device), PredictTowerContext(
tower_name, vs_name=vs_name):
logger.info("Building graph for predict tower on device {} {}...".format(
tower_name, device,
"with variable scope ".format(vs_name) if vs_name else ))
self.tower_func(*input.get_input_tensors())
tower = self.tower_func.towers[tower_name]
input_tensors = tower.get_tensors(input_names)
output_tensors = tower.get_tensors(output_names)
predictor = OnlinePredictor(input_tensors, output_tensors)
self._predictors.append(predictor)
return predictor | This method will build the trainer's tower function under ``TowerContext(is_training=False)``,
and returns a callable predictor with input placeholders & output tensors in this tower.
This method handles the common case of inference with the same tower function.
If you want to do inference with a different tower function, you can always build the tower by yourself,
under a "reuse" variable scope and a `TowerContext(is_training=False)`.
Args:
input_names (list): list of input names, matching the inputs declared for the trainer.
output_names(list): list of tensor names without the tower prefix.
device (int): build the predictor on device '/gpu:{device}' or use -1 for '/cpu:0'.
Returns:
an :class:`OnlinePredictor`.
Example:
.. code-block:: none
# in the graph:
interesting_tensor = tf.identity(x, name='fun')
# in _setup_graph callback method:
self._predictor = self.trainer.get_predictor(['input1', 'input2'], ['fun'])
# After session is initialized (see Tutorials - Write a Callback), can use it by:
outputs = self._predictor(input1, input2)
The CycleGAN example and DQN example have more concrete use of this method. | ### Input:
This method will build the trainer's tower function under ``TowerContext(is_training=False)``,
and returns a callable predictor with input placeholders & output tensors in this tower.
This method handles the common case of inference with the same tower function.
If you want to do inference with a different tower function, you can always build the tower by yourself,
under a "reuse" variable scope and a `TowerContext(is_training=False)`.
Args:
input_names (list): list of input names, matching the inputs declared for the trainer.
output_names(list): list of tensor names without the tower prefix.
device (int): build the predictor on device '/gpu:{device}' or use -1 for '/cpu:0'.
Returns:
an :class:`OnlinePredictor`.
Example:
.. code-block:: none
# in the graph:
interesting_tensor = tf.identity(x, name='fun')
# in _setup_graph callback method:
self._predictor = self.trainer.get_predictor(['input1', 'input2'], ['fun'])
# After session is initialized (see Tutorials - Write a Callback), can use it by:
outputs = self._predictor(input1, input2)
The CycleGAN example and DQN example have more concrete use of this method.
### Response:
def get_predictor(self, input_names, output_names, device=0):
assert self.tower_func is not None, "Must set tower_func on the trainer to use get_predictor()!"
tower_name = .format(device) if device >= 0 else
device_id = device
device = .format(device_id) if device_id >= 0 else
try:
tower = self.tower_func.towers[tower_name]
assert tower is not None, "This is a bug!"
except KeyError:
tower = None
if tower is None:
input = PlaceholderInput()
input.setup(self.input_signature)
vs_name = self._vs_name_for_predictor(device_id)
with tfv1.variable_scope(tfv1.get_variable_scope(), reuse=True), \
tf.device(device), PredictTowerContext(
tower_name, vs_name=vs_name):
logger.info("Building graph for predict tower on device {} {}...".format(
tower_name, device,
"with variable scope ".format(vs_name) if vs_name else ))
self.tower_func(*input.get_input_tensors())
tower = self.tower_func.towers[tower_name]
input_tensors = tower.get_tensors(input_names)
output_tensors = tower.get_tensors(output_names)
predictor = OnlinePredictor(input_tensors, output_tensors)
self._predictors.append(predictor)
return predictor |
def _set_cluster(self):
center = self.struct[self.center_index].coords
sphere = self.struct.get_neighbors(self.struct[self.center_index], self.radius)
symbols = [self.absorbing_atom]
coords = [[0, 0, 0]]
for i, site_dist in enumerate(sphere):
site_symbol = re.sub(r"[^aA-zZ]+", "", site_dist[0].species_string)
symbols.append(site_symbol)
coords.append(site_dist[0].coords - center)
return Molecule(symbols, coords) | Compute and set the cluster of atoms as a Molecule object. The siteato
coordinates are translated such that the absorbing atom(aka central
atom) is at the origin.
Returns:
Molecule | ### Input:
Compute and set the cluster of atoms as a Molecule object. The siteato
coordinates are translated such that the absorbing atom(aka central
atom) is at the origin.
Returns:
Molecule
### Response:
def _set_cluster(self):
center = self.struct[self.center_index].coords
sphere = self.struct.get_neighbors(self.struct[self.center_index], self.radius)
symbols = [self.absorbing_atom]
coords = [[0, 0, 0]]
for i, site_dist in enumerate(sphere):
site_symbol = re.sub(r"[^aA-zZ]+", "", site_dist[0].species_string)
symbols.append(site_symbol)
coords.append(site_dist[0].coords - center)
return Molecule(symbols, coords) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.