| code | docstring | text |
|---|---|---|
def revision(directory=None, message=None, autogenerate=False, sql=False,
head='head', splice=False, branch_label=None, version_path=None,
rev_id=None):
config = current_app.extensions['migrate'].migrate.get_config(directory)
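# Alembic 0.7.0 added support for branch labels, splice and custom version paths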
if alembic_version >= (0, 7, 0):
command.revision(config, message, autogenerate=autogenerate, sql=sql,
head=head, splice=splice, branch_label=branch_label,
version_path=version_path, rev_id=rev_id)
else:
command.revision(config, message, autogenerate=autogenerate, sql=sql) | Create a new revision file. | ### Input:
Create a new revision file.
### Response:
def revision(directory=None, message=None, autogenerate=False, sql=False,
head='head', splice=False, branch_label=None, version_path=None,
rev_id=None):
config = current_app.extensions['migrate'].migrate.get_config(directory)
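# Alembic 0.7.0 added support for branch labels, splice and custom version paths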
if alembic_version >= (0, 7, 0):
command.revision(config, message, autogenerate=autogenerate, sql=sql,
head=head, splice=splice, branch_label=branch_label,
version_path=version_path, rev_id=rev_id)
else:
command.revision(config, message, autogenerate=autogenerate, sql=sql) |
def plot(self):
try:
import pyqtgraph
from .gui.utils import COLORS
from .gui.widgets.plot_standalone import StandalonePlot
from PyQt5.QtWidgets import QApplication
app = QApplication([])
plot = StandalonePlot([self], True, False)
name = self.name
if self.comment:
comment = self.comment.replace("$", "")
comment = extract_cncomment_xml(comment)
comment = fill(comment, 120).replace("\\n", " ")
title = f"{name}\n({comment})"
plot.plot.plot.plotItem.setTitle(title, color=COLORS[0])
else:
plot.plot.plot.plotItem.setTitle(name, color=COLORS[0])
plot.show()
plot.setWindowTitle(f"{self.name} - asammdf{__version__}")
app.exec_()
return
except Exception:
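# pyqtgraph/PyQt5 is unavailable or GUI plotting failed -- fall back to matplotlib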
try:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from matplotlib.widgets import Slider
except ImportError:
logging.warning("Signal plotting requires pyqtgraph or matplotlib")
return
if len(self.samples.shape) <= 1 and self.samples.dtype.names is None:
fig = plt.figure()
fig.canvas.set_window_title(self.name)
fig.text(
0.95,
0.05,
f"asammdf {__version__}",
fontsize=8,
color="red",
ha="right",
va="top",
alpha=0.5,
)
name = self.name
if self.comment:
comment = self.comment.replace("$", "")
comment = extract_cncomment_xml(comment)
comment = fill(comment, 120).replace("\\n", " ")
title = f"{name}\n({comment})"
plt.title(title)
else:
plt.title(name)
try:
if not self.master_metadata:
plt.xlabel("Time [s]")
plt.ylabel(f"[{self.unit}]")
plt.plot(self.timestamps, self.samples, "b")
plt.plot(self.timestamps, self.samples, "b.")
plt.grid(True)
plt.show()
else:
master_name, sync_type = self.master_metadata
if sync_type in (0, 1):
plt.xlabel(f"{master_name} [s]")
elif sync_type == 2:
plt.xlabel(f"{master_name} [deg]")
elif sync_type == 3:
plt.xlabel(f"{master_name} [m]")
elif sync_type == 4:
plt.xlabel(f"{master_name} [index]")
plt.ylabel(f"[{self.unit}]")
plt.plot(self.timestamps, self.samples, "b")
plt.plot(self.timestamps, self.samples, "b.")
plt.grid(True)
plt.show()
except ValueError:
plt.close(fig)
else:
try:
names = self.samples.dtype.names
if self.samples.dtype.names is None or len(names) == 1:
if names:
samples = self.samples[names[0]]
else:
samples = self.samples
shape = samples.shape[1:]
fig = plt.figure()
fig.canvas.set_window_title(self.name)
fig.text(
0.95,
0.05,
f"asammdf {__version__}",
fontsize=8,
color="red",
ha="right",
va="top",
alpha=0.5,
)
if self.comment:
comment = self.comment.replace("$", "")
plt.title(f"{self.name}\n({comment})")
else:
plt.title(self.name)
ax = fig.add_subplot(111, projection="3d")
X = np.array(range(shape[1]))
Y = np.array(range(shape[0]))
X, Y = np.meshgrid(X, Y)
Z = samples[0]
self._plot_axis = ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1)
ax_a = plt.axes([0.25, 0.1, 0.65, 0.03])
sa = Slider(
ax_a,
"Time [s]",
self.timestamps[0],
self.timestamps[-1],
valinit=self.timestamps[0],
)
def update(val):
self._plot_axis.remove()
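# pick the sample at or just before the slider time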
idx = np.searchsorted(self.timestamps, sa.val, side="right")
Z = samples[idx - 1]
self._plot_axis = ax.plot_wireframe(
X, Y, Z, rstride=1, cstride=1
)
fig.canvas.draw_idle()
sa.on_changed(update)
plt.show()
else:
fig = plt.figure()
fig.canvas.set_window_title(self.name)
fig.text(
0.95,
0.05,
f"asammdf {__version__}",
fontsize=8,
color="red",
ha="right",
va="top",
alpha=0.5,
)
if self.comment:
comment = self.comment.replace("$", "")
plt.title(f"{self.name}\n({comment})")
else:
plt.title(self.name)
ax = fig.add_subplot(111, projection="3d")
samples = self.samples[names[0]]
axis1 = self.samples[names[1]]
axis2 = self.samples[names[2]]
X, Y = np.meshgrid(axis2[0], axis1[0])
Z = samples[0]
self._plot_axis = ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1)
ax_a = plt.axes([0.25, 0.1, 0.65, 0.03])
sa = Slider(
ax_a,
"Time [s]",
self.timestamps[0],
self.timestamps[-1],
valinit=self.timestamps[0],
)
def update(val):
self._plot_axis.remove()
idx = np.searchsorted(self.timestamps, sa.val, side="right")
Z = samples[idx - 1]
X, Y = np.meshgrid(axis2[idx - 1], axis1[idx - 1])
self._plot_axis = ax.plot_wireframe(
X, Y, Z, rstride=1, cstride=1
)
fig.canvas.draw_idle()
sa.on_changed(update)
plt.show()
except Exception as err:
print(err) | plot Signal samples. Pyqtgraph is used if it is available; in this
case see the GUI plot documentation to see the available commands | ### Input:
plot Signal samples. Pyqtgraph is used if it is available; in this
case see the GUI plot documentation to see the available commands
### Response:
def plot(self):
try:
import pyqtgraph
from .gui.utils import COLORS
from .gui.widgets.plot_standalone import StandalonePlot
from PyQt5.QtWidgets import QApplication
app = QApplication([])
plot = StandalonePlot([self], True, False)
name = self.name
if self.comment:
comment = self.comment.replace("$", "")
comment = extract_cncomment_xml(comment)
comment = fill(comment, 120).replace("\\n", " ")
title = f"{name}\n({comment})"
plot.plot.plot.plotItem.setTitle(title, color=COLORS[0])
else:
plot.plot.plot.plotItem.setTitle(name, color=COLORS[0])
plot.show()
plot.setWindowTitle(f"{self.name} - asammdf{__version__}")
app.exec_()
return
except Exception:
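# pyqtgraph/PyQt5 is unavailable or GUI plotting failed -- fall back to matplotlib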
try:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from matplotlib.widgets import Slider
except ImportError:
logging.warning("Signal plotting requires pyqtgraph or matplotlib")
return
if len(self.samples.shape) <= 1 and self.samples.dtype.names is None:
fig = plt.figure()
fig.canvas.set_window_title(self.name)
fig.text(
0.95,
0.05,
f"asammdf {__version__}",
fontsize=8,
color="red",
ha="right",
va="top",
alpha=0.5,
)
name = self.name
if self.comment:
comment = self.comment.replace("$", "")
comment = extract_cncomment_xml(comment)
comment = fill(comment, 120).replace("\\n", " ")
title = f"{name}\n({comment})"
plt.title(title)
else:
plt.title(name)
try:
if not self.master_metadata:
plt.xlabel("Time [s]")
plt.ylabel(f"[{self.unit}]")
plt.plot(self.timestamps, self.samples, "b")
plt.plot(self.timestamps, self.samples, "b.")
plt.grid(True)
plt.show()
else:
master_name, sync_type = self.master_metadata
if sync_type in (0, 1):
plt.xlabel(f"{master_name} [s]")
elif sync_type == 2:
plt.xlabel(f"{master_name} [deg]")
elif sync_type == 3:
plt.xlabel(f"{master_name} [m]")
elif sync_type == 4:
plt.xlabel(f"{master_name} [index]")
plt.ylabel(f"[{self.unit}]")
plt.plot(self.timestamps, self.samples, "b")
plt.plot(self.timestamps, self.samples, "b.")
plt.grid(True)
plt.show()
except ValueError:
plt.close(fig)
else:
try:
names = self.samples.dtype.names
if self.samples.dtype.names is None or len(names) == 1:
if names:
samples = self.samples[names[0]]
else:
samples = self.samples
shape = samples.shape[1:]
fig = plt.figure()
fig.canvas.set_window_title(self.name)
fig.text(
0.95,
0.05,
f"asammdf {__version__}",
fontsize=8,
color="red",
ha="right",
va="top",
alpha=0.5,
)
if self.comment:
comment = self.comment.replace("$", "")
plt.title(f"{self.name}\n({comment})")
else:
plt.title(self.name)
ax = fig.add_subplot(111, projection="3d")
X = np.array(range(shape[1]))
Y = np.array(range(shape[0]))
X, Y = np.meshgrid(X, Y)
Z = samples[0]
self._plot_axis = ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1)
ax_a = plt.axes([0.25, 0.1, 0.65, 0.03])
sa = Slider(
ax_a,
"Time [s]",
self.timestamps[0],
self.timestamps[-1],
valinit=self.timestamps[0],
)
def update(val):
self._plot_axis.remove()
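# pick the sample at or just before the slider time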
idx = np.searchsorted(self.timestamps, sa.val, side="right")
Z = samples[idx - 1]
self._plot_axis = ax.plot_wireframe(
X, Y, Z, rstride=1, cstride=1
)
fig.canvas.draw_idle()
sa.on_changed(update)
plt.show()
else:
fig = plt.figure()
fig.canvas.set_window_title(self.name)
fig.text(
0.95,
0.05,
f"asammdf {__version__}",
fontsize=8,
color="red",
ha="right",
va="top",
alpha=0.5,
)
if self.comment:
comment = self.comment.replace("$", "")
plt.title(f"{self.name}\n({comment})")
else:
plt.title(self.name)
ax = fig.add_subplot(111, projection="3d")
samples = self.samples[names[0]]
axis1 = self.samples[names[1]]
axis2 = self.samples[names[2]]
X, Y = np.meshgrid(axis2[0], axis1[0])
Z = samples[0]
self._plot_axis = ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1)
ax_a = plt.axes([0.25, 0.1, 0.65, 0.03])
sa = Slider(
ax_a,
"Time [s]",
self.timestamps[0],
self.timestamps[-1],
valinit=self.timestamps[0],
)
def update(val):
self._plot_axis.remove()
idx = np.searchsorted(self.timestamps, sa.val, side="right")
Z = samples[idx - 1]
X, Y = np.meshgrid(axis2[idx - 1], axis1[idx - 1])
self._plot_axis = ax.plot_wireframe(
X, Y, Z, rstride=1, cstride=1
)
fig.canvas.draw_idle()
sa.on_changed(update)
plt.show()
except Exception as err:
print(err) |
def nla_reserve(msg, attrtype, attrlen):
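# total length: align the current message length, then add the aligned attribute size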
tlen = NLMSG_ALIGN(msg.nm_nlh.nlmsg_len) + nla_total_size(attrlen)
if tlen > msg.nm_size:
return None
nla = nlattr(nlmsg_tail(msg.nm_nlh))
nla.nla_type = attrtype
nla.nla_len = nla_attr_size(attrlen)
if attrlen:
padlen = nla_padlen(attrlen)
nla.bytearray[nla.nla_len:nla.nla_len + padlen] = bytearray(b'\0') * padlen
msg.nm_nlh.nlmsg_len = tlen
_LOGGER.debug('msg 0x%x: attr <0x%x> %d: Reserved %d (%d) bytes at offset +%d nlmsg_len=%d', id(msg), id(nla),
nla.nla_type, nla_total_size(attrlen), attrlen,
nla.bytearray.slice.start - nlmsg_data(msg.nm_nlh).slice.start, msg.nm_nlh.nlmsg_len)
return nla | Reserve space for an attribute.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L456
Reserves room for an attribute in the specified Netlink message and fills in the attribute header (type, length).
Returns None if there is insufficient space for the attribute.
Any padding between payload and the start of the next attribute is zeroed out.
Positional arguments:
msg -- Netlink message (nl_msg class instance).
attrtype -- attribute type (integer).
attrlen -- length of payload (integer).
Returns:
nlattr class instance allocated to the new space or None on failure. | ### Input:
Reserve space for an attribute.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L456
Reserves room for an attribute in the specified Netlink message and fills in the attribute header (type, length).
Returns None if there is insufficient space for the attribute.
Any padding between payload and the start of the next attribute is zeroed out.
Positional arguments:
msg -- Netlink message (nl_msg class instance).
attrtype -- attribute type (integer).
attrlen -- length of payload (integer).
Returns:
nlattr class instance allocated to the new space or None on failure.
### Response:
def nla_reserve(msg, attrtype, attrlen):
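# total length: align the current message length, then add the aligned attribute size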
tlen = NLMSG_ALIGN(msg.nm_nlh.nlmsg_len) + nla_total_size(attrlen)
if tlen > msg.nm_size:
return None
nla = nlattr(nlmsg_tail(msg.nm_nlh))
nla.nla_type = attrtype
nla.nla_len = nla_attr_size(attrlen)
if attrlen:
padlen = nla_padlen(attrlen)
nla.bytearray[nla.nla_len:nla.nla_len + padlen] = bytearray(b'\0') * padlen
msg.nm_nlh.nlmsg_len = tlen
_LOGGER.debug('msg 0x%x: attr <0x%x> %d: Reserved %d (%d) bytes at offset +%d nlmsg_len=%d', id(msg), id(nla),
nla.nla_type, nla_total_size(attrlen), attrlen,
nla.bytearray.slice.start - nlmsg_data(msg.nm_nlh).slice.start, msg.nm_nlh.nlmsg_len)
return nla |
def stop(self):
self.ready = False
if self._start_time is not None:
self._run_time += (time.time() - self._start_time)
self._start_time = None
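# close the listening socket if one is still open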
sock = getattr(self, 'socket', None)
if sock:
if not isinstance(self.bind_addr, six.string_types):
try:
host, port = sock.getsockname()[:2]
except socket.error as ex:
if ex.args[0] not in errors.socket_errors_to_ignore:
raise
else:
sock.close()
self.socket = None
self.requests.stop(self.shutdown_timeout) | Gracefully shutdown a server that is serving forever. | ### Input:
Gracefully shutdown a server that is serving forever.
### Response:
def stop(self):
self.ready = False
if self._start_time is not None:
self._run_time += (time.time() - self._start_time)
self._start_time = None
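# close the listening socket if one is still open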
sock = getattr(self, 'socket', None)
if sock:
if not isinstance(self.bind_addr, six.string_types):
try:
host, port = sock.getsockname()[:2]
except socket.error as ex:
if ex.args[0] not in errors.socket_errors_to_ignore:
raise
else:
sock.close()
self.socket = None
self.requests.stop(self.shutdown_timeout) |
def from_boto_instance(cls, instance):
return cls(
name=instance.tags.get('Name'),
private_ip=instance.private_ip_address,
public_ip=instance.ip_address,
instance_type=instance.instance_type,
instance_id=instance.id,
hostname=instance.dns_name,
stack_id=instance.tags.get('aws:cloudformation:stack-id'),
stack_name=instance.tags.get('aws:cloudformation:stack-name'),
logical_id=instance.tags.get('aws:cloudformation:logical-id'),
security_groups=[g.name for g in instance.groups],
launch_time=instance.launch_time,
ami_id=instance.image_id,
tags={k.lower(): v for k, v in six.iteritems(instance.tags)}
) | Loads a ``HostEntry`` from a boto instance.
:param instance: A boto instance object.
:type instance: :py:class:`boto.ec2.instance.Instance`
:rtype: :py:class:`HostEntry` | ### Input:
Loads a ``HostEntry`` from a boto instance.
:param instance: A boto instance object.
:type instance: :py:class:`boto.ec2.instance.Instance`
:rtype: :py:class:`HostEntry`
### Response:
def from_boto_instance(cls, instance):
return cls(
name=instance.tags.get('Name'),
private_ip=instance.private_ip_address,
public_ip=instance.ip_address,
instance_type=instance.instance_type,
instance_id=instance.id,
hostname=instance.dns_name,
stack_id=instance.tags.get('aws:cloudformation:stack-id'),
stack_name=instance.tags.get('aws:cloudformation:stack-name'),
logical_id=instance.tags.get('aws:cloudformation:logical-id'),
security_groups=[g.name for g in instance.groups],
launch_time=instance.launch_time,
ami_id=instance.image_id,
tags={k.lower(): v for k, v in six.iteritems(instance.tags)}
) |
def createLayout(self, className, parent=None, name=''):
layout = super(UiLoader, self).createLayout(className, parent, name)
setattr(self._baseinstance, name, layout)
return layout | Overloads the create layout method to handle the proper base
instance information, similar to the PyQt4 loading system.
:param className | <str>
parent | <QWidget> || None
name | <str> | ### Input:
Overloads the create layout method to handle the proper base
instance information, similar to the PyQt4 loading system.
:param className | <str>
parent | <QWidget> || None
name | <str>
### Response:
def createLayout(self, className, parent=None, name=''):
layout = super(UiLoader, self).createLayout(className, parent, name)
setattr(self._baseinstance, name, layout)
return layout |
def calculate_cardinality(
angle,
earthquake_hazard=None,
place_exposure=None
):
_ = earthquake_hazard, place_exposure
direction_list = tr(
'N,NNE,NE,ENE,E,ESE,SE,SSE,S,SSW,SW,WSW,W,WNW,NW,NNW').split(',')
bearing = float(angle)
direction_count = len(direction_list)
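# each of the 16 cardinal sectors spans 360 / 16 = 22.5 degrees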
direction_interval = 360. / direction_count
index = int(floor(bearing / direction_interval))
index %= direction_count
return direction_list[index] | Simple postprocessor where we compute the cardinality of an angle.
:param angle: Bearing angle.
:type angle: float
:param earthquake_hazard: The hazard to use.
:type earthquake_hazard: str
:param place_exposure: The exposure to use.
:type place_exposure: str
:return: Cardinality text.
:rtype: str | ### Input:
Simple postprocessor where we compute the cardinality of an angle.
:param angle: Bearing angle.
:type angle: float
:param earthquake_hazard: The hazard to use.
:type earthquake_hazard: str
:param place_exposure: The exposure to use.
:type place_exposure: str
:return: Cardinality text.
:rtype: str
### Response:
def calculate_cardinality(
angle,
earthquake_hazard=None,
place_exposure=None
):
_ = earthquake_hazard, place_exposure
direction_list = tr(
'N,NNE,NE,ENE,E,ESE,SE,SSE,S,SSW,SW,WSW,W,WNW,NW,NNW').split(',')
bearing = float(angle)
direction_count = len(direction_list)
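# each of the 16 cardinal sectors spans 360 / 16 = 22.5 degrees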
direction_interval = 360. / direction_count
index = int(floor(bearing / direction_interval))
index %= direction_count
return direction_list[index] |
def getbugfields(self, force_refresh=False):
if force_refresh or not self._cache.bugfields:
log.debug("Refreshing bugfields")
self._cache.bugfields = self._getbugfields()
self._cache.bugfields.sort()
log.debug("bugfields = %s", self._cache.bugfields)
return self._cache.bugfields | Calls getBugFields, which returns a list of fields in each bug
for this bugzilla instance. This can be used to set the list of attrs
on the Bug object. | ### Input:
Calls getBugFields, which returns a list of fields in each bug
for this bugzilla instance. This can be used to set the list of attrs
on the Bug object.
### Response:
def getbugfields(self, force_refresh=False):
if force_refresh or not self._cache.bugfields:
log.debug("Refreshing bugfields")
self._cache.bugfields = self._getbugfields()
self._cache.bugfields.sort()
log.debug("bugfields = %s", self._cache.bugfields)
return self._cache.bugfields |
def response_message(cls, result, request_id):
if isinstance(result, CodeMessageError):
payload = cls.error_payload(result, request_id)
else:
payload = cls.response_payload(result, request_id)
return cls.encode_payload(payload) | Convert a response result (or RPCError) to a message. | ### Input:
Convert a response result (or RPCError) to a message.
### Response:
def response_message(cls, result, request_id):
if isinstance(result, CodeMessageError):
payload = cls.error_payload(result, request_id)
else:
payload = cls.response_payload(result, request_id)
return cls.encode_payload(payload) |
def info(self, buf=None):
if buf is None:
buf = sys.stdout
lines = []
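# build an ncdump-style summary; the format strings below are reconstructed
# from xarray's Dataset.info (the original literals were lost in extraction)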
lines.append(u'xarray.Dataset {')
lines.append(u'dimensions:')
for name, size in self.dims.items():
lines.append(u'\t{name} = {size} ;'.format(name=name, size=size))
lines.append(u'\nvariables:')
for name, da in self.variables.items():
dims = u', '.join(da.dims)
lines.append(u'\t{type} {name}({dims}) ;'.format(
type=da.dtype, name=name, dims=dims))
for k, v in da.attrs.items():
lines.append(u'\t\t{name}:{k} = {v} ;'.format(name=name, k=k,
v=v))
lines.append(u'\nglobal attributes:')
for k, v in self.attrs.items():
lines.append(u'\t:{k} = {v} ;'.format(k=k, v=v))
lines.append(u'}')
buf.write(u'\n'.join(lines))
Parameters
----------
buf : writable buffer, defaults to sys.stdout
See Also
--------
pandas.DataFrame.assign
netCDF's ncdump | ### Input:
Concise summary of a Dataset variables and attributes.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
See Also
--------
pandas.DataFrame.assign
netCDF's ncdump
### Response:
def info(self, buf=None):
if buf is None:
buf = sys.stdout
lines = []
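# build an ncdump-style summary; the format strings below are reconstructed
# from xarray's Dataset.info (the original literals were lost in extraction)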
lines.append(u'xarray.Dataset {')
lines.append(u'dimensions:')
for name, size in self.dims.items():
lines.append(u'\t{name} = {size} ;'.format(name=name, size=size))
lines.append(u'\nvariables:')
for name, da in self.variables.items():
dims = u', '.join(da.dims)
lines.append(u'\t{type} {name}({dims}) ;'.format(
type=da.dtype, name=name, dims=dims))
for k, v in da.attrs.items():
lines.append(u'\t\t{name}:{k} = {v} ;'.format(name=name, k=k,
v=v))
lines.append(u'\nglobal attributes:')
for k, v in self.attrs.items():
lines.append(u'\t:{k} = {v} ;'.format(k=k, v=v))
lines.append(u'}')
buf.write(u'\n'.join(lines))
def import_attr(path):
if isinstance(path, six.binary_type):
path = path.decode("utf-8")
if not isinstance(path, six.text_type):
return path
if u"." not in path:
raise ValueError("%r should be of the form `module.attr` and we just got `attr`" % path)
module, attr = path.rsplit(u".", 1)
try:
return getattr(import_module(module), attr)
except ImportError:
raise ImportError("Module %r not found" % module)
except AttributeError:
raise AttributeError("Module %r has no attribute %r" % (module, attr))
:param path: A dotted path to a python object or a python object
:type path: :obj:`unicode` or :obj:`str` or anything
:return: The python object pointed by the dotted path or the python object unchanged | ### Input:
transform a python dotted path to the attr
:param path: A dotted path to a python object or a python object
:type path: :obj:`unicode` or :obj:`str` or anything
:return: The python object pointed by the dotted path or the python object unchanged
### Response:
def import_attr(path):
if isinstance(path, six.binary_type):
path = path.decode("utf-8")
if not isinstance(path, six.text_type):
return path
if u"." not in path:
raise ValueError("%r should be of the form `module.attr` and we just got `attr`" % path)
module, attr = path.rsplit(u".", 1)
try:
return getattr(import_module(module), attr)
except ImportError:
raise ImportError("Module %r not found" % module)
except AttributeError:
raise AttributeError("Module %r has no attribute %r" % (module, attr))
def travis_build_package():
travis_tag = os.environ.get('TRAVIS_TAG')
if not travis_tag:
print("TRAVIS_TAG environment variable is not present")
return "TRAVIS_TAG environment variable is not present"
try:
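# release tags are expected to look like '<package_name>_<version>'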
name, version = travis_tag.split("_")
except ValueError:
print("TRAVIS_TAG is not (tag is: {})".format(travis_tag))
return "TRAVIS_TAG is not (tag is: {})".format(travis_tag)
try:
version = Version(version)
except InvalidVersion:
print("Version must be a valid PEP440 version (version is: {})".format(version))
return "Version must be a valid PEP440 version (version is: {})".format(version)
if name.lower() in OMITTED_RELEASE_PACKAGES:
print("The input package {} has been disabled for release from Travis.CI.".format(name))
return
abs_dist_path = Path(os.environ['TRAVIS_BUILD_DIR'], 'dist')
create_package(name, str(abs_dist_path))
print("Produced:\n{}".format(list(abs_dist_path.glob())))
pattern = "*{}*".format(version)
packages = list(abs_dist_path.glob(pattern))
if not packages:
return "Package version does not match tag {}, abort".format(version)
pypi_server = os.environ.get("PYPI_SERVER", "default PyPI server")
print("Package created as expected and will be pushed to {}".format(pypi_server)) | Assumed called on Travis, to prepare a package to be deployed
This method prints on stdout for Travis.
Return is obj to pass to sys.exit() directly | ### Input:
Assumed called on Travis, to prepare a package to be deployed
This method prints on stdout for Travis.
Return is obj to pass to sys.exit() directly
### Response:
def travis_build_package():
travis_tag = os.environ.get('TRAVIS_TAG')
if not travis_tag:
print("TRAVIS_TAG environment variable is not present")
return "TRAVIS_TAG environment variable is not present"
try:
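# release tags are expected to look like '<package_name>_<version>'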
name, version = travis_tag.split("_")
except ValueError:
print("TRAVIS_TAG is not (tag is: {})".format(travis_tag))
return "TRAVIS_TAG is not (tag is: {})".format(travis_tag)
try:
version = Version(version)
except InvalidVersion:
print("Version must be a valid PEP440 version (version is: {})".format(version))
return "Version must be a valid PEP440 version (version is: {})".format(version)
if name.lower() in OMITTED_RELEASE_PACKAGES:
print("The input package {} has been disabled for release from Travis.CI.".format(name))
return
abs_dist_path = Path(os.environ['TRAVIS_BUILD_DIR'], 'dist')
create_package(name, str(abs_dist_path))
print("Produced:\n{}".format(list(abs_dist_path.glob())))
pattern = "*{}*".format(version)
packages = list(abs_dist_path.glob(pattern))
if not packages:
return "Package version does not match tag {}, abort".format(version)
pypi_server = os.environ.get("PYPI_SERVER", "default PyPI server")
print("Package created as expected and will be pushed to {}".format(pypi_server)) |
def ProtoFromTfRecordFiles(files,
max_entries=10000,
features=None,
is_sequence=False,
iterator_options=None):
warnings.warn(
'Use the FeatureStatisticsGenerator class instead.',  # reconstructed message; the original literal was lost in extraction
DeprecationWarning)
return FeatureStatisticsGenerator().ProtoFromTfRecordFiles(
files, max_entries, features, is_sequence, iterator_options) | Creates a feature statistics proto from a set of TFRecord files.
Args:
files: A list of dicts describing files for each dataset for the proto.
Each entry contains a 'path' field with the path to the TFRecord file
on disk and a 'name' field to identify the dataset in the proto.
max_entries: The maximum number of examples to load from each dataset
in order to create the proto. Defaults to 10000.
features: A list of strings that is a whitelist of feature names to create
feature statistics for. If set to None then all features in the
dataset
are analyzed. Defaults to None.
is_sequence: True if the input data from 'tables' are tf.SequenceExamples,
False if tf.Examples. Defaults to false.
iterator_options: Options to pass to the iterator that reads the examples.
Defaults to None.
Returns:
The feature statistics proto for the provided files. | ### Input:
Creates a feature statistics proto from a set of TFRecord files.
Args:
files: A list of dicts describing files for each dataset for the proto.
Each entry contains a 'path' field with the path to the TFRecord file
on disk and a 'name' field to identify the dataset in the proto.
max_entries: The maximum number of examples to load from each dataset
in order to create the proto. Defaults to 10000.
features: A list of strings that is a whitelist of feature names to create
feature statistics for. If set to None then all features in the
dataset
are analyzed. Defaults to None.
is_sequence: True if the input data from 'tables' are tf.SequenceExamples,
False if tf.Examples. Defaults to false.
iterator_options: Options to pass to the iterator that reads the examples.
Defaults to None.
Returns:
The feature statistics proto for the provided files.
### Response:
def ProtoFromTfRecordFiles(files,
max_entries=10000,
features=None,
is_sequence=False,
iterator_options=None):
warnings.warn(
'Use the FeatureStatisticsGenerator class instead.',  # reconstructed message; the original literal was lost in extraction
DeprecationWarning)
return FeatureStatisticsGenerator().ProtoFromTfRecordFiles(
files, max_entries, features, is_sequence, iterator_options) |
def stream_list(self, id, listener, run_async=False, timeout=__DEFAULT_STREAM_TIMEOUT, reconnect_async=False, reconnect_async_wait_sec=__DEFAULT_STREAM_RECONNECT_WAIT_SEC):
id = self.__unpack_id(id)
return self.__stream("/api/v1/streaming/list?list={}".format(id), listener, run_async=run_async, timeout=timeout, reconnect_async=reconnect_async, reconnect_async_wait_sec=reconnect_async_wait_sec) | Stream events for the current user, restricted to accounts on the given
list. | ### Input:
Stream events for the current user, restricted to accounts on the given
list.
### Response:
def stream_list(self, id, listener, run_async=False, timeout=__DEFAULT_STREAM_TIMEOUT, reconnect_async=False, reconnect_async_wait_sec=__DEFAULT_STREAM_RECONNECT_WAIT_SEC):
id = self.__unpack_id(id)
return self.__stream("/api/v1/streaming/list?list={}".format(id), listener, run_async=run_async, timeout=timeout, reconnect_async=reconnect_async, reconnect_async_wait_sec=reconnect_async_wait_sec) |
def __disambiguate_proper_names_2(self, docs, lexicon):
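# NOTE: several string literals below were lost in extraction; 'Z'
# (punctuation), 'H' (proper noun), 'G' and the punctuation regex are
# reconstructed from the Estonian morphological tag set and may differ
# from the original.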
for doc in docs:
for sentence in doc.divide( layer=WORDS, by=SENTENCES ):
sentencePos = 0
for i in range(len(sentence)):
word = sentence[i]
if all([ a[POSTAG] == 'Z' for a in word[ANALYSIS] ]) and \
not re.match('^\.\.+$', word[TEXT]):
sentencePos = 0
continue
if len(word[ANALYSIS]) > 1 and \
any([ a[POSTAG] == 'H' for a in word[ANALYSIS] ]):
if sentencePos != 0:
toDelete = []
for analysis in word[ANALYSIS]:
if analysis[POSTAG] not in ['H', 'G']:
toDelete.append( analysis )
for analysis in toDelete:
word[ANALYSIS].remove(analysis)
else:
hasRecurringProperName = False
toDelete = []
for analysis in word[ANALYSIS]:
if analysis[ROOT] in lexicon and lexicon[analysis[ROOT]] > 1:
hasRecurringProperName = True
if analysis[POSTAG] not in ['H', 'G']:
toDelete.append( analysis )
if hasRecurringProperName and toDelete:
for analysis in toDelete:
word[ANALYSIS].remove(analysis)
sentencePos += 1 | Remove redundant non-proper-name analyses:
-- if a word in the middle of a sentence has a proper name analysis,
keep only the proper name analysis/analyses;
-- if a word at the beginning of a sentence has a proper name analysis,
and the proper name lemma occurs in the corpus with frequency greater
than 1, keep only the proper name analysis/analyses; otherwise delete nothing; | ### Input:
Remove redundant non-proper-name analyses:
-- if a word in the middle of a sentence has a proper name analysis,
keep only the proper name analysis/analyses;
-- if a word at the beginning of a sentence has a proper name analysis,
and the proper name lemma occurs in the corpus with frequency greater
than 1, keep only the proper name analysis/analyses; otherwise delete nothing;
### Response:
def __disambiguate_proper_names_2(self, docs, lexicon):
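# NOTE: several string literals below were lost in extraction; 'Z'
# (punctuation), 'H' (proper noun), 'G' and the punctuation regex are
# reconstructed from the Estonian morphological tag set and may differ
# from the original.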
for doc in docs:
for sentence in doc.divide( layer=WORDS, by=SENTENCES ):
sentencePos = 0
for i in range(len(sentence)):
word = sentence[i]
if all([ a[POSTAG] == 'Z' for a in word[ANALYSIS] ]) and \
not re.match('^\.\.+$', word[TEXT]):
sentencePos = 0
continue
if len(word[ANALYSIS]) > 1 and \
any([ a[POSTAG] == 'H' for a in word[ANALYSIS] ]):
if sentencePos != 0:
toDelete = []
for analysis in word[ANALYSIS]:
if analysis[POSTAG] not in ['H', 'G']:
toDelete.append( analysis )
for analysis in toDelete:
word[ANALYSIS].remove(analysis)
else:
hasRecurringProperName = False
toDelete = []
for analysis in word[ANALYSIS]:
if analysis[ROOT] in lexicon and lexicon[analysis[ROOT]] > 1:
hasRecurringProperName = True
if analysis[POSTAG] not in ['H', 'G']:
toDelete.append( analysis )
if hasRecurringProperName and toDelete:
for analysis in toDelete:
word[ANALYSIS].remove(analysis)
sentencePos += 1 |
def Advertise(port, stype="SCOOP", sname="Broker", advertisername="Broker",
location=""):
scoop.logger.info("Launching advertiser...")
service = minusconf.Service(stype, port, sname, location)
advertiser = minusconf.ThreadAdvertiser([service], advertisername)
advertiser.start()
scoop.logger.info("Advertiser launched.")
return advertiser | stype = always SCOOP
port = comma separated ports
sname = broker unique name
location = routable location (ip or dns) | ### Input:
stype = always SCOOP
port = comma separated ports
sname = broker unique name
location = routable location (ip or dns)
### Response:
def Advertise(port, stype="SCOOP", sname="Broker", advertisername="Broker",
location=""):
scoop.logger.info("Launching advertiser...")
service = minusconf.Service(stype, port, sname, location)
advertiser = minusconf.ThreadAdvertiser([service], advertisername)
advertiser.start()
scoop.logger.info("Advertiser launched.")
return advertiser |
def is_url_locked_by_token(self, url, lock_token):
lockUrl = self.get_lock(lock_token, "root")
return lockUrl and util.is_equal_or_child_uri(lockUrl, url) | Check, if url (or any of it's parents) is locked by lock_token. | ### Input:
Check, if url (or any of it's parents) is locked by lock_token.
### Response:
def is_url_locked_by_token(self, url, lock_token):
lockUrl = self.get_lock(lock_token, "root")
return lockUrl and util.is_equal_or_child_uri(lockUrl, url) |
def header_output(self):
result = []
for key in self.keys():
result.append(key + '=' + self.get(key).value)
return '; '.join(result) | Output only the cookie key-value string.
For example: HISTORY=21341; PHPSESSION=3289012u39jsdijf28; token=233129
Output only the cookie key-value string.
For example: HISTORY=21341; PHPSESSION=3289012u39jsdijf28; token=233129
### Response:
def header_output(self):
result = []
for key in self.keys():
result.append(key + '=' + self.get(key).value)
return '; '.join(result)
def lchmod(self, mode):
if self._closed:
self._raise_closed()
self._accessor.lchmod(self, mode) | Like chmod(), except if the path points to a symlink, the symlink's
permissions are changed, rather than its target's. | ### Input:
Like chmod(), except if the path points to a symlink, the symlink's
permissions are changed, rather than its target's.
### Response:
def lchmod(self, mode):
if self._closed:
self._raise_closed()
self._accessor.lchmod(self, mode) |
def get_event_start(event_buffer, event_format):
if event_format == "csv":
return get_csv_event_start(event_buffer)
elif event_format == "xml":
return get_xml_event_start(event_buffer)
else:
return get_json_event_start(event_buffer) | dispatch event start method based on event format type | ### Input:
dispatch event start method based on event format type
### Response:
def get_event_start(event_buffer, event_format):
if event_format == "csv":
return get_csv_event_start(event_buffer)
elif event_format == "xml":
return get_xml_event_start(event_buffer)
else:
return get_json_event_start(event_buffer) |
def make_benchark(n_train, n_test, n_dim=2):
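# NOTE: the result-dict key formats below ('fit_{}', 'predict_{}_{}') are
# reconstructions; the original key strings were lost in extraction.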
X_train = np.random.rand(n_train, n_dim)
y_train = np.random.rand(n_train)
X_test = np.random.rand(n_test, n_dim)
res = {}
for variogram_model in VARIOGRAM_MODELS:
tic = time()
OK = OrdinaryKriging(X_train[:, 0], X_train[:, 1], y_train,
variogram_model=variogram_model,
verbose=False, enable_plotting=False)
res['fit_{}'.format(variogram_model)] = time() - tic
for backend in BACKENDS:
for n_closest_points in N_MOVING_WINDOW:
if backend == 'vectorized' and n_closest_points is not None:
continue
tic = time()
OK.execute('points', X_test[:, 0], X_test[:, 1],
backend=backend,
n_closest_points=n_closest_points)
res['predict_{}_{}'.format(backend, n_closest_points)] = time() - tic
return res | Compute the benchmarks for Ordinary Kriging
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
Returns
-------
res : dict
a dictionary with the timing results | ### Input:
Compute the benchmarks for Ordinary Kriging
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
Returns
-------
res : dict
a dictionary with the timing results
### Response:
def make_benchark(n_train, n_test, n_dim=2):
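# NOTE: the result-dict key formats below ('fit_{}', 'predict_{}_{}') are
# reconstructions; the original key strings were lost in extraction.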
X_train = np.random.rand(n_train, n_dim)
y_train = np.random.rand(n_train)
X_test = np.random.rand(n_test, n_dim)
res = {}
for variogram_model in VARIOGRAM_MODELS:
tic = time()
OK = OrdinaryKriging(X_train[:, 0], X_train[:, 1], y_train,
variogram_model=variogram_model,
verbose=False, enable_plotting=False)
res['fit_{}'.format(variogram_model)] = time() - tic
for backend in BACKENDS:
for n_closest_points in N_MOVING_WINDOW:
if backend == 'vectorized' and n_closest_points is not None:
continue
tic = time()
OK.execute('points', X_test[:, 0], X_test[:, 1],
backend=backend,
n_closest_points=n_closest_points)
res['predict_{}_{}'.format(backend, n_closest_points)] = time() - tic
return res |
def scandir_walk(top, skip_dirs=(), on_skip=None):
try:
scandir_it = Path2(top).scandir()
except PermissionError as err:
log.error("scandir error: %s" % err)
return
for entry in scandir_it:
if entry.is_dir(follow_symlinks=False):
if entry.name in skip_dirs:
on_skip(entry, entry.name)
else:
yield from scandir_walk(entry.path, skip_dirs, on_skip)
else:
yield entry | Just walk the filesystem tree top-down with os.scandir() and don't follow symlinks.
:param top: path to scan
:param skip_dirs: List of dir names to skip
e.g.: "__pycache__", "temp", "tmp"
:param on_skip: function that will be called if 'skip_dirs' match.
e.g.:
def on_skip(entry, pattern):
log.error("Skip pattern %r hit: %s" % (pattern, entry.path))
:return: yields os.DirEntry() instances | ### Input:
Just walk the filesystem tree top-down with os.scandir() and don't follow symlinks.
:param top: path to scan
:param skip_dirs: List of dir names to skip
e.g.: "__pycache__", "temp", "tmp"
:param on_skip: function that will be called if 'skip_dirs' match.
e.g.:
def on_skip(entry, pattern):
log.error("Skip pattern %r hit: %s" % (pattern, entry.path))
:return: yields os.DirEntry() instances
### Response:
def scandir_walk(top, skip_dirs=(), on_skip=None):
try:
scandir_it = Path2(top).scandir()
except PermissionError as err:
log.error("scandir error: %s" % err)
return
for entry in scandir_it:
if entry.is_dir(follow_symlinks=False):
if entry.name in skip_dirs:
on_skip(entry, entry.name)
else:
yield from scandir_walk(entry.path, skip_dirs, on_skip)
else:
yield entry |
async def _set_subscriptions(self, subscriptions):
url, params = self._get_subscriptions_endpoint()
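# NOTE: the payload keys below follow the Facebook Graph API app
# subscriptions endpoint; the original literals were lost in extraction.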
data = {
'object': 'page',
'callback_url': self.webhook_url,
'fields': ','.join(subscriptions),
'verify_token': self.verify_token,
}
headers = {
'Content-Type': 'application/json',
}
post = self.session.post(
url,
params=params,
data=ujson.dumps(data),
headers=headers,
)
async with post as r:
await self._handle_fb_response(r)
data = await r.json() | Set the subscriptions to a specific list of values | ### Input:
Set the subscriptions to a specific list of values
### Response:
async def _set_subscriptions(self, subscriptions):
url, params = self._get_subscriptions_endpoint()
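# NOTE: the payload keys below follow the Facebook Graph API app
# subscriptions endpoint; the original literals were lost in extraction.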
data = {
'object': 'page',
'callback_url': self.webhook_url,
'fields': ','.join(subscriptions),
'verify_token': self.verify_token,
}
headers = {
'Content-Type': 'application/json',
}
post = self.session.post(
url,
params=params,
data=ujson.dumps(data),
headers=headers,
)
async with post as r:
await self._handle_fb_response(r)
data = await r.json() |
def shell_cmd_complete(self, text: str, line: str, begidx: int, endidx: int,
complete_blank: bool = False) -> List[str]:
if not text.startswith('~') and os.path.sep not in text:
return self.get_exes_in_path(text)
else:
return self.path_complete(text, line, begidx, endidx,
lambda path: os.path.isdir(path) or os.access(path, os.X_OK)) | Performs completion of executables either in a user's path or a given path
:param text: the string prefix we are attempting to match (all returned matches must begin with it)
:param line: the current input line with leading whitespace removed
:param begidx: the beginning index of the prefix text
:param endidx: the ending index of the prefix text
:param complete_blank: If True, then a blank will complete all shell commands in a user's path
If False, then no completion is performed
Defaults to False to match Bash shell behavior
:return: a list of possible tab completions | ### Input:
Performs completion of executables either in a user's path or a given path
:param text: the string prefix we are attempting to match (all returned matches must begin with it)
:param line: the current input line with leading whitespace removed
:param begidx: the beginning index of the prefix text
:param endidx: the ending index of the prefix text
:param complete_blank: If True, then a blank will complete all shell commands in a user's path
If False, then no completion is performed
Defaults to False to match Bash shell behavior
:return: a list of possible tab completions
### Response:
def shell_cmd_complete(self, text: str, line: str, begidx: int, endidx: int,
complete_blank: bool = False) -> List[str]:
if not text.startswith('~') and os.path.sep not in text:
return self.get_exes_in_path(text)
else:
return self.path_complete(text, line, begidx, endidx,
lambda path: os.path.isdir(path) or os.access(path, os.X_OK)) |
def handle_events(self):
for event in sys.stdin:
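# the i3bar event stream opens with '[' and prefixes subsequent click events with ','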
if event.startswith('['):
continue
name = json.loads(event.lstrip(','))['name']
for obj in self.loader.objects:
if obj.output_options['name'] == name:
obj.on_click(json.loads(event.lstrip(',')))
function of the respective object. This function is run in another
thread, so as to not stall the main thread. | ### Input:
An event handler that processes events from stdin and calls the on_click
function of the respective object. This function is run in another
thread, so as to not stall the main thread.
### Response:
def handle_events(self):
for event in sys.stdin:
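# the i3bar event stream opens with '[' and prefixes subsequent click events with ','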
if event.startswith('['):
continue
name = json.loads(event.lstrip(','))['name']
for obj in self.loader.objects:
if obj.output_options['name'] == name:
obj.on_click(json.loads(event.lstrip(',')))
def _bn(editor, force=False):
eb = editor.window_arrangement.active_editor_buffer
if not force and eb.has_unsaved_changes:
editor.show_message(_NO_WRITE_SINCE_LAST_CHANGE_TEXT)
else:
editor.window_arrangement.go_to_next_buffer() | Go to next buffer. | ### Input:
Go to next buffer.
### Response:
def _bn(editor, force=False):
eb = editor.window_arrangement.active_editor_buffer
if not force and eb.has_unsaved_changes:
editor.show_message(_NO_WRITE_SINCE_LAST_CHANGE_TEXT)
else:
editor.window_arrangement.go_to_next_buffer() |
def decode(self, offset):
new_offset = offset + 1
(ctrl_byte,) = struct.unpack(b'!B', self._buffer[offset:new_offset])
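# the top 3 bits of the control byte encode the data type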
type_num = ctrl_byte >> 5
if not type_num:
(type_num, new_offset) = self._read_extended(new_offset)
(size, new_offset) = self._size_from_ctrl_byte(
ctrl_byte, new_offset, type_num)
return self._type_decoder[type_num](self, size, new_offset) | Decode a section of the data section starting at offset
Arguments:
offset -- the location of the data structure to decode | ### Input:
Decode a section of the data section starting at offset
Arguments:
offset -- the location of the data structure to decode
### Response:
def decode(self, offset):
new_offset = offset + 1
(ctrl_byte,) = struct.unpack(b'!B', self._buffer[offset:new_offset])
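# the top 3 bits of the control byte encode the data type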
type_num = ctrl_byte >> 5
if not type_num:
(type_num, new_offset) = self._read_extended(new_offset)
(size, new_offset) = self._size_from_ctrl_byte(
ctrl_byte, new_offset, type_num)
return self._type_decoder[type_num](self, size, new_offset) |
def eval_string_value(self, element, value):
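# NOTE: most string literals in this function were lost in extraction;
# the CSS function names and separators below ('string', 'attr', 'uuid',
# 'content', 'target-', 'first-letter', 'counter', ',', '|') are
# reconstructions and may differ from the original.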
strval = ''
vals = []
for term in value:
if type(term) is ast.WhitespaceToken:
pass
elif type(term) is ast.StringToken:
strval += term.value
elif type(term) is ast.IdentToken:
log(DEBUG, u"IdentToken as string: {}".format(
term.value).encode())
strval += term.value
elif type(term) is ast.LiteralToken:
log(DEBUG, u"LiteralToken as string: {}".format(
term.value).encode())
strval += term.value
elif type(term) is ast.FunctionBlock:
if term.name == 'string':
str_args = split(term.arguments, ',')
str_name = self.eval_string_value(element,
str_args[0])[0]
val = self.lookup('strings', str_name)
if val == '':
if len(str_args) > 1:
val = self.eval_string_value(element,
str_args[1])[0]
else:
log(WARN, u"{} blank string"
.format(str_name).encode())
strval += val
elif term.name == u'attr':
att_args = split(term.arguments, ',')
att_name = self.eval_string_value(element,
att_args[0])[0]
att_def = ''
if len(att_args) > 1:
att_def = self.eval_string_value(element,
att_args[1])[0]
if '|' in att_name:
ns, att = att_name.split('|')
try:
ns = self.css_namespaces[ns]
except KeyError:
log(WARN, u"Undefined namespace prefix {}"
.format(ns).encode())
continue
att_name = etree.QName(ns, att)
strval += element.etree_element.get(att_name, att_def)
elif term.name == u'uuid':
strval += self.generate_id()
elif term.name == u'content':
strval += etree.tostring(element.etree_element,
encoding='utf-8',
method='text',
with_tail=False)
elif term.name.startswith('target-'):
if strval:
vals.append(strval)
strval = ''
target_args = split(term.arguments, ',')
vref = self.eval_string_value(element,
target_args[0])[0]
vname = self.eval_string_value(element,
target_args[1])[0]
vtype = term.name[7:] + 's'
vals.append(TargetVal(self, vref[1:], vname, vtype))
elif term.name == u'first-letter':
tmpstr = self.eval_string_value(element, term.arguments)
if tmpstr:
if isinstance(tmpstr[0], basestring):
strval += tmpstr[0][0]
else:
log(WARN, u"Bad string value:"
u" nested target-* not allowed. "
u"{}".format(
serialize(value)).encode(
))
elif term.name == 'counter':
# the rest of this branch was garbled in extraction (it referenced
# 'counters' and 'pending' lookups and UTF-8 encoding); only the
# argument parsing is reconstructed here.
counterargs = [serialize(t).strip(" '")
for t in split(term.arguments, ',')]
if strval:
vals.append(strval)
return vals | Evaluate parsed string.
Returns a list of current and delayed values. | ### Input:
Evaluate parsed string.
Returns a list of current and delayed values.
### Response:
def eval_string_value(self, element, value):
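# NOTE: most string literals in this function were lost in extraction;
# the CSS function names and separators below ('string', 'attr', 'uuid',
# 'content', 'target-', 'first-letter', 'counter', ',', '|') are
# reconstructions and may differ from the original.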
strval = ''
vals = []
for term in value:
if type(term) is ast.WhitespaceToken:
pass
elif type(term) is ast.StringToken:
strval += term.value
elif type(term) is ast.IdentToken:
log(DEBUG, u"IdentToken as string: {}".format(
term.value).encode())
strval += term.value
elif type(term) is ast.LiteralToken:
log(DEBUG, u"LiteralToken as string: {}".format(
term.value).encode())
strval += term.value
elif type(term) is ast.FunctionBlock:
if term.name == 'string':
str_args = split(term.arguments, ',')
str_name = self.eval_string_value(element,
str_args[0])[0]
val = self.lookup('strings', str_name)
if val == '':
if len(str_args) > 1:
val = self.eval_string_value(element,
str_args[1])[0]
else:
log(WARN, u"{} blank string"
.format(str_name).encode())
strval += val
elif term.name == u'attr':
att_args = split(term.arguments, ',')
att_name = self.eval_string_value(element,
att_args[0])[0]
att_def = ''
if len(att_args) > 1:
att_def = self.eval_string_value(element,
att_args[1])[0]
if '|' in att_name:
ns, att = att_name.split('|')
try:
ns = self.css_namespaces[ns]
except KeyError:
log(WARN, u"Undefined namespace prefix {}"
.format(ns).encode())
continue
att_name = etree.QName(ns, att)
strval += element.etree_element.get(att_name, att_def)
elif term.name == u'uuid':
strval += self.generate_id()
elif term.name == u'content':
strval += etree.tostring(element.etree_element,
encoding='utf-8',
method='text',
with_tail=False)
elif term.name.startswith('target-'):
if strval:
vals.append(strval)
strval = ''
target_args = split(term.arguments, ',')
vref = self.eval_string_value(element,
target_args[0])[0]
vname = self.eval_string_value(element,
target_args[1])[0]
vtype = term.name[7:] + 's'
vals.append(TargetVal(self, vref[1:], vname, vtype))
elif term.name == u'first-letter':
tmpstr = self.eval_string_value(element, term.arguments)
if tmpstr:
if isinstance(tmpstr[0], basestring):
strval += tmpstr[0][0]
else:
log(WARN, u"Bad string value:"
u" nested target-* not allowed. "
u"{}".format(
serialize(value)).encode(
))
elif term.name == 'counter':
# the rest of this branch was garbled in extraction (it referenced
# 'counters' and 'pending' lookups and UTF-8 encoding); only the
# argument parsing is reconstructed here.
counterargs = [serialize(t).strip(" '")
for t in split(term.arguments, ',')]
if strval:
vals.append(strval)
return vals |
def get_user_by_email(server_context, email):
url = server_context.build_url(user_controller, 'getUsers.api')
payload = dict(includeDeactivatedAccounts=True)
result = server_context.make_request(url, payload)
if result is None or result['users'] is None:
raise ValueError("No users in container: " + email)
for user in result['users']:
if user['email'] == email:
return user
else:
raise ValueError("User not found: " + email) | Get the user with the provided email. Throws a ValueError if not found.
:param server_context: A LabKey server context. See utils.create_server_context.
:param email:
:return: | ### Input:
Get the user with the provided email. Throws a ValueError if not found.
:param server_context: A LabKey server context. See utils.create_server_context.
:param email:
:return:
### Response:
def get_user_by_email(server_context, email):
url = server_context.build_url(user_controller, 'getUsers.api')
payload = dict(includeDeactivatedAccounts=True)
result = server_context.make_request(url, payload)
if result is None or result['users'] is None:
raise ValueError("No users in container: " + email)
for user in result['users']:
if user['email'] == email:
return user
else:
raise ValueError("User not found: " + email) |
def removeMainWindow(self, mainWindow):
logger.debug("removeMainWindow called")
self.windowActionGroup.removeAction(mainWindow.activateWindowAction)
self.repopulateAllWindowMenus()
self.mainWindows.remove(mainWindow) | Removes the mainWindow from the list of windows. Saves the settings | ### Input:
Removes the mainWindow from the list of windows. Saves the settings
### Response:
def removeMainWindow(self, mainWindow):
logger.debug("removeMainWindow called")
self.windowActionGroup.removeAction(mainWindow.activateWindowAction)
self.repopulateAllWindowMenus()
self.mainWindows.remove(mainWindow) |
async def _remote_close(self, exc=None):
if self.state in (STATE_CLOSING, STATE_CLOSED):
return
log.info("close session: %s", self.id)
self.state = STATE_CLOSING
if exc is not None:
self.exception = exc
self.interrupted = True
try:
await self.handler(SockjsMessage(MSG_CLOSE, exc), self)
except Exception:
log.exception("Exception in close handler.") | close session from remote. | ### Input:
close session from remote.
### Response:
async def _remote_close(self, exc=None):
if self.state in (STATE_CLOSING, STATE_CLOSED):
return
log.info("close session: %s", self.id)
self.state = STATE_CLOSING
if exc is not None:
self.exception = exc
self.interrupted = True
try:
await self.handler(SockjsMessage(MSG_CLOSE, exc), self)
except Exception:
log.exception("Exception in close handler.") |
def get_json_lines(annotation: ResourceAnnotation, field: str, route: str,
request: bool = False) -> List:
url_params = URL_PARAMS_RE.findall(route)
if not request:
return_type = annotation.logic._doctor_signature.return_annotation
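# unwrap a doctor Response[...] annotation to its inner type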
if issubclass(return_type, Response):
if return_type.__args__ is not None:
return_type = return_type.__args__[0]
if issubclass(return_type, Array):
if issubclass(return_type.items, Object):
properties = return_type.items.properties
field += 'arr'
else:
return []
elif issubclass(return_type, Object):
properties = return_type.properties
else:
return []
else:
if annotation.logic._doctor_req_obj_type:
properties = annotation.logic._doctor_req_obj_type.properties
else:
parameters = annotation.annotated_parameters
properties = {k: p.annotation for k, p in parameters.items()}
return get_json_object_lines(annotation, properties, field, url_params,
request) | Generate documentation lines for the given annotation.
This only documents schemas of type "object", or type "list" where each
"item" is an object. Other types are ignored (but a warning is logged).
:param doctor.resource.ResourceAnnotation annotation:
Annotation object for the associated handler method.
:param str field: Sphinx field type to use (e.g. '<json').
:param str route: The route the annotation is attached to.
:param bool request: Whether the resource annotation is for the request or
not.
:returns: list of strings, one for each line. | ### Input:
Generate documentation lines for the given annotation.
This only documents schemas of type "object", or type "list" where each
"item" is an object. Other types are ignored (but a warning is logged).
:param doctor.resource.ResourceAnnotation annotation:
Annotation object for the associated handler method.
:param str field: Sphinx field type to use (e.g. '<json').
:param str route: The route the annotation is attached to.
:param bool request: Whether the resource annotation is for the request or
not.
:returns: list of strings, one for each line.
### Response:
def get_json_lines(annotation: ResourceAnnotation, field: str, route: str,
request: bool = False) -> List:
url_params = URL_PARAMS_RE.findall(route)
if not request:
return_type = annotation.logic._doctor_signature.return_annotation
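# unwrap a doctor Response[...] annotation to its inner type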
if issubclass(return_type, Response):
if return_type.__args__ is not None:
return_type = return_type.__args__[0]
if issubclass(return_type, Array):
if issubclass(return_type.items, Object):
properties = return_type.items.properties
field += 'arr'
else:
return []
elif issubclass(return_type, Object):
properties = return_type.properties
else:
return []
else:
if annotation.logic._doctor_req_obj_type:
properties = annotation.logic._doctor_req_obj_type.properties
else:
parameters = annotation.annotated_parameters
properties = {k: p.annotation for k, p in parameters.items()}
return get_json_object_lines(annotation, properties, field, url_params,
request) |
def finish_statistics(self):
self.stat.finish_layer(self.data, self.stat.params) | Prepare/modify data for plotting | ### Input:
Prepare/modify data for plotting
### Response:
def finish_statistics(self):
self.stat.finish_layer(self.data, self.stat.params) |
def main(input_filename, format):
song_data = AudioSegment.from_file(input_filename, format=format)
song_data = song_data.set_channels(1)
wav_tmp = song_data.export(format="wav")
wav_tmp.seek(0)
rate, wav_data = wavfile.read(wav_tmp)
rows_per_second = (1 + (rate - WIDTH)) // FRAME_STRIDE
window_size = (rows_per_second // TIME_STRIDE, (WIDTH // 2) // FREQ_STRIDE)
peaks = resound.get_peaks(np.array(wav_data), window_size=window_size)
f_width = WIDTH // (2 * FREQ_STRIDE) * 2
t_gap = 1 * rows_per_second
t_width = 2 * rows_per_second
fingerprints = resound.hashes(peaks, f_width=f_width, t_gap=t_gap, t_width=t_width)
return fingerprints | Calculate the fingerprint hashes of the referenced audio file and save
to disk as a pickle file | ### Input:
Calculate the fingerprint hashes of the referenced audio file and save
to disk as a pickle file
### Response:
def main(input_filename, format):
song_data = AudioSegment.from_file(input_filename, format=format)
song_data = song_data.set_channels(1)
wav_tmp = song_data.export(format="wav")
wav_tmp.seek(0)
rate, wav_data = wavfile.read(wav_tmp)
rows_per_second = (1 + (rate - WIDTH)) // FRAME_STRIDE
window_size = (rows_per_second // TIME_STRIDE, (WIDTH // 2) // FREQ_STRIDE)
peaks = resound.get_peaks(np.array(wav_data), window_size=window_size)
f_width = WIDTH // (2 * FREQ_STRIDE) * 2
t_gap = 1 * rows_per_second
t_width = 2 * rows_per_second
fingerprints = resound.hashes(peaks, f_width=f_width, t_gap=t_gap, t_width=t_width)
return fingerprints |
def methods(self) -> 'PrettyDir':
return PrettyDir(
self.obj,
[
pattr
for pattr in self.pattrs
if category_match(pattr.category, AttrCategory.FUNCTION)
],
) | Returns all methods of the inspected object.
Note that "methods" can mean "functions" when inspecting a module. | ### Input:
Returns all methods of the inspected object.
Note that "methods" can mean "functions" when inspecting a module.
### Response:
def methods(self) -> 'PrettyDir':
return PrettyDir(
self.obj,
[
pattr
for pattr in self.pattrs
if category_match(pattr.category, AttrCategory.FUNCTION)
],
) |
def date_time_ad(self, tzinfo=None, end_datetime=None, start_datetime=None):
start_time = -62135596800 if start_datetime is None else self._parse_start_datetime(start_datetime)
end_datetime = self._parse_end_datetime(end_datetime)
ts = self.generator.random.randint(start_time, end_datetime)
return datetime(1970, 1, 1, tzinfo=tzinfo) + timedelta(seconds=ts) | Get a datetime object for a date between January 1, 001 and now
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('1265-03-22 21:15:52')
:return datetime | ### Input:
Get a datetime object for a date between January 1, 001 and now
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('1265-03-22 21:15:52')
:return datetime
### Response:
def date_time_ad(self, tzinfo=None, end_datetime=None, start_datetime=None):
start_time = -62135596800 if start_datetime is None else self._parse_start_datetime(start_datetime)
end_datetime = self._parse_end_datetime(end_datetime)
ts = self.generator.random.randint(start_time, end_datetime)
return datetime(1970, 1, 1, tzinfo=tzinfo) + timedelta(seconds=ts) |
def add_scope(scope=None, scope_fn=None):
def decorator(f):
@functools.wraps(f)
def decorated(*args, **kwargs):
name = kwargs.pop("name", None)
with scope_fn(name or scope or f.__name__):
return f(*args, **kwargs)
return decorated
return decorator | Return a decorator which add a TF name/variable scope to a function.
Note that the function returned by the decorator accept an additional 'name'
parameter, which can overwrite the name scope given when the function is
created.
Args:
scope (str): name of the scope. If None, the function name is used.
scope_fn (fct): Either tf.name_scope or tf.variable_scope
Returns:
fct: the add_scope decorator | ### Input:
Return a decorator which add a TF name/variable scope to a function.
Note that the function returned by the decorator accept an additional 'name'
parameter, which can overwrite the name scope given when the function is
created.
Args:
scope (str): name of the scope. If None, the function name is used.
scope_fn (fct): Either tf.name_scope or tf.variable_scope
Returns:
fct: the add_scope decorator
### Response:
def add_scope(scope=None, scope_fn=None):
def decorator(f):
@functools.wraps(f)
def decorated(*args, **kwargs):
name = kwargs.pop("name", None)
with scope_fn(name or scope or f.__name__):
return f(*args, **kwargs)
return decorated
return decorator |
def decorate(self, func, *decorator_args, **decorator_kwargs):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper | override this in a child class with your own logic, it must return a
function that calls self.func
:param func: callback -- the function being decorated
:param decorator_args: tuple -- the arguments passed into the decorator (eg, @dec(1, 2))
:param decorator_kwargs: dict -- the named args passed into the decorator (eg, @dec(foo=1)) | ### Input:
override this in a child class with your own logic, it must return a
function that calls self.func
:param func: callback -- the function being decorated
:param decorator_args: tuple -- the arguments passed into the decorator (eg, @dec(1, 2))
:param decorator_kwargs: dict -- the named args passed into the decorator (eg, @dec(foo=1))
### Response:
def decorate(self, func, *decorator_args, **decorator_kwargs):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper |
def filesystem_present(name, create_parent=False, properties=None, cloned_from=None):
return _dataset_present(
'filesystem',
name,
create_parent=create_parent,
properties=properties,
cloned_from=cloned_from,
) | ensure filesystem exists and has properties set
name : string
name of filesystem
create_parent : boolean
creates all the non-existing parent datasets.
any property specified on the command line using the -o option is ignored.
cloned_from : string
name of snapshot to clone
properties : dict
additional zfs properties (-o)
.. note::
``cloned_from`` is only use if the filesystem does not exist yet,
when ``cloned_from`` is set after the filesystem exists it will be ignored.
.. note::
Properties do not get cloned, if you specify the properties in the
state file they will be applied on a subsequent run. | ### Input:
ensure filesystem exists and has properties set
name : string
name of filesystem
create_parent : boolean
creates all the non-existing parent datasets.
any property specified on the command line using the -o option is ignored.
cloned_from : string
name of snapshot to clone
properties : dict
additional zfs properties (-o)
.. note::
``cloned_from`` is only used if the filesystem does not exist yet,
when ``cloned_from`` is set after the filesystem exists it will be ignored.
.. note::
Properties do not get cloned, if you specify the properties in the
state file they will be applied on a subsequent run.
### Response:
def filesystem_present(name, create_parent=False, properties=None, cloned_from=None):
return _dataset_present(
'filesystem',  # dataset type for this state (assumed literal)
name,
create_parent=create_parent,
properties=properties,
cloned_from=cloned_from,
) |
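A direct-call sketch of the state above, assuming it is importable from the salt zfs state module; the dataset name and properties are hypothetical:

```python
ret = filesystem_present(
    'tank/projects/data',
    create_parent=True,
    properties={'compression': 'lz4', 'quota': '10G'},
)
# salt states conventionally return a dict with name/result/changes/comment
print(ret.get('result'), ret.get('changes'))
```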
def main():
if len(sys.argv) != 2:
raise RuntimeError()
sip_logging.init_logger(show_thread=False)
spead_config = json.loads(sys.argv[1])
try:
_path = os.path.dirname(os.path.abspath(__file__))
schema_path = os.path.join(_path, )
with open(schema_path) as schema_file:
schema = json.load(schema_file)
validate(spead_config, schema)
except ValidationError as error:
print(error.cause)
raise
sender = SpeadSender(spead_config)
sender.run() | Main function for SPEAD sender module. | ### Input:
Main function for SPEAD sender module.
### Response:
def main():
if len(sys.argv) != 2:
raise RuntimeError()
sip_logging.init_logger(show_thread=False)
spead_config = json.loads(sys.argv[1])
try:
_path = os.path.dirname(os.path.abspath(__file__))
schema_path = os.path.join(_path, )
with open(schema_path) as schema_file:
schema = json.load(schema_file)
validate(spead_config, schema)
except ValidationError as error:
print(error.cause)
raise
sender = SpeadSender(spead_config)
sender.run() |
def generate_response(self, response):
if not response:
return self.__serializer__.dump({})
return self.__serializer__.dump(response.to_dict()) | Serializes the response object and returns the
raw form.
:param response: A `pinky.core.response.Response`
object | ### Input:
Serializes the response object and returns the
raw form.
:param response: A `pinky.core.response.Response`
object
### Response:
def generate_response(self, response):
if not response:
return self.__serializer__.dump({})
return self.__serializer__.dump(response.to_dict()) |
def _seed(self, seed=-1):
if seed != -1:
self._random = NupicRandom(seed)
else:
self._random = NupicRandom() | Initialize the random seed | ### Input:
Initialize the random seed
### Response:
def _seed(self, seed=-1):
if seed != -1:
self._random = NupicRandom(seed)
else:
self._random = NupicRandom() |
def get_crime(self, persistent_id):
method = 'outcomes-for-crime/%s' % persistent_id
response = self.service.request('GET', method)
crime = Crime(self, data=response['crime'])
crime._outcomes = []
outcomes = response['outcomes']
if outcomes is not None:
for o in outcomes:
o.update({
'crime': crime,
})
crime._outcomes.append(crime.Outcome(self, o))
return crime | Get a particular crime by persistent ID. Uses the outcomes-for-crime_
API call.
.. _outcomes-for-crime:
https://data.police.uk/docs/method/outcomes-for-crime/
:rtype: Crime
:param str persistent_id: The persistent ID of the crime to get.
:return: The ``Crime`` with the given persistent ID. | ### Input:
Get a particular crime by persistent ID. Uses the outcomes-for-crime_
API call.
.. _outcomes-for-crime:
https://data.police.uk/docs/method/outcomes-for-crime/
:rtype: Crime
:param str persistent_id: The persistent ID of the crime to get.
:return: The ``Crime`` with the given persistent ID.
### Response:
def get_crime(self, persistent_id):
method = 'outcomes-for-crime/%s' % persistent_id
response = self.service.request('GET', method)
crime = Crime(self, data=response['crime'])
crime._outcomes = []
outcomes = response['outcomes']
if outcomes is not None:
for o in outcomes:
o.update({
'crime': crime,
})
crime._outcomes.append(crime.Outcome(self, o))
return crime |
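A usage sketch, assuming a configured API client exposing get_crime as above; the persistent ID is hypothetical and truncated:

```python
crime = api.get_crime('590d9e482f19e0499...')  # hypothetical persistent ID
for outcome in crime._outcomes:
    print(outcome)
```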
def envs(self):
ref_paths = [x.path for x in self.repo.refs]
return self._get_envs_from_ref_paths(ref_paths) | Check the refs and return a list of the ones which can be used as salt
environments. | ### Input:
Check the refs and return a list of the ones which can be used as salt
environments.
### Response:
def envs(self):
ref_paths = [x.path for x in self.repo.refs]
return self._get_envs_from_ref_paths(ref_paths) |
def get_server(self, key, **kwds):
kwds = dict(self.kwds, **kwds)
server = self.servers.get(key)
if server:
server.check_keywords(self.constructor, kwds)
else:
server = _CachedServer(self.constructor, key, kwds)
self.servers[key] = server
return server | Get a new or existing server for this key.
:param int key: key for the server to use | ### Input:
Get a new or existing server for this key.
:param int key: key for the server to use
### Response:
def get_server(self, key, **kwds):
kwds = dict(self.kwds, **kwds)
server = self.servers.get(key)
if server:
server.check_keywords(self.constructor, kwds)
else:
server = _CachedServer(self.constructor, key, kwds)
self.servers[key] = server
return server |
def from_string(cls, prjs):
def parse(v):
try:
return int(v)
except ValueError:
pass
try:
return float(v)
except ValueError:
return v
parts = [o.lstrip('+') for o in prjs.strip().split()]
items = map(
lambda kv: len(kv) == 2 and (kv[0], parse(kv[1])) or (kv[0], True),
(p.split('=') for p in parts))
return cls({k: v for k, v in items if '+' + k in PROJ4_PARAMS.keys()}) | Turn a PROJ.4 string into a mapping of parameters. Bare parameters
like "+no_defs" are given a value of ``True``. All keys are checked
against the ``all_proj_keys`` list.
Args:
prjs (str): A PROJ4 string. | ### Input:
Turn a PROJ.4 string into a mapping of parameters. Bare parameters
like "+no_defs" are given a value of ``True``. All keys are checked
against the ``all_proj_keys`` list.
Args:
prjs (str): A PROJ4 string.
### Response:
def from_string(cls, prjs):
def parse(v):
try:
return int(v)
except ValueError:
pass
try:
return float(v)
except ValueError:
return v
parts = [o.lstrip('+') for o in prjs.strip().split()]
items = map(
lambda kv: len(kv) == 2 and (kv[0], parse(kv[1])) or (kv[0], True),
(p.split('=') for p in parts))
return cls({k: v for k, v in items if '+' + k in PROJ4_PARAMS.keys()}) |
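The parsing logic above, stripped of the PROJ4_PARAMS whitelist for brevity, behaves like this self-contained sketch:

```python
def parse(v):
    for cast in (int, float):
        try:
            return cast(v)
        except ValueError:
            pass
    return v

prjs = '+proj=utm +zone=33 +no_defs'
parts = [p.lstrip('+') for p in prjs.strip().split()]
items = [kv.split('=') for kv in parts]
print({kv[0]: (parse(kv[1]) if len(kv) == 2 else True) for kv in items})
# -> {'proj': 'utm', 'zone': 33, 'no_defs': True}
```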
def bicolor_spectral(self):
lap = self.laplacian().astype(float)
vals, vecs = eigs(lap, k=1, which='LM')  # largest-magnitude eigenpair (assumed fill)
vec = vecs[:,0].real
return vec > 0 if vec[0] > 0 else vec < 0 | Returns an approximate 2-coloring as an array of booleans.
From "A Multiscale Pyramid Transform for Graph Signals" by Shuman et al.
Note: Assumes a single connected component, and may fail otherwise. | ### Input:
Returns an approximate 2-coloring as an array of booleans.
From "A Multiscale Pyramid Transform for Graph Signals" by Shuman et al.
Note: Assumes a single connected component, and may fail otherwise.
### Response:
def bicolor_spectral(self):
lap = self.laplacian().astype(float)
vals, vecs = eigs(lap, k=1, which='LM')  # largest-magnitude eigenpair (assumed fill)
vec = vecs[:,0].real
return vec > 0 if vec[0] > 0 else vec < 0 |
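A self-contained check of the spectral 2-coloring idea on a 6-node path graph (bipartite, so the eigenvector of the largest Laplacian eigenvalue alternates in sign); `which='LM'` matches the assumed fill above:

```python
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import eigs

A = np.diag(np.ones(5), 1)                  # adjacency of a 6-node path
A = A + A.T
L = csr_matrix(np.diag(A.sum(axis=1)) - A)  # combinatorial Laplacian
vals, vecs = eigs(L, k=1, which='LM')       # largest-magnitude eigenpair
coloring = vecs[:, 0].real > 0
print(coloring)  # alternating, e.g. [ True False  True False  True False] or its flip
```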
def magnitude(X):
r = np.real(X)
i = np.imag(X)
return np.sqrt(r * r + i * i) | Magnitude of a complex matrix. | ### Input:
Magnitude of a complex matrix.
### Response:
def magnitude(X):
r = np.real(X)
i = np.imag(X)
return np.sqrt(r * r + i * i) |
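The helper is equivalent to NumPy's built-in absolute value on complex arrays; a quick check:

```python
import numpy as np

def magnitude(X):
    r, i = np.real(X), np.imag(X)
    return np.sqrt(r * r + i * i)

X = np.array([[3 + 4j, 1j], [0, -2]])
assert np.allclose(magnitude(X), np.abs(X))  # |a+bi| = sqrt(a^2 + b^2)
```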
def partialRelease():
a = TpPd(pd=0x6)
b = MessageType(mesType=0xa)
c = ChannelDescription()
packet = a / b / c
return packet | PARTIAL RELEASE Section 9.1.26 | ### Input:
PARTIAL RELEASE Section 9.1.26
### Response:
def partialRelease():
a = TpPd(pd=0x6)
b = MessageType(mesType=0xa)
c = ChannelDescription()
packet = a / b / c
return packet |
def run(hsm, aead_backend, args):
write_pid_file(args.pid_file)
server_address = (args.listen_addr, args.listen_port)
httpd = YHSM_KSMServer(server_address,
partial(YHSM_KSMRequestHandler, hsm, aead_backend, args))
my_log_message(args.debug or args.verbose, syslog.LOG_INFO,
"Serving requests to with key handle(s) %s (YubiHSM: , AEADs in , DB in )"
% (args.listen_addr, args.listen_port, args.serve_url, args.key_handles, args.device, args.aead_dir, args.db_url))
httpd.serve_forever() | Start a BaseHTTPServer.HTTPServer and serve requests forever. | ### Input:
Start a BaseHTTPServer.HTTPServer and serve requests forever.
### Response:
def run(hsm, aead_backend, args):
write_pid_file(args.pid_file)
server_address = (args.listen_addr, args.listen_port)
httpd = YHSM_KSMServer(server_address,
partial(YHSM_KSMRequestHandler, hsm, aead_backend, args))
my_log_message(args.debug or args.verbose, syslog.LOG_INFO,
"Serving requests to with key handle(s) %s (YubiHSM: , AEADs in , DB in )"
% (args.listen_addr, args.listen_port, args.serve_url, args.key_handles, args.device, args.aead_dir, args.db_url))
httpd.serve_forever() |
def from_detections_assignment(detections_1, detections_2, assignments):
traces = []
for d1n, d2n in six.iteritems(assignments):
if d1n < len(detections_1) and d2n < len(detections_2):
traces.append(Trace(detections_1[d1n], detections_2[d2n]))
return traces | Creates traces out of given assignment and cell data. | ### Input:
Creates traces out of given assignment and cell data.
### Response:
def from_detections_assignment(detections_1, detections_2, assignments):
traces = []
for d1n, d2n in six.iteritems(assignments):
if d1n < len(detections_1) and d2n < len(detections_2):
traces.append(Trace(detections_1[d1n], detections_2[d2n]))
return traces |
def set_observable(self,tseq,qseq):
tnt = None
qnt = None
if len(tseq) > 0: tnt = tseq[0]
if len(qseq) > 0: qnt = qseq[0]
self._observable.set(len(tseq),len(qseq),tnt,qnt) | Set the observable sequence data
:param tseq: target sequence (from the homopolymer)
:param qseq: query sequence ( from the homopolymer)
:type tseq: string
:type qseq: string | ### Input:
Set the observable sequence data
:param tseq: target sequence (from the homopolymer)
:param qseq: query sequence ( from the homopolymer)
:type tseq: string
:type qseq: string
### Response:
def set_observable(self,tseq,qseq):
tnt = None
qnt = None
if len(tseq) > 0: tnt = tseq[0]
if len(qseq) > 0: qnt = qseq[0]
self._observable.set(len(tseq),len(qseq),tnt,qnt) |
def get_normalized_ratios(psmfn, header, channels, denom_channels,
min_intensity, second_psmfn, secondheader):
ratios = []
if second_psmfn is not None:
median_psmfn = second_psmfn
medianheader = secondheader
else:
median_psmfn = psmfn
medianheader = header
for psm in reader.generate_tsv_psms(median_psmfn, medianheader):
ratios.append(calc_psm_ratios(psm, channels, denom_channels,
min_intensity))
ch_medians = isonormalizing.get_medians(channels, ratios)
# report wording assumed; the original format strings were stripped
report = ('Channel medians: {}\n'.format(
', '.join(['{}: {}'.format(ch, ch_medians[ch])
for ch in channels])))
sys.stdout.write(report)
for psm in reader.generate_tsv_psms(psmfn, header):
psmratios = calc_psm_ratios(psm, channels, denom_channels,
min_intensity)
psm.update({ch: str(psmratios[ix] / ch_medians[ch])
if psmratios[ix] != 'NA' else 'NA'
for ix, ch in enumerate(channels)})
yield psm | Calculates ratios for PSM tables containing isobaric channels with
raw intensities. Normalizes the ratios by median. NA values or values
below min_intensity are excluded from the normalization. | ### Input:
Calculates ratios for PSM tables containing isobaric channels with
raw intensities. Normalizes the ratios by median. NA values or values
below min_intensity are excluded from the normalization.
### Response:
def get_normalized_ratios(psmfn, header, channels, denom_channels,
min_intensity, second_psmfn, secondheader):
ratios = []
if second_psmfn is not None:
median_psmfn = second_psmfn
medianheader = secondheader
else:
median_psmfn = psmfn
medianheader = header
for psm in reader.generate_tsv_psms(median_psmfn, medianheader):
ratios.append(calc_psm_ratios(psm, channels, denom_channels,
min_intensity))
ch_medians = isonormalizing.get_medians(channels, ratios)
# report wording assumed; the original format strings were stripped
report = ('Channel medians: {}\n'.format(
', '.join(['{}: {}'.format(ch, ch_medians[ch])
for ch in channels])))
sys.stdout.write(report)
for psm in reader.generate_tsv_psms(psmfn, header):
psmratios = calc_psm_ratios(psm, channels, denom_channels,
min_intensity)
psm.update({ch: str(psmratios[ix] / ch_medians[ch])
if psmratios[ix] != 'NA' else 'NA'
for ix, ch in enumerate(channels)})
yield psm |
def help_center_section_translation_create(self, section_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/translations
api_path = "/api/v2/help_center/sections/{section_id}/translations.json"
api_path = api_path.format(section_id=section_id)
return self.call(api_path, method="POST", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/help_center/translations#create-translation | ### Input:
https://developer.zendesk.com/rest_api/docs/help_center/translations#create-translation
### Response:
def help_center_section_translation_create(self, section_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/translations
api_path = "/api/v2/help_center/sections/{section_id}/translations.json"
api_path = api_path.format(section_id=section_id)
return self.call(api_path, method="POST", data=data, **kwargs) |
def refresh(self):
if self.type in CONST.BINARY_SENSOR_TYPES:
response = self._lupusec.get_sensors()
for device in response:
if device['device_id'] == self._device_id:
self.update(device)
return device
elif self.type == CONST.ALARM_TYPE:
response = self._lupusec.get_panel()
self.update(response)
return response
elif self.type == CONST.TYPE_POWER_SWITCH:
response = self._lupusec.get_power_switches()
for pss in response:
if pss['device_id'] == self._device_id:
self.update(pss)
return pss | Refresh a device | ### Input:
Refresh a device
### Response:
def refresh(self):
if self.type in CONST.BINARY_SENSOR_TYPES:
response = self._lupusec.get_sensors()
for device in response:
if device['device_id'] == self._device_id:
self.update(device)
return device
elif self.type == CONST.ALARM_TYPE:
response = self._lupusec.get_panel()
self.update(response)
return response
elif self.type == CONST.TYPE_POWER_SWITCH:
response = self._lupusec.get_power_switches()
for pss in response:
if pss['device_id'] == self._device_id:
self.update(pss)
return pss |
def option(*args, **kwargs):
def decorate_sub_command(method):
if not hasattr(method, "optparser"):
method.optparser = SubCmdOptionParser()
method.optparser.add_option(*args, **kwargs)
return method
def decorate_class(klass):
assert _forgiving_issubclass(klass, Cmdln)
_inherit_attr(klass, "toplevel_optparser_options", [], cp=lambda l: l[:])
klass.toplevel_optparser_options.append( (args, kwargs) )
return klass
def decorate(obj):
if _forgiving_issubclass(obj, Cmdln):
return decorate_class(obj)
else:
return decorate_sub_command(obj)
return decorate | Decorator to add an option to the optparser argument of a Cmdln
subcommand
To add a toplevel option, apply the decorator on the class itself. (see
p4.py for an example)
Example:
@cmdln.option("-E", dest="environment_path")
class MyShell(cmdln.Cmdln):
@cmdln.option("-f", "--force", help="force removal")
def do_remove(self, subcmd, opts, *args):
#... | ### Input:
Decorator to add an option to the optparser argument of a Cmdln
subcommand
To add a toplevel option, apply the decorator on the class itself. (see
p4.py for an example)
Example:
@cmdln.option("-E", dest="environment_path")
class MyShell(cmdln.Cmdln):
@cmdln.option("-f", "--force", help="force removal")
def do_remove(self, subcmd, opts, *args):
#...
### Response:
def option(*args, **kwargs):
def decorate_sub_command(method):
if not hasattr(method, "optparser"):
method.optparser = SubCmdOptionParser()
method.optparser.add_option(*args, **kwargs)
return method
def decorate_class(klass):
assert _forgiving_issubclass(klass, Cmdln)
_inherit_attr(klass, "toplevel_optparser_options", [], cp=lambda l: l[:])
klass.toplevel_optparser_options.append( (args, kwargs) )
return klass
def decorate(obj):
if _forgiving_issubclass(obj, Cmdln):
return decorate_class(obj)
else:
return decorate_sub_command(obj)
return decorate |
def _parse_example(serialized_example):
data_fields = {
"inputs": tf.VarLenFeature(tf.int64),
"targets": tf.VarLenFeature(tf.int64)
}
parsed = tf.parse_single_example(serialized_example, data_fields)
inputs = tf.sparse_tensor_to_dense(parsed["inputs"])
targets = tf.sparse_tensor_to_dense(parsed["targets"])
return inputs, targets | Return inputs and targets Tensors from a serialized tf.Example. | ### Input:
Return inputs and targets Tensors from a serialized tf.Example.
### Response:
def _parse_example(serialized_example):
data_fields = {
"inputs": tf.VarLenFeature(tf.int64),
"targets": tf.VarLenFeature(tf.int64)
}
parsed = tf.parse_single_example(serialized_example, data_fields)
inputs = tf.sparse_tensor_to_dense(parsed["inputs"])
targets = tf.sparse_tensor_to_dense(parsed["targets"])
return inputs, targets |
def apply_T2(word):
WORD = word
offset = 0
for vv in vv_sequences(WORD):
seq = vv.group(2)
if not is_diphthong(seq) and not is_long(seq):
i = vv.start(2) + 1 + offset
WORD = WORD[:i] + '.' + WORD[i:]
offset += 1
RULE = ' T2' if word != WORD else ''  # rule tag assumed
return WORD, RULE | There is a syllable boundary within a VV sequence of two nonidentical
vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa]. | ### Input:
There is a syllable boundary within a VV sequence of two nonidentical
vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].
### Response:
def apply_T2(word):
WORD = word
offset = 0
for vv in vv_sequences(WORD):
seq = vv.group(2)
if not is_diphthong(seq) and not is_long(seq):
i = vv.start(2) + 1 + offset
WORD = WORD[:i] + '.' + WORD[i:]
offset += 1
RULE = ' T2' if word != WORD else ''  # rule tag assumed
return WORD, RULE |
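A self-contained sketch of rule T2 using an assumed (partial) Finnish diphthong set; it reproduces the docstring's [ta.e] example:

```python
import re

DIPHTHONGS = {'ai', 'ei', 'oi', 'ui', 'yi', 'au', 'eu', 'iu', 'ou',
              'ey', 'iy', 'ie', 'uo'}  # assumed, partial set

def t2(word):
    out, offset = word, 0
    # overlapping scan for vowel pairs via a lookahead capture
    for m in re.finditer(r'(?=([aeiouy]{2}))', word):
        seq = m.group(1)
        if seq[0] != seq[1] and seq not in DIPHTHONGS:  # nonidentical, not a diphthong
            i = m.start(1) + 1 + offset
            out = out[:i] + '.' + out[i:]
            offset += 1
    return out

assert t2('tae') == 'ta.e'
assert t2('taa') == 'taa'   # long vowel: no boundary
assert t2('tie') == 'tie'   # genuine diphthong: no boundary
```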
def iter_orgs(username, number=-1, etag=None):
return gh.iter_orgs(username, number, etag) if username else [] | List the organizations associated with ``username``.
:param str username: (required), login of the user
:param int number: (optional), number of orgs to return. Default: -1,
return all of the organizations
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`Organization <github3.orgs.Organization>` | ### Input:
List the organizations associated with ``username``.
:param str username: (required), login of the user
:param int number: (optional), number of orgs to return. Default: -1,
return all of the organizations
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`Organization <github3.orgs.Organization>`
### Response:
def iter_orgs(username, number=-1, etag=None):
return gh.iter_orgs(username, number, etag) if username else [] |
def convert_tree_ensemble(model, feature_names, target, force_32bit_float):
if not(_HAS_XGBOOST):
raise RuntimeError()
import json
import os
feature_map = None
if isinstance(model, (_xgboost.core.Booster, _xgboost.XGBRegressor)):
if model.feature_names:
feature_map = {f:i for i,f in enumerate(model.feature_names)}
elif isinstance(model, str):
if not os.path.exists(model):
raise TypeError("Invalid path %s." % model)
with open(model) as f:
xgb_model_str = json.load(f)
feature_map = {f:i for i,f in enumerate(feature_names)}
else:
raise TypeError("Unexpected type. Expecting XGBoost model.")
mlkit_tree = _TreeEnsembleRegressor(feature_names, target)
mlkit_tree.set_default_prediction_value(0.5)
for xgb_tree_id, xgb_tree_str in enumerate(xgb_model_str):
xgb_tree_json = json.loads(xgb_tree_str)
recurse_json(mlkit_tree, xgb_tree_json, xgb_tree_id, node_id = 0,
feature_map = feature_map, force_32bit_float = force_32bit_float)
return mlkit_tree.spec | Convert a generic tree model to the protobuf spec.
This currently supports:
* Decision tree regression
Parameters
----------
model: str | Booster
Path on disk where the XGboost JSON representation of the model is or
a handle to the XGboost model.
feature_names : list of strings or None
Names of each of the features. When set to None, the feature names are
extracted from the model.
target: str,
Name of the output column.
force_32bit_float: bool
If True, then the resulting CoreML model will use 32 bit floats internally.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model | ### Input:
Convert a generic tree model to the protobuf spec.
This currently supports:
* Decision tree regression
Parameters
----------
model: str | Booster
Path on disk where the XGboost JSON representation of the model is or
a handle to the XGboost model.
feature_names : list of strings or None
Names of each of the features. When set to None, the feature names are
extracted from the model.
target: str,
Name of the output column.
force_32bit_float: bool
If True, then the resulting CoreML model will use 32 bit floats internally.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
### Response:
def convert_tree_ensemble(model, feature_names, target, force_32bit_float):
if not(_HAS_XGBOOST):
raise RuntimeError()
import json
import os
feature_map = None
if isinstance(model, (_xgboost.core.Booster, _xgboost.XGBRegressor)):
if model.feature_names:
feature_map = {f:i for i,f in enumerate(model.feature_names)}
elif isinstance(model, str):
if not os.path.exists(model):
raise TypeError("Invalid path %s." % model)
with open(model) as f:
xgb_model_str = json.load(f)
feature_map = {f:i for i,f in enumerate(feature_names)}
else:
raise TypeError("Unexpected type. Expecting XGBoost model.")
mlkit_tree = _TreeEnsembleRegressor(feature_names, target)
mlkit_tree.set_default_prediction_value(0.5)
for xgb_tree_id, xgb_tree_str in enumerate(xgb_model_str):
xgb_tree_json = json.loads(xgb_tree_str)
recurse_json(mlkit_tree, xgb_tree_json, xgb_tree_id, node_id = 0,
feature_map = feature_map, force_32bit_float = force_32bit_float)
return mlkit_tree.spec |
def exec_task(task_path, data):
if not data:
data = {: None, : task_path}
elif not isinstance(data, (str, bytes)):
data = {: json.dumps(data, cls=RequestJSONEncoder),
: task_path}
else:
if data is not None and data.startswith("file://"):
with open(data[len("file://"):]) as f:
data = f.read()
data = {: data, : task_path}
job = Job(data)
(task, task_callable) = create_task(task_path)
with delegating_job_context(job, task, task_callable) as jc:
return jc.task_callable(jc.task_data) | Execute task.
:param task_path: task path
:type task_path: str|Callable
:param data: task's data
:type data: Any
:return: | ### Input:
Execute task.
:param task_path: task path
:type task_path: str|Callable
:param data: task's data
:type data: Any
:return:
### Response:
def exec_task(task_path, data):
if not data:
data = {: None, : task_path}
elif not isinstance(data, (str, bytes)):
data = {: json.dumps(data, cls=RequestJSONEncoder),
: task_path}
else:
if data is not None and data.startswith("file://"):
with open(data[len("file://"):]) as f:
data = f.read()
data = {: data, : task_path}
job = Job(data)
(task, task_callable) = create_task(task_path)
with delegating_job_context(job, task, task_callable) as jc:
return jc.task_callable(jc.task_data) |
def kill(self, dwProcessId, bIgnoreExceptions = False):
try:
aProcess = self.system.get_process(dwProcessId)
except KeyError:
aProcess = Process(dwProcessId)
self.__cleanup_process(dwProcessId,
bIgnoreExceptions = bIgnoreExceptions)
try:
try:
if self.is_debugee(dwProcessId):
try:
if aProcess.is_alive():
aProcess.suspend()
finally:
self.detach(dwProcessId,
bIgnoreExceptions = bIgnoreExceptions)
finally:
aProcess.kill()
except Exception:
if not bIgnoreExceptions:
raise
e = sys.exc_info()[1]
warnings.warn(str(e), RuntimeWarning)
try:
aProcess.clear()
except Exception:
if not bIgnoreExceptions:
raise
e = sys.exc_info()[1]
warnings.warn(str(e), RuntimeWarning) | Kills a process currently being debugged.
@see: L{detach}
@type dwProcessId: int
@param dwProcessId: Global ID of a process to kill.
@type bIgnoreExceptions: bool
@param bIgnoreExceptions: C{True} to ignore any exceptions that may be
raised when killing the process.
@raise WindowsError: Raises an exception on error, unless
C{bIgnoreExceptions} is C{True}. | ### Input:
Kills a process currently being debugged.
@see: L{detach}
@type dwProcessId: int
@param dwProcessId: Global ID of a process to kill.
@type bIgnoreExceptions: bool
@param bIgnoreExceptions: C{True} to ignore any exceptions that may be
raised when killing the process.
@raise WindowsError: Raises an exception on error, unless
C{bIgnoreExceptions} is C{True}.
### Response:
def kill(self, dwProcessId, bIgnoreExceptions = False):
try:
aProcess = self.system.get_process(dwProcessId)
except KeyError:
aProcess = Process(dwProcessId)
self.__cleanup_process(dwProcessId,
bIgnoreExceptions = bIgnoreExceptions)
try:
try:
if self.is_debugee(dwProcessId):
try:
if aProcess.is_alive():
aProcess.suspend()
finally:
self.detach(dwProcessId,
bIgnoreExceptions = bIgnoreExceptions)
finally:
aProcess.kill()
except Exception:
if not bIgnoreExceptions:
raise
e = sys.exc_info()[1]
warnings.warn(str(e), RuntimeWarning)
try:
aProcess.clear()
except Exception:
if not bIgnoreExceptions:
raise
e = sys.exc_info()[1]
warnings.warn(str(e), RuntimeWarning) |
def vsclg(s, v1, ndim):
s = ctypes.c_double(s)
v1 = stypes.toDoubleVector(v1)
vout = stypes.emptyDoubleVector(ndim)
ndim = ctypes.c_int(ndim)
libspice.vsclg_c(s, v1, ndim, vout)
return stypes.cVectorToPython(vout) | Multiply a scalar and a double precision vector of arbitrary dimension.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vsclg_c.html
:param s: Scalar to multiply a vector
:type s: float
:param v1: Vector to be multiplied
:type v1: Array of floats
:param ndim: Dimension of v1
:type ndim: int
:return: Product vector, s*v1.
:rtype: Array of floats | ### Input:
Multiply a scalar and a double precision vector of arbitrary dimension.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vsclg_c.html
:param s: Scalar to multiply a vector
:type s: float
:param v1: Vector to be multiplied
:type v1: Array of floats
:param ndim: Dimension of v1
:type ndim: int
:return: Product vector, s*v1.
:rtype: Array of floats
### Response:
def vsclg(s, v1, ndim):
s = ctypes.c_double(s)
v1 = stypes.toDoubleVector(v1)
vout = stypes.emptyDoubleVector(ndim)
ndim = ctypes.c_int(ndim)
libspice.vsclg_c(s, v1, ndim, vout)
return stypes.cVectorToPython(vout) |
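A usage sketch, assuming the SpiceyPy package (which wraps CSPICE as above) is installed; vsclg needs no kernels:

```python
import spiceypy

print(spiceypy.vsclg(2.0, [1.0, 2.0, 3.0], 3))  # -> [2. 4. 6.]
```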
def optional_args(proxy=None):
opt_args = _get_device_grain('optional_args', proxy=proxy) or {}
if opt_args and _FORBIDDEN_OPT_ARGS:
for arg in _FORBIDDEN_OPT_ARGS:
opt_args.pop(arg, None)
return {'optional_args': opt_args} | Return the connection optional args.
.. note::
Sensitive data will not be returned.
.. versionadded:: 2017.7.0
CLI Example - select all devices connecting via port 1234:
.. code-block:: bash
salt -G 'optional_args:port:1234' test.ping
Output:
.. code-block:: yaml
device1:
True
device2:
True | ### Input:
Return the connection optional args.
.. note::
Sensitive data will not be returned.
.. versionadded:: 2017.7.0
CLI Example - select all devices connecting via port 1234:
.. code-block:: bash
salt -G 'optional_args:port:1234' test.ping
Output:
.. code-block:: yaml
device1:
True
device2:
True
### Response:
def optional_args(proxy=None):
opt_args = _get_device_grain('optional_args', proxy=proxy) or {}
if opt_args and _FORBIDDEN_OPT_ARGS:
for arg in _FORBIDDEN_OPT_ARGS:
opt_args.pop(arg, None)
return {'optional_args': opt_args} |
def make_worker_router(config, obj, queue_name):
return MessageRouter(config, obj, queue_name, VM_LANDO_WORKER_INCOMING_MESSAGES,
processor_constructor=DisconnectingWorkQueueProcessor) | Makes MessageRouter which can listen to queue_name sending lando_worker specific messages to obj.
:param config: WorkerConfig/ServerConfig: settings for connecting to the queue
:param obj: object: implements lando_worker specific methods
:param queue_name: str: name of the queue we will listen on. | ### Input:
Makes MessageRouter which can listen to queue_name sending lando_worker specific messages to obj.
:param config: WorkerConfig/ServerConfig: settings for connecting to the queue
:param obj: object: implements lando_worker specific methods
:param queue_name: str: name of the queue we will listen on.
### Response:
def make_worker_router(config, obj, queue_name):
return MessageRouter(config, obj, queue_name, VM_LANDO_WORKER_INCOMING_MESSAGES,
processor_constructor=DisconnectingWorkQueueProcessor) |
def assemble_pairs(p, pf, tag, target=["final.contigs.fasta"]):
slink(p, pf, tag)
assemble_dir(pf, target) | Take one pair of reads and assemble to contigs.fasta. | ### Input:
Take one pair of reads and assemble to contigs.fasta.
### Response:
def assemble_pairs(p, pf, tag, target=["final.contigs.fasta"]):
slink(p, pf, tag)
assemble_dir(pf, target) |
def lbfgs(x, rho, f_df, maxiter=20):
def f_df_augmented(theta):
f, df = f_df(theta)
obj = f + (rho / 2.) * np.linalg.norm(theta - x) ** 2
grad = df + rho * (theta - x)
return obj, grad
res = scipy_minimize(f_df_augmented, x, jac=True, method='L-BFGS-B',
options={'maxiter': maxiter, 'disp': False})
return res.x | Minimize the proximal operator of a given objective using L-BFGS
Parameters
----------
f_df : function
Returns the objective and gradient of the function to minimize
maxiter : int
Maximum number of L-BFGS iterations | ### Input:
Minimize the proximal operator of a given objective using L-BFGS
Parameters
----------
f_df : function
Returns the objective and gradient of the function to minimize
maxiter : int
Maximum number of L-BFGS iterations
### Response:
def lbfgs(x, rho, f_df, maxiter=20):
def f_df_augmented(theta):
f, df = f_df(theta)
obj = f + (rho / 2.) * np.linalg.norm(theta - x) ** 2
grad = df + rho * (theta - x)
return obj, grad
res = scipy_minimize(f_df_augmented, x, jac=True, method='L-BFGS-B',
options={'maxiter': maxiter, 'disp': False})
return res.x |
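The augmented objective above is exactly the proximal operator prox_{f/ρ}(x) = argmin_θ f(θ) + (ρ/2)‖θ − x‖²; for f(θ) = ½‖θ‖² it has the closed form ρx/(1 + ρ), which gives a quick correctness check (a sketch assuming SciPy; 'L-BFGS-B' matches the assumed fill above):

```python
import numpy as np
from scipy.optimize import minimize as scipy_minimize

f_df = lambda th: (0.5 * np.dot(th, th), th)  # f and its gradient
x, rho = np.array([3.0, -1.0]), 2.0

def f_df_augmented(theta):
    f, df = f_df(theta)
    return (f + (rho / 2.) * np.linalg.norm(theta - x) ** 2,
            df + rho * (theta - x))

res = scipy_minimize(f_df_augmented, x, jac=True, method='L-BFGS-B',
                     options={'maxiter': 20, 'disp': False})
assert np.allclose(res.x, x * rho / (1 + rho), atol=1e-4)  # closed-form prox
```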
def compile_sequence(cycles, program_or_profile='program',
unit_converter=None):
if unit_converter is None:
cv_cycles = cycles
else:
cv_cycles = convert_sequence_to_motor_units(cycles, \
unit_converter=unit_converter)
commands = []
previous_motion = {'A': [], 'AD': [], 'V': [], 'D': []}
for cycle in cv_cycles:
iterations = int(cycle['iterations'])
if iterations > 1:
previous_motion = {'A': [], 'AD': [], 'V': [], 'D': []}
if program_or_profile != 'profile':
commands.append('L' + str(iterations))
else:
# 'PLOOP' is the assumed profile-mode loop command; the docstring
# examples only show program-mode loops ('L'/'LN')
commands.append('PLOOP' + str(iterations))
for i in range(0, len(cycle['moves'])):
new_motion = cycle['moves'][i]
if program_or_profile == 'profile' \
and new_motion['AD'] == 0.0:
new_motion['AD'] = new_motion['A']
for k in ('A', 'AD', 'V'):
if previous_motion[k] != new_motion[k]:
val = round(float(new_motion[k]), 4)
if val == int(val):
val = int(val)
commands.append(k + str(val))
if previous_motion['D'] != new_motion['D']:
if previous_motion['D'] == -new_motion['D']:
commands.append('D~')
else:
commands.append('D'
+ str(int(new_motion['D'])))
wait_time = cycle['wait_times'][i]
if program_or_profile != 'profile':
commands.append('GO1')
commands.append('WAIT(AS.1=b0)')
if wait_time != 0:
wait_time = round(float(wait_time), 3)
if wait_time == int(wait_time):
wait_time = int(wait_time)
commands.append('T' + str(wait_time))
else:
commands.append('VF0')
commands.append('GOBUF1')
if wait_time != 0:
commands.append('GOWHEN(T='
+ str(int(1000*wait_time))
+ ')')
previous_motion = new_motion
if iterations > 1:
if program_or_profile != 'profile':
commands.append('LN')
else:
commands.append('PLN')  # assumed profile counterpart of 'LN'
return commands | Makes the command list for a move sequence.
Constructs the list of commands to execute the given sequence of
motion. Program/command line commands or profile commands can be
generated depending on the value of `program_or_profile` so that the
commands can be used to construct a program or profile later. Types
of motion supported (see Notes for how to specify) are moves from
one position to another (the motion will always come to a stop
before doing the next motion), waiting a given interval of time till
starting the next move, and looping over a sequence of moves.
Parameters
----------
cycles : iterable of dicts
The iterable of cycles of motion to do one after another. See
Notes for format.
program_or_profile : {'program', 'profile'}, optional
Whether program or profile motion commands should be used.
Anything other than these two values implies the default.
unit_converter : UnitConverter, optional
``GeminiMotorDrive.utilities.UnitConverter`` to use to convert
the units in `cycles` to motor units. ``None`` indicates that
they are already in motor units.
Returns
-------
commands : list of str
``list`` of ``str`` commands making up the move sequence.
Notes
-----
`cycles` is an iterable of individual cycles of motion. Each cycle
is a ``dict`` that represents a sequence of moves that could
possibly be looped over. The field ``'iterations'`` gives how many
times the sequence of moves should be done (a value > 1 implies a
loop). Then the field ``'moves'`` is an iterable of the individual
moves. Each individual move is a ``dict`` with the acceleration
(``'A'``), deceleration (``'AD'`` with 0 meaning the value of the
acceleration is used), velocity (``'V'``), and the distance/position
(``'D'``). Back in the cycle, the field ``'wait_times'`` is an
iterable of numbers giving the time in seconds to wait after each
move before going onto the next.
See Also
--------
get_sequence_time
convert_sequence_to_motor_units
GeminiMotorDrive.utilities.UnitConverter
Examples
--------
Simple program style two motions with a pause in between.
>>> from GeminiMotorDrive.compilers.move_sequence import *
>>> cycles = [{'iterations':1, 'wait_times':[1, 0],
... 'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100},
... {'A':90, 'AD':0, 'D':-1000, 'V':100}]}]
>>> compile_sequence(cycles)
['A100',
'AD0',
'V100',
'D-1000',
'GO1',
'WAIT(AS.1=b0)',
'T1',
'A90',
'GO1',
'WAIT(AS.1=b0)']
The same motion but in profile style commands
>>> from GeminiMotorDrive.compilers.move_sequence import *
>>> cycles = [{'iterations':1, 'wait_times':[1, 0],
... 'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100},
... {'A':90, 'AD':0, 'D':-1000, 'V':100}]}]
>>> compile_sequence(cycles, program_or_profile='profile')
['A100',
'AD100',
'V100',
'D-1000',
'VF0',
'GOBUF1',
'GOWHEN(T=1000)',
'A90',
'AD90',
'VF0',
'GOBUF1']
Another motion with a back and forth loop (100 iterations) in the
middle, done in program style commands.
>>> from GeminiMotorDrive.compilers.move_sequence import *
>>> cycles = [{'iterations':1, 'wait_times':[1],
... 'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100}]},
... {'iterations':100, 'wait_times':[0, 0],
... 'moves':[{'A':50, 'AD':40, 'D':-1000, 'V':30},
... {'A':50, 'AD':40, 'D':1000, 'V':30}]},
... {'iterations':1, 'wait_times':[0],
... 'moves':[{'A':100, 'AD':0, 'D':1000, 'V':100}]}]
>>> compile_sequence(cycles)
['A100',
'AD0',
'V100',
'D-1000',
'GO1',
'WAIT(AS.1=b0)',
'T1',
'L100',
'A50',
'AD40',
'V30',
'D-1000',
'GO1',
'WAIT(AS.1=b0)',
'D~',
'GO1',
'WAIT(AS.1=b0)',
'LN',
'A100',
'AD0',
'V100',
'GO1',
'WAIT(AS.1=b0)'] | ### Input:
Makes the command list for a move sequence.
Constructs the list of commands to execute the given sequence of
motion. Program/command line commands or profile commands can be
generated depending on the value of `program_or_profile` so that the
commands can be used to construct a program or profile later. Types
of motion supported (see Notes for how to specify) are moves from
one position to another (the motion will always come to a stop
before doing the next motion), waiting a given interval of time till
starting the next move, and looping over a sequence of moves.
Parameters
----------
cycles : iterable of dicts
The iterable of cycles of motion to do one after another. See
Notes for format.
program_or_profile : {'program', 'profile'}, optional
Whether program or profile motion commands should be used.
Anything other than these two values implies the default.
unit_converter : UnitConverter, optional
``GeminiMotorDrive.utilities.UnitConverter`` to use to convert
the units in `cycles` to motor units. ``None`` indicates that
they are already in motor units.
Returns
-------
commands : list of str
``list`` of ``str`` commands making up the move sequence.
Notes
-----
`cycles` is an iterable of individual cycles of motion. Each cycle
is a ``dict`` that represents a sequence of moves that could
possibly be looped over. The field ``'iterations'`` gives how many
times the sequence of moves should be done (a value > 1 implies a
loop). Then the field ``'moves'`` is an iterable of the individual
moves. Each individual move is a ``dict`` with the acceleration
(``'A'``), deceleration (``'AD'`` with 0 meaning the value of the
acceleration is used), velocity (``'V'``), and the distance/position
(``'D'``). Back in the cycle, the field ``'wait_times'`` is an
iterable of numbers giving the time in seconds to wait after each
move before going onto the next.
See Also
--------
get_sequence_time
convert_sequence_to_motor_units
GeminiMotorDrive.utilities.UnitConverter
Examples
--------
Simple program style two motions with a pause in between.
>>> from GeminiMotorDrive.compilers.move_sequence import *
>>> cycles = [{'iterations':1, 'wait_times':[1, 0],
... 'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100},
... {'A':90, 'AD':0, 'D':-1000, 'V':100}]}]
>>> compile_sequence(cycles)
['A100',
'AD0',
'V100',
'D-1000',
'GO1',
'WAIT(AS.1=b0)',
'T1',
'A90',
'GO1',
'WAIT(AS.1=b0)']
The same motion but in profile style commands
>>> from GeminiMotorDrive.compilers.move_sequence import *
>>> cycles = [{'iterations':1, 'wait_times':[1, 0],
... 'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100},
... {'A':90, 'AD':0, 'D':-1000, 'V':100}]}]
>>> compile_sequence(cycles, program_or_profile='profile')
['A100',
'AD100',
'V100',
'D-1000',
'VF0',
'GOBUF1',
'GOWHEN(T=1000)',
'A90',
'AD90',
'VF0',
'GOBUF1']
Another motion with a back and forth loop (100 iterations) in the
middle, done in program style commands.
>>> from GeminiMotorDrive.compilers.move_sequence import *
>>> cycles = [{'iterations':1, 'wait_times':[1],
... 'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100}]},
... {'iterations':100, 'wait_times':[0, 0],
... 'moves':[{'A':50, 'AD':40, 'D':-1000, 'V':30},
... {'A':50, 'AD':40, 'D':1000, 'V':30}]},
... {'iterations':1, 'wait_times':[0],
... 'moves':[{'A':100, 'AD':0, 'D':1000, 'V':100}]}]
>>> compile_sequence(cycles)
['A100',
'AD0',
'V100',
'D-1000',
'GO1',
'WAIT(AS.1=b0)',
'T1',
'L100',
'A50',
'AD40',
'V30',
'D-1000',
'GO1',
'WAIT(AS.1=b0)',
'D~',
'GO1',
'WAIT(AS.1=b0)',
'LN',
'A100',
'AD0',
'V100',
'GO1',
'WAIT(AS.1=b0)']
### Response:
def compile_sequence(cycles, program_or_profile='program',
unit_converter=None):
if unit_converter is None:
cv_cycles = cycles
else:
cv_cycles = convert_sequence_to_motor_units(cycles, \
unit_converter=unit_converter)
commands = []
previous_motion = {'A': [], 'AD': [], 'V': [], 'D': []}
for cycle in cv_cycles:
iterations = int(cycle['iterations'])
if iterations > 1:
previous_motion = {'A': [], 'AD': [], 'V': [], 'D': []}
if program_or_profile != 'profile':
commands.append('L' + str(iterations))
else:
# 'PLOOP' is the assumed profile-mode loop command; the docstring
# examples only show program-mode loops ('L'/'LN')
commands.append('PLOOP' + str(iterations))
for i in range(0, len(cycle['moves'])):
new_motion = cycle['moves'][i]
if program_or_profile == 'profile' \
and new_motion['AD'] == 0.0:
new_motion['AD'] = new_motion['A']
for k in ('A', 'AD', 'V'):
if previous_motion[k] != new_motion[k]:
val = round(float(new_motion[k]), 4)
if val == int(val):
val = int(val)
commands.append(k + str(val))
if previous_motion['D'] != new_motion['D']:
if previous_motion['D'] == -new_motion['D']:
commands.append('D~')
else:
commands.append('D'
+ str(int(new_motion['D'])))
wait_time = cycle['wait_times'][i]
if program_or_profile != 'profile':
commands.append('GO1')
commands.append('WAIT(AS.1=b0)')
if wait_time != 0:
wait_time = round(float(wait_time), 3)
if wait_time == int(wait_time):
wait_time = int(wait_time)
commands.append('T' + str(wait_time))
else:
commands.append('VF0')
commands.append('GOBUF1')
if wait_time != 0:
commands.append('GOWHEN(T='
+ str(int(1000*wait_time))
+ ')')
previous_motion = new_motion
if iterations > 1:
if program_or_profile != 'profile':
commands.append('LN')
else:
commands.append('PLN')  # assumed profile counterpart of 'LN'
return commands |
def standard_output_generation(self, groups, limit, points, out_of, check):
if points < out_of:
self.reasoning_routine(groups, check, priority_flag=limit)
else:
print("All tests passed!") | Generates the Terminal Output | ### Input:
Generates the Terminal Output
### Response:
def standard_output_generation(self, groups, limit, points, out_of, check):
if points < out_of:
self.reasoning_routine(groups, check, priority_flag=limit)
else:
print("All tests passed!") |
def post_change_receiver(self, instance: Model, action: Action, **kwargs):
try:
old_group_names = instance.__instance_groups.observers[self]
except (ValueError, KeyError):
old_group_names = set()
if action == Action.DELETE:
new_group_names = set()
else:
new_group_names = set(self.group_names(instance))
self.send_messages(
instance,
old_group_names - new_group_names,
Action.DELETE,
**kwargs
)
self.send_messages(
instance,
old_group_names & new_group_names,
Action.UPDATE,
**kwargs
)
self.send_messages(
instance,
new_group_names - old_group_names,
Action.CREATE,
**kwargs
) | Triggers the old_binding to possibly send to its group. | ### Input:
Triggers the old_binding to possibly send to its group.
### Response:
def post_change_receiver(self, instance: Model, action: Action, **kwargs):
try:
old_group_names = instance.__instance_groups.observers[self]
except (ValueError, KeyError):
old_group_names = set()
if action == Action.DELETE:
new_group_names = set()
else:
new_group_names = set(self.group_names(instance))
self.send_messages(
instance,
old_group_names - new_group_names,
Action.DELETE,
**kwargs
)
self.send_messages(
instance,
old_group_names & new_group_names,
Action.UPDATE,
**kwargs
)
self.send_messages(
instance,
new_group_names - old_group_names,
Action.CREATE,
**kwargs
) |
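The three send_messages calls partition the group names by plain set algebra; a standalone illustration with hypothetical group names:

```python
old, new = {'room_1', 'room_2'}, {'room_2', 'room_3'}
print(old - new)  # {'room_1'}: groups that lost the instance -> DELETE
print(old & new)  # {'room_2'}: groups that keep it           -> UPDATE
print(new - old)  # {'room_3'}: groups that gained it         -> CREATE
```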
def lx4dec(string, first):
string = stypes.stringToCharP(string)
first = ctypes.c_int(first)
last = ctypes.c_int()
nchar = ctypes.c_int()
libspice.lx4dec_c(string, first, ctypes.byref(last), ctypes.byref(nchar))
return last.value, nchar.value | Scan a string from a specified starting position for the
end of a decimal number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lx4dec_c.html
:param string: Any character string.
:type string: str
:param first: First character to scan from in string.
:type first: int
:return: last and nchar
:rtype: tuple | ### Input:
Scan a string from a specified starting position for the
end of a decimal number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lx4dec_c.html
:param string: Any character string.
:type string: str
:param first: First character to scan from in string.
:type first: int
:return: last and nchar
:rtype: tuple
### Response:
def lx4dec(string, first):
string = stypes.stringToCharP(string)
first = ctypes.c_int(first)
last = ctypes.c_int()
nchar = ctypes.c_int()
libspice.lx4dec_c(string, first, ctypes.byref(last), ctypes.byref(nchar))
return last.value, nchar.value |
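A usage sketch, assuming SpiceyPy; per the CSPICE docs, last is the index of the final character of the decimal token and nchar its length:

```python
import spiceypy

last, nchar = spiceypy.lx4dec('-12.34abc', 0)
print(last, nchar)  # expected 5 6: '-12.34' is the decimal token starting at index 0
```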
def _tempfile_as_multiline_string(self, data):
filename = FilePath(self.getTmpFilename(self.TmpDir))
data_file = open(filename, 'w')
data_file.write(data)
data_file.close()
return filename | Write a multiline string to a temp file and return the filename.
data: a multiline string to be written to a file.
* Note: the result will be the filename as a FilePath object
(which is a string subclass). | ### Input:
Write a multiline string to a temp file and return the filename.
data: a multiline string to be written to a file.
* Note: the result will be the filename as a FilePath object
(which is a string subclass).
### Response:
def _tempfile_as_multiline_string(self, data):
filename = FilePath(self.getTmpFilename(self.TmpDir))
data_file = open(filename, 'w')
data_file.write(data)
data_file.close()
return filename |
def translate_pair(self, code):
name = self.countries[code]
if code in self.OLD_NAMES:
with override(None):
source_name = force_text(name)
name = force_text(name)
if name == source_name:
for old_name in self.OLD_NAMES[code]:
with override(None):
source_old_name = force_text(old_name)
old_name = force_text(old_name)
if old_name != source_old_name:
name = old_name
break
else:
name = force_text(name)
return CountryTuple(code, name) | Force a country to the current activated translation.
:returns: ``CountryTuple(code, translated_country_name)`` namedtuple | ### Input:
Force a country to the current activated translation.
:returns: ``CountryTuple(code, translated_country_name)`` namedtuple
### Response:
def translate_pair(self, code):
name = self.countries[code]
if code in self.OLD_NAMES:
with override(None):
source_name = force_text(name)
name = force_text(name)
if name == source_name:
for old_name in self.OLD_NAMES[code]:
with override(None):
source_old_name = force_text(old_name)
old_name = force_text(old_name)
if old_name != source_old_name:
name = old_name
break
else:
name = force_text(name)
return CountryTuple(code, name) |
def this_module(npop=1):
stack = inspect.stack()
st = stack[npop]
frame = st[0]
return inspect.getmodule(frame) | Returns the module object of the module this function is called from | ### Input:
Returns the module object of the module this function is called from
### Response:
def this_module(npop=1):
stack = inspect.stack()
st = stack[npop]
frame = st[0]
return inspect.getmodule(frame) |
def _register_signal_handler(self, description, signal, handler):
from flask import signals
if not signals.signals_available:
self.app.logger.warn(.format(description))
self.app.logger.info(.format(description))
signal.connect(handler, sender=self.app, weak=False) | Registers a handler for the given signal.
:param description: a short description of the signal to handle.
:param signal: the signal to handle.
:param handler: the function to use for handling the signal. | ### Input:
Registers a handler for the given signal.
:param description: a short description of the signal to handle.
:param signal: the signal to handle.
:param handler: the function to use for handling the signal.
### Response:
def _register_signal_handler(self, description, signal, handler):
from flask import signals
if not signals.signals_available:
self.app.logger.warn(.format(description))
self.app.logger.info(.format(description))
signal.connect(handler, sender=self.app, weak=False) |
def index_by(self, column_or_label):
column = self._get_column(column_or_label)
index = {}
for key, row in zip(column, self.rows):
index.setdefault(key, []).append(row)
return index | Return a dict keyed by values in a column that contains lists of
rows corresponding to each value. | ### Input:
Return a dict keyed by values in a column that contains lists of
rows corresponding to each value.
### Response:
def index_by(self, column_or_label):
column = self._get_column(column_or_label)
index = {}
for key, row in zip(column, self.rows):
index.setdefault(key, []).append(row)
return index |
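The same grouping pattern, standalone, with a plain list of rows:

```python
rows = [('a', 1), ('b', 2), ('a', 3)]
index = {}
for row in rows:
    index.setdefault(row[0], []).append(row)
print(index)  # {'a': [('a', 1), ('a', 3)], 'b': [('b', 2)]}
```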
def get_component(self, name):
return [c for c in self.components if c.name == name][0] | Retrieve a child component given its name.
:param name: The name of the component.
:returns: The component. | ### Input:
Retrieve a child component given its name.
:param name: The name of the component.
:returns: The component.
### Response:
def get_component(self, name):
return [c for c in self.components if c.name == name][0] |
def geometrize_shapes(
shapes: DataFrame, *, use_utm: bool = False
) -> DataFrame:
import geopandas as gpd
f = shapes.copy().sort_values(["shape_id", "shape_pt_sequence"])
def my_agg(group):
d = {}
d["geometry"] = sg.LineString(
group[["shape_pt_lon", "shape_pt_lat"]].values
)
return pd.Series(d)
g = f.groupby("shape_id").apply(my_agg).reset_index()
g = gpd.GeoDataFrame(g, crs=cs.WGS84)
if use_utm:
lat, lon = f.loc[0, ["shape_pt_lat", "shape_pt_lon"]].values
crs = hp.get_utm_crs(lat, lon)
g = g.to_crs(crs)
return g | Given a GTFS shapes DataFrame, convert it to a GeoPandas
GeoDataFrame and return the result.
The result has a ``'geometry'`` column of WGS84 LineStrings
instead of the columns ``'shape_pt_sequence'``, ``'shape_pt_lon'``,
``'shape_pt_lat'``, and ``'shape_dist_traveled'``.
If ``use_utm``, then use local UTM coordinates for the geometries.
Notes
------
Requires GeoPandas. | ### Input:
Given a GTFS shapes DataFrame, convert it to a GeoPandas
GeoDataFrame and return the result.
The result has a ``'geometry'`` column of WGS84 LineStrings
instead of the columns ``'shape_pt_sequence'``, ``'shape_pt_lon'``,
``'shape_pt_lat'``, and ``'shape_dist_traveled'``.
If ``use_utm``, then use local UTM coordinates for the geometries.
Notes
------
Requires GeoPandas.
### Response:
def geometrize_shapes(
shapes: DataFrame, *, use_utm: bool = False
) -> DataFrame:
import geopandas as gpd
f = shapes.copy().sort_values(["shape_id", "shape_pt_sequence"])
def my_agg(group):
d = {}
d["geometry"] = sg.LineString(
group[["shape_pt_lon", "shape_pt_lat"]].values
)
return pd.Series(d)
g = f.groupby("shape_id").apply(my_agg).reset_index()
g = gpd.GeoDataFrame(g, crs=cs.WGS84)
if use_utm:
lat, lon = f.loc[0, ["shape_pt_lat", "shape_pt_lon"]].values
crs = hp.get_utm_crs(lat, lon)
g = g.to_crs(crs)
return g |
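A usage sketch, assuming the function above is importable and GeoPandas is installed; the two-point shape is hypothetical:

```python
import pandas as pd

shapes = pd.DataFrame({
    'shape_id': ['s1', 's1'],
    'shape_pt_sequence': [0, 1],
    'shape_pt_lon': [13.40, 13.41],
    'shape_pt_lat': [52.52, 52.53],
})
g = geometrize_shapes(shapes)   # GeoDataFrame with one WGS84 LineString per shape_id
print(g.loc[0, 'geometry'])     # LINESTRING (13.4 52.52, 13.41 52.53)
```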
def ws_disconnect(message):
language = message.channel_session[]
gr = Group(.format(language))
gr.discard(message.reply_channel) | Channels connection close.
Deregister the client | ### Input:
Channels connection close.
Deregister the client
### Response:
def ws_disconnect(message):
language = message.channel_session[]
gr = Group(.format(language))
gr.discard(message.reply_channel) |
def send_iq_and_wait_for_reply(self, iq, *,
timeout=None):
warnings.warn(
r"send_iq_and_wait_for_reply is deprecated and will be removed in"
r" 1.0",
DeprecationWarning,
stacklevel=1,
)
return (yield from self.send(iq, timeout=timeout)) | Send an IQ stanza `iq` and wait for the response. If `timeout` is not
:data:`None`, it must be the time in seconds for which to wait for a
response.
If the response is a ``"result"`` IQ, the value of the
:attr:`~aioxmpp.IQ.payload` attribute is returned. Otherwise,
the exception generated from the :attr:`~aioxmpp.IQ.error`
attribute is raised.
.. seealso::
:meth:`register_iq_response_future` and
:meth:`send_and_wait_for_sent` for other cases raising exceptions.
.. deprecated:: 0.8
This method will be removed in 1.0. Use :meth:`send` instead.
.. versionchanged:: 0.8
On a timeout, :class:`TimeoutError` is now raised instead of
:class:`asyncio.TimeoutError`. | ### Input:
Send an IQ stanza `iq` and wait for the response. If `timeout` is not
:data:`None`, it must be the time in seconds for which to wait for a
response.
If the response is a ``"result"`` IQ, the value of the
:attr:`~aioxmpp.IQ.payload` attribute is returned. Otherwise,
the exception generated from the :attr:`~aioxmpp.IQ.error`
attribute is raised.
.. seealso::
:meth:`register_iq_response_future` and
:meth:`send_and_wait_for_sent` for other cases raising exceptions.
.. deprecated:: 0.8
This method will be removed in 1.0. Use :meth:`send` instead.
.. versionchanged:: 0.8
On a timeout, :class:`TimeoutError` is now raised instead of
:class:`asyncio.TimeoutError`.
### Response:
def send_iq_and_wait_for_reply(self, iq, *,
timeout=None):
warnings.warn(
r"send_iq_and_wait_for_reply is deprecated and will be removed in"
r" 1.0",
DeprecationWarning,
stacklevel=1,
)
return (yield from self.send(iq, timeout=timeout)) |
def _prepare_graph(g, g_colors, q_cls, q_arg, adjust_graph):
g = _test_graph(g)
if adjust_graph:
pos = nx.get_node_attributes(g, 'pos')
ans = nx.to_dict_of_dicts(g)
g = adjacency2graph(ans, adjust=2, is_directed=g.is_directed())
g = QueueNetworkDiGraph(g)
if len(pos) > 0:
g.set_pos(pos)
g.new_vertex_property('vertex_color')
g.new_vertex_property('vertex_fill_color')
g.new_vertex_property('vertex_pen_width')
g.new_vertex_property('vertex_size')
g.new_edge_property('edge_control_points')
g.new_edge_property('edge_color')
g.new_edge_property('edge_marker_size')
g.new_edge_property('edge_pen_width')
# the 'cap' vertex-property key passed to _set_queues is assumed
queues = _set_queues(g, q_cls, q_arg, 'cap' in g.vertex_properties())
if 'pos' not in g.vertex_properties():
g.set_pos()
for k, e in enumerate(g.edges()):
g.set_ep(e, 'edge_pen_width', 1.25)
g.set_ep(e, 'edge_marker_size', 8)
if e[0] == e[1]:
# loop-edge color key assumed
g.set_ep(e, 'edge_color', queues[k].colors['edge_loop_color'])
else:
g.set_ep(e, 'edge_color', queues[k].colors['edge_color'])
for v in g.nodes():
g.set_vp(v, 'vertex_pen_width', 1)
g.set_vp(v, 'vertex_size', 8)
e = (v, v)
if g.is_edge(e):
g.set_vp(v, 'vertex_color', queues[g.edge_index[e]]._current_color(2))
g.set_vp(v, 'vertex_fill_color', queues[g.edge_index[e]]._current_color())
else:
g.set_vp(v, 'vertex_color', g_colors['vertex_color'])
g.set_vp(v, 'vertex_fill_color', g_colors['vertex_fill_color'])
return g, queues | Prepares a graph for use in :class:`.QueueNetwork`.
This function is called by ``__init__`` in the
:class:`.QueueNetwork` class. It creates the :class:`.QueueServer`
instances that sit on the edges, and sets various edge and node
properties that are used when drawing the graph.
Parameters
----------
g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, \
``None``, etc.
Any object that networkx can turn into a
:any:`DiGraph<networkx.DiGraph>`
g_colors : dict
A dictionary of colors. The specific keys used are
``vertex_color`` and ``vertex_fill_color`` for vertices that
do not have any loops. See :class:`.QueueNetwork` for the
default values passed.
q_cls : dict
A dictionary where the keys are integers that represent an edge
type, and the values are :class:`.QueueServer` classes.
q_args : dict
A dictionary where the keys are integers that represent an edge
type, and the values are the arguments that are used when
creating an instance of that :class:`.QueueServer` class.
adjust_graph : bool
Specifies whether the graph will be adjusted using
:func:`.adjacency2graph`.
Returns
-------
g : :class:`.QueueNetworkDiGraph`
queues : list
A list of :class:`QueueServers<.QueueServer>` where
``queues[k]`` is the ``QueueServer`` that sets on the edge with
edge index ``k``.
Notes
-----
The graph ``g`` should have the ``edge_type`` edge property map.
If it does not then an ``edge_type`` edge property is
created and set to 1.
The following properties are set by each queue: ``vertex_color``,
``vertex_fill_color``, ``edge_color``.
See :class:`.QueueServer` for more on setting these values.
The following properties are assigned as a properties to the graph;
their default values for each edge or vertex is shown:
* ``vertex_pen_width``: ``1``,
* ``vertex_size``: ``8``,
* ``edge_control_points``: ``[]``
* ``edge_marker_size``: ``8``
* ``edge_pen_width``: ``1.25``
Raises
------
TypeError
Raised when the parameter ``g`` is not of a type that can be
made into a :any:`networkx.DiGraph`. | ### Input:
Prepares a graph for use in :class:`.QueueNetwork`.
This function is called by ``__init__`` in the
:class:`.QueueNetwork` class. It creates the :class:`.QueueServer`
instances that sit on the edges, and sets various edge and node
properties that are used when drawing the graph.
Parameters
----------
g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, \
``None``, etc.
Any object that networkx can turn into a
:any:`DiGraph<networkx.DiGraph>`
g_colors : dict
A dictionary of colors. The specific keys used are
``vertex_color`` and ``vertex_fill_color`` for vertices that
do not have any loops. See :class:`.QueueNetwork` for the
default values passed.
q_cls : dict
A dictionary where the keys are integers that represent an edge
type, and the values are :class:`.QueueServer` classes.
q_args : dict
A dictionary where the keys are integers that represent an edge
type, and the values are the arguments that are used when
creating an instance of that :class:`.QueueServer` class.
adjust_graph : bool
Specifies whether the graph will be adjusted using
:func:`.adjacency2graph`.
Returns
-------
g : :class:`.QueueNetworkDiGraph`
queues : list
A list of :class:`QueueServers<.QueueServer>` where
``queues[k]`` is the ``QueueServer`` that sets on the edge with
edge index ``k``.
Notes
-----
The graph ``g`` should have the ``edge_type`` edge property map.
If it does not then an ``edge_type`` edge property is
created and set to 1.
The following properties are set by each queue: ``vertex_color``,
``vertex_fill_color``, ``edge_color``.
See :class:`.QueueServer` for more on setting these values.
The following properties are assigned as a properties to the graph;
their default values for each edge or vertex is shown:
* ``vertex_pen_width``: ``1``,
* ``vertex_size``: ``8``,
* ``edge_control_points``: ``[]``
* ``edge_marker_size``: ``8``
* ``edge_pen_width``: ``1.25``
Raises
------
TypeError
Raised when the parameter ``g`` is not of a type that can be
made into a :any:`networkx.DiGraph`.
### Response:
def _prepare_graph(g, g_colors, q_cls, q_arg, adjust_graph):
g = _test_graph(g)
if adjust_graph:
pos = nx.get_node_attributes(g, 'pos')
ans = nx.to_dict_of_dicts(g)
g = adjacency2graph(ans, adjust=2, is_directed=g.is_directed())
g = QueueNetworkDiGraph(g)
if len(pos) > 0:
g.set_pos(pos)
g.new_vertex_property('vertex_color')
g.new_vertex_property('vertex_fill_color')
g.new_vertex_property('vertex_pen_width')
g.new_vertex_property('vertex_size')
g.new_edge_property('edge_control_points')
g.new_edge_property('edge_color')
g.new_edge_property('edge_marker_size')
g.new_edge_property('edge_pen_width')
# the 'cap' vertex-property key passed to _set_queues is assumed
queues = _set_queues(g, q_cls, q_arg, 'cap' in g.vertex_properties())
if 'pos' not in g.vertex_properties():
g.set_pos()
for k, e in enumerate(g.edges()):
g.set_ep(e, 'edge_pen_width', 1.25)
g.set_ep(e, 'edge_marker_size', 8)
if e[0] == e[1]:
# loop-edge color key assumed
g.set_ep(e, 'edge_color', queues[k].colors['edge_loop_color'])
else:
g.set_ep(e, 'edge_color', queues[k].colors['edge_color'])
for v in g.nodes():
g.set_vp(v, 'vertex_pen_width', 1)
g.set_vp(v, 'vertex_size', 8)
e = (v, v)
if g.is_edge(e):
g.set_vp(v, 'vertex_color', queues[g.edge_index[e]]._current_color(2))
g.set_vp(v, 'vertex_fill_color', queues[g.edge_index[e]]._current_color())
else:
g.set_vp(v, 'vertex_color', g_colors['vertex_color'])
g.set_vp(v, 'vertex_fill_color', g_colors['vertex_fill_color'])
return g, queues |
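For orientation, here is a hedged usage sketch of the public entry point that drives this helper. It assumes the ``queueing_tool`` package API (``QueueNetwork``, ``QueueServer``, ``edge2queue``); the graph and the arguments are illustrative only.

import networkx as nx
import queueing_tool as qt

# Any object networkx can coerce into a DiGraph is acceptable input.
g = nx.random_regular_graph(3, 10)
net = qt.QueueNetwork(g=g, q_classes={1: qt.QueueServer},
                      q_args={1: {'num_servers': 2}})
# One QueueServer per edge: edge2queue[k] sits on the edge with index k.
print(len(net.edge2queue))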
def concat(a, b):
"Same as a + b, for a and b sequences."
    if not hasattr(a, '__getitem__'):
        msg = "'%s' object can't be concatenated" % type(a).__name__
raise TypeError(msg)
return a + b | Same as a + b, for a and b sequences. | ### Input:
Same as a + b, for a and b sequences.
### Response:
def concat(a, b):
"Same as a + b, for a and b sequences."
    if not hasattr(a, '__getitem__'):
        msg = "'%s' object can't be concatenated" % type(a).__name__
raise TypeError(msg)
return a + b |
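A quick behavioral check of ``concat`` as reconstructed above (the message wording follows CPython's ``operator.py``):

print(concat([1, 2], [3]))   # -> [1, 2, 3]
print(concat('ab', 'cd'))    # -> 'abcd'
try:
    concat(5, 6)             # int lacks __getitem__, so it is rejected
except TypeError as exc:
    print(exc)               # -> 'int' object can't be concatenated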
def set_value(self, comp_str, comp_att):
old_value = self._encoded_value
self._encoded_value = comp_str
try:
self._parse(comp_att)
except ValueError:
self._encoded_value = old_value
raise
self._decode() | Set the value of component. By default, the component has a simple
value.
:param string comp_str: new value of component
:param string comp_att: attribute associated with value of component
:returns: None
:exception: ValueError - incorrect value of component | ### Input:
Set the value of component. By default, the component has a simple
value.
:param string comp_str: new value of component
:param string comp_att: attribute associated with value of component
:returns: None
:exception: ValueError - incorrect value of component
### Response:
def set_value(self, comp_str, comp_att):
old_value = self._encoded_value
self._encoded_value = comp_str
try:
self._parse(comp_att)
except ValueError:
self._encoded_value = old_value
raise
self._decode() |
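The try/except above is a transactional-assignment pattern: stash the old encoded value, attempt the parse, and restore it before re-raising so a failed update leaves the component untouched. A self-contained sketch of the same pattern, with a stand-in validator in place of the real ``_parse``:

class TransactionalValue:
    def __init__(self):
        self._encoded_value = None

    def _parse(self, att):
        # stand-in validator (assumption): only 'ok' is accepted
        if att != 'ok':
            raise ValueError('bad attribute: %r' % (att,))

    def set_value(self, comp_str, comp_att):
        old_value = self._encoded_value
        self._encoded_value = comp_str
        try:
            self._parse(comp_att)
        except ValueError:
            self._encoded_value = old_value  # roll back, then propagate
            raise

tv = TransactionalValue()
try:
    tv.set_value('x', 'bad')
except ValueError:
    pass
print(tv._encoded_value)  # -> None: the failed update left no trace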
def modify_metadata(identifier, metadata,
target=None,
append=None,
append_list=None,
priority=None,
access_key=None,
secret_key=None,
debug=None,
request_kwargs=None,
**get_item_kwargs):
item = get_item(identifier, **get_item_kwargs)
return item.modify_metadata(metadata,
target=target,
append=append,
append_list=append_list,
priority=priority,
access_key=access_key,
secret_key=secret_key,
debug=debug,
request_kwargs=request_kwargs) | Modify the metadata of an existing item on Archive.org.
:type identifier: str
:param identifier: The globally unique Archive.org identifier for a given item.
:type metadata: dict
:param metadata: Metadata used to update the item.
:type target: str
:param target: (optional) The metadata target to update. Defaults to `metadata`.
:type append: bool
:param append: (optional) set to True to append metadata values to current values
rather than replacing. Defaults to ``False``.
:type append_list: bool
:param append_list: (optional) Append values to an existing multi-value
metadata field. No duplicate values will be added.
:type priority: int
:param priority: (optional) Set task priority.
:type access_key: str
:param access_key: (optional) IA-S3 access_key to use when making the given request.
:type secret_key: str
:param secret_key: (optional) IA-S3 secret_key to use when making the given request.
:type debug: bool
:param debug: (optional) set to True to return a :class:`requests.Request <Request>`
object instead of sending request. Defaults to ``False``.
:param \*\*get_item_kwargs: (optional) Arguments that ``get_item`` takes.
:returns: :class:`requests.Response` object or :class:`requests.Request` object if
debug is ``True``. | ### Input:
Modify the metadata of an existing item on Archive.org.
:type identifier: str
:param identifier: The globally unique Archive.org identifier for a given item.
:type metadata: dict
:param metadata: Metadata used to update the item.
:type target: str
:param target: (optional) The metadata target to update. Defaults to `metadata`.
:type append: bool
:param append: (optional) set to True to append metadata values to current values
rather than replacing. Defaults to ``False``.
:type append_list: bool
:param append_list: (optional) Append values to an existing multi-value
metadata field. No duplicate values will be added.
:type priority: int
:param priority: (optional) Set task priority.
:type access_key: str
:param access_key: (optional) IA-S3 access_key to use when making the given request.
:type secret_key: str
:param secret_key: (optional) IA-S3 secret_key to use when making the given request.
:type debug: bool
:param debug: (optional) set to True to return a :class:`requests.Request <Request>`
object instead of sending request. Defaults to ``False``.
:param \*\*get_item_kwargs: (optional) Arguments that ``get_item`` takes.
:returns: :class:`requests.Response` object or :class:`requests.Request` object if
debug is ``True``.
### Response:
def modify_metadata(identifier, metadata,
target=None,
append=None,
append_list=None,
priority=None,
access_key=None,
secret_key=None,
debug=None,
request_kwargs=None,
**get_item_kwargs):
item = get_item(identifier, **get_item_kwargs)
return item.modify_metadata(metadata,
target=target,
append=append,
append_list=append_list,
priority=priority,
access_key=access_key,
secret_key=secret_key,
debug=debug,
request_kwargs=request_kwargs) |
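A hedged usage sketch: ``modify_metadata`` is exported at the top level of the ``internetarchive`` package, and the call below needs configured IA-S3 credentials and a live connection; the identifier and metadata are placeholders.

from internetarchive import modify_metadata

r = modify_metadata('my-test-item',                    # hypothetical identifier
                    metadata={'title': 'A new title'},
                    append_list=True)
print(r.status_code)  # a requests.Response unless debug=True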
def netmiko_config(*config_commands, **kwargs):
    netmiko_kwargs = netmiko_args()
    kwargs.update(netmiko_kwargs)
    # execution-module key below is assumed from the salt napalm/netmiko sources
    return __salt__['netmiko.send_config'](config_commands=config_commands,
                                           **kwargs) | .. versionadded:: 2019.2.0
Load a list of configuration commands on the remote device, via Netmiko.
.. warning::
Please remember that ``netmiko`` does not have any rollback safeguards
and any configuration change will be directly loaded into the running
config if the platform doesn't have the concept of ``candidate`` config.
On Junos, or other platforms that have this capability, the changes will
not be loaded into the running config, and the user must set the
``commit`` argument to ``True`` to transfer the changes from the
candidate into the running config before exiting.
config_commands
A list of configuration commands to be loaded on the remote device.
config_file
Read the configuration commands from a file. The file can equally be a
template that can be rendered using the engine of choice (see
``template_engine``).
This can be specified using the absolute path to the file, or using one
of the following URL schemes:
- ``salt://``, to fetch the file from the Salt fileserver.
- ``http://`` or ``https://``
- ``ftp://``
- ``s3://``
- ``swift://``
exit_config_mode: ``True``
Determines whether or not to exit config mode after complete.
delay_factor: ``1``
Factor to adjust delays.
max_loops: ``150``
Controls wait time in conjunction with delay_factor (default: ``150``).
strip_prompt: ``False``
Determines whether or not to strip the prompt (default: ``False``).
strip_command: ``False``
Determines whether or not to strip the command (default: ``False``).
config_mode_command
The command to enter into config mode.
commit: ``False``
Commit the configuration changes before exiting the config mode. This
option is by default disabled, as many platforms don't have this
capability natively.
CLI Example:
.. code-block:: bash
salt '*' napalm.netmiko_config 'set system ntp peer 1.2.3.4' commit=True
salt '*' napalm.netmiko_config https://bit.ly/2sgljCB | ### Input:
.. versionadded:: 2019.2.0
Load a list of configuration commands on the remote device, via Netmiko.
.. warning::
Please remember that ``netmiko`` does not have any rollback safeguards
and any configuration change will be directly loaded into the running
config if the platform doesn't have the concept of ``candidate`` config.
On Junos, or other platforms that have this capability, the changes will
not be loaded into the running config, and the user must set the
``commit`` argument to ``True`` to transfer the changes from the
candidate into the running config before exiting.
config_commands
A list of configuration commands to be loaded on the remote device.
config_file
Read the configuration commands from a file. The file can equally be a
template that can be rendered using the engine of choice (see
``template_engine``).
This can be specified using the absolute path to the file, or using one
of the following URL schemes:
- ``salt://``, to fetch the file from the Salt fileserver.
- ``http://`` or ``https://``
- ``ftp://``
- ``s3://``
- ``swift://``
exit_config_mode: ``True``
Determines whether or not to exit config mode after complete.
delay_factor: ``1``
Factor to adjust delays.
max_loops: ``150``
Controls wait time in conjunction with delay_factor (default: ``150``).
strip_prompt: ``False``
Determines whether or not to strip the prompt (default: ``False``).
strip_command: ``False``
Determines whether or not to strip the command (default: ``False``).
config_mode_command
The command to enter into config mode.
commit: ``False``
Commit the configuration changes before exiting the config mode. This
option is by default disabled, as many platforms don't have this
capability natively.
CLI Example:
.. code-block:: bash
salt '*' napalm.netmiko_config 'set system ntp peer 1.2.3.4' commit=True
salt '*' napalm.netmiko_config https://bit.ly/2sgljCB
### Response:
def netmiko_config(*config_commands, **kwargs):
    netmiko_kwargs = netmiko_args()
    kwargs.update(netmiko_kwargs)
    # execution-module key below is assumed from the salt napalm/netmiko sources
    return __salt__['netmiko.send_config'](config_commands=config_commands,
                                           **kwargs) |
def deferToGreenlet(*args, **kwargs):
from twisted.internet import reactor
assert reactor.greenlet == getcurrent(), "must invoke this in the reactor greenlet"
return deferToGreenletPool(reactor, reactor.getGreenletPool(), *args, **kwargs) | Call function using a greenlet and return the result as a Deferred | ### Input:
Call function using a greenlet and return the result as a Deferred
### Response:
def deferToGreenlet(*args, **kwargs):
from twisted.internet import reactor
assert reactor.greenlet == getcurrent(), "must invoke this in the reactor greenlet"
return deferToGreenletPool(reactor, reactor.getGreenletPool(), *args, **kwargs) |
def tokenize(self, string):
it = colorise.compat.ifilter(None, self._pattern.finditer(string))
try:
t = colorise.compat.next(it)
except StopIteration:
yield string, False
return
    pos, buf, lm, escapeflag = -1, '', -1, False
if t.start() > 0:
yield string[:t.start()], False
pos = t.start()
it = itertools.chain([t], it)
for m in it:
start = m.start()
        e, s = m.group(2) or '', m.group(3)
escaped = e.count(self._ESCAPE) % 2 != 0
if escaped:
buf += string[pos:m.end(2)-1] + s
escapeflag = True
else:
buf += string[pos:m.start(3)]
if buf:
yield buf, escapeflag
                buf = ''
escapeflag = False
if lm == start:
                yield '', False
yield s, False
lm = m.end()
pos = m.end()
if buf:
yield buf, escapeflag
escapeflag = False
if pos < len(string):
yield string[pos:], False | Tokenize a string and return an iterator over its tokens. | ### Input:
Tokenize a string and return an iterator over its tokens.
### Response:
def tokenize(self, string):
it = colorise.compat.ifilter(None, self._pattern.finditer(string))
try:
t = colorise.compat.next(it)
except StopIteration:
yield string, False
return
    pos, buf, lm, escapeflag = -1, '', -1, False
if t.start() > 0:
yield string[:t.start()], False
pos = t.start()
it = itertools.chain([t], it)
for m in it:
start = m.start()
        e, s = m.group(2) or '', m.group(3)
escaped = e.count(self._ESCAPE) % 2 != 0
if escaped:
buf += string[pos:m.end(2)-1] + s
escapeflag = True
else:
buf += string[pos:m.start(3)]
if buf:
yield buf, escapeflag
                buf = ''
escapeflag = False
if lm == start:
                yield '', False
yield s, False
lm = m.end()
pos = m.end()
if buf:
yield buf, escapeflag
escapeflag = False
if pos < len(string):
yield string[pos:], False |
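The generator interleaves literal text with matched format specifiers, treating a specifier as escaped when an odd run of escape characters precedes it. A simplified, self-contained sketch of that idea (the regex stands in for colorise's actual ``_pattern`` and is an assumption):

import re

_PAT = re.compile(r'(\\*)(\{[^}]*\})')  # group 1: escape run, group 2: spec

def simple_tokenize(s):
    pos = 0
    for m in _PAT.finditer(s):
        if s[pos:m.start()]:
            yield s[pos:m.start()], False   # literal text between matches
        escaped = len(m.group(1)) % 2 != 0  # odd backslash run => escaped
        yield m.group(2), escaped
        pos = m.end()
    if s[pos:]:
        yield s[pos:], False

print(list(simple_tokenize(r'a {fg=red} b \{literal}')))
# -> [('a ', False), ('{fg=red}', False), (' b ', False), ('{literal}', True)]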
def set_status_return_level(self, srl_for_id, **kwargs):
    convert = kwargs['convert'] if 'convert' in kwargs else self._convert
if convert:
        srl_for_id = dict(zip(srl_for_id.keys(),
                              # level names below are assumed from the pypot sources
                              [('never', 'read', 'always').index(s) for s in srl_for_id.values()]))
self._set_status_return_level(srl_for_id, convert=False) | Sets status return level to the specified motors. | ### Input:
Sets status return level to the specified motors.
### Response:
def set_status_return_level(self, srl_for_id, **kwargs):
    convert = kwargs['convert'] if 'convert' in kwargs else self._convert
if convert:
        srl_for_id = dict(zip(srl_for_id.keys(),
                              # level names below are assumed from the pypot sources
                              [('never', 'read', 'always').index(s) for s in srl_for_id.values()]))
self._set_status_return_level(srl_for_id, convert=False) |
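A hedged usage sketch in the pypot style (serial port and motor ids are placeholders). With conversion enabled, the human-readable levels map onto the Dynamixel codes 0-2 via their tuple index:

import pypot.dynamixel

dxl = pypot.dynamixel.DxlIO('/dev/ttyUSB0')            # hypothetical port
dxl.set_status_return_level({1: 'always', 2: 'read'})  # -> codes {1: 2, 2: 1}
dxl.close()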
def parent_folder(path, base=None):
return path and os.path.dirname(resolved_path(path, base=base)) | :param str|None path: Path to file or folder
:param str|None base: Base folder to use for relative paths (default: current working dir)
:return str: Absolute path of parent folder of 'path' | ### Input:
:param str|None path: Path to file or folder
:param str|None base: Base folder to use for relative paths (default: current working dir)
:return str: Absolute path of parent folder of 'path'
### Response:
def parent_folder(path, base=None):
return path and os.path.dirname(resolved_path(path, base=base)) |
def postComponents(self, name, status, **kwargs):
    kwargs['name'] = name
    kwargs['status'] = status
    # endpoint path is assumed from the Cachet API ('/components')
    return self.__postRequest('/components', kwargs) | Create a new component.
:param name: Name of the component
:param status: Status of the component; 1-4
:param description: (optional) Description of the component
:param link: (optional) A hyperlink to the component
:param order: (optional) Order of the component
:param group_id: (optional) The group id that the component is within
:param enabled: (optional)
:return: :class:`Response <Response>` object
:rtype: requests.Response | ### Input:
Create a new component.
:param name: Name of the component
:param status: Status of the component; 1-4
:param description: (optional) Description of the component
:param link: (optional) A hyperlink to the component
:param order: (optional) Order of the component
:param group_id: (optional) The group id that the component is within
:param enabled: (optional)
:return: :class:`Response <Response>` object
:rtype: requests.Response
### Response:
def postComponents(self, name, status, **kwargs):
    kwargs['name'] = name
    kwargs['status'] = status
    # endpoint path is assumed from the Cachet API ('/components')
    return self.__postRequest('/components', kwargs) |
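To see the kwargs-merging shape without a live status page, here is a runnable stub mirroring the method above (the '/components' path and Cachet's status codes, where 1 means Operational, are assumptions):

class FakeCachet:
    # minimal stand-in for the real client, illustration only
    def __postRequest(self, path, data):
        return path, data

    def postComponents(self, name, status, **kwargs):
        kwargs['name'] = name
        kwargs['status'] = status
        return self.__postRequest('/components', kwargs)

print(FakeCachet().postComponents('Public API', 1, group_id=2))
# -> ('/components', {'group_id': 2, 'name': 'Public API', 'status': 1})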
def goback(self, days=1):
for i in xrange(days):
self.raw_data.pop()
self.data_date.pop()
self.stock_range.pop()
self.stock_vol.pop()
self.stock_open.pop()
self.stock_h.pop()
self.stock_l.pop() | Go back days
Delete the most recent days of data.
``days`` is how many days to remove (how far back to step).
Go back days
Delete the most recent days of data.
``days`` is how many days to remove (how far back to step).
### Response:
def goback(self, days=1):
for i in xrange(days):
self.raw_data.pop()
self.data_date.pop()
self.stock_range.pop()
self.stock_vol.pop()
self.stock_open.pop()
self.stock_h.pop()
self.stock_l.pop() |
def check_is_undeclared(self, id_, lineno, classname='',
scope=None, show_error=False):
result = self.get_entry(id_, scope)
if result is None or not result.declared:
return True
if scope is None:
scope = self.current_scope
if show_error:
        # message literal was stripped in extraction; the wording below is a
        # plausible reconstruction matching the four format arguments
        syntax_error(lineno,
                     "Duplicated %s '%s' (previous one at %s:%i)" %
                     (classname, id_, self.table[scope][id_].filename,
                      self.table[scope][id_].lineno))
return False | The reverse of the above.
Check the given identifier is not already declared. Returns True
if OK, False otherwise. | ### Input:
The reverse of the above.
Check the given identifier is not already declared. Returns True
if OK, False otherwise.
### Response:
def check_is_undeclared(self, id_, lineno, classname='',
scope=None, show_error=False):
result = self.get_entry(id_, scope)
if result is None or not result.declared:
return True
if scope is None:
scope = self.current_scope
if show_error:
        # message literal was stripped in extraction; the wording below is a
        # plausible reconstruction matching the four format arguments
        syntax_error(lineno,
                     "Duplicated %s '%s' (previous one at %s:%i)" %
                     (classname, id_, self.table[scope][id_].filename,
                      self.table[scope][id_].lineno))
return False |
def find_chimera_indices(G):
try:
nlist = sorted(G.nodes)
except TypeError:
nlist = G.nodes()
n_nodes = len(nlist)
chimera_indices = {}
coloring = color(G)
if coloring[nlist[0]] == 1:
coloring = {v: 1 - coloring[v] for v in coloring}
dia = diameter(G)
if dia == 2:
shore_indices = [0, 0]
for v in nlist:
u = coloring[v]
chimera_indices[v] = (0, 0, u, shore_indices[u])
shore_indices[u] += 1
return chimera_indices
    # exception message was stripped in extraction; the wording below is a plausible reconstruction
    raise Exception('chimera indices are not implemented for multi-tile Chimera graphs') | Attempts to determine the Chimera indices of the nodes in graph G.
See the `chimera_graph()` function for a definition of a Chimera graph and Chimera
indices.
Parameters
----------
G : NetworkX graph
Should be a single-tile Chimera graph.
Returns
-------
chimera_indices : dict
A dict of the form {node: (i, j, u, k), ...} where (i, j, u, k)
is a 4-tuple of integer Chimera indices.
Examples
--------
>>> G = dnx.chimera_graph(1, 1, 4)
>>> chimera_indices = dnx.find_chimera_indices(G)
>>> G = nx.Graph()
>>> G.add_edges_from([(0, 2), (1, 2), (1, 3), (0, 3)])
>>> chimera_indices = dnx.find_chimera_indices(G)
>>> nx.set_node_attributes(G, chimera_indices, 'chimera_index') | ### Input:
Attempts to determine the Chimera indices of the nodes in graph G.
See the `chimera_graph()` function for a definition of a Chimera graph and Chimera
indices.
Parameters
----------
G : NetworkX graph
Should be a single-tile Chimera graph.
Returns
-------
chimera_indices : dict
A dict of the form {node: (i, j, u, k), ...} where (i, j, u, k)
is a 4-tuple of integer Chimera indices.
Examples
--------
>>> G = dnx.chimera_graph(1, 1, 4)
>>> chimera_indices = dnx.find_chimera_indices(G)
>>> G = nx.Graph()
>>> G.add_edges_from([(0, 2), (1, 2), (1, 3), (0, 3)])
>>> chimera_indices = dnx.find_chimera_indices(G)
>>> nx.set_node_attributes(G, chimera_indices, 'chimera_index')
### Response:
def find_chimera_indices(G):
try:
nlist = sorted(G.nodes)
except TypeError:
nlist = G.nodes()
n_nodes = len(nlist)
chimera_indices = {}
coloring = color(G)
if coloring[nlist[0]] == 1:
coloring = {v: 1 - coloring[v] for v in coloring}
dia = diameter(G)
if dia == 2:
shore_indices = [0, 0]
for v in nlist:
u = coloring[v]
chimera_indices[v] = (0, 0, u, shore_indices[u])
shore_indices[u] += 1
return chimera_indices
    # exception message was stripped in extraction; the wording below is a plausible reconstruction
    raise Exception('chimera indices are not implemented for multi-tile Chimera graphs') |
def append(self, value):
if self.asserted:
raise RuntimeError("Fact already asserted")
self._multifield.append(value) | Append an element to the fact. | ### Input:
Append an element to the fact.
### Response:
def append(self, value):
if self.asserted:
raise RuntimeError("Fact already asserted")
self._multifield.append(value) |
def klm(p, q):
p, q = flatten(p), flatten(q)
return max(abs(p * np.nan_to_num(np.log(p / q)))) | Compute the KLM divergence. | ### Input:
Compute the KLM divergence.
### Response:
def klm(p, q):
p, q = flatten(p), flatten(q)
return max(abs(p * np.nan_to_num(np.log(p / q)))) |
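In symbols, assuming ``flatten`` returns 1-D arrays and ``np.nan_to_num`` zeroes the indeterminate 0*log(0/q) terms, the function computes

\mathrm{klm}(p, q) = \max_i \left| \, p_i \log \frac{p_i}{q_i} \, \right|

i.e. the largest absolute pointwise contribution to the KL sum, rather than the sum itself.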
def do_print_inbox_count(parser, token):
bits = token.contents.split()
if len(bits) > 1:
if len(bits) != 3:
raise TemplateSyntaxError("inbox_count tag takes either no arguments or exactly two arguments")
        if bits[1] != 'as':
            raise TemplateSyntaxError("first argument to inbox_count tag must be 'as'")
return InboxOutput(bits[2])
else:
return InboxOutput() | A templatetag to show the unread-count for a logged in user.
Returns the number of unread messages in the user's inbox.
Usage::
{% load inbox %}
{% inbox_count %}
{# or assign the value to a variable: #}
{% inbox_count as my_var %}
{{ my_var }} | ### Input:
A templatetag to show the unread-count for a logged in user.
Returns the number of unread messages in the user's inbox.
Usage::
{% load inbox %}
{% inbox_count %}
{# or assign the value to a variable: #}
{% inbox_count as my_var %}
{{ my_var }}
### Response:
def do_print_inbox_count(parser, token):
bits = token.contents.split()
if len(bits) > 1:
if len(bits) != 3:
raise TemplateSyntaxError("inbox_count tag takes either no arguments or exactly two arguments")
        if bits[1] != 'as':
            raise TemplateSyntaxError("first argument to inbox_count tag must be 'as'")
return InboxOutput(bits[2])
else:
return InboxOutput() |
def signature(name: str) -> Optional[Tuple]:
return url_signature(name) if is_url(name) else file_signature(name) if is_file(name) else None | Return the file or URL signature for name
:param name:
:return: | ### Input:
Return the file or URL signature for name
:param name:
:return:
### Response:
def signature(name: str) -> Optional[Tuple]:
return url_signature(name) if is_url(name) else file_signature(name) if is_file(name) else None |
def dataframe(self, measure, p_dim, s_dim=None, filtered_dims={}, unstack=False, df_class=None, add_code=False):
measure = self.table.column(measure)
p_dim = self.table.column(p_dim)
assert measure
assert p_dim
if s_dim:
s_dim = self.table.column(s_dim)
from six import text_type
def maybe_quote(v):
from six import string_types
if isinstance(v, string_types):
            return "'{}'".format(v)
else:
return v
    all_dims = [p_dim.name]
if s_dim:
all_dims.append(s_dim.name)
if filtered_dims:
all_dims += filtered_dims.keys()
all_dims = [text_type(c) for c in all_dims]
primary_dims = [text_type(c.name) for c in self.primary_dimensions]
if set(all_dims) != set(primary_dims):
raise ValueError("The primary, secondary and filtered dimensions must cover all dimensions" +
" {} != {}".format(sorted(all_dims), sorted(primary_dims)))
columns = []
p_dim_label = None
s_dim_label = None
if p_dim.label:
if p_dim.type_is_gvid:
columns.append(p_dim.name)
p_dim = p_dim_label = p_dim.label
columns.append(p_dim_label.name)
else:
columns.append(p_dim.name)
if s_dim:
if s_dim.label:
s_dim = s_dim_label = s_dim.label
columns.append(s_dim_label.name)
else:
columns.append(s_dim.name)
columns.append(measure.name)
if filtered_dims:
        code = ' and '.join("row.{} == {}".format(k, maybe_quote(v))
                            for k, v in filtered_dims.items())
        # joiner and lambda template are assumptions; the literals were stripped
        predicate = eval('lambda row: {}'.format(code))
else:
predicate = lambda row: True
df = self.analysis.dataframe(predicate, columns=columns, df_class=df_class)
if unstack:
if s_dim:
df = df.set_index([p_dim.name, s_dim.name])
df = df.unstack()
df.columns = df.columns.get_level_values(1)
else:
df = df.set_index(p_dim.name)
df.reset_index()
return df | Yield rows in a reduced format, with one dimension as an index, one measure column per
secondary dimension, and all other dimensions filtered.
:param measure: The column names of one or more measures
:param p_dim: The primary dimension. This will be the index of the dataframe.
:param s_dim: a secondary dimension. The returned frame will be unstacked on this dimension
:param unstack:
:param filtered_dims: A dict of dimension columns names that are filtered, mapped to the dimension value
to select.
:param add_code: When substituting a label for a column, also add the code value.
:return: | ### Input:
Yield rows in a reduced format, with one dimension as an index, one measure column per
secondary dimension, and all other dimensions filtered.
:param measure: The column names of one or more measures
:param p_dim: The primary dimension. This will be the index of the dataframe.
:param s_dim: a secondary dimension. The returned frame will be unstacked on this dimension
:param unstack:
:param filtered_dims: A dict of dimension columns names that are filtered, mapped to the dimension value
to select.
:param add_code: When substituting a label for a column, also add the code value.
:return:
### Response:
def dataframe(self, measure, p_dim, s_dim=None, filtered_dims={}, unstack=False, df_class=None, add_code=False):
measure = self.table.column(measure)
p_dim = self.table.column(p_dim)
assert measure
assert p_dim
if s_dim:
s_dim = self.table.column(s_dim)
from six import text_type
def maybe_quote(v):
from six import string_types
if isinstance(v, string_types):
            return "'{}'".format(v)
else:
return v
    all_dims = [p_dim.name]
if s_dim:
all_dims.append(s_dim.name)
if filtered_dims:
all_dims += filtered_dims.keys()
all_dims = [text_type(c) for c in all_dims]
primary_dims = [text_type(c.name) for c in self.primary_dimensions]
if set(all_dims) != set(primary_dims):
raise ValueError("The primary, secondary and filtered dimensions must cover all dimensions" +
" {} != {}".format(sorted(all_dims), sorted(primary_dims)))
columns = []
p_dim_label = None
s_dim_label = None
if p_dim.label:
if p_dim.type_is_gvid:
columns.append(p_dim.name)
p_dim = p_dim_label = p_dim.label
columns.append(p_dim_label.name)
else:
columns.append(p_dim.name)
if s_dim:
if s_dim.label:
s_dim = s_dim_label = s_dim.label
columns.append(s_dim_label.name)
else:
columns.append(s_dim.name)
columns.append(measure.name)
if filtered_dims:
        code = ' and '.join("row.{} == {}".format(k, maybe_quote(v))
                            for k, v in filtered_dims.items())
        # joiner and lambda template are assumptions; the literals were stripped
        predicate = eval('lambda row: {}'.format(code))
else:
predicate = lambda row: True
df = self.analysis.dataframe(predicate, columns=columns, df_class=df_class)
if unstack:
if s_dim:
df = df.set_index([p_dim.name, s_dim.name])
df = df.unstack()
df.columns = df.columns.get_level_values(1)
else:
df = df.set_index(p_dim.name)
df.reset_index()
return df |
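The unstack branch is the core reshaping step: index on (primary, secondary), pivot the secondary dimension into columns, then drop the measure level from the column index. A runnable pandas-only sketch of that mechanics with illustrative data:

import pandas as pd

df = pd.DataFrame({'county': ['A', 'A', 'B', 'B'],
                   'year':   [2010, 2011, 2010, 2011],
                   'pop':    [10, 11, 20, 22]})
out = df.set_index(['county', 'year']).unstack()
out.columns = out.columns.get_level_values(1)  # keep only the year level
print(out)
#         2010  2011
# county
# A         10    11
# B         20    22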
def search(cls, query, search_opts=None):
if search_opts is None:
search_opts = {}
xmlrpc = XMLRPCConnection()
try:
search_result = xmlrpc.connection.search_pool(
{
                'query': query,  # dict keys assumed from pynipap's XML-RPC API
                'search_options': search_opts,
                'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
    result['result'] = []  # key names assumed from the pynipap client
    result['search_options'] = search_result['search_options']
    for pool in search_result['result']:
        p = Pool.from_dict(pool)
        result['result'].append(p)
return result | Search pools.
Maps to the function :py:func:`nipap.backend.Nipap.search_pool` in
the backend. Please see the documentation for the backend function
for information regarding input arguments and return values. | ### Input:
Search pools.
Maps to the function :py:func:`nipap.backend.Nipap.search_pool` in
the backend. Please see the documentation for the backend function
for information regarding input arguments and return values.
### Response:
def search(cls, query, search_opts=None):
if search_opts is None:
search_opts = {}
xmlrpc = XMLRPCConnection()
try:
search_result = xmlrpc.connection.search_pool(
{
                'query': query,  # dict keys assumed from pynipap's XML-RPC API
                'search_options': search_opts,
                'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
    result['result'] = []  # key names assumed from the pynipap client
    result['search_options'] = search_result['search_options']
    for pool in search_result['result']:
        p = Pool.from_dict(pool)
        result['result'].append(p)
return result |
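A hedged client-side sketch (needs a reachable NIPAP server; the URI is a placeholder). pynipap queries are dicts of ``operator``/``val1``/``val2``, nestable for boolean combinations:

import pynipap
from pynipap import Pool

pynipap.xmlrpc_uri = 'http://user:pass@nipap.example.com:1337/XMLRPC'
res = Pool.search({'operator': 'regex_match',
                   'val1': 'name',
                   'val2': 'customer-.*'})
for pool in res['result']:
    print(pool.name)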
def center(self):
try:
return self._center
except AttributeError:
pass
self._center = Point()
return self._center | Center point of the ellipse, equidistant from foci, Point class.\n
Defaults to the origin. | ### Input:
Center point of the ellipse, equidistant from foci, Point class.\n
Defaults to the origin.
### Response:
def center(self):
try:
return self._center
except AttributeError:
pass
self._center = Point()
return self._center |