code stringlengths 70 11.9k | docstring stringlengths 4 7.08k | text stringlengths 128 15k |
|---|---|---|
def _set_edge_loop_detection(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=edge_loop_detection.edge_loop_detection, is_container=, presence=True, yang_name="edge-loop-detection", rest_name="loop-detection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: None, u: u, u: None, u: None, u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__edge_loop_detection = t
if hasattr(self, ):
self._set() | Setter method for edge_loop_detection, mapped from YANG variable /protocol/edge_loop_detection (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_edge_loop_detection is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_edge_loop_detection() directly. | ### Input:
Setter method for edge_loop_detection, mapped from YANG variable /protocol/edge_loop_detection (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_edge_loop_detection is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_edge_loop_detection() directly.
### Response:
def _set_edge_loop_detection(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=edge_loop_detection.edge_loop_detection, is_container=, presence=True, yang_name="edge-loop-detection", rest_name="loop-detection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: None, u: u, u: None, u: None, u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__edge_loop_detection = t
if hasattr(self, ):
self._set() |
def get_tweepy_auth(twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret):
auth = tweepy.OAuthHandler(twitter_api_key, twitter_api_secret)
auth.set_access_token(twitter_access_token, twitter_access_token_secret)
return auth | Make a tweepy auth object | ### Input:
Make a tweepy auth object
### Response:
def get_tweepy_auth(twitter_api_key,
twitter_api_secret,
twitter_access_token,
twitter_access_token_secret):
auth = tweepy.OAuthHandler(twitter_api_key, twitter_api_secret)
auth.set_access_token(twitter_access_token, twitter_access_token_secret)
return auth |
def __handle_dropped_content(self, event):
if not event.mimeData().hasUrls():
return
urls = event.mimeData().urls()
self.__engine.start_processing("Loading Files ...", len(urls))
for url in event.mimeData().urls():
path = foundations.strings.to_string(url.path())
LOGGER.debug("> Handling dropped file.".format(path))
path = (platform.system() == "Windows" or platform.system() == "Microsoft") and \
re.search(r"^\/[A-Z]:", path) and path[1:] or path
self.load_path(path) and self.restore_development_layout()
self.__engine.step_processing()
self.__engine.stop_processing() | Handles dopped content event.
:param event: Content dropped event.
:type event: QEvent | ### Input:
Handles dopped content event.
:param event: Content dropped event.
:type event: QEvent
### Response:
def __handle_dropped_content(self, event):
if not event.mimeData().hasUrls():
return
urls = event.mimeData().urls()
self.__engine.start_processing("Loading Files ...", len(urls))
for url in event.mimeData().urls():
path = foundations.strings.to_string(url.path())
LOGGER.debug("> Handling dropped file.".format(path))
path = (platform.system() == "Windows" or platform.system() == "Microsoft") and \
re.search(r"^\/[A-Z]:", path) and path[1:] or path
self.load_path(path) and self.restore_development_layout()
self.__engine.step_processing()
self.__engine.stop_processing() |
def finalize(self):
for i in range(1, len(self._local_counts)):
self.counts[i].append(self._local_counts[i])
self.counts.pop(0)
for i in range(len(self.counts)):
self.counts[i] = np.array(self.counts[i]) | This will add the very last document to counts. We also get rid of counts[0] since that
represents document level which doesnt come under anything else. We also convert all count
values to numpy arrays so that stats can be computed easily. | ### Input:
This will add the very last document to counts. We also get rid of counts[0] since that
represents document level which doesnt come under anything else. We also convert all count
values to numpy arrays so that stats can be computed easily.
### Response:
def finalize(self):
for i in range(1, len(self._local_counts)):
self.counts[i].append(self._local_counts[i])
self.counts.pop(0)
for i in range(len(self.counts)):
self.counts[i] = np.array(self.counts[i]) |
def from_events(self, instance, ev_args, ctx):
attrs = ev_args[2]
if attrs and self.attr_policy == UnknownAttrPolicy.FAIL:
raise ValueError("unexpected attribute (at text only node)")
parts = []
while True:
ev_type, *ev_args = yield
if ev_type == "text":
parts.append(ev_args[0])
elif ev_type == "start":
yield from enforce_unknown_child_policy(
self.child_policy,
ev_args)
elif ev_type == "end":
break
joined = "".join(parts)
try:
parsed = self.type_.parse(joined)
except (ValueError, TypeError):
if self.erroneous_as_absent:
return
raise
self._set_from_recv(instance, parsed) | Starting with the element to which the start event information in
`ev_args` belongs, parse text data. If any children are encountered,
:attr:`child_policy` is enforced (see
:class:`UnknownChildPolicy`). Likewise, if the start event contains
attributes, :attr:`attr_policy` is enforced
(c.f. :class:`UnknownAttrPolicy`).
The extracted text is passed through :attr:`type_` and
:attr:`validator` and if it passes, stored in the attribute on the
`instance` with which the property is associated.
This method is suspendable. | ### Input:
Starting with the element to which the start event information in
`ev_args` belongs, parse text data. If any children are encountered,
:attr:`child_policy` is enforced (see
:class:`UnknownChildPolicy`). Likewise, if the start event contains
attributes, :attr:`attr_policy` is enforced
(c.f. :class:`UnknownAttrPolicy`).
The extracted text is passed through :attr:`type_` and
:attr:`validator` and if it passes, stored in the attribute on the
`instance` with which the property is associated.
This method is suspendable.
### Response:
def from_events(self, instance, ev_args, ctx):
attrs = ev_args[2]
if attrs and self.attr_policy == UnknownAttrPolicy.FAIL:
raise ValueError("unexpected attribute (at text only node)")
parts = []
while True:
ev_type, *ev_args = yield
if ev_type == "text":
parts.append(ev_args[0])
elif ev_type == "start":
yield from enforce_unknown_child_policy(
self.child_policy,
ev_args)
elif ev_type == "end":
break
joined = "".join(parts)
try:
parsed = self.type_.parse(joined)
except (ValueError, TypeError):
if self.erroneous_as_absent:
return
raise
self._set_from_recv(instance, parsed) |
def gate(self, gate, ID=None, apply_now=True):
def func(well):
return well.gate(gate, apply_now=apply_now)
return self.apply(func, output_format=, ID=ID) | Applies the gate to each Measurement in the Collection, returning a new Collection with gated data.
{_containers_held_in_memory_warning}
Parameters
----------
gate : {_gate_available_classes}
ID : [ str, numeric, None]
New ID to be given to the output. If None, the ID of the current collection will be used. | ### Input:
Applies the gate to each Measurement in the Collection, returning a new Collection with gated data.
{_containers_held_in_memory_warning}
Parameters
----------
gate : {_gate_available_classes}
ID : [ str, numeric, None]
New ID to be given to the output. If None, the ID of the current collection will be used.
### Response:
def gate(self, gate, ID=None, apply_now=True):
def func(well):
return well.gate(gate, apply_now=apply_now)
return self.apply(func, output_format=, ID=ID) |
def log_level(self, subsystem, level, **kwargs):
r
args = (subsystem, level)
return self._client.request(, args,
decoder=, **kwargs) | r"""Changes the logging output of a running daemon.
.. code-block:: python
>>> c.log_level("path", "info")
{'Message': "Changed log level of 'path' to 'info'\n"}
Parameters
----------
subsystem : str
The subsystem logging identifier (Use ``"all"`` for all subsystems)
level : str
The desired logging level. Must be one of:
* ``"debug"``
* ``"info"``
* ``"warning"``
* ``"error"``
* ``"fatal"``
* ``"panic"``
Returns
-------
dict : Status message | ### Input:
r"""Changes the logging output of a running daemon.
.. code-block:: python
>>> c.log_level("path", "info")
{'Message': "Changed log level of 'path' to 'info'\n"}
Parameters
----------
subsystem : str
The subsystem logging identifier (Use ``"all"`` for all subsystems)
level : str
The desired logging level. Must be one of:
* ``"debug"``
* ``"info"``
* ``"warning"``
* ``"error"``
* ``"fatal"``
* ``"panic"``
Returns
-------
dict : Status message
### Response:
def log_level(self, subsystem, level, **kwargs):
r
args = (subsystem, level)
return self._client.request(, args,
decoder=, **kwargs) |
def gatk_indel_realignment_cl(runner, align_bam, ref_file, intervals,
tmp_dir, region=None, deep_coverage=False,
known_vrns=None):
if not known_vrns:
known_vrns = {}
params = ["-T", "IndelRealigner",
"-I", align_bam,
"-R", ref_file,
"-targetIntervals", intervals,
]
if region:
params += ["-L", region]
if known_vrns.get("train_indels"):
params += ["--knownAlleles", known_vrns["train_indels"]]
if deep_coverage:
params += ["--maxReadsInMemory", "300000",
"--maxReadsForRealignment", str(int(5e5)),
"--maxReadsForConsensuses", "500",
"--maxConsensuses", "100"]
return runner.cl_gatk(params, tmp_dir) | Prepare input arguments for GATK indel realignment. | ### Input:
Prepare input arguments for GATK indel realignment.
### Response:
def gatk_indel_realignment_cl(runner, align_bam, ref_file, intervals,
tmp_dir, region=None, deep_coverage=False,
known_vrns=None):
if not known_vrns:
known_vrns = {}
params = ["-T", "IndelRealigner",
"-I", align_bam,
"-R", ref_file,
"-targetIntervals", intervals,
]
if region:
params += ["-L", region]
if known_vrns.get("train_indels"):
params += ["--knownAlleles", known_vrns["train_indels"]]
if deep_coverage:
params += ["--maxReadsInMemory", "300000",
"--maxReadsForRealignment", str(int(5e5)),
"--maxReadsForConsensuses", "500",
"--maxConsensuses", "100"]
return runner.cl_gatk(params, tmp_dir) |
def starts_expanded(name):
if name is :
return True
l = name.split(dir_sep())
if len(l) > len(_initial_cwd):
return False
if l != _initial_cwd[:len(l)]:
return False
return True | Return True if directory is a parent of initial cwd. | ### Input:
Return True if directory is a parent of initial cwd.
### Response:
def starts_expanded(name):
if name is :
return True
l = name.split(dir_sep())
if len(l) > len(_initial_cwd):
return False
if l != _initial_cwd[:len(l)]:
return False
return True |
def writeRoot(self, root):
output = self.header
wrapped_root = self.wrapRoot(root)
self.computeOffsets(wrapped_root, asReference=True, isRoot=True)
self.trailer = self.trailer._replace(**{:self.intSize(len(self.computedUniques))})
self.writeObjectReference(wrapped_root, output)
output = self.writeObject(wrapped_root, output, setReferencePosition=True)
self.trailer = self.trailer._replace(**{
:self.intSize(len(output)),
:len(self.computedUniques),
:len(output),
:0
})
output = self.writeOffsetTable(output)
output += pack(, *self.trailer)
self.file.write(output) | Strategy is:
- write header
- wrap root object so everything is hashable
- compute size of objects which will be written
- need to do this in order to know how large the object refs
will be in the list/dict/set reference lists
- write objects
- keep objects in writtenReferences
- keep positions of object references in referencePositions
- write object references with the length computed previously
- computer object reference length
- write object reference positions
- write trailer | ### Input:
Strategy is:
- write header
- wrap root object so everything is hashable
- compute size of objects which will be written
- need to do this in order to know how large the object refs
will be in the list/dict/set reference lists
- write objects
- keep objects in writtenReferences
- keep positions of object references in referencePositions
- write object references with the length computed previously
- computer object reference length
- write object reference positions
- write trailer
### Response:
def writeRoot(self, root):
output = self.header
wrapped_root = self.wrapRoot(root)
self.computeOffsets(wrapped_root, asReference=True, isRoot=True)
self.trailer = self.trailer._replace(**{:self.intSize(len(self.computedUniques))})
self.writeObjectReference(wrapped_root, output)
output = self.writeObject(wrapped_root, output, setReferencePosition=True)
self.trailer = self.trailer._replace(**{
:self.intSize(len(output)),
:len(self.computedUniques),
:len(output),
:0
})
output = self.writeOffsetTable(output)
output += pack(, *self.trailer)
self.file.write(output) |
def get_match_details(self, match_id=None, **kwargs):
if not in kwargs:
kwargs[] = match_id
url = self.__build_url(urls.GET_MATCH_DETAILS, **kwargs)
req = self.executor(url)
if self.logger:
self.logger.info(.format(url))
if not self.__check_http_err(req.status_code):
return response.build(req, url, self.raw_mode) | Returns a dictionary containing the details for a Dota 2 match
:param match_id: (int, optional)
:return: dictionary of matches, see :doc:`responses </responses>` | ### Input:
Returns a dictionary containing the details for a Dota 2 match
:param match_id: (int, optional)
:return: dictionary of matches, see :doc:`responses </responses>`
### Response:
def get_match_details(self, match_id=None, **kwargs):
if not in kwargs:
kwargs[] = match_id
url = self.__build_url(urls.GET_MATCH_DETAILS, **kwargs)
req = self.executor(url)
if self.logger:
self.logger.info(.format(url))
if not self.__check_http_err(req.status_code):
return response.build(req, url, self.raw_mode) |
def trellis_plot(self,fsize=(6,4)):
branches_from = self.branches
plt.figure(figsize=fsize)
plt.plot(0,0,)
plt.axis([-0.01, 1.01, -(self.Nstates-1)-0.05, 0.05])
for m in range(self.Nstates):
if branches_from.input1[m] == 0:
plt.plot([0, 1],[-branches_from.states1[m], -m],)
plt.plot([0, 1],[-branches_from.states1[m], -m],)
if branches_from.input2[m] == 0:
plt.plot([0, 1],[-branches_from.states2[m], -m],)
plt.plot([0, 1],[-branches_from.states2[m], -m],)
if branches_from.input1[m] == 1:
plt.plot([0, 1],[-branches_from.states1[m], -m],)
plt.plot([0, 1],[-branches_from.states1[m], -m],)
if branches_from.input2[m] == 1:
plt.plot([0, 1],[-branches_from.states2[m], -m],)
plt.plot([0, 1],[-branches_from.states2[m], -m],)
plt.xlabel()
plt.ylabel()
msg = %(self.rate, int(np.ceil(np.log2(self.Nstates)+1)))
plt.title(msg) | Plots a trellis diagram of the possible state transitions.
Parameters
----------
fsize : Plot size for matplotlib.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm.fec_conv import fec_conv
>>> cc = fec_conv()
>>> cc.trellis_plot()
>>> plt.show() | ### Input:
Plots a trellis diagram of the possible state transitions.
Parameters
----------
fsize : Plot size for matplotlib.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm.fec_conv import fec_conv
>>> cc = fec_conv()
>>> cc.trellis_plot()
>>> plt.show()
### Response:
def trellis_plot(self,fsize=(6,4)):
branches_from = self.branches
plt.figure(figsize=fsize)
plt.plot(0,0,)
plt.axis([-0.01, 1.01, -(self.Nstates-1)-0.05, 0.05])
for m in range(self.Nstates):
if branches_from.input1[m] == 0:
plt.plot([0, 1],[-branches_from.states1[m], -m],)
plt.plot([0, 1],[-branches_from.states1[m], -m],)
if branches_from.input2[m] == 0:
plt.plot([0, 1],[-branches_from.states2[m], -m],)
plt.plot([0, 1],[-branches_from.states2[m], -m],)
if branches_from.input1[m] == 1:
plt.plot([0, 1],[-branches_from.states1[m], -m],)
plt.plot([0, 1],[-branches_from.states1[m], -m],)
if branches_from.input2[m] == 1:
plt.plot([0, 1],[-branches_from.states2[m], -m],)
plt.plot([0, 1],[-branches_from.states2[m], -m],)
plt.xlabel()
plt.ylabel()
msg = %(self.rate, int(np.ceil(np.log2(self.Nstates)+1)))
plt.title(msg) |
def remove_namespace(doc, namespace):
ns = u % namespace
nsl = len(ns)
for elem in doc.getiterator():
if elem.tag.startswith(ns):
elem.tag = elem.tag[nsl:]
elem.attrib[] = namespace | Remove namespace in the passed document in place. | ### Input:
Remove namespace in the passed document in place.
### Response:
def remove_namespace(doc, namespace):
ns = u % namespace
nsl = len(ns)
for elem in doc.getiterator():
if elem.tag.startswith(ns):
elem.tag = elem.tag[nsl:]
elem.attrib[] = namespace |
def unique(self):
seen = set()
seen_add = seen.add
return Collection([x for x in self._items if not (x in seen or seen_add(x))]) | Return only unique items from the collection list.
:rtype: Collection | ### Input:
Return only unique items from the collection list.
:rtype: Collection
### Response:
def unique(self):
seen = set()
seen_add = seen.add
return Collection([x for x in self._items if not (x in seen or seen_add(x))]) |
def data_csv(request, measurement_list):
response = HttpResponse(content_type=)
response[] =
writer = csv.writer(response)
writer.writerow(["Animal", "Genotype", "Gender","Assay", "Value","Strain", "Background","Age", "Cage", "Feeding", "Treatment"])
for measurement in measurement_list:
writer.writerow([
measurement.animal,
measurement.animal.Genotype,
measurement.animal.Gender,
measurement.assay,
measurement.values.split()[0],
measurement.animal.Strain,
measurement.animal.Background,
measurement.age(),
measurement.animal.Cage,
measurement.experiment.feeding_state,
measurement.animal.treatment_set.all(),
])
return response | This view generates a csv output of all data for a strain.
For this function to work, you have to provide the filtered set of measurements. | ### Input:
This view generates a csv output of all data for a strain.
For this function to work, you have to provide the filtered set of measurements.
### Response:
def data_csv(request, measurement_list):
response = HttpResponse(content_type=)
response[] =
writer = csv.writer(response)
writer.writerow(["Animal", "Genotype", "Gender","Assay", "Value","Strain", "Background","Age", "Cage", "Feeding", "Treatment"])
for measurement in measurement_list:
writer.writerow([
measurement.animal,
measurement.animal.Genotype,
measurement.animal.Gender,
measurement.assay,
measurement.values.split()[0],
measurement.animal.Strain,
measurement.animal.Background,
measurement.age(),
measurement.animal.Cage,
measurement.experiment.feeding_state,
measurement.animal.treatment_set.all(),
])
return response |
def centers_of_labels(labels):
max_labels = np.max(labels)
if max_labels == 0:
return np.zeros((2,0),int)
result = scind.center_of_mass(np.ones(labels.shape),
labels,
np.arange(max_labels)+1)
result = np.array(result)
if result.ndim == 1:
result.shape = (2,1)
return result
return result.transpose() | Return the i,j coordinates of the centers of a labels matrix
The result returned is an 2 x n numpy array where n is the number
of the label minus one, result[0,x] is the i coordinate of the center
and result[x,1] is the j coordinate of the center.
You can unpack the result as "i,j = centers_of_labels(labels)" | ### Input:
Return the i,j coordinates of the centers of a labels matrix
The result returned is an 2 x n numpy array where n is the number
of the label minus one, result[0,x] is the i coordinate of the center
and result[x,1] is the j coordinate of the center.
You can unpack the result as "i,j = centers_of_labels(labels)"
### Response:
def centers_of_labels(labels):
max_labels = np.max(labels)
if max_labels == 0:
return np.zeros((2,0),int)
result = scind.center_of_mass(np.ones(labels.shape),
labels,
np.arange(max_labels)+1)
result = np.array(result)
if result.ndim == 1:
result.shape = (2,1)
return result
return result.transpose() |
def salt_main():
import salt.cli.salt
if in sys.path:
sys.path.remove()
client = salt.cli.salt.SaltCMD()
_install_signal_handlers(client)
client.run() | Publish commands to the salt system from the command line on the
master. | ### Input:
Publish commands to the salt system from the command line on the
master.
### Response:
def salt_main():
import salt.cli.salt
if in sys.path:
sys.path.remove()
client = salt.cli.salt.SaltCMD()
_install_signal_handlers(client)
client.run() |
def EnablePlugins(self, plugin_includes):
super(SyslogParser, self).EnablePlugins(plugin_includes)
self._plugin_by_reporter = {}
for plugin in self._plugins:
self._plugin_by_reporter[plugin.REPORTER] = plugin | Enables parser plugins.
Args:
plugin_includes (list[str]): names of the plugins to enable, where None
or an empty list represents all plugins. Note that the default plugin
is handled separately. | ### Input:
Enables parser plugins.
Args:
plugin_includes (list[str]): names of the plugins to enable, where None
or an empty list represents all plugins. Note that the default plugin
is handled separately.
### Response:
def EnablePlugins(self, plugin_includes):
super(SyslogParser, self).EnablePlugins(plugin_includes)
self._plugin_by_reporter = {}
for plugin in self._plugins:
self._plugin_by_reporter[plugin.REPORTER] = plugin |
def prepare_headers(self, headers, cache_info=None):
if self.use_advanced and cache_info:
hkeys = headers.keys()
if cache_info.access_time and "If-Modified-Since" not in hkeys:
headers[] = cache_info.access_time.strftime(
"%a, %d %b %Y %H:%M:%S GMT"
)
if cache_info.etag and "If-None-Match" not in hkeys:
headers[] = cache_info.etag
return headers | Prepare headers object for request (add cache information
:param headers: Headers object
:type headers: dict
:param cache_info: Cache information to add
:type cache_info: floscraper.models.CacheInfo
:return: Prepared headers
:rtype: dict | ### Input:
Prepare headers object for request (add cache information
:param headers: Headers object
:type headers: dict
:param cache_info: Cache information to add
:type cache_info: floscraper.models.CacheInfo
:return: Prepared headers
:rtype: dict
### Response:
def prepare_headers(self, headers, cache_info=None):
if self.use_advanced and cache_info:
hkeys = headers.keys()
if cache_info.access_time and "If-Modified-Since" not in hkeys:
headers[] = cache_info.access_time.strftime(
"%a, %d %b %Y %H:%M:%S GMT"
)
if cache_info.etag and "If-None-Match" not in hkeys:
headers[] = cache_info.etag
return headers |
def update_config(config):
update(bigchaindb.config, update_types(config, bigchaindb.config))
bigchaindb.config[] = True | Update bigchaindb.config with whatever is in the provided config dict,
and then set bigchaindb.config['CONFIGURED'] = True
Args:
config (dict): the config dict to read for changes
to the default config | ### Input:
Update bigchaindb.config with whatever is in the provided config dict,
and then set bigchaindb.config['CONFIGURED'] = True
Args:
config (dict): the config dict to read for changes
to the default config
### Response:
def update_config(config):
update(bigchaindb.config, update_types(config, bigchaindb.config))
bigchaindb.config[] = True |
def next_message(self):
msg = self.msg
if msg is None:
self.paused = True
if self.paused:
self.root.after(100, self.next_message)
return
try:
speed = float(self.playback.get())
except:
speed = 0.0
timestamp = getattr(msg, )
now = time.strftime("%H:%M:%S", time.localtime(timestamp))
self.clock.configure(text=now)
if speed == 0.0:
self.root.after(200, self.next_message)
else:
self.root.after(int(1000*(timestamp - self.last_timestamp) / speed), self.next_message)
self.last_timestamp = timestamp
while True:
self.msg = self.mlog.recv_match(condition=args.condition)
if self.msg is None and self.mlog.f.tell() > self.filesize - 10:
self.paused = True
return
if self.msg is not None and self.msg.get_type() != "BAD_DATA":
break
pos = float(self.mlog.f.tell()) / self.filesize
self.slider.set(pos)
self.filepos = self.slider.get()
if msg.get_type() != "BAD_DATA":
for m in self.mout:
m.write(msg.get_msgbuf())
if msg.get_type() == "GPS_RAW":
self.fdm.set(, msg.lat, units=)
self.fdm.set(, msg.lon, units=)
if args.gpsalt:
self.fdm.set(, msg.alt, units=)
if msg.get_type() == "GPS_RAW_INT":
self.fdm.set(, msg.lat/1.0e7, units=)
self.fdm.set(, msg.lon/1.0e7, units=)
if args.gpsalt:
self.fdm.set(, msg.alt/1.0e3, units=)
if msg.get_type() == "VFR_HUD":
if not args.gpsalt:
self.fdm.set(, msg.alt, units=)
self.fdm.set(, 1)
self.fdm.set(, msg.airspeed, units=)
if msg.get_type() == "ATTITUDE":
self.fdm.set(, msg.roll, units=)
self.fdm.set(, msg.pitch, units=)
self.fdm.set(, msg.yaw, units=)
self.fdm.set(, msg.rollspeed, units=)
self.fdm.set(, msg.pitchspeed, units=)
self.fdm.set(, msg.yawspeed, units=)
if msg.get_type() == "RC_CHANNELS_SCALED":
self.fdm.set("right_aileron", msg.chan1_scaled*0.0001)
self.fdm.set("left_aileron", -msg.chan1_scaled*0.0001)
self.fdm.set("rudder", msg.chan4_scaled*0.0001)
self.fdm.set("elevator", msg.chan2_scaled*0.0001)
self.fdm.set(, msg.chan3_scaled*0.01)
if msg.get_type() == :
print("APM: %s" % msg.text)
if msg.get_type() == :
self.flightmode.configure(text=self.mlog.flightmode)
if msg.get_type() == "BAD_DATA":
if mavutil.all_printable(msg.data):
sys.stdout.write(msg.data)
sys.stdout.flush()
if self.fdm.get() != 0:
for f in self.fgout:
f.write(self.fdm.pack()) | called as each msg is ready | ### Input:
called as each msg is ready
### Response:
def next_message(self):
msg = self.msg
if msg is None:
self.paused = True
if self.paused:
self.root.after(100, self.next_message)
return
try:
speed = float(self.playback.get())
except:
speed = 0.0
timestamp = getattr(msg, )
now = time.strftime("%H:%M:%S", time.localtime(timestamp))
self.clock.configure(text=now)
if speed == 0.0:
self.root.after(200, self.next_message)
else:
self.root.after(int(1000*(timestamp - self.last_timestamp) / speed), self.next_message)
self.last_timestamp = timestamp
while True:
self.msg = self.mlog.recv_match(condition=args.condition)
if self.msg is None and self.mlog.f.tell() > self.filesize - 10:
self.paused = True
return
if self.msg is not None and self.msg.get_type() != "BAD_DATA":
break
pos = float(self.mlog.f.tell()) / self.filesize
self.slider.set(pos)
self.filepos = self.slider.get()
if msg.get_type() != "BAD_DATA":
for m in self.mout:
m.write(msg.get_msgbuf())
if msg.get_type() == "GPS_RAW":
self.fdm.set(, msg.lat, units=)
self.fdm.set(, msg.lon, units=)
if args.gpsalt:
self.fdm.set(, msg.alt, units=)
if msg.get_type() == "GPS_RAW_INT":
self.fdm.set(, msg.lat/1.0e7, units=)
self.fdm.set(, msg.lon/1.0e7, units=)
if args.gpsalt:
self.fdm.set(, msg.alt/1.0e3, units=)
if msg.get_type() == "VFR_HUD":
if not args.gpsalt:
self.fdm.set(, msg.alt, units=)
self.fdm.set(, 1)
self.fdm.set(, msg.airspeed, units=)
if msg.get_type() == "ATTITUDE":
self.fdm.set(, msg.roll, units=)
self.fdm.set(, msg.pitch, units=)
self.fdm.set(, msg.yaw, units=)
self.fdm.set(, msg.rollspeed, units=)
self.fdm.set(, msg.pitchspeed, units=)
self.fdm.set(, msg.yawspeed, units=)
if msg.get_type() == "RC_CHANNELS_SCALED":
self.fdm.set("right_aileron", msg.chan1_scaled*0.0001)
self.fdm.set("left_aileron", -msg.chan1_scaled*0.0001)
self.fdm.set("rudder", msg.chan4_scaled*0.0001)
self.fdm.set("elevator", msg.chan2_scaled*0.0001)
self.fdm.set(, msg.chan3_scaled*0.01)
if msg.get_type() == :
print("APM: %s" % msg.text)
if msg.get_type() == :
self.flightmode.configure(text=self.mlog.flightmode)
if msg.get_type() == "BAD_DATA":
if mavutil.all_printable(msg.data):
sys.stdout.write(msg.data)
sys.stdout.flush()
if self.fdm.get() != 0:
for f in self.fgout:
f.write(self.fdm.pack()) |
def deluser(name, username, root=None):
*barfoo
grp_info = __salt__[](name)
try:
if username in grp_info[]:
cmd = .format(username, name)
ret = __salt__[](cmd, python_shell=False)
return not ret[]
else:
return True
except Exception:
return True | Remove a user from the group.
CLI Example:
.. code-block:: bash
salt '*' group.deluser foo bar
Removes a member user 'bar' from a group 'foo'. If group is not present
then returns True. | ### Input:
Remove a user from the group.
CLI Example:
.. code-block:: bash
salt '*' group.deluser foo bar
Removes a member user 'bar' from a group 'foo'. If group is not present
then returns True.
### Response:
def deluser(name, username, root=None):
*barfoo
grp_info = __salt__[](name)
try:
if username in grp_info[]:
cmd = .format(username, name)
ret = __salt__[](cmd, python_shell=False)
return not ret[]
else:
return True
except Exception:
return True |
def serial_udb_extra_f2_a_encode(self, sue_time, sue_status, sue_latitude, sue_longitude, sue_altitude, sue_waypoint_index, sue_rmat0, sue_rmat1, sue_rmat2, sue_rmat3, sue_rmat4, sue_rmat5, sue_rmat6, sue_rmat7, sue_rmat8, sue_cog, sue_sog, sue_cpu_load, sue_voltage_milis, sue_air_speed_3DIMU, sue_estimated_wind_0, sue_estimated_wind_1, sue_estimated_wind_2, sue_magFieldEarth0, sue_magFieldEarth1, sue_magFieldEarth2, sue_svs, sue_hdop):
return MAVLink_serial_udb_extra_f2_a_message(sue_time, sue_status, sue_latitude, sue_longitude, sue_altitude, sue_waypoint_index, sue_rmat0, sue_rmat1, sue_rmat2, sue_rmat3, sue_rmat4, sue_rmat5, sue_rmat6, sue_rmat7, sue_rmat8, sue_cog, sue_sog, sue_cpu_load, sue_voltage_milis, sue_air_speed_3DIMU, sue_estimated_wind_0, sue_estimated_wind_1, sue_estimated_wind_2, sue_magFieldEarth0, sue_magFieldEarth1, sue_magFieldEarth2, sue_svs, sue_hdop) | Backwards compatible MAVLink version of SERIAL_UDB_EXTRA - F2: Format
Part A
sue_time : Serial UDB Extra Time (uint32_t)
sue_status : Serial UDB Extra Status (uint8_t)
sue_latitude : Serial UDB Extra Latitude (int32_t)
sue_longitude : Serial UDB Extra Longitude (int32_t)
sue_altitude : Serial UDB Extra Altitude (int32_t)
sue_waypoint_index : Serial UDB Extra Waypoint Index (uint16_t)
sue_rmat0 : Serial UDB Extra Rmat 0 (int16_t)
sue_rmat1 : Serial UDB Extra Rmat 1 (int16_t)
sue_rmat2 : Serial UDB Extra Rmat 2 (int16_t)
sue_rmat3 : Serial UDB Extra Rmat 3 (int16_t)
sue_rmat4 : Serial UDB Extra Rmat 4 (int16_t)
sue_rmat5 : Serial UDB Extra Rmat 5 (int16_t)
sue_rmat6 : Serial UDB Extra Rmat 6 (int16_t)
sue_rmat7 : Serial UDB Extra Rmat 7 (int16_t)
sue_rmat8 : Serial UDB Extra Rmat 8 (int16_t)
sue_cog : Serial UDB Extra GPS Course Over Ground (uint16_t)
sue_sog : Serial UDB Extra Speed Over Ground (int16_t)
sue_cpu_load : Serial UDB Extra CPU Load (uint16_t)
sue_voltage_milis : Serial UDB Extra Voltage in MilliVolts (int16_t)
sue_air_speed_3DIMU : Serial UDB Extra 3D IMU Air Speed (uint16_t)
sue_estimated_wind_0 : Serial UDB Extra Estimated Wind 0 (int16_t)
sue_estimated_wind_1 : Serial UDB Extra Estimated Wind 1 (int16_t)
sue_estimated_wind_2 : Serial UDB Extra Estimated Wind 2 (int16_t)
sue_magFieldEarth0 : Serial UDB Extra Magnetic Field Earth 0 (int16_t)
sue_magFieldEarth1 : Serial UDB Extra Magnetic Field Earth 1 (int16_t)
sue_magFieldEarth2 : Serial UDB Extra Magnetic Field Earth 2 (int16_t)
sue_svs : Serial UDB Extra Number of Sattelites in View (int16_t)
sue_hdop : Serial UDB Extra GPS Horizontal Dilution of Precision (int16_t) | ### Input:
Backwards compatible MAVLink version of SERIAL_UDB_EXTRA - F2: Format
Part A
sue_time : Serial UDB Extra Time (uint32_t)
sue_status : Serial UDB Extra Status (uint8_t)
sue_latitude : Serial UDB Extra Latitude (int32_t)
sue_longitude : Serial UDB Extra Longitude (int32_t)
sue_altitude : Serial UDB Extra Altitude (int32_t)
sue_waypoint_index : Serial UDB Extra Waypoint Index (uint16_t)
sue_rmat0 : Serial UDB Extra Rmat 0 (int16_t)
sue_rmat1 : Serial UDB Extra Rmat 1 (int16_t)
sue_rmat2 : Serial UDB Extra Rmat 2 (int16_t)
sue_rmat3 : Serial UDB Extra Rmat 3 (int16_t)
sue_rmat4 : Serial UDB Extra Rmat 4 (int16_t)
sue_rmat5 : Serial UDB Extra Rmat 5 (int16_t)
sue_rmat6 : Serial UDB Extra Rmat 6 (int16_t)
sue_rmat7 : Serial UDB Extra Rmat 7 (int16_t)
sue_rmat8 : Serial UDB Extra Rmat 8 (int16_t)
sue_cog : Serial UDB Extra GPS Course Over Ground (uint16_t)
sue_sog : Serial UDB Extra Speed Over Ground (int16_t)
sue_cpu_load : Serial UDB Extra CPU Load (uint16_t)
sue_voltage_milis : Serial UDB Extra Voltage in MilliVolts (int16_t)
sue_air_speed_3DIMU : Serial UDB Extra 3D IMU Air Speed (uint16_t)
sue_estimated_wind_0 : Serial UDB Extra Estimated Wind 0 (int16_t)
sue_estimated_wind_1 : Serial UDB Extra Estimated Wind 1 (int16_t)
sue_estimated_wind_2 : Serial UDB Extra Estimated Wind 2 (int16_t)
sue_magFieldEarth0 : Serial UDB Extra Magnetic Field Earth 0 (int16_t)
sue_magFieldEarth1 : Serial UDB Extra Magnetic Field Earth 1 (int16_t)
sue_magFieldEarth2 : Serial UDB Extra Magnetic Field Earth 2 (int16_t)
sue_svs : Serial UDB Extra Number of Sattelites in View (int16_t)
sue_hdop : Serial UDB Extra GPS Horizontal Dilution of Precision (int16_t)
### Response:
def serial_udb_extra_f2_a_encode(self, sue_time, sue_status, sue_latitude, sue_longitude, sue_altitude, sue_waypoint_index, sue_rmat0, sue_rmat1, sue_rmat2, sue_rmat3, sue_rmat4, sue_rmat5, sue_rmat6, sue_rmat7, sue_rmat8, sue_cog, sue_sog, sue_cpu_load, sue_voltage_milis, sue_air_speed_3DIMU, sue_estimated_wind_0, sue_estimated_wind_1, sue_estimated_wind_2, sue_magFieldEarth0, sue_magFieldEarth1, sue_magFieldEarth2, sue_svs, sue_hdop):
    """Build a MAVLink SERIAL_UDB_EXTRA F2 (Part A) message from the given field values."""
    # Collect the positional fields once, then splat them into the message
    # constructor in the same order the wire format expects.
    fields = (sue_time, sue_status, sue_latitude, sue_longitude, sue_altitude,
              sue_waypoint_index, sue_rmat0, sue_rmat1, sue_rmat2, sue_rmat3,
              sue_rmat4, sue_rmat5, sue_rmat6, sue_rmat7, sue_rmat8, sue_cog,
              sue_sog, sue_cpu_load, sue_voltage_milis, sue_air_speed_3DIMU,
              sue_estimated_wind_0, sue_estimated_wind_1, sue_estimated_wind_2,
              sue_magFieldEarth0, sue_magFieldEarth1, sue_magFieldEarth2,
              sue_svs, sue_hdop)
    return MAVLink_serial_udb_extra_f2_a_message(*fields)
def info(cls, name, get_state=True, get_pid=True):
    # Retrieve and parse info about an LXC container named *name*.
    # NOTE(review): the command literals were stripped in this dump --
    # presumably ['lxc-info', '--name', name] or similar; confirm upstream.
    command = [, , name]
    response = subwrap.run(command)
    # Parse each "key: value" line of the command output into dict entries.
    lines = map(split_info_line, response.std_out.splitlines())
    return dict(lines)
Retrieves and parses info about an LXC
### Response:
def info(cls, name, get_state=True, get_pid=True):
    # Retrieve and parse info about an LXC container named *name*.
    # NOTE(review): the command literals were stripped in this dump --
    # presumably ['lxc-info', '--name', name] or similar; confirm upstream.
    command = [, , name]
    response = subwrap.run(command)
    # Parse each "key: value" line of the command output into dict entries.
    lines = map(split_info_line, response.std_out.splitlines())
    return dict(lines)
def _quoteattr(self, attr):
    """Escape an XML attribute value; the value may be unicode."""
    escaped = xml_safe(attr)
    # On builds without native unicode strings, encode down to the
    # writer's configured encoding before quoting.
    must_encode = isinstance(escaped, unicode) and not UNICODE_STRINGS
    if must_encode:
        escaped = escaped.encode(self.encoding)
    return saxutils.quoteattr(escaped)
Escape an XML attribute. Value can be unicode.
### Response:
def _quoteattr(self, attr):
    """Escape an XML attribute value; the value may be unicode."""
    escaped = xml_safe(attr)
    # On builds without native unicode strings, encode down to the
    # writer's configured encoding before quoting.
    must_encode = isinstance(escaped, unicode) and not UNICODE_STRINGS
    if must_encode:
        escaped = escaped.encode(self.encoding)
    return saxutils.quoteattr(escaped)
def coerce_to_list(items, preprocess=None):
    """Return *items* as a list, wrapping a lone value in one.

    When *preprocess* is supplied (truthy), it is applied to every element.
    """
    coerced = items if isinstance(items, list) else [items]
    if preprocess:
        coerced = [preprocess(element) for element in coerced]
    return coerced
With optional preprocessing. | ### Input:
Given an instance or list, coerce to list.
With optional preprocessing.
### Response:
def coerce_to_list(items, preprocess=None):
    """Return *items* as a list, wrapping a lone value in one.

    When *preprocess* is supplied (truthy), it is applied to every element.
    """
    coerced = items if isinstance(items, list) else [items]
    if preprocess:
        coerced = [preprocess(element) for element in coerced]
    return coerced
def run(name,
        image=None,
        onlyif=None,
        unless=None,
        creates=None,
        bg=False,
        failhard=True,
        replace=False,
        force=False,
        skip_translate=None,
        ignore_collisions=False,
        validate_ip_addrs=True,
        client_timeout=salt.utils.docker.CLIENT_TIMEOUT,
        **kwargs):
    # NOTE(review): the docstring and all string literals of this Salt
    # docker_container.run state were stripped in this dump; the fragment
    # lines below are the surviving remains, left byte-identical.
    s
    exit code.
    .. note::
    This has no effect if ``bg`` is set to ``True``.
    replace : False
    If ``True``, and if the named container already exists, this will
    remove the existing container. The default behavior is to return a
    ``False`` result when the container already exists.
    force : False
    If ``True``, and the named container already exists, *and* ``replace``
    is also set to ``True``, then the container will be forcibly removed.
    Otherwise, the state will not proceed and will return a ``False``
    result.
    CLI Examples:
    .. code-block:: bash
    salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh
    **USAGE EXAMPLE**
    .. code-block:: jinja
    {% set pkg_version = salt.pillar.get(, ) %}
    build_package:
    docker_container.run:
    - image: myuser/builder:latest
    - binds: /home/myuser/builds:/build_dir
    - command: /scripts/build.sh {{ pkg_version }}
    - creates: /home/myuser/builds/myapp-{{ pkg_version }}.noarch.rpm
    - replace: True
    - networks:
    - mynet
    - require:
    - docker_network: mynet
    namechangesresultcommentwatch_actionstartshutdown_timeoutfollowresultcommentThe \ argument is not supportedresultcommentThe \ argument is requirednetworksnetworksnetworksnetworksresultcommenttestresultcommentContainer would be run{0} in the backgroundre doing a bit of a hack here, so that we can get the exit code after
    # Fold the two spellings of the auto-remove option (literals lost in
    # this dump, presumably 'rm' and 'auto_remove') into one flag, erroring
    # on a collision unless ignore_collisions is set.
    remove = None
    for item in (, ):
        try:
            val = kwargs.pop(item)
        except KeyError:
            continue
        if remove is not None:
            if not ignore_collisions:
                ret[] = False
                ret[] = (
                    rm\auto_remove\
                )
                return ret
        else:
            remove = bool(val)
    if remove is not None:
        # Removal is performed below (after reading the exit code), so
        # disable auto-remove on the container itself.
        kwargs[] = False
    else:
        remove = False
    try:
        ret[] = __salt__[](
            image,
            name=name,
            skip_translate=skip_translate,
            ignore_collisions=ignore_collisions,
            validate_ip_addrs=validate_ip_addrs,
            client_timeout=client_timeout,
            bg=bg,
            replace=replace,
            force=force,
            **kwargs)
    except Exception as exc:
        log.exception()
        ret[] = False
        ret[] = .format(exc)
    else:
        if bg:
            ret[] =
        else:
            try:
                retcode = ret[][]
            except KeyError:
                pass
            else:
                # Honor failhard: a non-zero container exit code fails the state.
                ret[] = False if failhard and retcode != 0 else True
                ret[] = (
                    .format(retcode)
                )
        if remove:
            id_ = ret.get(, {}).get()
            if id_:
                try:
                    __salt__[](ret[][])
                except CommandExecutionError as exc:
                    ret.setdefault(, []).append(
                        .format(exc)
                    )
    return ret
.. note::
If no tag is specified in the image name, and nothing matching the
specified image is pulled on the minion, the ``docker pull`` that
retrieves the image will pull *all tags* for the image. A tag of
``latest`` is not implicit for the pull. For this reason, it is
recommended to specify the image in ``repo:tag`` notation.
Like the :py:func:`cmd.run <salt.states.cmd.run>` state, only for Docker.
Does the equivalent of a ``docker run`` and returns information about the
container that was created, as well as its output.
This state accepts the same arguments as :py:func:`docker_container.running
<salt.states.docker_container.running>`, with the exception of
``watch_action``, ``start``, and ``shutdown_timeout`` (though the ``force``
argument has a different meaning in this state).
In addition, this state accepts the arguments from :py:func:`docker.logs
<salt.modules.dockermod.logs>`, with the exception of ``follow``, to
control how logs are returned.
Additionally, the following arguments are supported:
onlyif
A command or list of commands to run as a check. The container will
only run if any of the specified commands returns a zero exit status.
unless
A command or list of commands to run as a check. The container will
only run if any of the specified commands returns a non-zero exit
status.
creates
A path or list of paths. Only run if one or more of the specified paths
do not exist on the minion.
bg : False
If ``True``, run container in background and do not await or deliver
its results.
.. note::
This may not be useful in cases where other states depend on the
results of this state. Also, the logs will be inaccessible once the
container exits if ``auto_remove`` is set to ``True``, so keep this
in mind.
failhard : True
If ``True``, the state will return a ``False`` result if the exit code
of the container is non-zero. When this argument is set to ``False``,
the state will return a ``True`` result regardless of the container's
exit code.
.. note::
This has no effect if ``bg`` is set to ``True``.
replace : False
If ``True``, and if the named container already exists, this will
remove the existing container. The default behavior is to return a
``False`` result when the container already exists.
force : False
If ``True``, and the named container already exists, *and* ``replace``
is also set to ``True``, then the container will be forcibly removed.
Otherwise, the state will not proceed and will return a ``False``
result.
CLI Examples:
.. code-block:: bash
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh
**USAGE EXAMPLE**
.. code-block:: jinja
{% set pkg_version = salt.pillar.get('pkg_version', '1.0-1') %}
build_package:
docker_container.run:
- image: myuser/builder:latest
- binds: /home/myuser/builds:/build_dir
- command: /scripts/build.sh {{ pkg_version }}
- creates: /home/myuser/builds/myapp-{{ pkg_version }}.noarch.rpm
- replace: True
- networks:
- mynet
- require:
- docker_network: mynet | ### Input:
.. versionadded:: 2018.3.0
.. note::
If no tag is specified in the image name, and nothing matching the
specified image is pulled on the minion, the ``docker pull`` that
retrieves the image will pull *all tags* for the image. A tag of
``latest`` is not implicit for the pull. For this reason, it is
recommended to specify the image in ``repo:tag`` notation.
Like the :py:func:`cmd.run <salt.states.cmd.run>` state, only for Docker.
Does the equivalent of a ``docker run`` and returns information about the
container that was created, as well as its output.
This state accepts the same arguments as :py:func:`docker_container.running
<salt.states.docker_container.running>`, with the exception of
``watch_action``, ``start``, and ``shutdown_timeout`` (though the ``force``
argument has a different meaning in this state).
In addition, this state accepts the arguments from :py:func:`docker.logs
<salt.modules.dockermod.logs>`, with the exception of ``follow``, to
control how logs are returned.
Additionally, the following arguments are supported:
onlyif
A command or list of commands to run as a check. The container will
only run if any of the specified commands returns a zero exit status.
unless
A command or list of commands to run as a check. The container will
only run if any of the specified commands returns a non-zero exit
status.
creates
A path or list of paths. Only run if one or more of the specified paths
do not exist on the minion.
bg : False
If ``True``, run container in background and do not await or deliver
its results.
.. note::
This may not be useful in cases where other states depend on the
results of this state. Also, the logs will be inaccessible once the
container exits if ``auto_remove`` is set to ``True``, so keep this
in mind.
failhard : True
If ``True``, the state will return a ``False`` result if the exit code
of the container is non-zero. When this argument is set to ``False``,
the state will return a ``True`` result regardless of the container's
exit code.
.. note::
This has no effect if ``bg`` is set to ``True``.
replace : False
If ``True``, and if the named container already exists, this will
remove the existing container. The default behavior is to return a
``False`` result when the container already exists.
force : False
If ``True``, and the named container already exists, *and* ``replace``
is also set to ``True``, then the container will be forcibly removed.
Otherwise, the state will not proceed and will return a ``False``
result.
CLI Examples:
.. code-block:: bash
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh
**USAGE EXAMPLE**
.. code-block:: jinja
{% set pkg_version = salt.pillar.get('pkg_version', '1.0-1') %}
build_package:
docker_container.run:
- image: myuser/builder:latest
- binds: /home/myuser/builds:/build_dir
- command: /scripts/build.sh {{ pkg_version }}
- creates: /home/myuser/builds/myapp-{{ pkg_version }}.noarch.rpm
- replace: True
- networks:
- mynet
- require:
- docker_network: mynet
### Response:
def run(name,
        image=None,
        onlyif=None,
        unless=None,
        creates=None,
        bg=False,
        failhard=True,
        replace=False,
        force=False,
        skip_translate=None,
        ignore_collisions=False,
        validate_ip_addrs=True,
        client_timeout=salt.utils.docker.CLIENT_TIMEOUT,
        **kwargs):
    # NOTE(review): the docstring and all string literals of this Salt
    # docker_container.run state were stripped in this dump; the fragment
    # lines below are the surviving remains, left byte-identical.
    s
    exit code.
    .. note::
    This has no effect if ``bg`` is set to ``True``.
    replace : False
    If ``True``, and if the named container already exists, this will
    remove the existing container. The default behavior is to return a
    ``False`` result when the container already exists.
    force : False
    If ``True``, and the named container already exists, *and* ``replace``
    is also set to ``True``, then the container will be forcibly removed.
    Otherwise, the state will not proceed and will return a ``False``
    result.
    CLI Examples:
    .. code-block:: bash
    salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh
    **USAGE EXAMPLE**
    .. code-block:: jinja
    {% set pkg_version = salt.pillar.get(, ) %}
    build_package:
    docker_container.run:
    - image: myuser/builder:latest
    - binds: /home/myuser/builds:/build_dir
    - command: /scripts/build.sh {{ pkg_version }}
    - creates: /home/myuser/builds/myapp-{{ pkg_version }}.noarch.rpm
    - replace: True
    - networks:
    - mynet
    - require:
    - docker_network: mynet
    namechangesresultcommentwatch_actionstartshutdown_timeoutfollowresultcommentThe \ argument is not supportedresultcommentThe \ argument is requirednetworksnetworksnetworksnetworksresultcommenttestresultcommentContainer would be run{0} in the backgroundre doing a bit of a hack here, so that we can get the exit code after
    # Fold the two spellings of the auto-remove option (literals lost in
    # this dump, presumably 'rm' and 'auto_remove') into one flag, erroring
    # on a collision unless ignore_collisions is set.
    remove = None
    for item in (, ):
        try:
            val = kwargs.pop(item)
        except KeyError:
            continue
        if remove is not None:
            if not ignore_collisions:
                ret[] = False
                ret[] = (
                    rm\auto_remove\
                )
                return ret
        else:
            remove = bool(val)
    if remove is not None:
        # Removal is performed below (after reading the exit code), so
        # disable auto-remove on the container itself.
        kwargs[] = False
    else:
        remove = False
    try:
        ret[] = __salt__[](
            image,
            name=name,
            skip_translate=skip_translate,
            ignore_collisions=ignore_collisions,
            validate_ip_addrs=validate_ip_addrs,
            client_timeout=client_timeout,
            bg=bg,
            replace=replace,
            force=force,
            **kwargs)
    except Exception as exc:
        log.exception()
        ret[] = False
        ret[] = .format(exc)
    else:
        if bg:
            ret[] =
        else:
            try:
                retcode = ret[][]
            except KeyError:
                pass
            else:
                # Honor failhard: a non-zero container exit code fails the state.
                ret[] = False if failhard and retcode != 0 else True
                ret[] = (
                    .format(retcode)
                )
        if remove:
            id_ = ret.get(, {}).get()
            if id_:
                try:
                    __salt__[](ret[][])
                except CommandExecutionError as exc:
                    ret.setdefault(, []).append(
                        .format(exc)
                    )
    return ret
def add_child(self, node, callback):
    """Track *node* (with its *callback*) as a child, skipping duplicates."""
    if node in self.children:
        return
    self.children.append(ChildNode(node, callback))
Add node and callback to the children set.
### Response:
def add_child(self, node, callback):
    """Track *node* (with its *callback*) as a child, skipping duplicates."""
    if node in self.children:
        return
    self.children.append(ChildNode(node, callback))
def GenerateHelpText(self, env, sort=None):
    # Generate the help text for the options.
    # env  -- environment used to look up current option values.
    # sort -- a two-argument cmp-style function, True to sort by key,
    #         or None/False to keep declaration order.
    if callable(sort):
        options = sorted(self.options, key=cmp_to_key(lambda x,y: sort(x.key,y.key)))
    elif sort is True:
        options = sorted(self.options, key=lambda x: x.key)
    else:
        options = self.options
    def format(opt, self=self, env=env):
        # NOTE(review): the subst template literal was lost in this dump
        # (presumably '${%s}' % opt.key); confirm against SCons sources.
        if opt.key in env:
            actual = env.subst( % opt.key)
        else:
            actual = None
        return self.FormatVariableHelpText(env, opt.key, opt.help, opt.default, actual, opt.aliases)
    # Drop empty/falsy help entries, then join with the (lost) separator literal.
    lines = [_f for _f in map(format, options) if _f]
    return .join(lines)
env - an environment that is used to get the current values
of the options.
cmp - Either a function as follows: The specific sort function should take two arguments and return -1, 0 or 1
or a boolean to indicate if it should be sorted. | ### Input:
Generate the help text for the options.
env - an environment that is used to get the current values
of the options.
cmp - Either a function as follows: The specific sort function should take two arguments and return -1, 0 or 1
or a boolean to indicate if it should be sorted.
### Response:
def GenerateHelpText(self, env, sort=None):
    # Generate the help text for the options.
    # env  -- environment used to look up current option values.
    # sort -- a two-argument cmp-style function, True to sort by key,
    #         or None/False to keep declaration order.
    if callable(sort):
        options = sorted(self.options, key=cmp_to_key(lambda x,y: sort(x.key,y.key)))
    elif sort is True:
        options = sorted(self.options, key=lambda x: x.key)
    else:
        options = self.options
    def format(opt, self=self, env=env):
        # NOTE(review): the subst template literal was lost in this dump
        # (presumably '${%s}' % opt.key); confirm against SCons sources.
        if opt.key in env:
            actual = env.subst( % opt.key)
        else:
            actual = None
        return self.FormatVariableHelpText(env, opt.key, opt.help, opt.default, actual, opt.aliases)
    # Drop empty/falsy help entries, then join with the (lost) separator literal.
    lines = [_f for _f in map(format, options) if _f]
    return .join(lines)
def tail(ctx):
    # Show the last 10 lines of the log file.
    click.echo()
    for e in ctx.tail()[-10:]:
        # Timestamps are stored as epoch milliseconds; the dict key
        # literals were lost in this dump -- confirm against the log schema.
        ts = datetime.utcfromtimestamp(e[] // 1000).isoformat()
        click.echo("{}: {}".format(ts, e[]))
    click.echo()
Show the last 10 lines of the log file
### Response:
def tail(ctx):
    # Show the last 10 lines of the log file.
    click.echo()
    for e in ctx.tail()[-10:]:
        # Timestamps are stored as epoch milliseconds; the dict key
        # literals were lost in this dump -- confirm against the log schema.
        ts = datetime.utcfromtimestamp(e[] // 1000).isoformat()
        click.echo("{}: {}".format(ts, e[]))
    click.echo()
def update(self, url, cache_info=None):
    """Update cache information for *url*, stamping its access time.

    :param url: URL whose cache metadata is updated
    :type url: str | unicode
    :param cache_info: existing cache info to update; a fresh CacheInfo
        is created when falsy
    :rtype: None
    """
    key = hashlib.md5(url).hexdigest()
    if not cache_info:
        cache_info = CacheInfo()
    # The original guarded this assignment behind a local `access_time`
    # that was always None, so the stamp was applied unconditionally;
    # the dead variable and always-true check are removed.
    cache_info.access_time = now_utc()
    self._cache_meta_set(key, cache_info.to_dict())
:param url: Update for this url
:type url: str | unicode
:param cache_info: Cache info
:type cache_info: floscraper.models.CacheInfo
:rtype: None | ### Input:
Update cache information for url
:param url: Update for this url
:type url: str | unicode
:param cache_info: Cache info
:type cache_info: floscraper.models.CacheInfo
:rtype: None
### Response:
def update(self, url, cache_info=None):
    """Update cache information for *url*, stamping its access time.

    :param url: URL whose cache metadata is updated
    :type url: str | unicode
    :param cache_info: existing cache info to update; a fresh CacheInfo
        is created when falsy
    :rtype: None
    """
    key = hashlib.md5(url).hexdigest()
    if not cache_info:
        cache_info = CacheInfo()
    # The original guarded this assignment behind a local `access_time`
    # that was always None, so the stamp was applied unconditionally;
    # the dead variable and always-true check are removed.
    cache_info.access_time = now_utc()
    self._cache_meta_set(key, cache_info.to_dict())
def convert(self, request, response, data):
size = response.content_length
if not size:
size = "-" if self.conv_chr == else 0
return str(size) | Performs the desired Conversion.
:param request: The webob Request object describing the
request.
:param response: The webob Response object describing the
response.
:param data: The data dictionary returned by the prepare()
method.
:returns: A string, the results of which are the desired
conversion. | ### Input:
Performs the desired Conversion.
:param request: The webob Request object describing the
request.
:param response: The webob Response object describing the
response.
:param data: The data dictionary returned by the prepare()
method.
:returns: A string, the results of which are the desired
conversion.
### Response:
def convert(self, request, response, data):
size = response.content_length
if not size:
size = "-" if self.conv_chr == else 0
return str(size) |
def get_networks(context, limit=None, sorts=None, marker=None,
                 page_reverse=False, filters=None, fields=None):
    """Retrieve a list of networks visible to the request's tenant.

    :param context: neutron api request context
    :param filters: dict of attribute -> iterable of acceptable values;
        each returned network matched one value per key
    :param fields: attribute names to include in each returned dict
    :returns: list of network dicts
    """
    # FIX: the original used a mutable default (sorts=[]); normalize here
    # while preserving the original downstream value.
    if sorts is None:
        sorts = []
    # Lazy %-style logging args instead of eager string interpolation.
    LOG.info("get_networks for tenant %s with filters %s, fields %s",
             context.tenant_id, filters, fields)
    filters = filters or {}
    nets = db_api.network_find(context, limit, sorts, marker, page_reverse,
                               join_subnets=True, **filters) or []
    return [v._make_network_dict(net, fields=fields) for net in nets]
The contents of the list depends on the identity of the user
making the request (as indicated by the context) as well as any
filters.
: param context: neutron api request context
: param filters: a dictionary with keys that are valid keys for
a network as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
: param fields: a list of strings that are valid keys in a
network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned. | ### Input:
Retrieve a list of networks.
The contents of the list depends on the identity of the user
making the request (as indicated by the context) as well as any
filters.
: param context: neutron api request context
: param filters: a dictionary with keys that are valid keys for
a network as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
: param fields: a list of strings that are valid keys in a
network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
### Response:
def get_networks(context, limit=None, sorts=None, marker=None,
                 page_reverse=False, filters=None, fields=None):
    """Retrieve a list of networks visible to the request's tenant.

    :param context: neutron api request context
    :param filters: dict of attribute -> iterable of acceptable values;
        each returned network matched one value per key
    :param fields: attribute names to include in each returned dict
    :returns: list of network dicts
    """
    # FIX: the original used a mutable default (sorts=[]); normalize here
    # while preserving the original downstream value.
    if sorts is None:
        sorts = []
    # Lazy %-style logging args instead of eager string interpolation.
    LOG.info("get_networks for tenant %s with filters %s, fields %s",
             context.tenant_id, filters, fields)
    filters = filters or {}
    nets = db_api.network_find(context, limit, sorts, marker, page_reverse,
                               join_subnets=True, **filters) or []
    return [v._make_network_dict(net, fields=fields) for net in nets]
def _check_wiremap_validity(self, wire_map, keymap, valmap):
    """Check that the wire map is consistent.

    Every key of *wire_map* must appear in *keymap*, every value in
    *valmap*, and each mapped pair must share the same type.

    Raises:
        DAGCircuitError: if the wire map is invalid.
    """
    for src, dst in wire_map.items():
        src_name = "%s[%d]" % (src[0].name, src[1])
        dst_name = "%s[%d]" % (dst[0].name, dst[1])
        if src not in keymap:
            raise DAGCircuitError("invalid wire mapping key %s" % src_name)
        if dst not in valmap:
            raise DAGCircuitError("invalid wire mapping value %s" % dst_name)
        if type(src) is not type(dst):
            raise DAGCircuitError("inconsistent wire_map at (%s,%s)" %
                                  (src_name, dst_name))
Check that the wiremap refers to valid wires and that
those wires have consistent types.
Args:
wire_map (dict): map from (register,idx) in keymap to
(register,idx) in valmap
keymap (dict): a map whose keys are wire_map keys
valmap (dict): a map whose keys are wire_map values
Raises:
DAGCircuitError: if wire_map not valid | ### Input:
Check that the wiremap is consistent.
Check that the wiremap refers to valid wires and that
those wires have consistent types.
Args:
wire_map (dict): map from (register,idx) in keymap to
(register,idx) in valmap
keymap (dict): a map whose keys are wire_map keys
valmap (dict): a map whose keys are wire_map values
Raises:
DAGCircuitError: if wire_map not valid
### Response:
def _check_wiremap_validity(self, wire_map, keymap, valmap):
    """Check that the wire map is consistent.

    Every key of *wire_map* must appear in *keymap*, every value in
    *valmap*, and each mapped pair must share the same type.

    Raises:
        DAGCircuitError: if the wire map is invalid.
    """
    for src, dst in wire_map.items():
        src_name = "%s[%d]" % (src[0].name, src[1])
        dst_name = "%s[%d]" % (dst[0].name, dst[1])
        if src not in keymap:
            raise DAGCircuitError("invalid wire mapping key %s" % src_name)
        if dst not in valmap:
            raise DAGCircuitError("invalid wire mapping value %s" % dst_name)
        if type(src) is not type(dst):
            raise DAGCircuitError("inconsistent wire_map at (%s,%s)" %
                                  (src_name, dst_name))
def quoted_or_list(items):
    # Given [A, B, C] return '"A", "B" or "C"', truncated to MAX_LENGTH items.
    selected = items[:MAX_LENGTH]
    # NOTE(review): the quoting template literal was lost in this dump
    # (presumably '"{}"'.format(t)); confirm upstream.
    quoted_items = (.format(t) for t in selected)
    def quoted_or_text(text, quoted_and_index):
        index = quoted_and_index[0]
        quoted_item = quoted_and_index[1]
        # Insert ", " between middle items and "or " before the final one.
        text += (
            (", " if len(selected) > 2 and not index == len(selected) - 1 else " ")
            + ("or " if index == len(selected) - 1 else "")
            + quoted_item
        )
        return text
    # Seed the reduce with the first quoted item, then fold in the rest.
    enumerated_items = enumerate(quoted_items)
    first_item = next(enumerated_items)[1]
    return functools.reduce(quoted_or_text, enumerated_items, first_item)
Given [ A, B, C ] return '"A", "B" or "C"'.
### Response:
def quoted_or_list(items):
    # Given [A, B, C] return '"A", "B" or "C"', truncated to MAX_LENGTH items.
    selected = items[:MAX_LENGTH]
    # NOTE(review): the quoting template literal was lost in this dump
    # (presumably '"{}"'.format(t)); confirm upstream.
    quoted_items = (.format(t) for t in selected)
    def quoted_or_text(text, quoted_and_index):
        index = quoted_and_index[0]
        quoted_item = quoted_and_index[1]
        # Insert ", " between middle items and "or " before the final one.
        text += (
            (", " if len(selected) > 2 and not index == len(selected) - 1 else " ")
            + ("or " if index == len(selected) - 1 else "")
            + quoted_item
        )
        return text
    # Seed the reduce with the first quoted item, then fold in the rest.
    enumerated_items = enumerate(quoted_items)
    first_item = next(enumerated_items)[1]
    return functools.reduce(quoted_or_text, enumerated_items, first_item)
def thanks(request, redirect_url=settings.LOGIN_REDIRECT_URL):
    """Twitter OAuth callback landing view.

    The user is redirected here after authorizing the app on Twitter;
    send them onward to *redirect_url*.

    BUG FIX: the original constructed the HttpResponseRedirect but never
    returned it, so the view returned None (an error in Django).
    """
    return HttpResponseRedirect(redirect_url)
This is the view that stores the tokens you want
for querying data. Pay attention to this. | ### Input:
A user gets redirected here after hitting Twitter and authorizing your app to use their data.
This is the view that stores the tokens you want
for querying data. Pay attention to this.
### Response:
def thanks(request, redirect_url=settings.LOGIN_REDIRECT_URL):
    """Twitter OAuth callback landing view.

    The user is redirected here after authorizing the app on Twitter;
    send them onward to *redirect_url*.

    BUG FIX: the original constructed the HttpResponseRedirect but never
    returned it, so the view returned None (an error in Django).
    """
    return HttpResponseRedirect(redirect_url)
def sender(url, **kwargs):
    """Return a sender instance built from a connection url string.

    url <str> connection url, e.g. 'tcp://0.0.0.0:8080'
    """
    resources = url_to_resources(url)
    factory = resources["sender"]
    return factory(resources.get("url"), **kwargs)
url <str> connection url eg. 'tcp://0.0.0.0:8080' | ### Input:
Return sender instance from connection url string
url <str> connection url eg. 'tcp://0.0.0.0:8080'
### Response:
def sender(url, **kwargs):
    """Return a sender instance built from a connection url string.

    url <str> connection url, e.g. 'tcp://0.0.0.0:8080'
    """
    resources = url_to_resources(url)
    factory = resources["sender"]
    return factory(resources.get("url"), **kwargs)
def show_install(show_nvidia_smi:bool=False):
    "Print users the
    # NOTE(review): the docstring above is truncated in this dump; upstream
    # it reads "Print user's setup information". Several string literals
    # below were also stripped; those lines are left byte-identical.
    if have_nvidia_smi:
        smi = result.stdout.decode()
        # The nvidia driver-version regex literal was lost in this dump.
        match = re.findall(r, smi)
        if match: rep.append(["nvidia driver", match[0]])
        available = "available" if torch.cuda.is_available() else "**Not available** "
        rep.append(["torch cuda", f"{torch.version.cuda} / is {available}"])
        if torch.cuda.is_available():
            enabled = "enabled" if torch.backends.cudnn.enabled else "**Not enabled** "
            rep.append(["torch cudnn", f"{torch.backends.cudnn.version()} / is {enabled}"])
        rep.append(["\n=== Hardware ===", None])
    else:
        rep.append([f"No GPUs available", None])
    rep.append(["\n=== Environment ===", None])
    rep.append(["platform", platform.platform()])
    if platform.system() == :
        distro = try_import()
        if distro:
            rep.append(["distro", .join(distro.linux_distribution())])
        else:
            opt_mods.append();
            rep.append(["distro", platform.uname().version])
    rep.append(["conda env", get_env()])
    rep.append(["python", sys.executable])
    rep.append(["sys.path", "\n".join(sys.path)])
    print("\n\n```text")
    # Right-align keys against the longest key for a tidy report layout.
    keylen = max([len(e[0]) for e in rep if e[1] is not None])
    for e in rep:
        print(f"{e[0]:{keylen}}", (f": {e[1]}" if e[1] is not None else ""))
    if have_nvidia_smi:
        if show_nvidia_smi: print(f"\n{smi}")
    else:
        if torch_gpu_cnt: print("no nvidia-smi is found")
        else: print("no supported gpus found on this system")
    print("```\n")
    print("Please make sure to include opening/closing ``` when you paste into forums/github to make the reports appear formatted as code sections.\n")
    if opt_mods:
        print("Optional package(s) to enhance the diagnostics can be installed with:")
        print(f"pip install {.join(opt_mods)}")
        print("Once installed, re-run this utility to get the additional information")
Print user's setup information
### Response:
def show_install(show_nvidia_smi:bool=False):
    "Print users the
    # NOTE(review): the docstring above is truncated in this dump; upstream
    # it reads "Print user's setup information". Several string literals
    # below were also stripped; those lines are left byte-identical.
    if have_nvidia_smi:
        smi = result.stdout.decode()
        # The nvidia driver-version regex literal was lost in this dump.
        match = re.findall(r, smi)
        if match: rep.append(["nvidia driver", match[0]])
        available = "available" if torch.cuda.is_available() else "**Not available** "
        rep.append(["torch cuda", f"{torch.version.cuda} / is {available}"])
        if torch.cuda.is_available():
            enabled = "enabled" if torch.backends.cudnn.enabled else "**Not enabled** "
            rep.append(["torch cudnn", f"{torch.backends.cudnn.version()} / is {enabled}"])
        rep.append(["\n=== Hardware ===", None])
    else:
        rep.append([f"No GPUs available", None])
    rep.append(["\n=== Environment ===", None])
    rep.append(["platform", platform.platform()])
    if platform.system() == :
        distro = try_import()
        if distro:
            rep.append(["distro", .join(distro.linux_distribution())])
        else:
            opt_mods.append();
            rep.append(["distro", platform.uname().version])
    rep.append(["conda env", get_env()])
    rep.append(["python", sys.executable])
    rep.append(["sys.path", "\n".join(sys.path)])
    print("\n\n```text")
    # Right-align keys against the longest key for a tidy report layout.
    keylen = max([len(e[0]) for e in rep if e[1] is not None])
    for e in rep:
        print(f"{e[0]:{keylen}}", (f": {e[1]}" if e[1] is not None else ""))
    if have_nvidia_smi:
        if show_nvidia_smi: print(f"\n{smi}")
    else:
        if torch_gpu_cnt: print("no nvidia-smi is found")
        else: print("no supported gpus found on this system")
    print("```\n")
    print("Please make sure to include opening/closing ``` when you paste into forums/github to make the reports appear formatted as code sections.\n")
    if opt_mods:
        print("Optional package(s) to enhance the diagnostics can be installed with:")
        print(f"pip install {.join(opt_mods)}")
        print("Once installed, re-run this utility to get the additional information")
def _infer_sig_len(file_name, fmt, n_sig, dir_name, pb_dir=None):
    """Infer the length of a signal from a dat file.

    Uses the identity: sig_len * n_sig * bytes_per_sample == file_size.

    file_name -- name of the dat file
    fmt      -- WFDB fmt of the dat file
    n_sig    -- number of signals contained in the dat file
    pb_dir   -- remote physiobank directory; local lookup when None
    """
    if pb_dir is not None:
        file_size = download._remote_file_size(file_name=file_name,
                                               pb_dir=pb_dir)
    else:
        file_size = os.path.getsize(os.path.join(dir_name, file_name))
    bytes_per_frame = BYTES_PER_SAMPLE[fmt] * n_sig
    return int(file_size / bytes_per_frame)
Parameters
----------
file_name : str
Name of the dat file
fmt : str
WFDB fmt of the dat file
n_sig : int
Number of signals contained in the dat file
Notes
-----
sig_len * n_sig * bytes_per_sample == file_size | ### Input:
Infer the length of a signal from a dat file.
Parameters
----------
file_name : str
Name of the dat file
fmt : str
WFDB fmt of the dat file
n_sig : int
Number of signals contained in the dat file
Notes
-----
sig_len * n_sig * bytes_per_sample == file_size
### Response:
def _infer_sig_len(file_name, fmt, n_sig, dir_name, pb_dir=None):
    """Infer the length of a signal from a dat file.

    Uses the identity: sig_len * n_sig * bytes_per_sample == file_size.

    file_name -- name of the dat file
    fmt      -- WFDB fmt of the dat file
    n_sig    -- number of signals contained in the dat file
    pb_dir   -- remote physiobank directory; local lookup when None
    """
    if pb_dir is not None:
        file_size = download._remote_file_size(file_name=file_name,
                                               pb_dir=pb_dir)
    else:
        file_size = os.path.getsize(os.path.join(dir_name, file_name))
    bytes_per_frame = BYTES_PER_SAMPLE[fmt] * n_sig
    return int(file_size / bytes_per_frame)
def create_inner_transfer(self, from_account_id, to_account_id, amount, order_id=None):
    # Transfer funds between two of the user's own Kucoin accounts.
    # NOTE(review): the dict key and endpoint literals were stripped in
    # this dump (per the Kucoin API, presumably 'payAccountId',
    # 'recAccountId', 'amount', 'clientOid' and 'accounts/inner-transfer').
    data = {
        : from_account_id,
        : to_account_id,
        : amount
    }
    if order_id:
        data[] = order_id
    else:
        # Generate a request id when the caller did not supply one.
        data[] = flat_uuid()
    return self._post(, True, data=data)
return self._post(, True, data=data) | Get account holds placed for any active orders or pending withdraw requests
https://docs.kucoin.com/#get-holds
:param from_account_id: ID of account to transfer funds from - from list_accounts()
:type from_account_id: str
:param to_account_id: ID of account to transfer funds to - from list_accounts()
:type to_account_id: str
:param amount: Amount to transfer
:type amount: int
:param order_id: (optional) Request ID (default flat_uuid())
:type order_id: string
.. code:: python
transfer = client.create_inner_transfer('5bd6e9216d99522a52e458d6', '5bc7f080b39c5c03286eef8e', 20)
:returns: API Response
.. code-block:: python
{
"orderId": "5bd6e9286d99522a52e458de"
}
:raises: KucoinResponseException, KucoinAPIException | ### Input:
Get account holds placed for any active orders or pending withdraw requests
https://docs.kucoin.com/#get-holds
:param from_account_id: ID of account to transfer funds from - from list_accounts()
:type from_account_id: str
:param to_account_id: ID of account to transfer funds to - from list_accounts()
:type to_account_id: str
:param amount: Amount to transfer
:type amount: int
:param order_id: (optional) Request ID (default flat_uuid())
:type order_id: string
.. code:: python
transfer = client.create_inner_transfer('5bd6e9216d99522a52e458d6', '5bc7f080b39c5c03286eef8e', 20)
:returns: API Response
.. code-block:: python
{
"orderId": "5bd6e9286d99522a52e458de"
}
:raises: KucoinResponseException, KucoinAPIException
### Response:
def create_inner_transfer(self, from_account_id, to_account_id, amount, order_id=None):
    # Transfer funds between two of the user's own Kucoin accounts.
    # NOTE(review): the dict key and endpoint literals were stripped in
    # this dump (per the Kucoin API, presumably 'payAccountId',
    # 'recAccountId', 'amount', 'clientOid' and 'accounts/inner-transfer').
    data = {
        : from_account_id,
        : to_account_id,
        : amount
    }
    if order_id:
        data[] = order_id
    else:
        # Generate a request id when the caller did not supply one.
        data[] = flat_uuid()
    return self._post(, True, data=data)
def run_step(self):
    """Run a single flow step and wrap the outcome in a StepResult.

    :return: StepResult
    """
    task_config = self.step.task_config.copy()
    task_config["options"] = task_config["options"].copy()
    self.flow.resolve_return_value_options(task_config["options"])
    # BUG FIX: bind `task` before the try block -- if the task constructor
    # raised, the original hit UnboundLocalError while building StepResult.
    task = None
    exc = None
    try:
        task = self.step.task_class(
            self.project_config,
            TaskConfig(task_config),
            org_config=self.org_config,
            name=self.step.task_name,
            stepnum=self.step.step_num,
            flow=self.flow,
        )
        self._log_options(task)
        task()
    except Exception as e:
        self.flow.logger.exception(
            "Exception in task {}".format(self.step.task_name)
        )
        exc = e
    return StepResult(
        self.step.step_num,
        self.step.task_name,
        self.step.path,
        # getattr defaults keep the failure path safe while returning the
        # same values as before on success.
        getattr(task, "result", None),
        getattr(task, "return_values", None),
        exc,
    )
:return: StepResult | ### Input:
Run a step.
:return: StepResult
### Response:
def run_step(self):
    """Run a single flow step and wrap the outcome in a StepResult.

    :return: StepResult
    """
    task_config = self.step.task_config.copy()
    task_config["options"] = task_config["options"].copy()
    self.flow.resolve_return_value_options(task_config["options"])
    # BUG FIX: bind `task` before the try block -- if the task constructor
    # raised, the original hit UnboundLocalError while building StepResult.
    task = None
    exc = None
    try:
        task = self.step.task_class(
            self.project_config,
            TaskConfig(task_config),
            org_config=self.org_config,
            name=self.step.task_name,
            stepnum=self.step.step_num,
            flow=self.flow,
        )
        self._log_options(task)
        task()
    except Exception as e:
        self.flow.logger.exception(
            "Exception in task {}".format(self.step.task_name)
        )
        exc = e
    return StepResult(
        self.step.step_num,
        self.step.task_name,
        self.step.path,
        # getattr defaults keep the failure path safe while returning the
        # same values as before on success.
        getattr(task, "result", None),
        getattr(task, "return_values", None),
        exc,
    )
def insertBefore(self, newchild, refchild):
    """Insert *newchild* immediately before *refchild* among the children.

    *refchild* must already be a child of this node; otherwise ValueError
    is raised. Returns *newchild*.
    """
    for position, existing in enumerate(self.childNodes):
        if existing is not refchild:
            continue
        self.childNodes.insert(position, newchild)
        newchild.parentNode = self
        self._verifyChildren(position)
        return newchild
    raise ValueError(refchild)
be the case that refchild is a child of this node; if not,
ValueError is raised. newchild is returned. | ### Input:
Insert a new child node before an existing child. It must
be the case that refchild is a child of this node; if not,
ValueError is raised. newchild is returned.
### Response:
def insertBefore(self, newchild, refchild):
for i, childNode in enumerate(self.childNodes):
if childNode is refchild:
self.childNodes.insert(i, newchild)
newchild.parentNode = self
self._verifyChildren(i)
return newchild
raise ValueError(refchild) |
def color(self, c=False):
if c is False:
return np.array(self.GetProperty().GetColor())
elif c is None:
self.GetMapper().ScalarVisibilityOn()
return self
else:
self.GetMapper().ScalarVisibilityOff()
self.GetProperty().SetColor(colors.getColor(c))
return self | Set/get actor's color.
If None is passed as input, will use colors from active scalars.
Same as `c()`. | ### Input:
Set/get actor's color.
If None is passed as input, will use colors from active scalars.
Same as `c()`.
### Response:
def color(self, c=False):
if c is False:
return np.array(self.GetProperty().GetColor())
elif c is None:
self.GetMapper().ScalarVisibilityOn()
return self
else:
self.GetMapper().ScalarVisibilityOff()
self.GetProperty().SetColor(colors.getColor(c))
return self |
def get_body(self, msg):
body = ""
charset = ""
if msg.is_multipart():
for part in msg.walk():
ctype = part.get_content_type()
cdispo = str(part.get())
if ctype == and not in cdispo:
body = part.get_payload(decode=True)
charset = part.get_content_charset()
break
else:
body = msg.get_payload(decode=True)
charset = msg.get_content_charset()
return body.decode(charset) | Extracts and returns the decoded body from an EmailMessage object | ### Input:
Extracts and returns the decoded body from an EmailMessage object
### Response:
def get_body(self, msg):
body = ""
charset = ""
if msg.is_multipart():
for part in msg.walk():
ctype = part.get_content_type()
cdispo = str(part.get())
if ctype == and not in cdispo:
body = part.get_payload(decode=True)
charset = part.get_content_charset()
break
else:
body = msg.get_payload(decode=True)
charset = msg.get_content_charset()
return body.decode(charset) |
def colorize(string, color=, bold=False, underline=False, highlight=False):
result =
if bold:
result += ColorMessage._bold
if underline:
result += ColorMessage._underline
if highlight:
result += ColorMessage._highlight
result += ColorMessage._colors.get(color, ColorMessage._colors[])
return result + string + ColorMessage._reset | :param string: message to colorize.
:type string: unicode
:param color: one of :attr:`fatbotslim.irc.colors.ColorMessage._colors`.
:type color: str
:param bold: if the string has to be in bold.
:type bold: bool
:param underline: if the string has to be underlined.
:type underline: bool
:param highlight: if the string foreground and background has to be switched.
:type highlight: bool | ### Input:
:param string: message to colorize.
:type string: unicode
:param color: one of :attr:`fatbotslim.irc.colors.ColorMessage._colors`.
:type color: str
:param bold: if the string has to be in bold.
:type bold: bool
:param underline: if the string has to be underlined.
:type underline: bool
:param highlight: if the string foreground and background has to be switched.
:type highlight: bool
### Response:
def colorize(string, color=, bold=False, underline=False, highlight=False):
result =
if bold:
result += ColorMessage._bold
if underline:
result += ColorMessage._underline
if highlight:
result += ColorMessage._highlight
result += ColorMessage._colors.get(color, ColorMessage._colors[])
return result + string + ColorMessage._reset |
def get_filehandle(self):
if os.path.exists(self.filename):
self.filehandle = SD(self.filename, SDC.READ)
logger.debug("Loading dataset {}".format(self.filename))
else:
raise IOError("Path {} does not exist.".format(self.filename)) | Get HDF4 filehandle. | ### Input:
Get HDF4 filehandle.
### Response:
def get_filehandle(self):
if os.path.exists(self.filename):
self.filehandle = SD(self.filename, SDC.READ)
logger.debug("Loading dataset {}".format(self.filename))
else:
raise IOError("Path {} does not exist.".format(self.filename)) |
def _cache_from_source(path: str) -> str:
cache_path, cache_file = os.path.split(importlib.util.cache_from_source(path))
filename, _ = os.path.splitext(cache_file)
return os.path.join(cache_path, filename + ".lpyc") | Return the path to the cached file for the given path. The original path
does not have to exist. | ### Input:
Return the path to the cached file for the given path. The original path
does not have to exist.
### Response:
def _cache_from_source(path: str) -> str:
cache_path, cache_file = os.path.split(importlib.util.cache_from_source(path))
filename, _ = os.path.splitext(cache_file)
return os.path.join(cache_path, filename + ".lpyc") |
def read_line(self, line):
if self.ignore:
return
for i, char in enumerate(line):
if char not in [, "\\':
continue
if self.single == char:
self.single = None
continue
if self.single is not None:
continue
if not self.python:
continue
if self.triple == char:
if line[i - 2:i + 1] == 3 * char:
self.triple = None
continue
if self.triple is not None:
continue
if line[i - 2:i + 1] == 3 * char:
self.triple = char
continue
self.single = char
if self.python:
self.single = None | Read a new line | ### Input:
Read a new line
### Response:
def read_line(self, line):
if self.ignore:
return
for i, char in enumerate(line):
if char not in [, "\\':
continue
if self.single == char:
self.single = None
continue
if self.single is not None:
continue
if not self.python:
continue
if self.triple == char:
if line[i - 2:i + 1] == 3 * char:
self.triple = None
continue
if self.triple is not None:
continue
if line[i - 2:i + 1] == 3 * char:
self.triple = char
continue
self.single = char
if self.python:
self.single = None |
def random(args):
from random import sample
p = OptionParser(random.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, N = args
N = int(N)
assert N > 0
f = Fasta(fastafile)
fw = must_open("stdout", "w")
for key in sample(f.keys(), N):
rec = f[key]
SeqIO.write([rec], fw, "fasta")
fw.close() | %prog random fasta 100 > random100.fasta
Take number of records randomly from fasta | ### Input:
%prog random fasta 100 > random100.fasta
Take number of records randomly from fasta
### Response:
def random(args):
from random import sample
p = OptionParser(random.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, N = args
N = int(N)
assert N > 0
f = Fasta(fastafile)
fw = must_open("stdout", "w")
for key in sample(f.keys(), N):
rec = f[key]
SeqIO.write([rec], fw, "fasta")
fw.close() |
def verify(self, string_version=None):
if string_version and string_version != str(self):
raise Exception("Supplied string version does not match current version.")
if self.dirty:
raise Exception("Current working directory is dirty.")
if self.release != self.expected_release:
raise Exception("Declared release does not match current release tag.")
if self.commit_count !=0:
raise Exception("Please update the VCS version tag before release.")
if self._expected_commit not in [None, "$Format:%h$"]:
raise Exception("Declared release does not match the VCS version tag") | Check that the version information is consistent with the VCS
before doing a release. If supplied with a string version,
this is also checked against the current version. Should be
called from setup.py with the declared package version before
releasing to PyPI. | ### Input:
Check that the version information is consistent with the VCS
before doing a release. If supplied with a string version,
this is also checked against the current version. Should be
called from setup.py with the declared package version before
releasing to PyPI.
### Response:
def verify(self, string_version=None):
if string_version and string_version != str(self):
raise Exception("Supplied string version does not match current version.")
if self.dirty:
raise Exception("Current working directory is dirty.")
if self.release != self.expected_release:
raise Exception("Declared release does not match current release tag.")
if self.commit_count !=0:
raise Exception("Please update the VCS version tag before release.")
if self._expected_commit not in [None, "$Format:%h$"]:
raise Exception("Declared release does not match the VCS version tag") |
def _fetch_AlignmentMapper(self, tx_ac, alt_ac, alt_aln_method):
return hgvs.alignmentmapper.AlignmentMapper(
self.hdp, tx_ac=tx_ac, alt_ac=alt_ac, alt_aln_method=alt_aln_method) | Get a new AlignmentMapper for the given transcript accession (ac),
possibly caching the result. | ### Input:
Get a new AlignmentMapper for the given transcript accession (ac),
possibly caching the result.
### Response:
def _fetch_AlignmentMapper(self, tx_ac, alt_ac, alt_aln_method):
return hgvs.alignmentmapper.AlignmentMapper(
self.hdp, tx_ac=tx_ac, alt_ac=alt_ac, alt_aln_method=alt_aln_method) |
def create_user(self, user, account_id=None):
if account_id is None:
account_id = self._canvas_account_id
if account_id is None:
raise MissingAccountID()
url = ACCOUNTS_API.format(account_id) + "/users"
data = self._post_resource(url, user.post_data())
return CanvasUser(data=data) | Create and return a new user and pseudonym for an account.
https://canvas.instructure.com/doc/api/users.html#method.users.create | ### Input:
Create and return a new user and pseudonym for an account.
https://canvas.instructure.com/doc/api/users.html#method.users.create
### Response:
def create_user(self, user, account_id=None):
if account_id is None:
account_id = self._canvas_account_id
if account_id is None:
raise MissingAccountID()
url = ACCOUNTS_API.format(account_id) + "/users"
data = self._post_resource(url, user.post_data())
return CanvasUser(data=data) |
def mousePressEvent(self, event):
item = self.itemAt(event.pos())
column = self.columnAt(event.pos().x())
mid_button = event.button() == QtCore.Qt.MidButton
ctrl_click = event.button() == QtCore.Qt.LeftButton and \
event.modifiers() == QtCore.Qt.ControlModifier
if item and column != -1:
self._downItem = weakref.ref(item)
self._downColumn = column
self._downState = item.checkState(column)
elif not item:
self.setCurrentItem(None)
self.clearSelection()
if (mid_button or ctrl_click) and item and column != -1:
self.itemMiddleClicked.emit(item, column)
index = self.indexAt(event.pos())
sel_model = self.selectionModel()
if self.isEditable() and index and sel_model.isSelected(index):
sel_model.setCurrentIndex(index, sel_model.NoUpdate)
self.edit(index, self.SelectedClicked, event)
event.accept()
else:
super(XTreeWidget, self).mousePressEvent(event) | Overloads when a mouse press occurs. If in editable mode, and the
click occurs on a selected index, then the editor will be created
and no selection change will occur.
:param event | <QMousePressEvent> | ### Input:
Overloads when a mouse press occurs. If in editable mode, and the
click occurs on a selected index, then the editor will be created
and no selection change will occur.
:param event | <QMousePressEvent>
### Response:
def mousePressEvent(self, event):
item = self.itemAt(event.pos())
column = self.columnAt(event.pos().x())
mid_button = event.button() == QtCore.Qt.MidButton
ctrl_click = event.button() == QtCore.Qt.LeftButton and \
event.modifiers() == QtCore.Qt.ControlModifier
if item and column != -1:
self._downItem = weakref.ref(item)
self._downColumn = column
self._downState = item.checkState(column)
elif not item:
self.setCurrentItem(None)
self.clearSelection()
if (mid_button or ctrl_click) and item and column != -1:
self.itemMiddleClicked.emit(item, column)
index = self.indexAt(event.pos())
sel_model = self.selectionModel()
if self.isEditable() and index and sel_model.isSelected(index):
sel_model.setCurrentIndex(index, sel_model.NoUpdate)
self.edit(index, self.SelectedClicked, event)
event.accept()
else:
super(XTreeWidget, self).mousePressEvent(event) |
def list_eids(self):
entities = self.list()
return sorted([int(eid) for eid in entities]) | Returns a list of all known eids | ### Input:
Returns a list of all known eids
### Response:
def list_eids(self):
entities = self.list()
return sorted([int(eid) for eid in entities]) |
def generate_private_key(key_size=2048):
return cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
public_exponent=65537,
key_size=key_size,
backend=cryptography.hazmat.backends.default_backend(),
) | Generate a private key | ### Input:
Generate a private key
### Response:
def generate_private_key(key_size=2048):
return cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
public_exponent=65537,
key_size=key_size,
backend=cryptography.hazmat.backends.default_backend(),
) |
def cmd(send, msg, args):
if not msg:
send("Google what?")
return
key = args[][][]
cx = args[][][]
data = get(, params={: key, : cx, : msg}).json()
if not in data:
send("Google didnitemslink']
send("Google says %s" % url) | Googles something.
Syntax: {command} <term> | ### Input:
Googles something.
Syntax: {command} <term>
### Response:
def cmd(send, msg, args):
if not msg:
send("Google what?")
return
key = args[][][]
cx = args[][][]
data = get(, params={: key, : cx, : msg}).json()
if not in data:
send("Google didnitemslink']
send("Google says %s" % url) |
def reciprocal_grid(grid, shift=True, axes=None, halfcomplex=False):
if axes is None:
axes = list(range(grid.ndim))
else:
try:
axes = [int(axes)]
except TypeError:
axes = list(axes)
shift_list = normalized_scalar_param_list(shift, length=len(axes),
param_conv=bool)
stride = grid.stride.copy()
stride[stride == 0] = 1
shape = np.array(grid.shape)
rmin = grid.min_pt.copy()
rmax = grid.max_pt.copy()
rshape = list(shape)
shifted = np.zeros(grid.ndim, dtype=bool)
shifted[axes] = shift_list
rmin[shifted] = -np.pi / stride[shifted]
rmax[shifted] = (-rmin[shifted] -
2 * np.pi / (stride[shifted] * shape[shifted]))
not_shifted = np.zeros(grid.ndim, dtype=bool)
not_shifted[axes] = np.logical_not(shift_list)
rmin[not_shifted] = ((-1.0 + 1.0 / shape[not_shifted]) *
np.pi / stride[not_shifted])
rmax[not_shifted] = -rmin[not_shifted]
if halfcomplex:
rshape[axes[-1]] = shape[axes[-1]] // 2 + 1
last_odd = shape[axes[-1]] % 2 == 1
last_shifted = shift_list[-1]
half_rstride = np.pi / (shape[axes[-1]] * stride[axes[-1]])
if last_odd and last_shifted:
rmax[axes[-1]] = -half_rstride
elif not last_odd and not last_shifted:
rmax[axes[-1]] = half_rstride
else:
rmax[axes[-1]] = 0
return uniform_grid(rmin, rmax, rshape) | Return the reciprocal of the given regular grid.
This function calculates the reciprocal (Fourier/frequency space)
grid for a given regular grid defined by the nodes::
x[k] = x[0] + k * s,
where ``k = (k[0], ..., k[d-1])`` is a ``d``-dimensional index in
the range ``0 <= k < N`` (component-wise). The multi-index
``N`` is the shape of the input grid.
This grid's reciprocal is then given by the nodes::
xi[j] = xi[0] + j * sigma,
with the reciprocal grid stride ``sigma = 2*pi / (s * N)``.
The minimum frequency ``xi[0]`` can in principle be chosen
freely, but usually it is chosen in a such a way that the reciprocal
grid is centered around zero. For this, there are two possibilities:
1. Make the grid point-symmetric around 0.
2. Make the grid "almost" point-symmetric around zero by shifting
it to the left by half a reciprocal stride.
In the first case, the minimum frequency (per axis) is given as::
xi_1[0] = -pi/s + pi/(s*n) = -pi/s + sigma/2.
For the second case, it is::
xi_1[0] = -pi / s.
Note that the zero frequency is contained in case 1 for an odd
number of points, while for an even size, the second option
guarantees that 0 is contained.
If a real-to-complex (half-complex) transform is to be computed,
the reciprocal grid has the shape ``M[i] = floor(N[i]/2) + 1``
in the last transform axis ``i``.
Parameters
----------
grid : uniform `RectGrid`
Original sampling grid,.
shift : bool or sequence of bools, optional
If ``True``, the grid is shifted by half a stride in the negative
direction. With a sequence, this option is applied separately on
each axis.
axes : int or sequence of ints, optional
Dimensions in which to calculate the reciprocal. The sequence
must have the same length as ``shift`` if the latter is given
as a sequence. ``None`` means all axes in ``grid``.
halfcomplex : bool, optional
If ``True``, return the half of the grid with last coordinate
less than zero. This is related to the fact that for real-valued
functions, the other half is the mirrored complex conjugate of
the given half and therefore needs not be stored.
Returns
-------
reciprocal_grid : uniform `RectGrid`
The reciprocal grid. | ### Input:
Return the reciprocal of the given regular grid.
This function calculates the reciprocal (Fourier/frequency space)
grid for a given regular grid defined by the nodes::
x[k] = x[0] + k * s,
where ``k = (k[0], ..., k[d-1])`` is a ``d``-dimensional index in
the range ``0 <= k < N`` (component-wise). The multi-index
``N`` is the shape of the input grid.
This grid's reciprocal is then given by the nodes::
xi[j] = xi[0] + j * sigma,
with the reciprocal grid stride ``sigma = 2*pi / (s * N)``.
The minimum frequency ``xi[0]`` can in principle be chosen
freely, but usually it is chosen in a such a way that the reciprocal
grid is centered around zero. For this, there are two possibilities:
1. Make the grid point-symmetric around 0.
2. Make the grid "almost" point-symmetric around zero by shifting
it to the left by half a reciprocal stride.
In the first case, the minimum frequency (per axis) is given as::
xi_1[0] = -pi/s + pi/(s*n) = -pi/s + sigma/2.
For the second case, it is::
xi_1[0] = -pi / s.
Note that the zero frequency is contained in case 1 for an odd
number of points, while for an even size, the second option
guarantees that 0 is contained.
If a real-to-complex (half-complex) transform is to be computed,
the reciprocal grid has the shape ``M[i] = floor(N[i]/2) + 1``
in the last transform axis ``i``.
Parameters
----------
grid : uniform `RectGrid`
Original sampling grid,.
shift : bool or sequence of bools, optional
If ``True``, the grid is shifted by half a stride in the negative
direction. With a sequence, this option is applied separately on
each axis.
axes : int or sequence of ints, optional
Dimensions in which to calculate the reciprocal. The sequence
must have the same length as ``shift`` if the latter is given
as a sequence. ``None`` means all axes in ``grid``.
halfcomplex : bool, optional
If ``True``, return the half of the grid with last coordinate
less than zero. This is related to the fact that for real-valued
functions, the other half is the mirrored complex conjugate of
the given half and therefore needs not be stored.
Returns
-------
reciprocal_grid : uniform `RectGrid`
The reciprocal grid.
### Response:
def reciprocal_grid(grid, shift=True, axes=None, halfcomplex=False):
if axes is None:
axes = list(range(grid.ndim))
else:
try:
axes = [int(axes)]
except TypeError:
axes = list(axes)
shift_list = normalized_scalar_param_list(shift, length=len(axes),
param_conv=bool)
stride = grid.stride.copy()
stride[stride == 0] = 1
shape = np.array(grid.shape)
rmin = grid.min_pt.copy()
rmax = grid.max_pt.copy()
rshape = list(shape)
shifted = np.zeros(grid.ndim, dtype=bool)
shifted[axes] = shift_list
rmin[shifted] = -np.pi / stride[shifted]
rmax[shifted] = (-rmin[shifted] -
2 * np.pi / (stride[shifted] * shape[shifted]))
not_shifted = np.zeros(grid.ndim, dtype=bool)
not_shifted[axes] = np.logical_not(shift_list)
rmin[not_shifted] = ((-1.0 + 1.0 / shape[not_shifted]) *
np.pi / stride[not_shifted])
rmax[not_shifted] = -rmin[not_shifted]
if halfcomplex:
rshape[axes[-1]] = shape[axes[-1]] // 2 + 1
last_odd = shape[axes[-1]] % 2 == 1
last_shifted = shift_list[-1]
half_rstride = np.pi / (shape[axes[-1]] * stride[axes[-1]])
if last_odd and last_shifted:
rmax[axes[-1]] = -half_rstride
elif not last_odd and not last_shifted:
rmax[axes[-1]] = half_rstride
else:
rmax[axes[-1]] = 0
return uniform_grid(rmin, rmax, rshape) |
def _normalize_json_search_response(self, json):
result = {}
if in json:
result[] = json[u]
if in json:
result[] = json[u]
if in json:
result[] = json[u]
if u in json:
result[] = json[u][u]
result[] = float(json[u][u])
docs = []
for doc in json[u][u]:
resdoc = {}
if u in doc:
resdoc = doc
else:
resdoc[u] = doc[u]
if u in doc:
for k, v in six.iteritems(doc[u]):
resdoc[k] = v
docs.append(resdoc)
result[] = docs
return result | Normalizes a JSON search response so that PB and HTTP have the
same return value | ### Input:
Normalizes a JSON search response so that PB and HTTP have the
same return value
### Response:
def _normalize_json_search_response(self, json):
result = {}
if in json:
result[] = json[u]
if in json:
result[] = json[u]
if in json:
result[] = json[u]
if u in json:
result[] = json[u][u]
result[] = float(json[u][u])
docs = []
for doc in json[u][u]:
resdoc = {}
if u in doc:
resdoc = doc
else:
resdoc[u] = doc[u]
if u in doc:
for k, v in six.iteritems(doc[u]):
resdoc[k] = v
docs.append(resdoc)
result[] = docs
return result |
async def make_default_options_response(self) -> Response:
methods = _request_ctx_stack.top.url_adapter.allowed_methods()
return self.response_class(, headers={: .join(methods)}) | This is the default route function for OPTIONS requests. | ### Input:
This is the default route function for OPTIONS requests.
### Response:
async def make_default_options_response(self) -> Response:
methods = _request_ctx_stack.top.url_adapter.allowed_methods()
return self.response_class(, headers={: .join(methods)}) |
def decrypt(self, txt, key):
assert isinstance(txt, six.text_type), "txt: %s is not text type!" % repr(txt)
assert isinstance(key, six.text_type), "key: %s is not text type!" % repr(key)
pbkdf2_hash, crypted = txt.rsplit("$",1)
try:
crypted = binascii.unhexlify(crypted)
except (binascii.Error, TypeError) as err:
raise SecureJSLoginError("unhexlify error: %s with data: %s" % (err, crypted))
if len(crypted) != len(key):
raise SecureJSLoginError("encrypt error: %s and must have the same length!" % (crypted, key))
key=force_bytes(key)
decrypted = self.xor(crypted, key)
try:
decrypted = force_text(decrypted)
except UnicodeDecodeError:
raise SecureJSLoginError("Can't decode data.")
test = PBKDF2SHA1Hasher1().verify(decrypted, pbkdf2_hash)
if not test:
raise SecureJSLoginError("XOR decrypted data: PBKDF2 hash test failed")
return decrypted | 1. Decrypt a XOR crypted String.
2. Compare the inserted SHA salt-hash checksum. | ### Input:
1. Decrypt a XOR crypted String.
2. Compare the inserted SHA salt-hash checksum.
### Response:
def decrypt(self, txt, key):
assert isinstance(txt, six.text_type), "txt: %s is not text type!" % repr(txt)
assert isinstance(key, six.text_type), "key: %s is not text type!" % repr(key)
pbkdf2_hash, crypted = txt.rsplit("$",1)
try:
crypted = binascii.unhexlify(crypted)
except (binascii.Error, TypeError) as err:
raise SecureJSLoginError("unhexlify error: %s with data: %s" % (err, crypted))
if len(crypted) != len(key):
raise SecureJSLoginError("encrypt error: %s and must have the same length!" % (crypted, key))
key=force_bytes(key)
decrypted = self.xor(crypted, key)
try:
decrypted = force_text(decrypted)
except UnicodeDecodeError:
raise SecureJSLoginError("Can't decode data.")
test = PBKDF2SHA1Hasher1().verify(decrypted, pbkdf2_hash)
if not test:
raise SecureJSLoginError("XOR decrypted data: PBKDF2 hash test failed")
return decrypted |
def _handle_result(self, test, status, exception=None, message=None):
if self.buffer:
stderr = self._stderr_buffer.getvalue()
stdout = self._stdout_buffer.getvalue()
else:
stderr = stdout = None
started_time = self._test_timing.get(self._testcase_to_key(test))
if started_time is None and isinstance(test, ErrorHolder):
started_time = datetime.utcnow()
elif started_time is None:
raise RuntimeError(
)
completion_time = datetime.utcnow()
duration = TestDuration(started_time, completion_time)
result = TestResult.from_test_case(
test,
status,
duration=duration,
exception=exception,
message=message,
stdout=stdout,
stderr=stderr,
)
self.add_result(result)
return result | Create a :class:`~.TestResult` and add it to this
:class:`~ResultCollector`.
Parameters
----------
test : unittest.TestCase
The test that this result will represent.
status : haas.result.TestCompletionStatus
The status of the test.
exception : tuple
``exc_info`` tuple ``(type, value, traceback)``.
message : str
Optional message associated with the result (e.g. skip
reason). | ### Input:
Create a :class:`~.TestResult` and add it to this
:class:`~ResultCollector`.
Parameters
----------
test : unittest.TestCase
The test that this result will represent.
status : haas.result.TestCompletionStatus
The status of the test.
exception : tuple
``exc_info`` tuple ``(type, value, traceback)``.
message : str
Optional message associated with the result (e.g. skip
reason).
### Response:
def _handle_result(self, test, status, exception=None, message=None):
if self.buffer:
stderr = self._stderr_buffer.getvalue()
stdout = self._stdout_buffer.getvalue()
else:
stderr = stdout = None
started_time = self._test_timing.get(self._testcase_to_key(test))
if started_time is None and isinstance(test, ErrorHolder):
started_time = datetime.utcnow()
elif started_time is None:
raise RuntimeError(
)
completion_time = datetime.utcnow()
duration = TestDuration(started_time, completion_time)
result = TestResult.from_test_case(
test,
status,
duration=duration,
exception=exception,
message=message,
stdout=stdout,
stderr=stderr,
)
self.add_result(result)
return result |
def _phase_kuramoto(self, teta, t, argv):
index = argv;
phase = 0;
for k in range(0, self._num_osc):
if (self.has_connection(index, k) == True):
phase += self._negative_weight * math.sin(self._phases[k] - teta);
else:
phase += self._positive_weight * math.sin(self._phases[k] - teta);
return ( phase / self._reduction ); | !
@brief Returns result of phase calculation for oscillator in the network.
@param[in] teta (double): Value of phase of the oscillator with index argv in the network.
@param[in] t (double): Unused, can be ignored.
@param[in] argv (uint): Index of the oscillator in the network.
@return (double) New value of phase for oscillator with index argv. | ### Input:
!
@brief Returns result of phase calculation for oscillator in the network.
@param[in] teta (double): Value of phase of the oscillator with index argv in the network.
@param[in] t (double): Unused, can be ignored.
@param[in] argv (uint): Index of the oscillator in the network.
@return (double) New value of phase for oscillator with index argv.
### Response:
def _phase_kuramoto(self, teta, t, argv):
index = argv;
phase = 0;
for k in range(0, self._num_osc):
if (self.has_connection(index, k) == True):
phase += self._negative_weight * math.sin(self._phases[k] - teta);
else:
phase += self._positive_weight * math.sin(self._phases[k] - teta);
return ( phase / self._reduction ); |
def is_correct(self):
state = True
cls = self.__class__
if not self.notificationways:
for prop in self.special_properties:
if not hasattr(self, prop):
msg = "[contact::%s] %s property is missing" % (self.get_name(), prop)
self.add_error(msg)
state = False
if not hasattr(self, ):
if hasattr(self, ):
self.contact_name = self.alias
for char in cls.illegal_object_name_chars:
if char not in self.contact_name:
continue
msg = "[contact::%s] %s character not allowed in contact_name" \
% (self.get_name(), char)
self.add_error(msg)
state = False
return super(Contact, self).is_correct() and state | Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool | ### Input:
Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
### Response:
def is_correct(self):
state = True
cls = self.__class__
if not self.notificationways:
for prop in self.special_properties:
if not hasattr(self, prop):
msg = "[contact::%s] %s property is missing" % (self.get_name(), prop)
self.add_error(msg)
state = False
if not hasattr(self, ):
if hasattr(self, ):
self.contact_name = self.alias
for char in cls.illegal_object_name_chars:
if char not in self.contact_name:
continue
msg = "[contact::%s] %s character not allowed in contact_name" \
% (self.get_name(), char)
self.add_error(msg)
state = False
return super(Contact, self).is_correct() and state |
def get_instance(self, payload):
return AssignedAddOnExtensionInstance(
self._version,
payload,
account_sid=self._solution[],
resource_sid=self._solution[],
assigned_add_on_sid=self._solution[],
) | Build an instance of AssignedAddOnExtensionInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.assigned_add_on_extension.AssignedAddOnExtensionInstance
:rtype: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.assigned_add_on_extension.AssignedAddOnExtensionInstance | ### Input:
Build an instance of AssignedAddOnExtensionInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.assigned_add_on_extension.AssignedAddOnExtensionInstance
:rtype: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.assigned_add_on_extension.AssignedAddOnExtensionInstance
### Response:
def get_instance(self, payload):
return AssignedAddOnExtensionInstance(
self._version,
payload,
account_sid=self._solution[],
resource_sid=self._solution[],
assigned_add_on_sid=self._solution[],
) |
async def data(
self, message: Union[str, bytes], timeout: DefaultNumType = _default
) -> SMTPResponse:
await self._ehlo_or_helo_if_needed()
self._raise_error_if_disconnected()
if timeout is _default:
timeout = self.timeout
if isinstance(message, str):
message = message.encode("ascii")
async with self._command_lock:
start_response = await self.execute_command(b"DATA", timeout=timeout)
if start_response.code != SMTPStatus.start_input:
raise SMTPDataError(start_response.code, start_response.message)
try:
await self.protocol.write_message_data(
message, timeout=timeout
)
response = await self.protocol.read_response(
timeout=timeout
)
except SMTPServerDisconnected as exc:
self.close()
raise exc
if response.code != SMTPStatus.completed:
raise SMTPDataError(response.code, response.message)
return response | Send an SMTP DATA command, followed by the message given.
This method transfers the actual email content to the server.
:raises SMTPDataError: on unexpected server response code
:raises SMTPServerDisconnected: connection lost | ### Input:
Send an SMTP DATA command, followed by the message given.
This method transfers the actual email content to the server.
:raises SMTPDataError: on unexpected server response code
:raises SMTPServerDisconnected: connection lost
### Response:
async def data(
self, message: Union[str, bytes], timeout: DefaultNumType = _default
) -> SMTPResponse:
await self._ehlo_or_helo_if_needed()
self._raise_error_if_disconnected()
if timeout is _default:
timeout = self.timeout
if isinstance(message, str):
message = message.encode("ascii")
async with self._command_lock:
start_response = await self.execute_command(b"DATA", timeout=timeout)
if start_response.code != SMTPStatus.start_input:
raise SMTPDataError(start_response.code, start_response.message)
try:
await self.protocol.write_message_data(
message, timeout=timeout
)
response = await self.protocol.read_response(
timeout=timeout
)
except SMTPServerDisconnected as exc:
self.close()
raise exc
if response.code != SMTPStatus.completed:
raise SMTPDataError(response.code, response.message)
return response |
def addMultiTraitTerm(self,K=None,covar_type=,is_noise=False,normalize=True,Ks=None,offset=1e-4,rank=1,covar_K0=None):
assert self.P > 1,
assert K!=None or is_noise,
assert offset>=0,
if is_noise:
assert self.noisPos==None,
K = SP.eye(self.N)
self.noisPos = self.n_terms
else:
assert K.shape[0]==self.N,
assert K.shape[1]==self.N,
if Ks!=None:
assert Ks.shape[0]==self.N,
if normalize:
Norm = 1/K.diagonal().mean()
K *= Norm
if Ks!=None: Ks *= Norm
cov = limix.CSumCF()
if covar_type==:
cov.addCovariance(limix.CFreeFormCF(self.P))
L = SP.eye(self.P)
diag = SP.concatenate([L[i,:(i+1)] for i in range(self.P)])
elif covar_type==:
cov.addCovariance(limix.CFixedCF(covar_K0))
diag = SP.zeros(1)
elif covar_type==:
cov.addCovariance(limix.CDiagonalCF(self.P))
diag = SP.ones(self.P)
elif covar_type==:
cov.addCovariance(limix.CLowRankCF(self.P,rank))
diag = SP.zeros(self.P*rank)
elif covar_type==:
cov.addCovariance(limix.CLowRankCF(self.P,rank))
cov.addCovariance(limix.CFixedCF(SP.eye(self.P)))
diag = SP.concatenate([SP.zeros(self.P*rank),SP.ones(1)])
elif covar_type==:
cov.addCovariance(limix.CLowRankCF(self.P,rank))
cov.addCovariance(limix.CDiagonalCF(self.P))
diag = SP.concatenate([SP.zeros(self.P*rank),SP.ones(self.P)])
elif covar_type==:
cov.addCovariance(limix.CFixedCF(SP.ones((self.P,self.P))))
diag = SP.zeros(1)
elif covar_type==:
cov.addCovariance(limix.CFixedCF(SP.ones((self.P,self.P))))
cov.addCovariance(limix.CFixedCF(SP.eye(self.P)))
diag = SP.concatenate([SP.zeros(1),SP.ones(1)])
elif covar_type==:
cov.addCovariance(limix.CFixedCF(SP.ones((self.P,self.P))))
cov.addCovariance(limix.CDiagonalCF(self.P))
diag = SP.concatenate([SP.zeros(1),SP.ones(self.P)])
else:
assert True==False,
if offset>0:
_cov = limix.CFixedCF(SP.eye(self.P))
_cov.setParams(SP.array([SP.sqrt(offset)]))
_cov.setParamMask(SP.zeros(1))
cov.addCovariance(_cov)
self.offset.append(offset)
self.covar_type.append(covar_type)
self.diag.append(diag)
self.vd.addTerm(cov,K)
if Ks!=None: self.setKstar(self.n_terms,Ks)
self.n_terms+=1
self.gp = None
self.init = False
self.fast = False
self.optimum = None
self.cache[] = None
self.cache[] = None
self.cache[] = None
self.cache[]= None | add multi trait random effects term.
The inter-trait covariance is parametrized by covar_type, where parameters are optimized.
Args:
K: Individual-individual (Intra-Trait) Covariance Matrix [N, N]
(K is normalised in the C++ code such that K.trace()=N)
covar_type: type of covaraince to use. Default 'freeform'. possible values are
'freeform': free form optimization,
'fixed': use a fixed matrix specified in covar_K0,
'diag': optimize a diagonal matrix,
'lowrank': optimize a low rank matrix. The rank of the lowrank part is specified in the variable rank,
'lowrank_id': optimize a low rank matrix plus the weight of a constant diagonal matrix. The rank of the lowrank part is specified in the variable rank,
'lowrank_diag': optimize a low rank matrix plus a free diagonal matrix. The rank of the lowrank part is specified in the variable rank,
'block': optimize the weight of a constant P x P block matrix of ones,
'block_id': optimize the weight of a constant P x P block matrix of ones plus the weight of a constant diagonal matrix,
'block_diag': optimize the weight of a constant P x P block matrix of ones plus a free diagonal matrix,
is_noise: Boolean indicator specifying if the matrix is homoscedastic noise (weighted identity covariance) (default False)
normalize: Boolean indicator specifying if K is normalized such that K.trace()=N.
Ks: NxNtest cross covariance for predictions
offset: diagonal contribution added to trait-to-trait covariance matrices for regularization
rank: rank of a possible lowrank component (default 1)
covar_K0: PxP matrix for the (predefined) trait-to-trait covariance matrix if fixed type is used | ### Input:
add multi trait random effects term.
The inter-trait covariance is parametrized by covar_type, where parameters are optimized.
Args:
K: Individual-individual (Intra-Trait) Covariance Matrix [N, N]
(K is normalised in the C++ code such that K.trace()=N)
covar_type: type of covaraince to use. Default 'freeform'. possible values are
'freeform': free form optimization,
'fixed': use a fixed matrix specified in covar_K0,
'diag': optimize a diagonal matrix,
'lowrank': optimize a low rank matrix. The rank of the lowrank part is specified in the variable rank,
'lowrank_id': optimize a low rank matrix plus the weight of a constant diagonal matrix. The rank of the lowrank part is specified in the variable rank,
'lowrank_diag': optimize a low rank matrix plus a free diagonal matrix. The rank of the lowrank part is specified in the variable rank,
'block': optimize the weight of a constant P x P block matrix of ones,
'block_id': optimize the weight of a constant P x P block matrix of ones plus the weight of a constant diagonal matrix,
'block_diag': optimize the weight of a constant P x P block matrix of ones plus a free diagonal matrix,
is_noise: Boolean indicator specifying if the matrix is homoscedastic noise (weighted identity covariance) (default False)
normalize: Boolean indicator specifying if K is normalized such that K.trace()=N.
Ks: NxNtest cross covariance for predictions
offset: diagonal contribution added to trait-to-trait covariance matrices for regularization
rank: rank of a possible lowrank component (default 1)
covar_K0: PxP matrix for the (predefined) trait-to-trait covariance matrix if fixed type is used
### Response:
def addMultiTraitTerm(self,K=None,covar_type=,is_noise=False,normalize=True,Ks=None,offset=1e-4,rank=1,covar_K0=None):
assert self.P > 1,
assert K!=None or is_noise,
assert offset>=0,
if is_noise:
assert self.noisPos==None,
K = SP.eye(self.N)
self.noisPos = self.n_terms
else:
assert K.shape[0]==self.N,
assert K.shape[1]==self.N,
if Ks!=None:
assert Ks.shape[0]==self.N,
if normalize:
Norm = 1/K.diagonal().mean()
K *= Norm
if Ks!=None: Ks *= Norm
cov = limix.CSumCF()
if covar_type==:
cov.addCovariance(limix.CFreeFormCF(self.P))
L = SP.eye(self.P)
diag = SP.concatenate([L[i,:(i+1)] for i in range(self.P)])
elif covar_type==:
cov.addCovariance(limix.CFixedCF(covar_K0))
diag = SP.zeros(1)
elif covar_type==:
cov.addCovariance(limix.CDiagonalCF(self.P))
diag = SP.ones(self.P)
elif covar_type==:
cov.addCovariance(limix.CLowRankCF(self.P,rank))
diag = SP.zeros(self.P*rank)
elif covar_type==:
cov.addCovariance(limix.CLowRankCF(self.P,rank))
cov.addCovariance(limix.CFixedCF(SP.eye(self.P)))
diag = SP.concatenate([SP.zeros(self.P*rank),SP.ones(1)])
elif covar_type==:
cov.addCovariance(limix.CLowRankCF(self.P,rank))
cov.addCovariance(limix.CDiagonalCF(self.P))
diag = SP.concatenate([SP.zeros(self.P*rank),SP.ones(self.P)])
elif covar_type==:
cov.addCovariance(limix.CFixedCF(SP.ones((self.P,self.P))))
diag = SP.zeros(1)
elif covar_type==:
cov.addCovariance(limix.CFixedCF(SP.ones((self.P,self.P))))
cov.addCovariance(limix.CFixedCF(SP.eye(self.P)))
diag = SP.concatenate([SP.zeros(1),SP.ones(1)])
elif covar_type==:
cov.addCovariance(limix.CFixedCF(SP.ones((self.P,self.P))))
cov.addCovariance(limix.CDiagonalCF(self.P))
diag = SP.concatenate([SP.zeros(1),SP.ones(self.P)])
else:
assert True==False,
if offset>0:
_cov = limix.CFixedCF(SP.eye(self.P))
_cov.setParams(SP.array([SP.sqrt(offset)]))
_cov.setParamMask(SP.zeros(1))
cov.addCovariance(_cov)
self.offset.append(offset)
self.covar_type.append(covar_type)
self.diag.append(diag)
self.vd.addTerm(cov,K)
if Ks!=None: self.setKstar(self.n_terms,Ks)
self.n_terms+=1
self.gp = None
self.init = False
self.fast = False
self.optimum = None
self.cache[] = None
self.cache[] = None
self.cache[] = None
self.cache[]= None |
def list(payment):
if isinstance(payment, resources.Payment):
payment = payment.id
http_client = HttpClient()
response, _ = http_client.get(routes.url(routes.REFUND_RESOURCE, payment_id=payment))
return resources.APIResourceCollection(resources.Refund, **response) | List all the refunds for a payment.
:param payment: The payment object or the payment id
:type payment: resources.Payment|string
:return: A collection of refunds
:rtype resources.APIResourceCollection | ### Input:
List all the refunds for a payment.
:param payment: The payment object or the payment id
:type payment: resources.Payment|string
:return: A collection of refunds
:rtype resources.APIResourceCollection
### Response:
def list(payment):
if isinstance(payment, resources.Payment):
payment = payment.id
http_client = HttpClient()
response, _ = http_client.get(routes.url(routes.REFUND_RESOURCE, payment_id=payment))
return resources.APIResourceCollection(resources.Refund, **response) |
def __send_buffer(self):
bytes_written = self.serial.write(self.__out_buffer.raw)
if self.DEBUG_MODE:
print("Wrote: ".format(binascii.hexlify(self.__out_buffer.raw)))
if bytes_written != len(self.__out_buffer):
raise IOError("{} bytes written for output buffer of size {}".format(bytes_written,
len(self.__out_buffer)))
return bytes_written | Sends the contents of self.__out_buffer to serial device
:return: Number of bytes written | ### Input:
Sends the contents of self.__out_buffer to serial device
:return: Number of bytes written
### Response:
def __send_buffer(self):
bytes_written = self.serial.write(self.__out_buffer.raw)
if self.DEBUG_MODE:
print("Wrote: ".format(binascii.hexlify(self.__out_buffer.raw)))
if bytes_written != len(self.__out_buffer):
raise IOError("{} bytes written for output buffer of size {}".format(bytes_written,
len(self.__out_buffer)))
return bytes_written |
def add_logger(self, name, address, conn_type, log_dir_path=None, **kwargs):
s name.
address:
A tuple containing address data for the capturer. Check the
:class:`SocketStreamCapturer` documentation for what is
required.
conn_type:
A string defining the connection type. Check the
:class:`SocketStreamCapturer` documentation for a list of valid
options.
log_dir_path:
An optional path defining the directory where the
capturer should write its files. If this isn
capture_handler_conf = kwargs
if not log_dir_path:
log_dir_path = self._mngr_conf[]
log_dir_path = os.path.normpath(os.path.expanduser(log_dir_path))
capture_handler_conf[] = log_dir_path
capture_handler_conf[] = name
if not in capture_handler_conf:
capture_handler_conf[] = True
transforms = []
if in capture_handler_conf:
for transform in capture_handler_conf[]:
if isinstance(transform, str):
if globals().has_key(transform):
transforms.append(globals().get(transform))
else:
msg = (
).format(
transform,
capture_handler_conf[]
)
log.warn(msg)
elif hasattr(transform, ):
transforms.append(transform)
else:
msg = (
).format(transform)
log.warn(msg)
capture_handler_conf[] = transforms
address_key = str(address)
if address_key in self._stream_capturers:
capturer = self._stream_capturers[address_key][0]
capturer.add_handler(capture_handler_conf)
return
socket_logger = SocketStreamCapturer(capture_handler_conf,
address,
conn_type)
greenlet = gevent.spawn(socket_logger.socket_monitor_loop)
self._stream_capturers[address_key] = (
socket_logger,
greenlet
)
self._pool.add(greenlet) | Add a new stream capturer to the manager.
Add a new stream capturer to the manager with the provided configuration
details. If an existing capturer is monitoring the same address the
new handler will be added to it.
Args:
name:
A string defining the new capturer's name.
address:
A tuple containing address data for the capturer. Check the
:class:`SocketStreamCapturer` documentation for what is
required.
conn_type:
A string defining the connection type. Check the
:class:`SocketStreamCapturer` documentation for a list of valid
options.
log_dir_path:
An optional path defining the directory where the
capturer should write its files. If this isn't provided the root
log directory from the manager configuration is used. | ### Input:
Add a new stream capturer to the manager.
Add a new stream capturer to the manager with the provided configuration
details. If an existing capturer is monitoring the same address the
new handler will be added to it.
Args:
name:
A string defining the new capturer's name.
address:
A tuple containing address data for the capturer. Check the
:class:`SocketStreamCapturer` documentation for what is
required.
conn_type:
A string defining the connection type. Check the
:class:`SocketStreamCapturer` documentation for a list of valid
options.
log_dir_path:
An optional path defining the directory where the
capturer should write its files. If this isn't provided the root
log directory from the manager configuration is used.
### Response:
def add_logger(self, name, address, conn_type, log_dir_path=None, **kwargs):
s name.
address:
A tuple containing address data for the capturer. Check the
:class:`SocketStreamCapturer` documentation for what is
required.
conn_type:
A string defining the connection type. Check the
:class:`SocketStreamCapturer` documentation for a list of valid
options.
log_dir_path:
An optional path defining the directory where the
capturer should write its files. If this isn
capture_handler_conf = kwargs
if not log_dir_path:
log_dir_path = self._mngr_conf[]
log_dir_path = os.path.normpath(os.path.expanduser(log_dir_path))
capture_handler_conf[] = log_dir_path
capture_handler_conf[] = name
if not in capture_handler_conf:
capture_handler_conf[] = True
transforms = []
if in capture_handler_conf:
for transform in capture_handler_conf[]:
if isinstance(transform, str):
if globals().has_key(transform):
transforms.append(globals().get(transform))
else:
msg = (
).format(
transform,
capture_handler_conf[]
)
log.warn(msg)
elif hasattr(transform, ):
transforms.append(transform)
else:
msg = (
).format(transform)
log.warn(msg)
capture_handler_conf[] = transforms
address_key = str(address)
if address_key in self._stream_capturers:
capturer = self._stream_capturers[address_key][0]
capturer.add_handler(capture_handler_conf)
return
socket_logger = SocketStreamCapturer(capture_handler_conf,
address,
conn_type)
greenlet = gevent.spawn(socket_logger.socket_monitor_loop)
self._stream_capturers[address_key] = (
socket_logger,
greenlet
)
self._pool.add(greenlet) |
def get_publisher_name(self, **kwargs):
children = kwargs.get(, [])
for child in children:
if child.tag == :
return child.content
return None | Get the publisher name. | ### Input:
Get the publisher name.
### Response:
def get_publisher_name(self, **kwargs):
children = kwargs.get(, [])
for child in children:
if child.tag == :
return child.content
return None |
def _nested_to_proto(nested_value, nested_proto, process_leafs,
already_processed):
if not isinstance(nested_proto, module_pb2.NestedData):
raise base_errors.ModuleInfoError("Expected module_pb2.NestedData.")
if id(nested_value) in already_processed:
nested_proto.value = ""
return
for type_name, type_info in six.iteritems(_TO_PROTO_SPECIAL_TYPES):
if type_info.check(nested_value):
nested_proto.special_type.name = type_name
type_info.to_proto(
nested_value, nested_proto.special_type.object,
process_leafs, already_processed)
return
if _is_iterable(nested_value):
already_processed.add(id(nested_value))
if isinstance(nested_value, dict):
nested_proto.dict.SetInParent()
for key, child in six.iteritems(nested_value):
str_key = str(key)
child_proto = nested_proto.dict.map[str_key]
_nested_to_proto(child, child_proto, process_leafs, already_processed)
elif isinstance(nested_value, tuple):
if _is_namedtuple(nested_value):
nested_proto.named_tuple.name = type(nested_value).__name__
for str_key in nested_value._fields:
child = getattr(nested_value, str_key)
child_proto = nested_proto.named_tuple.map[str_key]
_nested_to_proto(child, child_proto, process_leafs, already_processed)
else:
nested_proto.tuple.SetInParent()
for child in nested_value:
child_proto = nested_proto.tuple.list.add()
_nested_to_proto(child, child_proto, process_leafs, already_processed)
else:
nested_proto.list.SetInParent()
for child in nested_value:
child_proto = nested_proto.list.list.add()
_nested_to_proto(child, child_proto, process_leafs, already_processed)
else:
nested_proto.value = process_leafs(nested_value) | Serializes `nested_value` into `nested_proto`.
Args:
nested_value: A nested Python value.
nested_proto: A `module_pb2.NestedData` instance to be filled from the value
in `nested_value`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
already_processed: Set of already processed objects (used to avoid
infinite recursion).
Raises:
ModuleInfoError: If `nested_proto` is not an instance of
`module_pb2.NestedData`. | ### Input:
Serializes `nested_value` into `nested_proto`.
Args:
nested_value: A nested Python value.
nested_proto: A `module_pb2.NestedData` instance to be filled from the value
in `nested_value`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
already_processed: Set of already processed objects (used to avoid
infinite recursion).
Raises:
ModuleInfoError: If `nested_proto` is not an instance of
`module_pb2.NestedData`.
### Response:
def _nested_to_proto(nested_value, nested_proto, process_leafs,
already_processed):
if not isinstance(nested_proto, module_pb2.NestedData):
raise base_errors.ModuleInfoError("Expected module_pb2.NestedData.")
if id(nested_value) in already_processed:
nested_proto.value = ""
return
for type_name, type_info in six.iteritems(_TO_PROTO_SPECIAL_TYPES):
if type_info.check(nested_value):
nested_proto.special_type.name = type_name
type_info.to_proto(
nested_value, nested_proto.special_type.object,
process_leafs, already_processed)
return
if _is_iterable(nested_value):
already_processed.add(id(nested_value))
if isinstance(nested_value, dict):
nested_proto.dict.SetInParent()
for key, child in six.iteritems(nested_value):
str_key = str(key)
child_proto = nested_proto.dict.map[str_key]
_nested_to_proto(child, child_proto, process_leafs, already_processed)
elif isinstance(nested_value, tuple):
if _is_namedtuple(nested_value):
nested_proto.named_tuple.name = type(nested_value).__name__
for str_key in nested_value._fields:
child = getattr(nested_value, str_key)
child_proto = nested_proto.named_tuple.map[str_key]
_nested_to_proto(child, child_proto, process_leafs, already_processed)
else:
nested_proto.tuple.SetInParent()
for child in nested_value:
child_proto = nested_proto.tuple.list.add()
_nested_to_proto(child, child_proto, process_leafs, already_processed)
else:
nested_proto.list.SetInParent()
for child in nested_value:
child_proto = nested_proto.list.list.add()
_nested_to_proto(child, child_proto, process_leafs, already_processed)
else:
nested_proto.value = process_leafs(nested_value) |
def main():
args = CLI.parse_args(__doc__)
if args[]:
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
logging.basicConfig(level=logging.DEBUG)
if not args[]:
print("No API key given. Please create an API key on <https://octopart.com/api/dashboard>")
return ReturnValues.NO_APIKEY
if args[] == :
engine = PyPartsOctopart(args[], verbose=args[])
elif args[] == :
engine = PyPartsPartsIO(args[], verbose=args[])
else:
engine = PyPartsBase(args[], verbose=args[])
try:
if in args or in args:
return engine.part_search(args[])
elif in args:
return engine.part_specs(args[])
elif in args:
if args[] == :
if args[]:
return engine.part_datasheet(args[], command=args[], path=args[])
else:
return engine.part_datasheet(args[], command=args[])
elif args[] == :
return engine.part_datasheet(args[], path=args[])
elif in args:
return engine.part_show(args[], printout=args[])
except OctopartException as err:
print(err)
return ReturnValues.RUNTIME_ERROR | entry point of the application.
Parses the CLI commands and runs the actions. | ### Input:
entry point of the application.
Parses the CLI commands and runs the actions.
### Response:
def main():
args = CLI.parse_args(__doc__)
if args[]:
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
logging.basicConfig(level=logging.DEBUG)
if not args[]:
print("No API key given. Please create an API key on <https://octopart.com/api/dashboard>")
return ReturnValues.NO_APIKEY
if args[] == :
engine = PyPartsOctopart(args[], verbose=args[])
elif args[] == :
engine = PyPartsPartsIO(args[], verbose=args[])
else:
engine = PyPartsBase(args[], verbose=args[])
try:
if in args or in args:
return engine.part_search(args[])
elif in args:
return engine.part_specs(args[])
elif in args:
if args[] == :
if args[]:
return engine.part_datasheet(args[], command=args[], path=args[])
else:
return engine.part_datasheet(args[], command=args[])
elif args[] == :
return engine.part_datasheet(args[], path=args[])
elif in args:
return engine.part_show(args[], printout=args[])
except OctopartException as err:
print(err)
return ReturnValues.RUNTIME_ERROR |
def cube(width, height, depth, center=(0.0, 0.0, 0.0), normals=True, uvs=True) -> VAO:
width, height, depth = width / 2.0, height / 2.0, depth / 2.0
pos = numpy.array([
center[0] + width, center[1] - height, center[2] + depth,
center[0] + width, center[1] + height, center[2] + depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] + width, center[1] + height, center[2] + depth,
center[0] - width, center[1] + height, center[2] + depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] + width, center[1] - height, center[2] - depth,
center[0] + width, center[1] + height, center[2] - depth,
center[0] + width, center[1] - height, center[2] + depth,
center[0] + width, center[1] + height, center[2] - depth,
center[0] + width, center[1] + height, center[2] + depth,
center[0] + width, center[1] - height, center[2] + depth,
center[0] + width, center[1] - height, center[2] - depth,
center[0] + width, center[1] - height, center[2] + depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] + width, center[1] - height, center[2] - depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] - width, center[1] - height, center[2] - depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] - width, center[1] + height, center[2] + depth,
center[0] - width, center[1] + height, center[2] - depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] - width, center[1] + height, center[2] - depth,
center[0] - width, center[1] - height, center[2] - depth,
center[0] + width, center[1] + height, center[2] - depth,
center[0] + width, center[1] - height, center[2] - depth,
center[0] - width, center[1] - height, center[2] - depth,
center[0] + width, center[1] + height, center[2] - depth,
center[0] - width, center[1] - height, center[2] - depth,
center[0] - width, center[1] + height, center[2] - depth,
center[0] + width, center[1] + height, center[2] - depth,
center[0] - width, center[1] + height, center[2] - depth,
center[0] + width, center[1] + height, center[2] + depth,
center[0] - width, center[1] + height, center[2] - depth,
center[0] - width, center[1] + height, center[2] + depth,
center[0] + width, center[1] + height, center[2] + depth,
], dtype=numpy.float32)
if normals:
normal_data = numpy.array([
-0, 0, 1,
-0, 0, 1,
-0, 0, 1,
0, 0, 1,
0, 0, 1,
0, 0, 1,
1, 0, 0,
1, 0, 0,
1, 0, 0,
1, 0, 0,
1, 0, 0,
1, 0, 0,
0, -1, 0,
0, -1, 0,
0, -1, 0,
0, -1, 0,
0, -1, 0,
0, -1, 0,
-1, -0, 0,
-1, -0, 0,
-1, -0, 0,
-1, -0, 0,
-1, -0, 0,
-1, -0, 0,
0, 0, -1,
0, 0, -1,
0, 0, -1,
0, 0, -1,
0, 0, -1,
0, 0, -1,
0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
], dtype=numpy.float32)
if uvs:
uvs_data = numpy.array([
1, 0,
1, 1,
0, 0,
1, 1,
0, 1,
0, 0,
1, 0,
1, 1,
0, 0,
1, 1,
0, 1,
0, 0,
1, 1,
0, 1,
0, 0,
1, 1,
0, 0,
1, 0,
0, 1,
0, 0,
1, 0,
0, 1,
1, 0,
1, 1,
1, 0,
1, 1,
0, 1,
1, 0,
0, 1,
0, 0,
1, 1,
0, 1,
1, 0,
0, 1,
0, 0,
1, 0
], dtype=numpy.float32)
vao = VAO("geometry:cube")
vao.buffer(pos, , [])
if normals:
vao.buffer(normal_data, , [])
if uvs:
vao.buffer(uvs_data, , [])
return vao | Creates a cube VAO with normals and texture coordinates
Args:
width (float): Width of the cube
height (float): Height of the cube
depth (float): Depth of the cube
Keyword Args:
center: center of the cube as a 3-component tuple
normals: (bool) Include normals
uvs: (bool) include uv coordinates
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance | ### Input:
Creates a cube VAO with normals and texture coordinates
Args:
width (float): Width of the cube
height (float): Height of the cube
depth (float): Depth of the cube
Keyword Args:
center: center of the cube as a 3-component tuple
normals: (bool) Include normals
uvs: (bool) include uv coordinates
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance
### Response:
def cube(width, height, depth, center=(0.0, 0.0, 0.0), normals=True, uvs=True) -> VAO:
width, height, depth = width / 2.0, height / 2.0, depth / 2.0
pos = numpy.array([
center[0] + width, center[1] - height, center[2] + depth,
center[0] + width, center[1] + height, center[2] + depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] + width, center[1] + height, center[2] + depth,
center[0] - width, center[1] + height, center[2] + depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] + width, center[1] - height, center[2] - depth,
center[0] + width, center[1] + height, center[2] - depth,
center[0] + width, center[1] - height, center[2] + depth,
center[0] + width, center[1] + height, center[2] - depth,
center[0] + width, center[1] + height, center[2] + depth,
center[0] + width, center[1] - height, center[2] + depth,
center[0] + width, center[1] - height, center[2] - depth,
center[0] + width, center[1] - height, center[2] + depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] + width, center[1] - height, center[2] - depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] - width, center[1] - height, center[2] - depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] - width, center[1] + height, center[2] + depth,
center[0] - width, center[1] + height, center[2] - depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] - width, center[1] + height, center[2] - depth,
center[0] - width, center[1] - height, center[2] - depth,
center[0] + width, center[1] + height, center[2] - depth,
center[0] + width, center[1] - height, center[2] - depth,
center[0] - width, center[1] - height, center[2] - depth,
center[0] + width, center[1] + height, center[2] - depth,
center[0] - width, center[1] - height, center[2] - depth,
center[0] - width, center[1] + height, center[2] - depth,
center[0] + width, center[1] + height, center[2] - depth,
center[0] - width, center[1] + height, center[2] - depth,
center[0] + width, center[1] + height, center[2] + depth,
center[0] - width, center[1] + height, center[2] - depth,
center[0] - width, center[1] + height, center[2] + depth,
center[0] + width, center[1] + height, center[2] + depth,
], dtype=numpy.float32)
if normals:
normal_data = numpy.array([
-0, 0, 1,
-0, 0, 1,
-0, 0, 1,
0, 0, 1,
0, 0, 1,
0, 0, 1,
1, 0, 0,
1, 0, 0,
1, 0, 0,
1, 0, 0,
1, 0, 0,
1, 0, 0,
0, -1, 0,
0, -1, 0,
0, -1, 0,
0, -1, 0,
0, -1, 0,
0, -1, 0,
-1, -0, 0,
-1, -0, 0,
-1, -0, 0,
-1, -0, 0,
-1, -0, 0,
-1, -0, 0,
0, 0, -1,
0, 0, -1,
0, 0, -1,
0, 0, -1,
0, 0, -1,
0, 0, -1,
0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
], dtype=numpy.float32)
if uvs:
uvs_data = numpy.array([
1, 0,
1, 1,
0, 0,
1, 1,
0, 1,
0, 0,
1, 0,
1, 1,
0, 0,
1, 1,
0, 1,
0, 0,
1, 1,
0, 1,
0, 0,
1, 1,
0, 0,
1, 0,
0, 1,
0, 0,
1, 0,
0, 1,
1, 0,
1, 1,
1, 0,
1, 1,
0, 1,
1, 0,
0, 1,
0, 0,
1, 1,
0, 1,
1, 0,
0, 1,
0, 0,
1, 0
], dtype=numpy.float32)
vao = VAO("geometry:cube")
vao.buffer(pos, , [])
if normals:
vao.buffer(normal_data, , [])
if uvs:
vao.buffer(uvs_data, , [])
return vao |
def create_new_update_job_from_an_existing_job(user, job_id):
values = {
: utils.gen_uuid(),
: datetime.datetime.utcnow().isoformat(),
: datetime.datetime.utcnow().isoformat(),
: utils.gen_etag(),
:
}
original_job_id = job_id
original_job = v1_utils.verify_existence_and_get(original_job_id,
models.JOBS)
if not user.is_in_team(original_job[]):
raise dci_exc.Unauthorized()
remoteci_id = str(original_job[])
remoteci = v1_utils.verify_existence_and_get(remoteci_id,
models.REMOTECIS)
values.update({: remoteci_id})
topic_id = str(original_job[])
v1_utils.verify_existence_and_get(topic_id, models.TOPICS)
values.update({
: flask.request.environ.get(),
: flask.request.environ.get(
),
})
values = _build_job(topic_id, remoteci, [], values,
update_previous_job_id=original_job_id)
return flask.Response(json.dumps({: values}), 201,
headers={: values[]},
content_type=) | Create a new job in the same topic as the job_id provided and
associate the latest components of this topic. | ### Input:
Create a new job in the same topic as the job_id provided and
associate the latest components of this topic.
### Response:
def create_new_update_job_from_an_existing_job(user, job_id):
values = {
: utils.gen_uuid(),
: datetime.datetime.utcnow().isoformat(),
: datetime.datetime.utcnow().isoformat(),
: utils.gen_etag(),
:
}
original_job_id = job_id
original_job = v1_utils.verify_existence_and_get(original_job_id,
models.JOBS)
if not user.is_in_team(original_job[]):
raise dci_exc.Unauthorized()
remoteci_id = str(original_job[])
remoteci = v1_utils.verify_existence_and_get(remoteci_id,
models.REMOTECIS)
values.update({: remoteci_id})
topic_id = str(original_job[])
v1_utils.verify_existence_and_get(topic_id, models.TOPICS)
values.update({
: flask.request.environ.get(),
: flask.request.environ.get(
),
})
values = _build_job(topic_id, remoteci, [], values,
update_previous_job_id=original_job_id)
return flask.Response(json.dumps({: values}), 201,
headers={: values[]},
content_type=) |
def initialize(album_cache, image_cache, albums, images):
if not hasattr(album_cache, ):
album_cache = dict()
if not hasattr(image_cache, ):
image_cache = dict()
for imgur_id in albums:
album_cache.setdefault(imgur_id, Album(imgur_id))
for imgur_id in images:
image_cache.setdefault(imgur_id, Image(imgur_id))
return album_cache, image_cache | Instantiate Album or Image instances not already in cache.
:param dict album_cache: Cache of Imgur albums to update. Keys are Imgur IDs, values are Album instances.
:param dict image_cache: Cache of Imgur images to update. Keys are Imgur IDs, values are Image instances.
:param iter albums: List of album Imgur IDs.
:param iter images: List of image Imgur IDs.
:return: Same album and image cache dictionaries from parameters.
:rtype: tuple | ### Input:
Instantiate Album or Image instances not already in cache.
:param dict album_cache: Cache of Imgur albums to update. Keys are Imgur IDs, values are Album instances.
:param dict image_cache: Cache of Imgur images to update. Keys are Imgur IDs, values are Image instances.
:param iter albums: List of album Imgur IDs.
:param iter images: List of image Imgur IDs.
:return: Same album and image cache dictionaries from parameters.
:rtype: tuple
### Response:
def initialize(album_cache, image_cache, albums, images):
if not hasattr(album_cache, ):
album_cache = dict()
if not hasattr(image_cache, ):
image_cache = dict()
for imgur_id in albums:
album_cache.setdefault(imgur_id, Album(imgur_id))
for imgur_id in images:
image_cache.setdefault(imgur_id, Image(imgur_id))
return album_cache, image_cache |
def get_halfs_double(self, vertex_a1, vertex_b1, vertex_a2, vertex_b2):
if vertex_a1 not in self.neighbors[vertex_b1]:
raise GraphError("vertex_a1 must be a neighbor of vertex_b1.")
if vertex_a2 not in self.neighbors[vertex_b2]:
raise GraphError("vertex_a2 must be a neighbor of vertex_b2.")
vertex_a_new = set(self.neighbors[vertex_a1])
vertex_a_new.discard(vertex_b1)
if vertex_a1 == vertex_b2:
vertex_a2, vertex_b2 = vertex_b2, vertex_a2
if vertex_a1 == vertex_a2:
vertex_a_new.discard(vertex_b2)
vertex_a_part = set([vertex_a1])
touched = False
while len(vertex_a_new) > 0:
pivot = vertex_a_new.pop()
if pivot == vertex_b1:
raise GraphError("The graph can not be separated in two halfs. "
"vertex_b1 reached by vertex_a1.")
vertex_a_part.add(pivot)
pivot_neighbors = set(self.neighbors[pivot])
pivot_neighbors -= vertex_a_part
if pivot == vertex_a2 or pivot == vertex_b2:
if pivot == vertex_b2:
if touched:
raise GraphError("The graph can not be separated in "
"two halfs. vertex_b2 reached by "
"vertex_a1.")
else:
vertex_a2, vertex_b2 = vertex_b2, vertex_a2
pivot_neighbors.discard(vertex_b2)
touched = True
vertex_a_new |= pivot_neighbors
if vertex_a2 not in vertex_a_part:
raise GraphError("The graph can not be separated in two halfs. "
"vertex_a1 can not reach vertex_a2 trough "
"vertex_a_part")
if vertex_b1 == vertex_b2:
closed = True
else:
vertex_b_new = set(self.neighbors[vertex_b1])
vertex_b_new.discard(vertex_a1)
vertex_b_part = set([vertex_b1])
closed = False
while len(vertex_b_new) > 0:
pivot = vertex_b_new.pop()
if pivot == vertex_b2:
closed = True
break
pivot_neighbors = set(self.neighbors[pivot])
pivot_neighbors -= vertex_b_part
vertex_b_new |= pivot_neighbors
vertex_b_part.add(pivot)
if not closed:
raise GraphError("The graph can not be separated in two halfs. "
"vertex_b1 can not reach vertex_b2 trough "
"vertex_b_part")
vertex_b_part = set(range(self.num_vertices)) - vertex_a_part
return vertex_a_part, vertex_b_part, \
(vertex_a1, vertex_b1, vertex_a2, vertex_b2) | Compute the two parts separated by ``(vertex_a1, vertex_b1)`` and ``(vertex_a2, vertex_b2)``
Raise a GraphError when ``(vertex_a1, vertex_b1)`` and
``(vertex_a2, vertex_b2)`` do not separate the graph in two
disconnected parts. The edges must be neighbors. If not a GraphError
is raised. The for vertices must not coincide or a GraphError is
raised.
Returns the vertices of the two halfs and the four 'hinge' vertices
in the correct order, i.e. both ``vertex_a1`` and ``vertex_a2`` are
in the first half and both ``vertex_b1`` and ``vertex_b2`` are in the
second half. | ### Input:
Compute the two parts separated by ``(vertex_a1, vertex_b1)`` and ``(vertex_a2, vertex_b2)``
Raise a GraphError when ``(vertex_a1, vertex_b1)`` and
``(vertex_a2, vertex_b2)`` do not separate the graph in two
disconnected parts. The edges must be neighbors. If not a GraphError
is raised. The for vertices must not coincide or a GraphError is
raised.
Returns the vertices of the two halfs and the four 'hinge' vertices
in the correct order, i.e. both ``vertex_a1`` and ``vertex_a2`` are
in the first half and both ``vertex_b1`` and ``vertex_b2`` are in the
second half.
### Response:
def get_halfs_double(self, vertex_a1, vertex_b1, vertex_a2, vertex_b2):
if vertex_a1 not in self.neighbors[vertex_b1]:
raise GraphError("vertex_a1 must be a neighbor of vertex_b1.")
if vertex_a2 not in self.neighbors[vertex_b2]:
raise GraphError("vertex_a2 must be a neighbor of vertex_b2.")
vertex_a_new = set(self.neighbors[vertex_a1])
vertex_a_new.discard(vertex_b1)
if vertex_a1 == vertex_b2:
vertex_a2, vertex_b2 = vertex_b2, vertex_a2
if vertex_a1 == vertex_a2:
vertex_a_new.discard(vertex_b2)
vertex_a_part = set([vertex_a1])
touched = False
while len(vertex_a_new) > 0:
pivot = vertex_a_new.pop()
if pivot == vertex_b1:
raise GraphError("The graph can not be separated in two halfs. "
"vertex_b1 reached by vertex_a1.")
vertex_a_part.add(pivot)
pivot_neighbors = set(self.neighbors[pivot])
pivot_neighbors -= vertex_a_part
if pivot == vertex_a2 or pivot == vertex_b2:
if pivot == vertex_b2:
if touched:
raise GraphError("The graph can not be separated in "
"two halfs. vertex_b2 reached by "
"vertex_a1.")
else:
vertex_a2, vertex_b2 = vertex_b2, vertex_a2
pivot_neighbors.discard(vertex_b2)
touched = True
vertex_a_new |= pivot_neighbors
if vertex_a2 not in vertex_a_part:
raise GraphError("The graph can not be separated in two halfs. "
"vertex_a1 can not reach vertex_a2 trough "
"vertex_a_part")
if vertex_b1 == vertex_b2:
closed = True
else:
vertex_b_new = set(self.neighbors[vertex_b1])
vertex_b_new.discard(vertex_a1)
vertex_b_part = set([vertex_b1])
closed = False
while len(vertex_b_new) > 0:
pivot = vertex_b_new.pop()
if pivot == vertex_b2:
closed = True
break
pivot_neighbors = set(self.neighbors[pivot])
pivot_neighbors -= vertex_b_part
vertex_b_new |= pivot_neighbors
vertex_b_part.add(pivot)
if not closed:
raise GraphError("The graph can not be separated in two halfs. "
"vertex_b1 can not reach vertex_b2 trough "
"vertex_b_part")
vertex_b_part = set(range(self.num_vertices)) - vertex_a_part
return vertex_a_part, vertex_b_part, \
(vertex_a1, vertex_b1, vertex_a2, vertex_b2) |
def get_the_node_dict(G, name):
for node in G.nodes(data=True):
if node[0] == name:
return node[1] | Helper function that returns the node data
of the node with the name supplied | ### Input:
Helper function that returns the node data
of the node with the name supplied
### Response:
def get_the_node_dict(G, name):
for node in G.nodes(data=True):
if node[0] == name:
return node[1] |
def _render_response(self, value, system):
view = system[]
enc_class = getattr(view, , None)
if enc_class is None:
enc_class = get_json_encoder()
return json.dumps(value, cls=enc_class) | Render a response | ### Input:
Render a response
### Response:
def _render_response(self, value, system):
view = system[]
enc_class = getattr(view, , None)
if enc_class is None:
enc_class = get_json_encoder()
return json.dumps(value, cls=enc_class) |
def get_field_value(self, field_key):
def get_value(document, field_key):
if document is None:
return None
current_key, new_key_array = trim_field_key(document, field_key)
key_array_digit = int(new_key_array[-1]) if new_key_array and has_digit(new_key_array) else None
new_key = make_key(new_key_array)
if key_array_digit is not None and len(new_key_array) > 0:
if len(new_key_array) == 1:
return_data = document._data.get(current_key, [])
elif isinstance(document, BaseList):
return_list = []
if len(document) > 0:
return_list = [get_value(doc, new_key) for doc in document]
return_data = return_list
else:
return_data = get_value(getattr(document, current_key), new_key)
elif len(new_key_array) > 0:
return_data = get_value(document._data.get(current_key), new_key)
else:
try:
return_data = (document._data.get(None, None) if current_key == "id" else
document._data.get(current_key, None))
except:
return_data = document._data.get(current_key, None)
return return_data
if self.is_initialized:
return get_value(self.model_instance, field_key)
else:
return None | Given field_key will return value held at self.model_instance. If
model_instance has not been provided will return None. | ### Input:
Given field_key will return value held at self.model_instance. If
model_instance has not been provided will return None.
### Response:
def get_field_value(self, field_key):
def get_value(document, field_key):
if document is None:
return None
current_key, new_key_array = trim_field_key(document, field_key)
key_array_digit = int(new_key_array[-1]) if new_key_array and has_digit(new_key_array) else None
new_key = make_key(new_key_array)
if key_array_digit is not None and len(new_key_array) > 0:
if len(new_key_array) == 1:
return_data = document._data.get(current_key, [])
elif isinstance(document, BaseList):
return_list = []
if len(document) > 0:
return_list = [get_value(doc, new_key) for doc in document]
return_data = return_list
else:
return_data = get_value(getattr(document, current_key), new_key)
elif len(new_key_array) > 0:
return_data = get_value(document._data.get(current_key), new_key)
else:
try:
return_data = (document._data.get(None, None) if current_key == "id" else
document._data.get(current_key, None))
except:
return_data = document._data.get(current_key, None)
return return_data
if self.is_initialized:
return get_value(self.model_instance, field_key)
else:
return None |
def compile_sass(self, sass_filename, sass_fileurl):
compile_kwargs = {
: sass_filename,
: SassProcessor.include_paths + APPS_INCLUDE_DIRS,
: get_custom_functions(),
}
if self.sass_precision:
compile_kwargs[] = self.sass_precision
if self.sass_output_style:
compile_kwargs[] = self.sass_output_style
content = sass.compile(**compile_kwargs)
self.save_to_destination(content, sass_filename, sass_fileurl)
self.processed_files.append(sass_filename)
if self.verbosity > 1:
self.stdout.write("Compiled SASS/SCSS file: \n".format(sass_filename)) | Compile the given SASS file into CSS | ### Input:
Compile the given SASS file into CSS
### Response:
def compile_sass(self, sass_filename, sass_fileurl):
compile_kwargs = {
: sass_filename,
: SassProcessor.include_paths + APPS_INCLUDE_DIRS,
: get_custom_functions(),
}
if self.sass_precision:
compile_kwargs[] = self.sass_precision
if self.sass_output_style:
compile_kwargs[] = self.sass_output_style
content = sass.compile(**compile_kwargs)
self.save_to_destination(content, sass_filename, sass_fileurl)
self.processed_files.append(sass_filename)
if self.verbosity > 1:
self.stdout.write("Compiled SASS/SCSS file: \n".format(sass_filename)) |
def classify_file(f):
cols=f[1].columns
if len(cols) == 2:
return (True,False)
elif len(cols) == 3 and ( in cols.names):
return (True,True)
elif len(cols) > 2 and ( not in cols.names):
return (True,False)
else:
return (False,True) | Examine the column names to determine which type of file
this is. Return a tuple:
retvalue[0] = "file is non-parameterized"
retvalue[1] = "file contains error column" | ### Input:
Examine the column names to determine which type of file
this is. Return a tuple:
retvalue[0] = "file is non-parameterized"
retvalue[1] = "file contains error column"
### Response:
def classify_file(f):
cols=f[1].columns
if len(cols) == 2:
return (True,False)
elif len(cols) == 3 and ( in cols.names):
return (True,True)
elif len(cols) > 2 and ( not in cols.names):
return (True,False)
else:
return (False,True) |
def locked_context(self, key=None, value_type=list):
plugin_context_name = str(type(self))
with self.manticore.locked_context(plugin_context_name, dict) as context:
assert value_type in (list, dict, set)
ctx = context.get(key, value_type())
yield ctx
context[key] = ctx | A context manager that provides safe parallel access to the global Manticore context.
This should be used to access the global Manticore context
when parallel analysis is activated. Code within the `with` block is executed
atomically, so access of shared variables should occur within. | ### Input:
A context manager that provides safe parallel access to the global Manticore context.
This should be used to access the global Manticore context
when parallel analysis is activated. Code within the `with` block is executed
atomically, so access of shared variables should occur within.
### Response:
def locked_context(self, key=None, value_type=list):
plugin_context_name = str(type(self))
with self.manticore.locked_context(plugin_context_name, dict) as context:
assert value_type in (list, dict, set)
ctx = context.get(key, value_type())
yield ctx
context[key] = ctx |
def _prepare_if_args(stmt):
args = stmt[]
if args and args[0].startswith() and args[-1].endswith():
args[0] = args[0][1:].lstrip()
args[-1] = args[-1][:-1].rstrip()
start = int(not args[0])
end = len(args) - int(not args[-1])
args[:] = args[start:end] | Removes parentheses from an "if" directive's arguments | ### Input:
Removes parentheses from an "if" directive's arguments
### Response:
def _prepare_if_args(stmt):
args = stmt[]
if args and args[0].startswith() and args[-1].endswith():
args[0] = args[0][1:].lstrip()
args[-1] = args[-1][:-1].rstrip()
start = int(not args[0])
end = len(args) - int(not args[-1])
args[:] = args[start:end] |
def parse_delta(filename):
aln_length, sim_errors = 0, 0
for line in [l.strip().split() for l in open(filename, "r").readlines()]:
if line[0] == "NUCMER" or line[0].startswith(">"):
continue
if len(line) == 7:
aln_length += abs(int(line[1]) - int(line[0]))
sim_errors += int(line[4])
return aln_length, sim_errors | Returns (alignment length, similarity errors) tuple from passed .delta.
- filename - path to the input .delta file
Extracts the aligned length and number of similarity errors for each
aligned uniquely-matched region, and returns the cumulative total for
each as a tuple. | ### Input:
Returns (alignment length, similarity errors) tuple from passed .delta.
- filename - path to the input .delta file
Extracts the aligned length and number of similarity errors for each
aligned uniquely-matched region, and returns the cumulative total for
each as a tuple.
### Response:
def parse_delta(filename):
aln_length, sim_errors = 0, 0
for line in [l.strip().split() for l in open(filename, "r").readlines()]:
if line[0] == "NUCMER" or line[0].startswith(">"):
continue
if len(line) == 7:
aln_length += abs(int(line[1]) - int(line[0]))
sim_errors += int(line[4])
return aln_length, sim_errors |
def get_hexdigest(algorithm, salt, raw_password):
if isinstance(salt, unicode):
salt = salt.encode()
if algorithm == :
try:
import crypt
except ImportError:
raise ValueError()
return crypt.crypt(raw_password, salt)
try:
import hashlib
except ImportError:
if algorithm == :
import md5
return md5.new(salt + raw_password).hexdigest()
elif algorithm == :
import sha
return sha.new(salt + raw_password).hexdigest()
else:
if algorithm == :
return hashlib.md5(salt + raw_password).hexdigest()
elif algorithm == :
return hashlib.sha1(salt + raw_password).hexdigest()
raise ValueError("Got unknown password algorithm type in password.") | Returns a string of the hexdigest of the given plaintext password and salt
using the given algorithm ('md5', 'sha1' or 'crypt'). | ### Input:
Returns a string of the hexdigest of the given plaintext password and salt
using the given algorithm ('md5', 'sha1' or 'crypt').
### Response:
def get_hexdigest(algorithm, salt, raw_password):
if isinstance(salt, unicode):
salt = salt.encode()
if algorithm == :
try:
import crypt
except ImportError:
raise ValueError()
return crypt.crypt(raw_password, salt)
try:
import hashlib
except ImportError:
if algorithm == :
import md5
return md5.new(salt + raw_password).hexdigest()
elif algorithm == :
import sha
return sha.new(salt + raw_password).hexdigest()
else:
if algorithm == :
return hashlib.md5(salt + raw_password).hexdigest()
elif algorithm == :
return hashlib.sha1(salt + raw_password).hexdigest()
raise ValueError("Got unknown password algorithm type in password.") |
def remove_signal_receiver(self, signal):
if (signal in self._signal_names):
s = self._signals.get(signal)
if (s):
self._bus.remove_signal_receiver(s.signal_handler,
signal,
dbus_interface=self._dbus_addr)
self._signals.pop(signal)
else:
raise ConnSignalNameNotRecognisedException | Remove an installed signal receiver by signal name.
See also :py:meth:`add_signal_receiver`
:py:exc:`exceptions.ConnSignalNameNotRecognisedException`
:param str signal:
Signal name to uninstall e.g., :py:attr:`SIGNAL_PROPERTY_CHANGED`
:return:
:raises ConnSignalNameNotRecognisedException:
if the signal name is not registered | ### Input:
Remove an installed signal receiver by signal name.
See also :py:meth:`add_signal_receiver`
:py:exc:`exceptions.ConnSignalNameNotRecognisedException`
:param str signal:
Signal name to uninstall e.g., :py:attr:`SIGNAL_PROPERTY_CHANGED`
:return:
:raises ConnSignalNameNotRecognisedException:
if the signal name is not registered
### Response:
def remove_signal_receiver(self, signal):
if (signal in self._signal_names):
s = self._signals.get(signal)
if (s):
self._bus.remove_signal_receiver(s.signal_handler,
signal,
dbus_interface=self._dbus_addr)
self._signals.pop(signal)
else:
raise ConnSignalNameNotRecognisedException |
def get_interpretations3(self):
if "specimen" not in self.spec_data.columns or \
"meas_step_min" not in self.spec_data.columns or \
"meas_step_max" not in self.spec_data.columns or \
"meas_step_unit" not in self.spec_data.columns or \
"method_codes" not in self.spec_data.columns:
return
if "dir_comp" in self.spec_data.columns:
fnames =
elif "dir_comp_name" in self.spec_data.columns:
fnames =
else:
fnames =
self.spec_data[] =
if "result_quality" not in self.spec_data.columns:
self.spec_data["result_quality"] = "g"
if "dir_tilt_correction" not in self.spec_data.columns:
self.spec_data["dir_tilt_correction"] = ""
fdict = self.spec_data[[, fnames, , , ,
, , ]].to_dict("records")
for i in range(len(fdict)):
spec = fdict[i][]
if spec not in self.specimens:
print(("-E- specimen %s does not exist in measurement data" % (spec)))
continue
fname = fdict[i][fnames]
if fname == None or (spec in list(self.pmag_results_data[].keys()) and fname in [x.name for x in self.pmag_results_data[][spec]]):
continue
if fdict[i][] == "K":
fmin = int(float(fdict[i][])-273)
fmax = int(float(fdict[i][])-273)
if fmin == 0:
fmin = str(fmin)
else:
fmin = str(fmin)+"C"
if fmax == 0:
fmax = str(fmax)
else:
fmax = str(fmax)+"C"
elif fdict[i][] == "T":
fmin = float(fdict[i][])*1000
fmax = float(fdict[i][])*1000
if fmin == 0:
fmin = str(int(fmin))
else:
fmin = str(fmin)+"mT"
if fmax == 0:
fmax = str(int(fmax))
else:
fmax = str(fmax)+"mT"
else:
fmin = fdict[i][]
fmax = fdict[i][]
PCA_types = ["DE-BFL", "DE-BFL-A", "DE-BFL-O", "DE-FM", "DE-BFP"]
PCA_type_list = [x for x in str(fdict[i][]).split(
) if x.strip() in PCA_types]
if len(PCA_type_list) > 0:
PCA_type = PCA_type_list[0].strip()
else:
PCA_type = "DE-BFL"
fit = self.add_fit(spec, fname, fmin, fmax, PCA_type)
if fdict[i][] == :
self.bad_fits.append(fit) | Used instead of update_pmag_tables in data model 3.0 to fetch
interpretations from contribution objects | ### Input:
Used instead of update_pmag_tables in data model 3.0 to fetch
interpretations from contribution objects
### Response:
def get_interpretations3(self):
if "specimen" not in self.spec_data.columns or \
"meas_step_min" not in self.spec_data.columns or \
"meas_step_max" not in self.spec_data.columns or \
"meas_step_unit" not in self.spec_data.columns or \
"method_codes" not in self.spec_data.columns:
return
if "dir_comp" in self.spec_data.columns:
fnames =
elif "dir_comp_name" in self.spec_data.columns:
fnames =
else:
fnames =
self.spec_data[] =
if "result_quality" not in self.spec_data.columns:
self.spec_data["result_quality"] = "g"
if "dir_tilt_correction" not in self.spec_data.columns:
self.spec_data["dir_tilt_correction"] = ""
fdict = self.spec_data[[, fnames, , , ,
, , ]].to_dict("records")
for i in range(len(fdict)):
spec = fdict[i][]
if spec not in self.specimens:
print(("-E- specimen %s does not exist in measurement data" % (spec)))
continue
fname = fdict[i][fnames]
if fname == None or (spec in list(self.pmag_results_data[].keys()) and fname in [x.name for x in self.pmag_results_data[][spec]]):
continue
if fdict[i][] == "K":
fmin = int(float(fdict[i][])-273)
fmax = int(float(fdict[i][])-273)
if fmin == 0:
fmin = str(fmin)
else:
fmin = str(fmin)+"C"
if fmax == 0:
fmax = str(fmax)
else:
fmax = str(fmax)+"C"
elif fdict[i][] == "T":
fmin = float(fdict[i][])*1000
fmax = float(fdict[i][])*1000
if fmin == 0:
fmin = str(int(fmin))
else:
fmin = str(fmin)+"mT"
if fmax == 0:
fmax = str(int(fmax))
else:
fmax = str(fmax)+"mT"
else:
fmin = fdict[i][]
fmax = fdict[i][]
PCA_types = ["DE-BFL", "DE-BFL-A", "DE-BFL-O", "DE-FM", "DE-BFP"]
PCA_type_list = [x for x in str(fdict[i][]).split(
) if x.strip() in PCA_types]
if len(PCA_type_list) > 0:
PCA_type = PCA_type_list[0].strip()
else:
PCA_type = "DE-BFL"
fit = self.add_fit(spec, fname, fmin, fmax, PCA_type)
if fdict[i][] == :
self.bad_fits.append(fit) |
def get_search_schema(self, schema):
if not self.yz_wm_schema:
raise NotImplementedError("Search 2.0 administration is not "
"supported for this version")
url = self.search_schema_path(schema)
status, _, body = self._request(, url)
if status == 200:
result = {}
result[] = schema
result[] = bytes_to_str(body)
return result
else:
raise RiakError() | Fetch a Solr schema from Yokozuna.
:param schema: name of Solr schema
:type schema: string
:rtype dict | ### Input:
Fetch a Solr schema from Yokozuna.
:param schema: name of Solr schema
:type schema: string
:rtype dict
### Response:
def get_search_schema(self, schema):
if not self.yz_wm_schema:
raise NotImplementedError("Search 2.0 administration is not "
"supported for this version")
url = self.search_schema_path(schema)
status, _, body = self._request(, url)
if status == 200:
result = {}
result[] = schema
result[] = bytes_to_str(body)
return result
else:
raise RiakError() |
def parse_params(self,
nb_candidate=10,
overshoot=0.02,
max_iter=50,
clip_min=0.,
clip_max=1.,
**kwargs):
self.nb_candidate = nb_candidate
self.overshoot = overshoot
self.max_iter = max_iter
self.clip_min = clip_min
self.clip_max = clip_max
if len(kwargs.keys()) > 0:
warnings.warn("kwargs is unused and will be removed on or after "
"2019-04-26.")
return True | :param nb_candidate: The number of classes to test against, i.e.,
deepfool only consider nb_candidate classes when
attacking(thus accelerate speed). The nb_candidate
classes are chosen according to the prediction
confidence during implementation.
:param overshoot: A termination criterion to prevent vanishing updates
:param max_iter: Maximum number of iteration for deepfool
:param clip_min: Minimum component value for clipping
:param clip_max: Maximum component value for clipping | ### Input:
:param nb_candidate: The number of classes to test against, i.e.,
deepfool only consider nb_candidate classes when
attacking(thus accelerate speed). The nb_candidate
classes are chosen according to the prediction
confidence during implementation.
:param overshoot: A termination criterion to prevent vanishing updates
:param max_iter: Maximum number of iteration for deepfool
:param clip_min: Minimum component value for clipping
:param clip_max: Maximum component value for clipping
### Response:
def parse_params(self,
nb_candidate=10,
overshoot=0.02,
max_iter=50,
clip_min=0.,
clip_max=1.,
**kwargs):
self.nb_candidate = nb_candidate
self.overshoot = overshoot
self.max_iter = max_iter
self.clip_min = clip_min
self.clip_max = clip_max
if len(kwargs.keys()) > 0:
warnings.warn("kwargs is unused and will be removed on or after "
"2019-04-26.")
return True |
def update_sentry_logging(logging_dict: DictStrAny,
sentry_dsn: Optional[str],
*loggers: str,
level: Union[str, int] = None,
**kwargs: Any) -> None:
r
if not sentry_dsn:
return
kwargs[] =
kwargs[] = sentry_dsn
logging_dict[][] = dict(
level=level or ,
**kwargs)
loggers = tuple(logging_dict[]) if not loggers else loggers
for logger in loggers:
logger_dict = logging_dict[].get(logger)
if not logger_dict:
continue
if logger_dict.pop(, False):
continue
handlers = list(logger_dict.setdefault(, []))
handlers.append()
logger_dict[] = tuple(handlers) | r"""Enable Sentry logging if Sentry DSN passed.
.. note::
Sentry logging requires `raven <http://pypi.python.org/pypi/raven>`_
library to be installed.
**Usage**::
from logging.config import dictConfig
LOGGING = default_logging_dict()
SENTRY_DSN = '...'
update_sentry_logging(LOGGING, SENTRY_DSN)
dictConfig(LOGGING)
**Using AioHttpTransport for SentryHandler**
This will allow to use ``aiohttp.client`` for pushing data to Sentry in
your ``aiohttp.web`` app, which means elimination of sync calls to Sentry.
::
from raven_aiohttp import AioHttpTransport
update_sentry_logging(LOGGING, SENTRY_DSN, transport=AioHttpTransport)
:param logging_dict: Logging dict.
:param sentry_dsn:
Sentry DSN value. If ``None`` do not update logging dict at all.
:param \*loggers:
Use Sentry logging for each logger in the sequence. If the sequence is
empty use Sentry logging to each available logger.
:param \*\*kwargs: Additional kwargs to be passed to ``SentryHandler``. | ### Input:
r"""Enable Sentry logging if Sentry DSN passed.
.. note::
Sentry logging requires `raven <http://pypi.python.org/pypi/raven>`_
library to be installed.
**Usage**::
from logging.config import dictConfig
LOGGING = default_logging_dict()
SENTRY_DSN = '...'
update_sentry_logging(LOGGING, SENTRY_DSN)
dictConfig(LOGGING)
**Using AioHttpTransport for SentryHandler**
This will allow to use ``aiohttp.client`` for pushing data to Sentry in
your ``aiohttp.web`` app, which means elimination of sync calls to Sentry.
::
from raven_aiohttp import AioHttpTransport
update_sentry_logging(LOGGING, SENTRY_DSN, transport=AioHttpTransport)
:param logging_dict: Logging dict.
:param sentry_dsn:
Sentry DSN value. If ``None`` do not update logging dict at all.
:param \*loggers:
Use Sentry logging for each logger in the sequence. If the sequence is
empty use Sentry logging to each available logger.
:param \*\*kwargs: Additional kwargs to be passed to ``SentryHandler``.
### Response:
def update_sentry_logging(logging_dict: DictStrAny,
sentry_dsn: Optional[str],
*loggers: str,
level: Union[str, int] = None,
**kwargs: Any) -> None:
r
if not sentry_dsn:
return
kwargs[] =
kwargs[] = sentry_dsn
logging_dict[][] = dict(
level=level or ,
**kwargs)
loggers = tuple(logging_dict[]) if not loggers else loggers
for logger in loggers:
logger_dict = logging_dict[].get(logger)
if not logger_dict:
continue
if logger_dict.pop(, False):
continue
handlers = list(logger_dict.setdefault(, []))
handlers.append()
logger_dict[] = tuple(handlers) |
def _normalize_data_types(self, strategy):
for k, v in strategy.iteritems():
if not isinstance(v, str):
continue
if v == :
strategy[k] = True
elif v == or v is None:
strategy[k] = False
else:
try:
if v.find() > 0:
strategy[k] = float(v)
else:
strategy[k] = int(v)
except ValueError:
pass | some contexts only retrieves strings, giving back right type | ### Input:
some contexts only retrieves strings, giving back right type
### Response:
def _normalize_data_types(self, strategy):
for k, v in strategy.iteritems():
if not isinstance(v, str):
continue
if v == :
strategy[k] = True
elif v == or v is None:
strategy[k] = False
else:
try:
if v.find() > 0:
strategy[k] = float(v)
else:
strategy[k] = int(v)
except ValueError:
pass |
def get_science_segs_from_datafind_outs(datafindcaches):
newScienceSegs = {}
for cache in datafindcaches:
if len(cache) > 0:
groupSegs = segments.segmentlist(e.segment for e in cache).coalesce()
ifo = cache.ifo
if ifo not in newScienceSegs:
newScienceSegs[ifo] = groupSegs
else:
newScienceSegs[ifo].extend(groupSegs)
newScienceSegs[ifo].coalesce()
return newScienceSegs | This function will calculate the science segments that are covered in
the OutGroupList containing the frame files returned by various
calls to the datafind server. This can then be used to check whether this
list covers what it is expected to cover.
Parameters
----------
datafindcaches : OutGroupList
List of all the datafind output files.
Returns
--------
newScienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances
The times covered by the frames found in datafindOuts. | ### Input:
This function will calculate the science segments that are covered in
the OutGroupList containing the frame files returned by various
calls to the datafind server. This can then be used to check whether this
list covers what it is expected to cover.
Parameters
----------
datafindcaches : OutGroupList
List of all the datafind output files.
Returns
--------
newScienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances
The times covered by the frames found in datafindOuts.
### Response:
def get_science_segs_from_datafind_outs(datafindcaches):
newScienceSegs = {}
for cache in datafindcaches:
if len(cache) > 0:
groupSegs = segments.segmentlist(e.segment for e in cache).coalesce()
ifo = cache.ifo
if ifo not in newScienceSegs:
newScienceSegs[ifo] = groupSegs
else:
newScienceSegs[ifo].extend(groupSegs)
newScienceSegs[ifo].coalesce()
return newScienceSegs |
def save_png_bytes_io(self):
bytearr = QtCore.QByteArray()
buf = QtCore.QBuffer(bytearr)
buf.open(QtCore.QIODevice.WriteOnly)
self.contents.save(buf, "PNG")
bio = io.BytesIO(bytearr.data())
img = Image.open(bio)
img = img.resize((self.screen_size[0], self.screen_size[1]),
Image.ANTIALIAS)
img.info["dpi"] = (96, 96)
converted = io.BytesIO()
img.save(converted, format=)
return converted | Save contents as PNG format file(BytesIO object)
Note: DPI is platform dependent due to QPaintDevice DPI handling.
Mac -> 72dpi, Ubuntu -> 96dpi | ### Input:
Save contents as PNG format file(BytesIO object)
Note: DPI is platform dependent due to QPaintDevice DPI handling.
Mac -> 72dpi, Ubuntu -> 96dpi
### Response:
def save_png_bytes_io(self):
bytearr = QtCore.QByteArray()
buf = QtCore.QBuffer(bytearr)
buf.open(QtCore.QIODevice.WriteOnly)
self.contents.save(buf, "PNG")
bio = io.BytesIO(bytearr.data())
img = Image.open(bio)
img = img.resize((self.screen_size[0], self.screen_size[1]),
Image.ANTIALIAS)
img.info["dpi"] = (96, 96)
converted = io.BytesIO()
img.save(converted, format=)
return converted |
def _setup_template_file(self, template_file_path):
try:
template_file = template_file_path
template_env = get_environment_for(template_file_path)
template = template_env.get_template(os.path.basename(template_file))
except:
raise
else:
self._template_file = template_file
self._template_env = template_env
self.template = template | Setup self.template
Parameters
----------
template_file_path: str
Document template file path. | ### Input:
Setup self.template
Parameters
----------
template_file_path: str
Document template file path.
### Response:
def _setup_template_file(self, template_file_path):
try:
template_file = template_file_path
template_env = get_environment_for(template_file_path)
template = template_env.get_template(os.path.basename(template_file))
except:
raise
else:
self._template_file = template_file
self._template_env = template_env
self.template = template |
def local_variable_action(self, text, loc, var):
exshared.setpos(loc, text)
if DEBUG > 0:
print("LOCAL_VAR:",var, var.name, var.type)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.insert_local_var(var.name, var.type, self.shared.function_vars)
self.shared.function_vars += 1
return index | Code executed after recognising a local variable | ### Input:
Code executed after recognising a local variable
### Response:
def local_variable_action(self, text, loc, var):
exshared.setpos(loc, text)
if DEBUG > 0:
print("LOCAL_VAR:",var, var.name, var.type)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.insert_local_var(var.name, var.type, self.shared.function_vars)
self.shared.function_vars += 1
return index |
def create_admin(self, account_id, user_id, role):
url = ADMINS_API.format(account_id)
body = {"user_id": unquote(str(user_id)),
"role": role,
"send_confirmation": False}
return CanvasAdmin(data=self._post_resource(url, body)) | Flag an existing user as an admin within the account.
https://canvas.instructure.com/doc/api/admins.html#method.admins.create | ### Input:
Flag an existing user as an admin within the account.
https://canvas.instructure.com/doc/api/admins.html#method.admins.create
### Response:
def create_admin(self, account_id, user_id, role):
url = ADMINS_API.format(account_id)
body = {"user_id": unquote(str(user_id)),
"role": role,
"send_confirmation": False}
return CanvasAdmin(data=self._post_resource(url, body)) |
def _nodemap_changed(self, data, stat):
if not stat:
raise EnvironmentNotFoundException(self.nodemap_path)
try:
conf_path = self._deserialize_nodemap(data)[self.hostname]
except KeyError:
conf_path = % self.service
self.config_watcher = DataWatch(
self.zk, conf_path,
self._config_changed
) | Called when the nodemap changes. | ### Input:
Called when the nodemap changes.
### Response:
def _nodemap_changed(self, data, stat):
if not stat:
raise EnvironmentNotFoundException(self.nodemap_path)
try:
conf_path = self._deserialize_nodemap(data)[self.hostname]
except KeyError:
conf_path = % self.service
self.config_watcher = DataWatch(
self.zk, conf_path,
self._config_changed
) |
def prod():
common_conf()
env.user = settings.LOGIN_USER_PROD
env.machine =
env.host_string = settings.HOST_PROD
env.hosts = [env.host_string, ] | Option to do something on the production server. | ### Input:
Option to do something on the production server.
### Response:
def prod():
common_conf()
env.user = settings.LOGIN_USER_PROD
env.machine =
env.host_string = settings.HOST_PROD
env.hosts = [env.host_string, ] |
def schedule_blast_from_template(self, template, list_name, schedule_time, options=None):
options = options or {}
data = options.copy()
data[] = template
data[] = list_name
data[] = schedule_time
return self.api_post(, data) | Schedule a mass mail blast from template
http://docs.sailthru.com/api/blast
@param template: template to copy from
@param list_name: list to send to
@param schedule_time
@param options: additional optional params | ### Input:
Schedule a mass mail blast from template
http://docs.sailthru.com/api/blast
@param template: template to copy from
@param list_name: list to send to
@param schedule_time
@param options: additional optional params
### Response:
def schedule_blast_from_template(self, template, list_name, schedule_time, options=None):
options = options or {}
data = options.copy()
data[] = template
data[] = list_name
data[] = schedule_time
return self.api_post(, data) |
def get_legacy_storage_path(self):
config_dir = os.path.dirname(
self.py3_wrapper.config.get("i3status_config_path", "/tmp")
)
storage_path = os.path.join(config_dir, "py3status.data")
if os.path.exists(storage_path):
return storage_path
else:
return None | Detect and return existing legacy storage path. | ### Input:
Detect and return existing legacy storage path.
### Response:
def get_legacy_storage_path(self):
config_dir = os.path.dirname(
self.py3_wrapper.config.get("i3status_config_path", "/tmp")
)
storage_path = os.path.join(config_dir, "py3status.data")
if os.path.exists(storage_path):
return storage_path
else:
return None |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.