code stringlengths 26 79.6k | docstring stringlengths 1 46.9k |
|---|---|
def JoinKeyPath(path_segments):
path_segments = [
segment.split(definitions.KEY_PATH_SEPARATOR)
for segment in path_segments]
path_segments = [
element for sublist in path_segments for element in sublist]
path_segments = filter(None, path_segments)
key_path = definitions.KEY_PATH_SEPARATOR.join(path_segments)
if not key_path.startswith():
key_path = .format(definitions.KEY_PATH_SEPARATOR, key_path)
return key_path | Joins the path segments into key path.
Args:
path_segments (list[str]): Windows Registry key path segments.
Returns:
str: key path. |
def get_link_name (self, tag, attrs, attr):
if tag == and attr == :
data = self.parser.peek(MAX_NAMELEN)
data = data.decode(self.parser.encoding, "ignore")
name = linkname.href_name(data)
if not name:
name = attrs.get_true(, u)
elif tag == :
name = attrs.get_true(, u)
if not name:
name = attrs.get_true(, u)
else:
name = u""
return name | Parse attrs for link name. Return name of link. |
def is_arabicstring(text):
if re.search(u"([^\u0600-\u0652%s%s%s\s\d])" \
% (LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_MADDA_ABOVE), text):
return False
return True | Checks for an Arabic standard Unicode block characters
An arabic string can contain spaces, digits and pounctuation.
but only arabic standard characters, not extended arabic
@param text: input text
@type text: unicode
@return: True if all charaters are in Arabic block
@rtype: Boolean |
def update_policy(self,defaultHeaders):
if self.inputs is not None:
for k,v in defaultHeaders.items():
if k not in self.inputs:
self.inputs[k] = v
return self.inputs
else:
return self.inputs | if policy in default but not input still return |
def execute(self, email):
print(.format(email))
self.email_sender.send(email, .format(email)) | Execute use case handling. |
def moist_static_energy(heights, temperature, specific_humidity):
r
return (dry_static_energy(heights, temperature)
+ mpconsts.Lv * specific_humidity.to()).to() | r"""Calculate the moist static energy of parcels.
This function will calculate the moist static energy following
equation 3.72 in [Hobbs2006]_.
Notes
-----
.. math::\text{moist static energy} = c_{pd} * T + gz + L_v q
* :math:`T` is temperature
* :math:`z` is height
* :math:`q` is specific humidity
Parameters
----------
heights : array-like
Atmospheric height
temperature : array-like
Atmospheric temperature
specific_humidity : array-like
Atmospheric specific humidity
Returns
-------
`pint.Quantity`
The moist static energy |
def find_descriptor(self, uuid):
for desc in self.list_descriptors():
if desc.uuid == uuid:
return desc
return None | Return the first child descriptor found that has the specified
UUID. Will return None if no descriptor that matches is found. |
def get_protein_data_pgrouped(proteindata, p_acc, headerfields):
report = get_protein_data_base(proteindata, p_acc, headerfields)
return get_cov_protnumbers(proteindata, p_acc, report) | Parses protein data for a certain protein into tsv output
dictionary |
def no_wait_release(self, connection: Connection):
_logger.debug()
release_task = asyncio.get_event_loop().create_task(
self.release(connection)
)
self._release_tasks.add(release_task) | Synchronous version of :meth:`release`. |
def build_absolute_uri(self, uri):
request = self.context.get(, None)
return (
request.build_absolute_uri(uri) if request is not None else uri
) | Return a fully qualified absolute url for the given uri. |
def _format_coredump_stdout(cmd_ret):
ret_dict = {}
for line in cmd_ret[].splitlines():
line = line.strip().lower()
if line.startswith():
enabled = line.split()
if in enabled[1]:
ret_dict[] = True
else:
ret_dict[] = False
break
if line.startswith():
host_vnic = line.split()
ret_dict[] = host_vnic[1].strip()
if line.startswith():
ip = line.split()
ret_dict[] = ip[1].strip()
if line.startswith():
ip_port = line.split()
ret_dict[] = ip_port[1].strip()
return ret_dict | Helper function to format the stdout from the get_coredump_network_config function.
cmd_ret
The return dictionary that comes from a cmd.run_all call. |
def gen_blocks(output, ascii_props=False, append=False, prefix=""):
with codecs.open(output, if append else , ) as f:
if not append:
f.write(HEADER)
f.write( % prefix)
no_block = []
last = -1
max_range = MAXASCII if ascii_props else MAXUNICODE
formatter = bytesformat if ascii_props else uniformat
with codecs.open(os.path.join(HOME, , UNIVERSION, ), , ) as uf:
for line in uf:
if not line.startswith():
data = line.split()
if len(data) < 2:
continue
block = [int(i, 16) for i in data[0].strip().split()]
if block[0] > last + 1:
if (last + 1) <= max_range:
endval = block[0] - 1 if (block[0] - 1) < max_range else max_range
no_block.append((last + 1, endval))
last = block[1]
name = format_name(data[1])
inverse_range = []
if block[0] > max_range:
if ascii_props:
f.write( % name)
f.write( % (name, formatter(0), formatter(max_range)))
continue
if block[0] > 0:
inverse_range.append("%s-%s" % (formatter(0), formatter(block[0] - 1)))
if block[1] < max_range:
inverse_range.append("%s-%s" % (formatter(block[1] + 1), formatter(max_range)))
f.write( % (name, formatter(block[0]), formatter(block[1])))
f.write( % (name, .join(inverse_range)))
if last < max_range:
if (last + 1) <= max_range:
no_block.append((last + 1, max_range))
last = -1
no_block_inverse = []
if not no_block:
no_block_inverse.append((0, max_range))
else:
for piece in no_block:
if piece[0] > last + 1:
no_block_inverse.append((last + 1, piece[0] - 1))
last = piece[1]
for block, name in ((no_block, ), (no_block_inverse, )):
f.write( % name)
for piece in block:
if piece[0] == piece[1]:
f.write(formatter(piece[0]))
else:
f.write("%s-%s" % (formatter(piece[0]), formatter(piece[1])))
f.write()
f.write() | Generate Unicode blocks. |
def add_bits4subtree_ids(self, relevant_ids):
if relevant_ids:
checking = True
else:
checking = False
relevant_ids = {}
bit = 1
self.bits2internal_node = {}
for node in self.postorder_node_iter():
p = node._parent
if p is None:
if not node.is_leaf:
self.bits2internal_node[node.bits4subtree_ids] = node
continue
if not hasattr(p, ):
p.bits4subtree_ids = 0
i = node._id
if checking:
b = relevant_ids.get(i)
if b:
if node.is_leaf:
node.bits4subtree_ids = b
else:
node.bits4subtree_ids |= b
else:
if node.is_leaf:
relevant_ids[i] = bit
node.bits4subtree_ids = bit
bit <<= 1
if not node.is_leaf:
self.bits2internal_node[node.bits4subtree_ids] = node
p.bits4subtree_ids |= node.bits4subtree_ids
return relevant_ids | Adds a long integer bits4subtree_ids to each node (Fails cryptically if that field is already present!)
relevant_ids can be a dict of _id to bit representation.
If it is not supplied, a dict will be created by registering the leaf._id into a dict (and returning the dict)
the bits4subtree_ids will have a 1 bit if the _id is at or descended from this node and 0 if it is not
in this subtree.
Returns the dict of ids -> longs
Also creates a dict of long -> node mappings for all internal nodes. Stores this in self as bits2internal_node |
def delete(self, block_type, block_num):
logger.info("deleting block")
blocktype = snap7.snap7types.block_types[block_type]
result = self.library.Cli_Delete(self.pointer, blocktype, block_num)
return result | Deletes a block
:param block_type: Type of block
:param block_num: Bloc number |
def maps_get_rules_output_rules_policyname(self, **kwargs):
config = ET.Element("config")
maps_get_rules = ET.Element("maps_get_rules")
config = maps_get_rules
output = ET.SubElement(maps_get_rules, "output")
rules = ET.SubElement(output, "rules")
policyname = ET.SubElement(rules, "policyname")
policyname.text = kwargs.pop()
callback = kwargs.pop(, self._callback)
return callback(config) | Auto Generated Code |
def _assign_udf_desc_extents(descs, start_extent):
current_extent = start_extent
descs.pvd.set_extent_location(current_extent)
current_extent += 1
descs.impl_use.set_extent_location(current_extent)
current_extent += 1
descs.partition.set_extent_location(current_extent)
current_extent += 1
descs.logical_volume.set_extent_location(current_extent)
current_extent += 1
descs.unallocated_space.set_extent_location(current_extent)
current_extent += 1
descs.terminator.set_extent_location(current_extent)
current_extent += 1 | An internal function to assign a consecutive sequence of extents for the
given set of UDF Descriptors, starting at the given extent.
Parameters:
descs - The PyCdlib._UDFDescriptors object to assign extents for.
start_extent - The starting extent to assign from.
Returns:
Nothing. |
def get_attributes(**kwargs):
attrs = db.DBSession.query(Attr).order_by(Attr.name).all()
return attrs | Get all attributes |
def file_matches(filename, patterns):
return any(fnmatch.fnmatch(filename, pat)
or fnmatch.fnmatch(os.path.basename(filename), pat)
for pat in patterns) | Does this filename match any of the patterns? |
def substatements(self) -> List[Statement]:
res = []
self.opt_separator()
while self.peek() != "}":
res.append(self.statement())
self.opt_separator()
self.offset += 1
return res | Parse substatements.
Raises:
EndOfInput: If past the end of input. |
def index(self, sub, *args):
pos = self.find(sub, *args)
if pos == -1:
raise ValueError()
return pos | Like newstr.find() but raise ValueError when the substring is not
found. |
def remove_board(board_id):
log.debug(, board_id)
lines = boards_txt().lines()
lines = filter(lambda x: not x.strip().startswith(board_id + ), lines)
boards_txt().write_lines(lines) | remove board.
:param board_id: board id (e.g. 'diecimila')
:rtype: None |
def sum(self, phi1, inplace=True):
phi = self if inplace else self.copy()
if isinstance(phi1, (int, float)):
phi.values += phi1
else:
phi1 = phi1.copy()
extra_vars = set(phi1.variables) - set(phi.variables)
if extra_vars:
slice_ = [slice(None)] * len(phi.variables)
slice_.extend([np.newaxis] * len(extra_vars))
phi.values = phi.values[tuple(slice_)]
phi.variables.extend(extra_vars)
new_var_card = phi1.get_cardinality(extra_vars)
phi.cardinality = np.append(phi.cardinality, [new_var_card[var] for var in extra_vars])
extra_vars = set(phi.variables) - set(phi1.variables)
if extra_vars:
slice_ = [slice(None)] * len(phi1.variables)
slice_.extend([np.newaxis] * len(extra_vars))
phi1.values = phi1.values[tuple(slice_)]
phi1.variables.extend(extra_vars)
for axis in range(phi.values.ndim):
exchange_index = phi1.variables.index(phi.variables[axis])
phi1.variables[axis], phi1.variables[exchange_index] = phi1.variables[exchange_index], \
phi1.variables[axis]
phi1.values = phi1.values.swapaxes(axis, exchange_index)
phi.values = phi.values + phi1.values
if not inplace:
return phi | DiscreteFactor sum with `phi1`.
Parameters
----------
phi1: `DiscreteFactor` instance.
DiscreteFactor to be added.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Example
-------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> phi1.sum(phi2, inplace=True)
>>> phi1.variables
['x1', 'x2', 'x3', 'x4']
>>> phi1.cardinality
array([2, 3, 2, 2])
>>> phi1.values
array([[[[ 0, 0],
[ 4, 6]],
[[ 0, 4],
[12, 18]],
[[ 0, 8],
[20, 30]]],
[[[ 6, 18],
[35, 49]],
[[ 8, 24],
[45, 63]],
[[10, 30],
[55, 77]]]]) |
def ok(self):
rgb, hsv, hexa = self.square.get()
if self.alpha_channel:
hexa = self.hexa.get()
rgb += (self.alpha.get(),)
self.color = rgb, hsv, hexa
self.destroy() | Validate color selection and destroy dialog. |
def click_at_coordinates(self, x, y):
self.device.click(int(x), int(y)) | Click at (x,y) coordinates. |
def dataset_path_iterator(file_path: str) -> Iterator[str]:
logger.info("Reading CONLL sentences from dataset files at: %s", file_path)
for root, _, files in list(os.walk(file_path)):
for data_file in files:
if not data_file.endswith("gold_conll"):
continue
yield os.path.join(root, data_file) | An iterator returning file_paths in a directory
containing CONLL-formatted files. |
def get_or_create_author(self, name: str) -> Author:
author = self.object_cache_author.get(name)
if author is not None:
self.session.add(author)
return author
author = self.get_author_by_name(name)
if author is not None:
self.object_cache_author[name] = author
return author
author = self.object_cache_author[name] = Author.from_name(name=name)
self.session.add(author)
return author | Get an author by name, or creates one if it does not exist. |
def other_Orange_tables(self):
target_table = self.db.target_table
if not self.db.orng_tables:
return [self.convert_table(table, None) for table in self.db.tables if table != target_table]
else:
return [table for name, table in list(self.db.orng_tables.items()) if name != target_table] | Returns the related tables as Orange example tables.
:rtype: list |
def get_default_config(self):
config = super(DseOpsCenterCollector, self).get_default_config()
metrics = [
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
]
config.update({
: ,
: 8888,
: ,
: ,
: .join(metrics),
: ,
})
return config | Returns the default collector settings |
def Slot(self, slotnum):
self.assertNested()
self.current_vtable[slotnum] = self.Offset() | Slot sets the vtable key `voffset` to the current location in the
buffer. |
def default_error_handler(socket, error_name, error_message, endpoint,
msg_id, quiet):
pkt = dict(type=, name=,
args=[error_name, error_message],
endpoint=endpoint)
if msg_id:
pkt[] = msg_id
if not quiet:
socket.send_packet(pkt)
log.error(u"default_error_handler: {}, {} (endpoint={}, msg_id={})".format(
error_name, error_message, endpoint, msg_id
)) | This is the default error handler, you can override this when
calling :func:`socketio.socketio_manage`.
It basically sends an event through the socket with the 'error' name.
See documentation for :meth:`Socket.error`.
:param quiet: if quiet, this handler will not send a packet to the
user, but only log for the server developer. |
def write_nexus_files(self, force=False, quiet=False):
existing = glob.glob(os.path.join(self.workdir, self.name, "*.nex"))
if any(existing):
if force:
for rfile in existing:
os.remove(rfile)
else:
path = os.path.join(self.workdir, self.name)
raise IPyradWarningExit(EXISTING_NEX_FILES.format(path))
with open(self.files.data) as infile:
loci = iter(infile.read().strip().split("|\n"))
if not self.samples:
with open(self.files.data) as infile:
samples = set((i.split()[0] for i in infile.readlines() \
if "//" not in i))
else:
samples = set(self.samples)
totn = len(samples)
nloci = 0
if self._alleles:
msamples = {i+rbin() for i in samples}
else:
msamples = samples
for loc in loci:
dat = loc.split("\n")[:-1]
try:
names = [i.split()[0] for i in dat]
snames = set(names)
seqs = np.array([list(i.split()[1]) for i in dat])
except IndexError:
print(ALLELESBUGFIXED)
continue
if len(snames.intersection(msamples)) == totn:
if self._alleles:
_samples = [i+rbin() for i in samples]
else:
_samples = samples
seqsamp = seqs[[names.index(tax) for tax in _samples]]
if not self._alleles:
seqsamp = _resolveambig(seqsamp)
if _count_PIS(seqsamp, self.params.minsnps):
nloci += 1
copied = seqsamp.copy()
copied[copied == "-"] == "N"
rmcol = np.all(copied == "N", axis=0)
seqsamp = seqsamp[:, ~rmcol]
if self._alleles:
samps = [i.rsplit("_", 1)[0] for i in _samples]
mdict = dict(zip(samps, [i.tostring() for i in seqsamp]))
else:
mdict = dict(zip(_samples, [i.tostring() for i in seqsamp]))
self._write_nex(mdict, nloci)
if nloci == self.params.maxloci:
break
if not quiet:
path = os.path.join(self.workdir, self.name)
path = path.replace(os.path.expanduser("~"), "~")
print("wrote {} nexus files to {}".format(nloci, path)) | Write nexus files to {workdir}/{name}/[0-N].nex, If the directory already
exists an exception will be raised unless you use the force flag which
will remove all files in the directory.
Parameters:
-----------
force (bool):
If True then all files in {workdir}/{name}/*.nex* will be removed. |
def _parse(self, filename):
self.names = {}
with codecs.open(filename, encoding="iso8859-1") as f:
for line in f:
if any(map(lambda c: 128 < ord(c) < 160, line)):
line = line.encode("iso8859-1").decode("windows-1252")
self._eat_name_line(line.strip()) | Opens data file and for each line, calls _eat_name_line |
def _extract_stack(limit=10):
frame = sys._getframe().f_back
try:
stack = traceback.StackSummary.extract(
traceback.walk_stack(frame), lookup_lines=False)
finally:
del frame
apg_path = asyncpg.__path__[0]
i = 0
while i < len(stack) and stack[i][0].startswith(apg_path):
i += 1
stack = stack[i:i + limit]
stack.reverse()
return .join(traceback.format_list(stack)) | Replacement for traceback.extract_stack() that only does the
necessary work for asyncio debug mode. |
def _all_recall_native_type(self, data, ptitem, prefix):
typestr = self._all_get_from_attrs(ptitem, prefix + HDF5StorageService.SCALAR_TYPE)
colltype = self._all_get_from_attrs(ptitem, prefix + HDF5StorageService.COLL_TYPE)
type_changed = False
if colltype == HDF5StorageService.COLL_SCALAR:
if isinstance(data, np.ndarray):
data = np.array([data])[0]
type_changed = True
if not typestr is None:
if typestr != type(data).__name__:
if typestr == str.__name__:
data = data.decode(self._encoding)
else:
try:
data = pypetconstants.PARAMETERTYPEDICT[typestr](data)
except KeyError:
data = pypetconstants.COMPATPARAMETERTYPEDICT[typestr](data)
type_changed = True
elif (colltype == HDF5StorageService.COLL_TUPLE or
colltype == HDF5StorageService.COLL_LIST):
if type(data) is not list and type is not tuple:
type_changed = True
data = list(data)
if len(data) > 0:
first_item = data[0]
if not typestr == type(first_item).__name__:
if not isinstance(data, list):
data = list(data)
for idx, item in enumerate(data):
if typestr == str.__name__:
data[idx] = data[idx].decode(self._encoding)
else:
try:
data[idx] = pypetconstants.PARAMETERTYPEDICT[typestr](item)
except KeyError:
data[idx] = pypetconstants.COMPATPARAMETERTYPEDICT[typestr](item)
type_changed = True
if colltype == HDF5StorageService.COLL_TUPLE:
if type(data) is not tuple:
data = tuple(data)
type_changed = True
elif colltype == HDF5StorageService.COLL_EMPTY_DICT:
data = {}
type_changed = True
elif isinstance(data, np.ndarray):
if typestr == str.__name__:
data = np.core.defchararray.decode(data, self._encoding)
type_changed = True
if colltype == HDF5StorageService.COLL_MATRIX:
data = np.matrix(data)
type_changed = True
return data, type_changed | Checks if loaded data has the type it was stored in. If not converts it.
:param data: Data item to be checked and converted
:param ptitem: HDf5 Node or Leaf from where data was loaded
:param prefix: Prefix for recalling the data type from the hdf5 node attributes
:return:
Tuple, first item is the (converted) `data` item, second boolean whether
item was converted or not. |
def show_input(self, template_helper, language, seed):
header = ParsableText(self.gettext(language, self._header), "rst",
translation=self._translations.get(language, gettext.NullTranslations()))
return str(DisplayableMatchProblem.get_renderer(template_helper).tasks.match(self.get_id(), header)) | Show MatchProblem |
def ToScriptHash(self, address):
if len(address) == 34:
if address[0] == :
data = b58decode(address)
if data[0] != self.AddressVersion:
raise ValueError()
checksum = Crypto.Default().Hash256(data[:21])[:4]
if checksum != data[21:]:
raise Exception()
return UInt160(data=data[1:21])
else:
raise Exception()
else:
raise ValueError() | Retrieve the script_hash based from an address.
Args:
address (str): a base58 encoded address.
Raises:
ValuesError: if an invalid address is supplied or the coin version is incorrect
Exception: if the address string does not start with 'A' or the checksum fails
Returns:
UInt160: script hash. |
def print_partlist(input, timeout=20, showgui=False):
print raw_partlist(input=input, timeout=timeout, showgui=showgui) | print partlist text delivered by eagle
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: None |
def __header(self, line):
self.cpu_number = len(line.split())
return self.cpu_number | Build the header (contain the number of CPU).
CPU0 CPU1 CPU2 CPU3
0: 21 0 0 0 IO-APIC 2-edge timer |
def cache(self, key, value):
fin = value.__finalizer__()
try:
if self.data[key]() is not None:
raise CacheInconsistency(
"Duplicate cache key: %r %r %r" % (
key, value, self.data[key]))
except KeyError:
pass
callback = createCacheRemoveCallback(self._ref(self), key, fin)
self.data[key] = self._ref(value, callback)
return value | Add an entry to the cache.
A weakref to the value is stored, rather than a direct reference. The
value must have a C{__finalizer__} method that returns a callable which
will be invoked when the weakref is broken.
@param key: The key identifying the cache entry.
@param value: The value for the cache entry. |
def callback_liveIn_button_press(red_clicks, blue_clicks, green_clicks,
rc_timestamp, bc_timestamp, gc_timestamp, **kwargs):
if not rc_timestamp:
rc_timestamp = 0
if not bc_timestamp:
bc_timestamp = 0
if not gc_timestamp:
gc_timestamp = 0
if (rc_timestamp + bc_timestamp + gc_timestamp) < 1:
change_col = None
timestamp = 0
else:
if rc_timestamp > bc_timestamp:
change_col = "red"
timestamp = rc_timestamp
else:
change_col = "blue"
timestamp = bc_timestamp
if gc_timestamp > timestamp:
timestamp = gc_timestamp
change_col = "green"
value = {:red_clicks,
:blue_clicks,
:green_clicks,
:change_col,
:timestamp,
:str(kwargs.get(, ))}
send_to_pipe_channel(channel_name="live_button_counter",
label="named_counts",
value=value)
return "Number of local clicks so far is %s red and %s blue; last change is %s at %s" % (red_clicks,
blue_clicks,
change_col,
datetime.fromtimestamp(0.001*timestamp)) | Input app button pressed, so do something interesting |
def get_list(shapes, types):
if types is not None:
type_dict = dict(types)
return [DataDesc(x[0], x[1], type_dict[x[0]]) for x in shapes]
else:
return [DataDesc(x[0], x[1]) for x in shapes] | Get DataDesc list from attribute lists.
Parameters
----------
shapes : a tuple of (name_, shape_)
types : a tuple of (name_, np.dtype) |
def release(self, key, owner):
status = self.collection.find_and_modify(
{: key, : owner},
{: False, : None, : None, : None}
) | Release lock with given name.
`key` - lock name
`owner` - name of application/component/whatever which held a lock
Raises `MongoLockException` if no such a lock. |
def is_permitted(self, identifiers, permission_s):
identifier = identifiers.primary_identifier
for required in permission_s:
domain = Permission.get_domain(required)
assigned = self.get_authzd_permissions(identifier, domain)
is_permitted = False
for perms_blob in assigned:
is_permitted = self.permission_verifier.\
is_permitted_from_json(required, perms_blob)
yield (required, is_permitted) | If the authorization info cannot be obtained from the accountstore,
permission check tuple yields False.
:type identifiers: subject_abcs.IdentifierCollection
:param permission_s: a collection of one or more permissions, represented
as string-based permissions or Permission objects
and NEVER comingled types
:type permission_s: list of string(s)
:yields: tuple(Permission, Boolean) |
def dry(self, *args, **kwargs):
return % (
self.name, Args(self.spec).explain(*args, **kwargs)) | Perform a dry-run of the task |
def reset(self):
self.clear()
self._initialise()
self.configspec = None
self._original_configspec = None | Clear ConfigObj instance and restore to 'freshly created' state. |
def GetRootFileEntry(self):
if platform.system() == :
location = os.getcwd()
location, _, _ = location.partition()
location = .format(location)
else:
location =
if not os.path.exists(location):
return None
path_spec = os_path_spec.OSPathSpec(location=location)
return self.GetFileEntryByPathSpec(path_spec) | Retrieves the root file entry.
Returns:
OSFileEntry: a file entry or None if not available. |
def get_delete_security_group_rule_commands(self, sg_id, sg_rule):
return self._get_rule_cmds(sg_id, sg_rule, delete=True) | Commands for removing rule from ACLS |
def artists(self, spotify_ids):
route = Route(, )
payload = {: spotify_ids}
return self.request(route, params=payload) | Get a spotify artists by their IDs.
Parameters
----------
spotify_id : List[str]
The spotify_ids to search with. |
def get_terminal_converted(self, attr):
value = self.data.get(attr.repr_name)
return self.converter_registry.convert_to_representation(
value,
attr.value_type) | Returns the value of the specified attribute converted to a
representation value.
:param attr: Attribute to retrieve.
:type attr: :class:`everest.representers.attributes.MappedAttribute`
:returns: Representation string. |
def iter_steps(self):
for func, decorator in self._iter_step_func_decorators():
step = self._step_decorator_args(decorator)
if step:
yield step, func.name, self._span_for_node(func, True) | Iterate over steps in the parsed file. |
def get_topic_keyword_dictionary():
topic_keyword_dictionary = dict()
file_row_gen = get_file_row_generator(get_package_path() + "/twitter/res/topics/topic_keyword_mapping" + ".txt",
",",
"utf-8")
for file_row in file_row_gen:
topic_keyword_dictionary[file_row[0]] = set([keyword for keyword in file_row[1:]])
return topic_keyword_dictionary | Opens the topic-keyword map resource file and returns the corresponding python dictionary.
- Input: - file_path: The path pointing to the topic-keyword map resource file.
- Output: - topic_set: A topic to keyword python dictionary. |
def vq_nearest_neighbor(x, hparams):
bottleneck_size = 2**hparams.bottleneck_bits
means = hparams.means
x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
scalar_prod = tf.matmul(x, means, transpose_b=True)
dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
if hparams.bottleneck_kind == "em":
x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
x_means_hot = tf.one_hot(
x_means_idx, depth=bottleneck_size)
x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
else:
x_means_idx = tf.argmax(-dist, axis=-1)
x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
x_means = tf.matmul(x_means_hot, means)
e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
return x_means_hot, e_loss | Find the nearest element in means to elements in x. |
def buy_market(self, quantity, **kwargs):
kwargs[] = 0
kwargs[] = "MARKET"
self.parent.order("BUY", self, quantity=quantity, **kwargs) | Shortcut for ``instrument.order("BUY", ...)`` and accepts all of its
`optional parameters <#qtpylib.instrument.Instrument.order>`_
:Parameters:
quantity : int
Order quantity |
def _get_assistants_snippets(path, name):
result = []
subdirs = {: 2, : 1}
for loc in subdirs:
for root, dirs, files in os.walk(os.path.join(path, loc)):
for filename in [utils.strip_prefix(os.path.join(root, f), path) for f in files]:
stripped = os.path.sep.join(filename.split(os.path.sep)[subdirs[loc]:])
if stripped.startswith(os.path.join(name, )) or stripped == name + :
result.append(os.path.join(, filename))
return result | Get Assistants and Snippets for a given DAP name on a given path |
def write(self, text):
sys.stdout.write("\r")
self._clear_line()
_text = to_unicode(text)
if PY2:
_text = _text.encode(ENCODING)
assert isinstance(_text, builtin_str)
sys.stdout.write("{0}\n".format(_text)) | Write text in the terminal without breaking the spinner. |
def format_sql(self):
sql =
select_segment = self.build_select_fields()
select_segment = select_segment.replace(, , 1)
fields = [field.strip() for field in select_segment.split()]
sql += .format(.join(fields))
from_segment = self.build_from_table()
from_segment = from_segment.replace(, , 1)
tables = [table.strip() for table in from_segment.split()]
sql += .format(.join(tables))
order_by_segment = self.build_order_by()
if len(order_by_segment):
order_by_segment = order_by_segment.replace(, , 1)
sorters = [sorter.strip() for sorter in order_by_segment.split()]
sql += .format(.join(sorters))
limit_segment = self.build_limit()
if len(limit_segment):
if in limit_segment:
limit_segment = limit_segment.replace(, , 1)
if in limit_segment:
limit_segment = limit_segment.replace(, , 1)
elif in limit_segment:
limit_segment = limit_segment.replace(, , 1)
sql += limit_segment
return sql | Builds the sql in a format that is easy for humans to read and debug
:return: The formatted sql for this query
:rtype: str |
def buy_market_order(self, amount):
amount = str(amount)
self._log("buy {} {} at market price".format(amount, self.major))
return self._rest_client.post(
endpoint=,
payload={: self.name, : amount}
) | Place a buy order at market price.
:param amount: Amount of major currency to buy at market price.
:type amount: int | float | str | unicode | decimal.Decimal
:return: Order details.
:rtype: dict |
def _initActions(self):
def createAction(text, shortcut, slot, iconFileName=None):
action = QAction(text, self)
if iconFileName is not None:
action.setIcon(getIcon(iconFileName))
keySeq = shortcut if isinstance(shortcut, QKeySequence) else QKeySequence(shortcut)
action.setShortcut(keySeq)
action.setShortcutContext(Qt.WidgetShortcut)
action.triggered.connect(slot)
self.addAction(action)
return action
self.scrollUpAction = createAction(, ,
lambda: self._onShortcutScroll(down = False),
)
self.scrollDownAction = createAction(, ,
lambda: self._onShortcutScroll(down = True),
)
self.selectAndScrollUpAction = createAction(, ,
lambda: self._onShortcutSelectAndScroll(down = False))
self.selectAndScrollDownAction = createAction(, ,
lambda: self._onShortcutSelectAndScroll(down = True))
self.increaseIndentAction = createAction(, ,
self._onShortcutIndent,
)
self.decreaseIndentAction = createAction(, ,
lambda: self._indenter.onChangeSelectedBlocksIndent(increase = False),
)
self.autoIndentLineAction = createAction(, ,
self._indenter.onAutoIndentTriggered)
self.indentWithSpaceAction = createAction(, ,
lambda: self._indenter.onChangeSelectedBlocksIndent(increase=True,
withSpace=True))
self.unIndentWithSpaceAction = createAction(, ,
lambda: self._indenter.onChangeSelectedBlocksIndent(increase=False,
withSpace=True))
self.undoAction = createAction(, QKeySequence.Undo,
self.undo, )
self.redoAction = createAction(, QKeySequence.Redo,
self.redo, )
self.moveLineUpAction = createAction(, ,
lambda: self._onShortcutMoveLine(down = False), )
self.moveLineDownAction = createAction(, ,
lambda: self._onShortcutMoveLine(down = True), )
self.deleteLineAction = createAction(, , self._onShortcutDeleteLine, )
self.cutLineAction = createAction(, , self._onShortcutCutLine, )
self.copyLineAction = createAction(, , self._onShortcutCopyLine, )
self.pasteLineAction = createAction(, , self._onShortcutPasteLine, )
self.duplicateLineAction = createAction(, , self._onShortcutDuplicateLine)
self.invokeCompletionAction = createAction(, , self._completer.invokeCompletion)
self.printAction = createAction(, , self._onShortcutPrint, ) | Init shortcuts for text editing |
def is_all_field_none(self):
if self._billing_date is not None:
return False
if self._type_description is not None:
return False
if self._type_description_translated is not None:
return False
if self._unit_vat_exclusive is not None:
return False
if self._unit_vat_inclusive is not None:
return False
if self._vat is not None:
return False
if self._quantity is not None:
return False
if self._total_vat_exclusive is not None:
return False
if self._total_vat_inclusive is not None:
return False
return True | :rtype: bool |
def to_dict(self):
return {
"type": self.type,
"name": self.name,
"group_by_key": self.group_by_key,
"role": self.role,
"units": self.units,
"options": self.build_options()
} | Converts the column to a dictionary representation accepted
by the Citrination server.
:return: Dictionary with basic options, plus any column type specific
options held under the "options" key
:rtype: dict |
def _cooked_fields(self, dj_fields):
        """Resolve field names into (column_name, field_name, field_obj) triples.

        ForeignKey fields get an ``_id``-suffixed column name; names that do
        not exist on the model map to a ``None`` field object.

        :param dj_fields: a list of django field names
        :return: list of 3-tuples
        """
        from django.db import models
        valids = []
        for field in dj_fields:
            try:
                # NOTE(review): get_field_by_name() is the legacy (pre-1.10)
                # Django Meta API and returns a 4-tuple.
                dj_field, _, _, _ = self.model._meta.get_field_by_name(field)
                if isinstance(dj_field, models.ForeignKey):
                    valids.append((field + "_id", field, dj_field))
                else:
                    valids.append((field, field, dj_field))
            except models.FieldDoesNotExist:
                valids.append((field, field, None))
        return valids | Returns a tuple of cooked fields
:param dj_fields: a list of django name fields
:return: |
def unregister(self):
        """Unregister this model at the tracking server via HTTP DELETE."""
        uuid = self.metadata["tracker"]["uuid"]
        # NOTE(review): the leading path-segment string literal in urljoin()
        # was stripped in this dump (reads `, + "/" + uuid`); original unknown.
        result = requests.delete(urljoin(self.tracker, + "/" + uuid))
        logger.debug("unregistered at server %s with %s: %s", self.tracker, uuid, result) | unregister model at tracking server
def _ixs(self, i, axis=0):
        """Return the i-th value or values in the SparseSeries by location.

        Parameters
        ----------
        i : int, slice, or sequence of integers

        Returns
        -------
        value : scalar (int) or Series (slice, sequence)
        """
        label = self.index[i]
        # A slice/sequence of positions yields an Index -> delegate to take();
        # a scalar label means single positional access.
        if isinstance(label, Index):
            return self.take(i, axis=axis)
        else:
            return self._get_val_at(i) | Return the i-th value or values in the SparseSeries by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence) |
def create_from_assocs(self, assocs, **args):
        """Create an AssociationSet from a list of association objects,
        indexing them by subject and by (subject, object) pairs.

        NOTE(review): all dict-subscript string keys were stripped in this
        dump (`a[]`, `subj[]`, ...); presumably keys like 'subject', 'id',
        'label', 'negated', 'object' -- confirm against upstream source.
        """
        amap = defaultdict(list)
        subject_label_map = {}
        for a in assocs:
            subj = a[]
            subj_id = subj[]
            subj_label = subj[]
            subject_label_map[subj_id] = subj_label
            if not a[]:
                amap[subj_id].append(a[][])
        aset = AssociationSet(subject_label_map=subject_label_map, association_map=amap, **args)
        aset.associations_by_subj = defaultdict(list)
        aset.associations_by_subj_obj = defaultdict(list)
        for a in assocs:
            sub_id = a[][]
            obj_id = a[][]
            aset.associations_by_subj[sub_id].append(a)
            aset.associations_by_subj_obj[(sub_id,obj_id)].append(a)
        return aset | Creates from a list of association objects
def configure_file_logger(name, log_dir, log_level=logging.DEBUG):
        """Configure the root logger to write to a SizeRotatingFileHandler
        in ``log_dir``.

        NOTE(review): the filename format string in os.path.join() was
        stripped in this dump (`% name`); original pattern unknown.
        """
        from .srothandler import SizeRotatingFileHandler
        root = logging.getLogger()
        root.setLevel(log_level)
        handler = SizeRotatingFileHandler(os.path.join(log_dir, % name))
        handler.setLevel(log_level)
        handler.setFormatter(logging.Formatter(LOG_FORMAT_STANDARD))
        root.addHandler(handler) | Configures logging to use the :class:`SizeRotatingFileHandler`
def insert_one(self, doc, *args, **kwargs):
        """Insert one document into the collection.

        If the document has an '_id' it is used, else one is generated.
        Raises DuplicateKeyError when the _id already exists (unless
        validation is bypassed).

        NOTE(review): several string literals (keys such as '_id' and
        'bypass_document_validation', plus the error format string) were
        stripped in this dump.

        :param doc: the document
        :return: InsertOneResult
        """
        if self.table is None:
            self.build_table()
        if not isinstance(doc, dict):
            raise ValueError(u)
        _id = doc[u] = doc.get() or generate_id()
        bypass_document_validation = kwargs.get()
        if bypass_document_validation is True:
            # Insert without checking for an existing _id.
            eid = self.table.insert(doc)
        else:
            existing = self.find_one({: _id})
            if existing is None:
                eid = self.table.insert(doc)
            else:
                raise DuplicateKeyError(
                    u.format(
                        _id, self.tablename
                    )
                )
        return InsertOneResult(eid=eid, inserted_id=_id) | Inserts one document into the collection
If the document contains an '_id' key it is used, else one is generated.
:param doc: the document
:return: InsertOneResult |
def get_composition_admin_session(self):
        """Get a composition administration session for creating, updating
        and deleting compositions.

        return: (osid.repository.CompositionAdminSession)
        raise:  Unimplemented - ``supports_composition_admin()`` is false
        """
        if not self.supports_composition_admin():
            raise errors.Unimplemented()
        return sessions.CompositionAdminSession(runtime=self._runtime) | Gets a composition administration session for creating, updating and deleting compositions.
return: (osid.repository.CompositionAdminSession) - a
``CompositionAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_composition_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_composition_admin()`` is ``true``.* |
def get_encodings_from_content(content):
        """Return encodings declared in HTML meta/XML prolog of ``content``,
        handling both bytes and str input.

        NOTE(review): the regex pattern literals were mangled in this dump
        (only the fragment `]*([a-z0-9\-_]+?) *?["\` survives); see the
        requests-toolbelt `deprecated.py` source cited below for the
        original patterns.
        """
        if isinstance(content, bytes):
            find_charset = re.compile(
                br]*([a-z0-9\-_]+?) *?["\, flags=re.I
            ).findall
            find_xml = re.compile(
                br]*([a-z0-9\-_]+?) *?["\
            ).findall
            # bytes input: decode each matched encoding name to str
            return [encoding.decode() for encoding in
                    find_charset(content) + find_xml(content)]
        else:
            find_charset = re.compile(
                r]*([a-z0-9\-_]+?) *?["\, flags=re.I
            ).findall
            find_xml = re.compile(
                r]*([a-z0-9\-_]+?) *?["\
            ).findall
            return find_charset(content) + find_xml(content) | Code from:
https://github.com/sigmavirus24/requests-toolbelt/blob/master/requests_toolbelt/utils/deprecated.py
Return encodings from given content string.
:param content: string to extract encodings from. |
def recCopyElement(oldelement):
        """Generate a copy of an xml element and, recursively, of all its
        child elements (``.text``/``.tail`` are NOT copied).

        NOTE(review): getchildren() is deprecated in lxml and was removed
        from xml.etree in Python 3.9 -- consider iterating the element
        directly.
        """
        newelement = ETREE.Element(oldelement.tag, oldelement.attrib)
        if len(oldelement.getchildren()) > 0:
            for childelement in oldelement.getchildren():
                newelement.append(recCopyElement(childelement))
        return newelement | Generates a copy of an xml element and recursively of all
child elements.
:param oldelement: an instance of lxml.etree._Element
:returns: a copy of the "oldelement"
.. warning::
doesn't copy ``.text`` or ``.tail`` of xml elements |
def set_mode(self, mode):
        """Set the Lupusec alarm mode and mirror it into the cached state.

        NOTE(review): every log-message literal and the response dict key
        were stripped in this dump; the mode-validation branches fall
        through after logging (no early return visible).
        """
        _LOGGER.debug()
        if not mode:
            _LOGGER.info()
        elif mode not in CONST.ALL_MODES:
            _LOGGER.warning()
        response_object = self._lupusec.set_mode(CONST.MODE_TRANSLATION[mode])
        if response_object[] != 1:
            _LOGGER.warning()
        self._json_state[] = mode
        _LOGGER.info(, mode)
        return True | Set Lupusec alarm mode.
def unknown_command(self, args):
        """Handle a mode switch given the mode name as a command.

        :param args: command tokens; args[0] is the requested mode name
        :returns: True if the mode was recognized and set, else False
        """
        mode_mapping = self.master.mode_mapping()
        mode = args[0].upper()
        if mode in mode_mapping:
            self.master.set_mode(mode_mapping[mode])
            return True
        return False | handle mode switch by mode name as command
def parameterstep(timestep=None):
        """Define a parameter time step size within a parameter control file.

        Injects the model, its parameter/sequence groups and (optionally)
        Cython shadow objects into the *caller's* local namespace via
        |inspect| frame magic.

        NOTE(review): every namespace key literal was stripped in this dump
        (`namespace.get()`, `namespace[]`, `in namespace`); the original key
        names cannot be recovered from this view.

        :param timestep: |Period| time step size, or None to keep current.
        """
        if timestep is not None:
            parametertools.Parameter.parameterstep(timestep)
        # Grab the locals of the *calling* control file.
        namespace = inspect.currentframe().f_back.f_locals
        model = namespace.get()
        if model is None:
            model = namespace[]()
            namespace[] = model
            if hydpy.pub.options.usecython and in namespace:
                # Wire up the compiled Cython model counterpart.
                cythonizer = namespace[]
                namespace[] = cythonizer.cymodule
                model.cymodel = cythonizer.cymodule.Model()
                namespace[] = model.cymodel
                model.cymodel.parameters = cythonizer.cymodule.Parameters()
                model.cymodel.sequences = cythonizer.cymodule.Sequences()
                for numpars_name in (, ):
                    if hasattr(cythonizer.cymodule, numpars_name):
                        numpars_new = getattr(cythonizer.cymodule, numpars_name)()
                        numpars_old = getattr(model, numpars_name.lower())
                        for (name_numpar, numpar) in vars(numpars_old).items():
                            setattr(numpars_new, name_numpar, numpar)
                        setattr(model.cymodel, numpars_name.lower(), numpars_new)
                for name in dir(model.cymodel):
                    if (not name.startswith()) and hasattr(model, name):
                        setattr(model, name, getattr(model.cymodel, name))
            if not in namespace:
                namespace[] = parametertools.Parameters
            model.parameters = namespace[](namespace)
            if not in namespace:
                namespace[] = sequencetools.Sequences
            model.sequences = namespace[](**namespace)
            namespace[] = model.parameters
            for pars in model.parameters:
                namespace[pars.name] = pars
            namespace[] = model.sequences
            for seqs in model.sequences:
                namespace[seqs.name] = seqs
            if in namespace:
                model.masks = namespace[](model)
                namespace[] = model.masks
        try:
            namespace.update(namespace[])
        except KeyError:
            pass
        # When a "focus" parameter is set, all other control parameters are
        # replaced by no-op callables in the caller's namespace.
        focus = namespace.get()
        for par in model.parameters.control:
            try:
                if (focus is None) or (par is focus):
                    namespace[par.name] = par
                else:
                    namespace[par.name] = lambda *args, **kwargs: None
            except AttributeError:
                pass | Define a parameter time step size within a parameter control file.
Argument:
* timestep(|Period|): Time step size.
Function parameterstep should usually be applied in a line
immediately behind the model import. Defining the step size of time
dependent parameters is a prerequisite to access any model specific
parameter.
Note that parameterstep implements some namespace magic by
means of the module |inspect|. This makes things a little
complicated for framework developers, but it eases the definition of
parameter control files for framework users. |
def last_page(self_or_cls, max_=None):
        """Return a query set which requests the last page.

        :param max_: Maximum number of items to return, or None.
        :rtype: :class:`ResultSetMetadata`
        """
        result = self_or_cls()
        result.before = Before()
        result.max_ = max_
        return result | Return a query set which requests the last page.
:param max_: Maximum number of items to return.
:type max_: :class:`int` or :data:`None`
:rtype: :class:`ResultSetMetadata`
:return: A new request set up to request the last page. |
def read_raw_table(self, table):
        """Yield rows of the [incr tsdb()] *table* as OrderedDicts mapping
        column names to values; no filters or applicators are applied.

        NOTE(review): the logging.error format string was stripped in this
        dump (`.format(len(cols), field_len)`).
        """
        fields = self.table_relations(table) if self.cast else None
        field_names = [f.name for f in self.table_relations(table)]
        field_len = len(field_names)
        table_path = os.path.join(self.root, table)
        with _open_table(table_path, self.encoding) as tbl:
            for line in tbl:
                cols = decode_row(line, fields=fields)
                if len(cols) != field_len:
                    # Mismatched column count is logged but the row is
                    # still yielded (zip truncates to the shorter side).
                    logging.error(
                        .format(len(cols), field_len))
                row = OrderedDict(zip(field_names, cols))
                yield row | Yield rows in the [incr tsdb()] *table*. A row is a dictionary
mapping column names to values. Data from a profile is decoded
by decode_row(). No filters or applicators are used. |
def _click(x, y, button):
        """Send a mouse click event to Windows via mouse_event() win32 call,
        swallowing PermissionError/OSError.

        NOTE(review): the button-name string literals were stripped in this
        dump (`if button == :`); presumably 'left'/'middle'/'right'.

        Args:
            button (str): 'left', 'middle', or 'right'
            x (int), y (int): click position
        """
        if button == :
            try:
                _sendMouseEvent(MOUSEEVENTF_LEFTCLICK, x, y)
            except (PermissionError, OSError):
                pass
        elif button == :
            try:
                _sendMouseEvent(MOUSEEVENTF_MIDDLECLICK, x, y)
            except (PermissionError, OSError):
                pass
        elif button == :
            try:
                _sendMouseEvent(MOUSEEVENTF_RIGHTCLICK, x, y)
            except (PermissionError, OSError):
                pass
        else:
            assert False, "button argument not in (, , )" | Send the mouse click event to Windows by calling the mouse_event() win32
function.
Args:
button (str): The mouse button, either 'left', 'middle', or 'right'
x (int): The x position of the mouse event.
y (int): The y position of the mouse event.
Returns:
None |
def cmd_velocity(self, args):
        """Send a velocity setpoint: velocity x y z (m/s).

        Sends a MAVLink SET_POSITION_TARGET_LOCAL_NED message with only the
        velocity fields enabled via the type mask.
        """
        if (len(args) != 3):
            print("Usage: velocity x y z (m/s)")
            return
        if (len(args) == 3):
            x_mps = float(args[0])
            y_mps = float(args[1])
            z_mps = float(args[2])
            self.master.mav.set_position_target_local_ned_send(
                0,
                0, 0,
                mavutil.mavlink.MAV_FRAME_LOCAL_NED,
                0b0000111111000111,  # type mask: ignore pos/accel/yaw, use velocity
                0, 0, 0,
                # z is negated: NED frame counts "down" as positive,
                # presumably the user supplies "up" -- TODO confirm
                x_mps, y_mps, -z_mps,
                0, 0, 0,
                0, 0) | velocity x-ms y-ms z-ms
def split_bits(value, *bits):
        """Split integer ``value`` into a list of ints according to ``bits``.

        For example, split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4].
        Asserts that no bits remain after consuming all widths.
        """
        result = []
        # Peel fields off the low end, so iterate widths in reverse.
        for b in reversed(bits):
            mask = (1 << b) - 1
            result.append(value & mask)
            value = value >> b
        assert value == 0
        result.reverse()
        return result | Split integer value into list of ints, according to `bits` list.
For example, split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4] |
def update_project(self, org_name, part_name, dci_id=UNKNOWN_DCI_ID,
                       service_node_ip=UNKNOWN_SRVN_NODE_IP,
                       vrf_prof=None, desc=None):
        """Update a project (partition) on the DCNM; raises
        DfaClientRequestFailed when the response status is not OK.

        NOTE(review): the `operation=` keyword literal and the log-dict keys
        were stripped in this dump.
        """
        desc = desc or org_name
        res = self._create_or_update_partition(org_name, part_name, desc,
                                               dci_id=dci_id,
                                               service_node_ip=service_node_ip,
                                               vrf_prof=vrf_prof,
                                               operation=)
        if res and res.status_code in self._resp_ok:
            LOG.debug("Update %s partition in DCNM.", part_name)
        else:
            LOG.error("Failed to update %(part)s partition in DCNM."
                      "Response: %(res)s", {: part_name, : res})
            raise dexc.DfaClientRequestFailed(reason=res) | Update project on the DCNM.
:param org_name: name of organization.
:param part_name: name of partition.
:param dci_id: Data Center interconnect id.
:param desc: description of project. |
def map(cls, iterable, func, *a, **kw):
        """Iterable-first replacement of the built-in `map()`; wraps the
        result in `cls`."""
        return cls(func(x, *a, **kw) for x in iterable) | Iterable-first replacement of Python's built-in `map()` function.
def char2hex(a: str) -> int:
        """Convert one hex character to its integer value ('0'->0, 'F'->15,
        'f'->15); return -1 for any non-hex character."""
        if "0" <= a <= "9":
            return ord(a) - 48
        elif "A" <= a <= "F":
            return ord(a) - 55
        elif "a" <= a <= "f":
            return ord(a) - 87
        return -1 | Convert a hex character to its integer value.
'0' becomes 0, '9' becomes 9
'A' becomes 10, 'F' becomes 15
'a' becomes 10, 'f' becomes 15
Returns -1 on error. |
def validate(table, constraints=None, header=None):
        """Validate `table` against `constraints` and/or an expected `header`;
        returns a lazy table of validation problems."""
        return ProblemsView(table, constraints=constraints, header=header) | Validate a `table` against a set of `constraints` and/or an expected
`header`, e.g.::
>>> import petl as etl
>>> # define some validation constraints
... header = ('foo', 'bar', 'baz')
>>> constraints = [
... dict(name='foo_int', field='foo', test=int),
... dict(name='bar_date', field='bar', test=etl.dateparser('%Y-%m-%d')),
... dict(name='baz_enum', field='baz', assertion=lambda v: v in ['Y', 'N']),
... dict(name='not_none', assertion=lambda row: None not in row),
... dict(name='qux_int', field='qux', test=int, optional=True),
... ]
>>> # now validate a table
... table = (('foo', 'bar', 'bazzz'),
... (1, '2000-01-01', 'Y'),
... ('x', '2010-10-10', 'N'),
... (2, '2000/01/01', 'Y'),
... (3, '2015-12-12', 'x'),
... (4, None, 'N'),
... ('y', '1999-99-99', 'z'),
... (6, '2000-01-01'),
... (7, '2001-02-02', 'N', True))
>>> problems = etl.validate(table, constraints=constraints, header=header)
>>> problems.lookall()
+--------------+-----+-------+--------------+------------------+
| name | row | field | value | error |
+==============+=====+=======+==============+==================+
| '__header__' | 0 | None | None | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'foo_int' | 2 | 'foo' | 'x' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'bar_date' | 3 | 'bar' | '2000/01/01' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'baz_enum' | 4 | 'baz' | 'x' | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'bar_date' | 5 | 'bar' | None | 'AttributeError' |
+--------------+-----+-------+--------------+------------------+
| 'not_none' | 5 | None | None | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'foo_int' | 6 | 'foo' | 'y' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'bar_date' | 6 | 'bar' | '1999-99-99' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'baz_enum' | 6 | 'baz' | 'z' | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| '__len__' | 7 | None | 2 | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'baz_enum' | 7 | 'baz' | None | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| '__len__' | 8 | None | 4 | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
Returns a table of validation problems. |
def make_params(
        key_parts: Sequence[str],
        variable_parts: VariablePartsType) -> Dict[str, Union[str, Tuple[str]]]:
    """Map URL-pattern variable names (``key_parts``) to the matched URL
    parts; both sequences are consumed right-to-left, hence the reversal."""
    return dict(zip(reversed(key_parts), _unwrap(variable_parts))) | Map keys to variables. This map\
URL-pattern variables to\
a URL related parts
:param key_parts: A list of URL parts
:param variable_parts: A linked-list\
(ala nested tuples) of URL parts
:return: The param dict with the values\
assigned to the keys
:private: |
def decode_tuple(data, encoding=None, errors=, keep=False,
                 normalize=False, preserve_dict_class=False, to_str=False):
    """Decode all string values to Unicode, delegating to decode_list and
    wrapping the result in a tuple.

    NOTE(review): the default for ``errors`` was stripped in this dump
    (presumably 'strict').
    """
    return tuple(
        decode_list(data, encoding, errors, keep, normalize,
                    preserve_dict_class, True, to_str)
    ) | Decode all string values to Unicode. Optionally use to_str=True to ensure
strings are str types and not unicode on Python 2. |
def fixed_width_binning(data=None, bin_width: Union[float, int] = 1, *, range=None, includes_right_edge=False, **kwargs) -> FixedWidthBinning:
    """Construct a fixed-width binning schema.

    When `range` is given, bins covering it are forced into existence; a
    non-adaptive binning then returns immediately without consulting `data`.
    Otherwise bins spanning [min(data), max(data)] are forced.
    """
    result = FixedWidthBinning(bin_width=bin_width, includes_right_edge=includes_right_edge,
                               **kwargs)
    if range:
        result._force_bin_existence(range[0])
        result._force_bin_existence(range[1], includes_right_edge=True)
        if not kwargs.get("adaptive"):
            return result
    if data is not None and data.shape[0]:
        result._force_bin_existence([np.min(data), np.max(data)],
                                    includes_right_edge=includes_right_edge)
    return result | Construct fixed-width binning schema.
return result | Construct fixed-width binning schema.
Parameters
----------
bin_width: float
range: Optional[tuple]
(min, max)
align: Optional[float]
Must be multiple of bin_width |
def fmt_text(text):
        """Render bytes as text, replacing non-printable characters with
        \\xNN hex escapes.

        NOTE(review): the ord() arguments defining the printable range and
        the split() separator literal were stripped in this dump.
        """
        PRINTABLE_CHAR = set(
            list(range(ord(), ord() + 1)) + [ord(), ord()])
        newtext = ("\\x{:02X}".format(
            c) if c not in PRINTABLE_CHAR else chr(c) for c in text)
        textlines = "\r\n".join(l.strip()
                                for l in "".join(newtext).split())
        return textlines | convert characters that aren't printable to hex format
def ack(self):
        """Acknowledge this message as processed, removing it from the queue.

        :raises MessageStateError: if the message was already
            acknowledged/requeued/rejected.
        """
        if self.acknowledged:
            raise self.MessageStateError(
                "Message already acknowledged with state: %s" % self._state)
        self.backend.ack(self._frame)
        self._state = "ACK" | Acknowledge this message as being processed.,
This will remove the message from the queue.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected. |
def restore(path, password_file=None):
        """Restore a file from the atk vault to its original location,
        re-encrypting it only when its content changed (SHA1 compare),
        then remove the vault copies.

        NOTE(review): file-name literals and open() mode strings were
        stripped in this dump; cannot tell which vault file holds data vs.
        hash, nor binary/text modes.
        """
        vault = VaultLib(get_vault_password(password_file))
        atk_path = os.path.join(ATK_VAULT, path)
        with open(os.path.join(atk_path, ), ) as f:
            old_data = f.read()
        with open(os.path.join(atk_path, ), ) as f:
            old_hash = f.read()
        with open(path, ) as f:
            new_data = f.read()
        new_hash = hashlib.sha1(new_data).hexdigest()
        if old_hash != new_hash:
            # Content changed since checkout: re-encrypt the new content.
            new_data = vault.encrypt(new_data)
        else:
            new_data = old_data
        with open(path, ) as f:
            f.write(new_data)
        os.remove(os.path.join(atk_path, ))
        os.remove(os.path.join(atk_path, )) | Retrieves a file from the atk vault and restores it to its original
location, re-encrypting it if it has changed.
:param path: path to original file |
def listRunSummaries(self, dataset="", run_num=-1):
        """List run summaries (e.g. the maximal lumisection in a run).

        NOTE(review): the wildcard-detection regex, some comparison
        literals and the exception-code keys were stripped in this dump.

        :param dataset: dataset name (optional, no wildcards)
        :param run_num: run number (required)
        :rtype: list of dicts with key max_lumi
        """
        if run_num==-1:
            dbsExceptionHandler("dbsException-invalid-input",
                                "The run_num parameter is mandatory",
                                self.logger.exception)
        if re.search(, dataset):
            dbsExceptionHandler("dbsException-invalid-input",
                                "No wildcards are allowed in dataset",
                                self.logger.exception)
        if ((run_num == -1 or run_num == ) and dataset==):
            dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input when no dataset is present.",
                                self.logger.exception)
        conn = None
        try:
            conn = self.dbi.connection()
            return self.dbsRunSummaryListDAO.execute(conn, dataset, run_num)
        except dbsException as de:
            dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
        except Exception as ex:
            sError = "DBSReaderModel/listRunSummaries. %s\n. Exception trace: \n %s" \
                     % (ex, traceback.format_exc())
            dbsExceptionHandler(, dbsExceptionCode[],
                                self.logger.exception, sError)
        finally:
            if conn:
                conn.close() | API to list run summaries, like the maximal lumisection in a run.
:param dataset: dataset name (Optional)
:type dataset: str
:param run_num: Run number (Required)
:type run_num: str, long, int
:rtype: list containing a dictionary with key max_lumi |
# NOTE(review): the return-type annotation literal after `->` was stripped
# in this dump (`->:`), leaving the signature syntactically invalid.
def label_from_func(self, func:Callable, label_cls:Callable=None, **kwargs)->:
        "Apply `func` to every input to get its label."
        return self._label_from_list([func(o) for o in self.items], label_cls=label_cls, **kwargs) | Apply `func` to every input to get its label.
def hid(manufacturer: str, serial_number: str, model: str) -> str:
        """Compute a URI-safe HID from device properties.

        NOTE(review): the separator string literals between the url_word()
        calls were stripped in this dump (`+ +`).
        """
        return Naming.url_word(manufacturer) + + Naming.url_word(serial_number) + + Naming.url_word(model) | Computes the HID for the given properties of a device. The HID is suitable to use to an URI.
def decode(self, encoded):
        """Decode an object (identity here); when enforce_reversible is set,
        verify that re-encoding the decoded value round-trips.

        NOTE(review): the ValueError message format string was stripped in
        this dump.
        """
        if self.enforce_reversible:
            # Temporarily disable to avoid infinite recursion during the check.
            self.enforce_reversible = False
            if self.encode(self.decode(encoded)) != encoded:
                raise ValueError( % encoded)
            self.enforce_reversible = True
        return encoded | Decodes an object.
Args:
object_ (object): Encoded object.
Returns:
object: Object decoded. |
def on_key_release(self, symbol, modifiers):
        """Pyglet key-release callback; forwards to keyboard_event()."""
        self.keyboard_event(symbol, self.keys.ACTION_RELEASE, modifiers) | Pyglet specific key release callback.
Forwards and translates the events to :py:func:`keyboard_event` |
def error_response(self, kwargs_lens, kwargs_ps):
        """Concatenate per-band 1d data-response error estimates and collect
        per-band additional model errors (e.g. point-source uncertainties).

        :return: 1d response array, list of per-band model errors
        """
        C_D_response, model_error = [], []
        for i in range(self._num_bands):
            if self._compute_bool[i] is True:
                kwargs_lens_i = [kwargs_lens[k] for k in self._idex_lens_list[i]]
                C_D_response_i, model_error_i = self._imageModel_list[i].error_response(kwargs_lens_i, kwargs_ps)
                model_error.append(model_error_i)
                # First computed band seeds the array; later ones are appended.
                if C_D_response == []:
                    C_D_response = C_D_response_i
                else:
                    C_D_response = np.append(C_D_response, C_D_response_i)
        return C_D_response, model_error | returns the 1d array of the error estimate corresponding to the data response
:return: 1d numpy array of response, 2d array of additional errors (e.g. point source uncertainties)
def read_reply(self) -> Reply:
        """Read a (possibly multi-line) FTP reply from the stream; loops
        until the parser sees a terminating code. Coroutine.

        NOTE(review): the line-terminator bytes literal was stripped in
        this dump (`!= b:`, presumably b'\\n'), as was the debug message.

        Returns:
            .ftp.request.Reply: The reply
        """
        _logger.debug()
        reply = Reply()
        while True:
            line = yield from self._connection.readline()
            if line[-1:] != b:
                raise NetworkError()
            self._data_event_dispatcher.notify_read(line)
            reply.parse(line)
            if reply.code is not None:
                break
        return reply | Read a reply from the stream.
Returns:
.ftp.request.Reply: The reply
Coroutine. |
def download_reference_files(job, inputs, samples):
        """Download shared alignment reference files (or generate missing
        BWA index / fai files), then schedule per-sample alignment jobs.

        NOTE(review): the index-name string literals ('amb', 'ann', ... and
        the fasta/fai keys) were stripped in this dump.

        :param job: Toil job (passed automatically)
        :param inputs: input arguments namespace
        :param samples: [UUID, [URL1, URL2]] entries
        """
        shared_ids = {}
        urls = [(, inputs.amb), (, inputs.ann), (, inputs.bwt),
                (, inputs.pac), (, inputs.sa)]
        if inputs.alt:
            urls.append((, inputs.alt))
        download_ref = job.wrapJobFn(download_url_job, inputs.ref, disk=)
        job.addChild(download_ref)
        shared_ids[] = download_ref.rv()
        if inputs.fai:
            shared_ids[] = job.addChildJobFn(download_url_job, inputs.fai).rv()
        else:
            # No .fai supplied: generate one from the downloaded reference.
            faidx = job.wrapJobFn(run_samtools_faidx, download_ref.rv())
            shared_ids[] = download_ref.addChild(faidx).rv()
        if all(x[1] for x in urls):
            for name, url in urls:
                shared_ids[name] = job.addChildJobFn(download_url_job, url).rv()
        else:
            # Some index files missing: build the whole BWA index instead.
            job.fileStore.logToMaster()
            bwa_index = job.wrapJobFn(run_bwa_index, download_ref.rv())
            download_ref.addChild(bwa_index)
            for x, name in enumerate([, , , , ]):
                shared_ids[name] = bwa_index.rv(x)
        job.addFollowOnJobFn(map_job, download_sample_and_align, samples, inputs, shared_ids) | Downloads shared files that are used by all samples for alignment, or generates them if they were not provided.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace inputs: Input arguments (see main)
:param list[list[str, list[str, str]]] samples: Samples in the format [UUID, [URL1, URL2]] |
async def request(self, method, url, **kwargs):
        """Perform an authenticated API request with up to 5 attempts,
        raising HTTPException on non-2xx responses (429 is logged with its
        retry-after before raising).

        NOTE(review): the header-key literals and kwargs keys were stripped
        in this dump; also `rate_limiter` is created but never used here.
        """
        rate_limiter = RateLimiter(max_calls=59, period=60, callback=limited)
        if not self.token:
            raise UnauthorizedDetected()
        headers = {
            : self.user_agent,
            :
        }
        if in kwargs:
            kwargs[] = to_json(kwargs.pop())
        kwargs[] = headers
        headers[] = self.token
        for tries in range(5):
            async with self.session.request(method, url, **kwargs) as resp:
                log.debug(, method,
                          url, kwargs.get(), resp.status)
                data = await json_or_text(resp)
                if 300 > resp.status >= 200:
                    return data
                if resp.status == 429:
                    fmt =
                    retry_after = json.loads(resp.headers.get())
                    mins = retry_after / 60
                    log.warning(fmt, retry_after, mins)
                    raise HTTPException(resp, data) | Handles requests to the API
def rebin(a, factor, func=None):
        """Aggregate array ``a`` into rectangular tiles of size ``factor``,
        reducing each tile with ``func`` (default numpy.mean) via a
        zero-copy strided view; remainder cells are discarded.

        NOTE(review): the bare `u` below is residue of a stripped u-prefixed
        docstring, and both ValueError format strings were stripped.
        """
        u
        a = np.asarray(a)
        dim = a.ndim
        if np.isscalar(factor):
            factor = dim*(factor,)
        elif len(factor) != dim:
            raise ValueError(
                .format(dim, len(factor)))
        if func is None:
            func = np.mean
        for f in factor:
            if f != int(f):
                raise ValueError(
                    .format(f))
        # Reshape each (n,) axis into (n//f, f) by stride manipulation,
        # then reduce over the trailing tile axes.
        new_shape = [n//f for n, f in zip(a.shape, factor)]+list(factor)
        new_strides = [s*f for s, f in zip(a.strides, factor)]+list(a.strides)
        aa = as_strided(a, shape=new_shape, strides=new_strides)
        return func(aa, axis=tuple(range(-dim, 0))) | u"""Aggregate data from the input array ``a`` into rectangular tiles.
The output array results from tiling ``a`` and applying `func` to
each tile. ``factor`` specifies the size of the tiles. More
precisely, the returned array ``out`` is such that::
out[i0, i1, ...] = func(a[f0*i0:f0*(i0+1), f1*i1:f1*(i1+1), ...])
If ``factor`` is an integer-like scalar, then
``f0 = f1 = ... = factor`` in the above formula. If ``factor`` is a
sequence of integer-like scalars, then ``f0 = factor[0]``,
``f1 = factor[1]``, ... and the length of ``factor`` must equal the
number of dimensions of ``a``.
The reduction function ``func`` must accept an ``axis`` argument.
Examples of such function are
- ``numpy.mean`` (default),
- ``numpy.sum``,
- ``numpy.product``,
- ...
The following example shows how a (4, 6) array is reduced to a
(2, 2) array
>>> import numpy
>>> from rebin import rebin
>>> a = numpy.arange(24).reshape(4, 6)
>>> rebin(a, factor=(2, 3), func=numpy.sum)
array([[ 24, 42],
[ 96, 114]])
If the elements of `factor` are not integer multiples of the
dimensions of `a`, the remainding cells are discarded.
>>> rebin(a, factor=(2, 2), func=numpy.sum)
array([[16, 24, 32],
[72, 80, 88]]) |
def _log_begin(self):
        """Log the beginning of the task execution (plus user/org details
        for standalone Salesforce tasks)."""
        self.logger.info("Beginning task: %s", self.__class__.__name__)
        if self.salesforce_task and not self.flow:
            self.logger.info("%15s %s", "As user:", self.org_config.username)
            self.logger.info("%15s %s", "In org:", self.org_config.org_id)
        self.logger.info("") | Log the beginning of the task execution
def resize(img, size, interpolation=Image.BILINEAR):
        """Resize a PIL Image: int ``size`` scales the smaller edge keeping
        aspect ratio; an (h, w) pair resizes exactly (note the [::-1] swap
        to PIL's (w, h) order).

        NOTE(review): the bare `r` below is residue of a stripped raw
        docstring, and both TypeError message strings were stripped.
        """
        r
        if not _is_pil_image(img):
            raise TypeError(.format(type(img)))
        if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):
            raise TypeError(.format(size))
        if isinstance(size, int):
            w, h = img.size
            # Already at target smaller-edge size: return unchanged.
            if (w <= h and w == size) or (h <= w and h == size):
                return img
            if w < h:
                ow = size
                oh = int(size * h / w)
                return img.resize((ow, oh), interpolation)
            else:
                oh = size
                ow = int(size * w / h)
                return img.resize((ow, oh), interpolation)
        else:
            return img.resize(size[::-1], interpolation) | r"""Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
the smaller edge of the image will be matched to this number maintaining
the aspect ratio. i.e, if height > width, then image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image. |
def get_proficiencies(self):
        """Get the proficiency list resulting from a search (single use).

        return: (osid.learning.ProficiencyList)
        raise:  IllegalState - list already retrieved
        """
        if self.retrieved:
            raise errors.IllegalState()
        self.retrieved = True
        return objects.ProficiencyList(self._results, runtime=self._runtime) | Gets the proficiency list resulting from a search.
return: (osid.learning.ProficiencyList) - the proficiency list
raise: IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.* |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.