| code | docstring |
|---|---|
def _draw_polygons(self, feature, bg, colour, extent, polygons, xo, yo):
coords = []
for polygon in polygons:
coords.append([self._scale_coords(x, y, extent, xo, yo) for x, y in polygon])
if "type" in feature["properties"] and "building" in feature["properties"]["type"]:
for line in coords:
self._draw_lines_internal(line, colour, bg)
else:
self._screen.fill_polygon(coords, colour=colour, bg=bg) | Draw a set of polygons from a vector tile. |
def Define_TreeTable(self, heads, heads2=None):
display_heads = []
display_heads.append(tuple(heads[2:]))
self.tree_table = TreeTable()
self.tree_table.append_from_list(display_heads, fill_title=True)
if heads2 is not None:
heads2_color = heads2[1]
row_widget = gui.TableRow()
for index, field in enumerate(heads2[2:]):
row_item = gui.TableItem(text=field,
style={'color': heads2_color})  # style key assumed; literal lost in extraction
row_widget.append(row_item, field)
self.tree_table.append(row_widget, heads2[0])
self.wid.append(self.tree_table) | Define a TreeTable with a heading row
and optionally a second heading row. |
def interprocess_locked(path):
lock = InterProcessLock(path)
def decorator(f):
@six.wraps(f)
def wrapper(*args, **kwargs):
with lock:
return f(*args, **kwargs)
return wrapper
return decorator | Acquires & releases an interprocess lock around calls into
the decorated function. |
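A minimal usage sketch of the decorator (the lock-file path is illustrative):

@interprocess_locked('/tmp/myapp.lock')
def bump_counter():
    # Only one process at a time may execute this body.
    with open('/tmp/counter.txt', 'a') as f:
        f.write('tick\n')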
def _merge_fields(a, b):
a_names = set(x[0] for x in a)
b_names = set(x[0] for x in b)
a_keep = a_names - b_names
fields = []
for name, field in a:
if name in a_keep:
fields.append((name, field))
fields.extend(b)
return fields | Merge two lists of fields.
Fields in `b` override fields in `a`. Fields in `a` are output first. |
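For example, entries in `b` win on name collisions while surviving `a` entries keep their order:

a = [("id", "int"), ("name", "str")]
b = [("name", "unicode"), ("age", "int")]
print(_merge_fields(a, b))
# [('id', 'int'), ('name', 'unicode'), ('age', 'int')]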
def set_params_value(self, *params):
if len(params) != len(self.parameters):
raise Exception("parameter error")
temp = self.parameters
self.parameters = []
for i in range(len(params)):
self.parameters.append(Parameter(temp[i]['name'], temp[i]['type']))  # key names assumed; literals lost in extraction
self.parameters[i].set_value(params[i]) | This interface is used to set the parameter values for a function in an abi file. |
def get_all_loopbacks(engine):
data = []
if 'fw_cluster' in engine.type:  # engine type literal assumed
for cvi in engine.data.get('loopback_cluster_virtual_interface', []):
data.append(
LoopbackClusterInterface(cvi, engine))
for node in engine.nodes:
for lb in node.data.get('loopback_node_dedicated_interface', []):
data.append(LoopbackInterface(lb, engine))
return data | Get all loopback interfaces for a given engine |
def run(command, encoding=None, decode=True, cwd=None):
if not encoding:
encoding = locale.getpreferredencoding()
try:
with open(os.devnull, 'rb') as devnull:
pipe = subprocess.Popen(command, stdin=devnull,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, cwd=cwd)
except OSError as e:
raise Failure("could not run %s: %s" % (command, e))
output = pipe.communicate()[0]
if decode:
output = output.decode(encoding)
status = pipe.wait()
if status != 0:
raise CommandFailed(command, status, output)
return output | Run a command [cmd, arg1, arg2, ...].
Returns the output (stdout + stderr).
Raises CommandFailed in cases of error. |
def guest_reboot(self, userid):
LOG.info("Begin to reboot vm %s", userid)
self._smtclient.guest_reboot(userid)
LOG.info("Complete reboot vm %s", userid) | Reboot a guest vm. |
def currentPixmapRect(self):
pixmap = self.currentPixmap()
rect = self.rect()
size = pixmap.size()
x = rect.center().x() - (size.width() / 2.0)
y = rect.center().y() - (size.height() / 2.0)
return QtCore.QRect(int(x), int(y), size.width(), size.height()) | Returns the rect that defines the boundary for the current pixmap
based on the size of the button and the size of the pixmap.
:return <QtCore.QRect> |
def getp(self, name):
name = self._mapping.get(name,name)
return self.params[name] | Get the named parameter.
Parameters
----------
name : string
The parameter name.
Returns
-------
param :
The parameter object. |
def update(self, volume, display_name=None, display_description=None):
return volume.update(display_name=display_name,
display_description=display_description) | Update the specified values on the specified volume. You may specify
one or more values to update. If no values are specified as non-None,
the call is a no-op; no exception will be raised. |
def build_year(self, dt):
self.year = str(dt.year)
logger.debug("Building %s" % self.year)
self.request = self.create_request(self.get_url())
target_path = self.get_build_path()
self.build_file(target_path, self.get_content()) | Build the page for the provided year. |
def mgz_to_nifti(filename,prefix=None,gzip=True):
setup_freesurfer()
if prefix is None:
prefix = nl.prefix(filename) + '.nii'
if gzip and not prefix.endswith('.gz'):
prefix += '.gz'
nl.run([os.path.join(freesurfer_home, 'bin', 'mri_convert'), filename, prefix], products=prefix) | Convert ``filename`` to a NIFTI file using ``mri_convert`` |
def _set_cfg(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v, base=YANGListType("cfg_name", cfg.cfg, yang_name="cfg", rest_name="cfg", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='cfg-name'), is_container='list', yang_name="cfg", rest_name="cfg", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)  # namespace/module strings assumed; vendor extension metadata lost in extraction
except (TypeError, ValueError):
raise ValueError({
'error-string': """cfg must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(...)""",
})
self.__cfg = t
if hasattr(self, '_set'):
self._set() | Setter method for cfg, mapped from YANG variable /zoning/defined_configuration/cfg (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_cfg is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cfg() directly. |
def _create_query(node, context):
visited_nodes = [node]
output_columns = _get_output_columns(visited_nodes, context)
filters = _get_filters(visited_nodes, context)
selectable = sql_context_helpers.get_node_selectable(node, context)
query = select(output_columns).select_from(selectable).where(and_(*filters))
return query | Create a query from a SqlNode.
Args:
node: SqlNode, the current node.
context: CompilationContext, global compilation state and metadata.
Returns:
Selectable, selectable of the generated query. |
def as_number(self):
def combine(subver, ver):
return subver / 10 + ver
return reduce(combine, reversed(self.version)) | >>> round(SummableVersion('1.9.3').as_number(), 12)
1.93 |
def help_cli_search(self):
help = % (color.Yellow, color.Green)
help += % (color.Green)
help += % (color.Green)
help += bad\exe\ % (color.LightBlue)
help += % (color.Green)
help += % (color.LightBlue, color.Normal)
help += % (color.Green)
help += % (color.LightBlue, color.Normal)
help += % (color.LightBlue, color.Normal)
return help | Help for Workbench CLI Search |
def gnuplot_2d(x, y, filename, title='', x_label='', y_label=''):
_, ext = os.path.splitext(filename)
if ext != '.png':  # output extension assumed
filename += '.png'
gnuplot_cmds = """..."""  # gnuplot script template lost in extraction
scr = _GnuplotScriptTemp(gnuplot_cmds)
data = _GnuplotDataTemp(x, y)
args_dict = {
'filename': filename,
'data': data.name,
'title': title,
'x_label': x_label,
'y_label': y_label
}
gnuplot(scr.name, args_dict) | Function to produce a general 2D plot.
Args:
x (list): x points.
y (list): y points.
filename (str): Filename of the output image.
title (str): Title of the plot. Default is '' (no title).
x_label (str): x-axis label.
y_label (str): y-axis label. |
def _get_status_tokens(self):
" The tokens for the status bar. "
result = []
for i, w in enumerate(self.pymux.arrangement.windows):
if i > 0:
result.append(('', ' '))  # separator token; literals lost in extraction
if w == self.pymux.arrangement.get_active_window():
style = 'class:window.current'  # style string assumed
format_str = self.pymux.window_status_current_format
else:
style = 'class:window'  # style string assumed
format_str = self.pymux.window_status_format
result.append((
style,
format_pymux_string(self.pymux, format_str, window=w),
self._create_select_window_handler(w)))
return result | The tokens for the status bar. |
def _build_contract_creation_tx_with_valid_signature(self, tx_dict: Dict[str, Any], s: int) -> Transaction:
zero_address = HexBytes('0x' + '0' * 40)
f_address = HexBytes('0x' + 'f' * 40)
nonce = tx_dict['nonce']
gas_price = tx_dict['gasPrice']
gas = tx_dict['gas']
to = tx_dict.get('to', b'')
value = tx_dict['value']
data = tx_dict['data']
for _ in range(100):
try:
v, r = self.find_valid_random_signature(s)
contract_creation_tx = Transaction(nonce, gas_price, gas, to, value, HexBytes(data), v=v, r=r, s=s)
sender_address = contract_creation_tx.sender
contract_address = contract_creation_tx.creates
if sender_address in (zero_address, f_address) or contract_address in (zero_address, f_address):
raise InvalidTransaction
return contract_creation_tx
except InvalidTransaction:
pass
raise ValueError('Valid signature not found with s=%d', s) | Use pyethereum `Transaction` to generate a valid tx using a random signature
:param tx_dict: Web3 tx dictionary
:param s: Signature s value
:return: PyEthereum creation tx for the proxy contract |
def getAllConfig(self, fmt='json'):
for e in self.getCtrlConf(msgout=False):
self._lattice_confdict.update(e.dumpConfig(type='simple'))  # type literal assumed
self._lattice_confdict.update(self._lattice.dumpConfig())
if fmt == 'json':
return json.dumps(self._lattice_confdict)
else:
return self._lattice_confdict | Return all element configurations as a JSON string or dict;
can be further processed by the beamline.Lattice class.
:param fmt: 'json' (default) or 'dict' |
def kwonly_args(kws, required, withdefaults=(), leftovers=False):
if hasattr(withdefaults, 'items'):
withdefaults = withdefaults.items()
kwonly = []
missing = []
for name in required:
if name not in kws:
missing.append(name)
else:
kwonly.append(kws.pop(name))
if missing:
if len(missing) > 2:
end = 's: %s, and %s' % (', '.join(missing[:-1]), missing[-1])
elif len(missing) == 2:
end = 's: %s and %s' % tuple(missing)
else:
end = ': %s' % tuple(missing)
msg = 'missing %s required keyword-only argument%s'  # message text assumed from the recipe
raise TypeError(msg % (len(missing), end))
for name, value in withdefaults:
if name not in kws:
kwonly.append(value)
else:
kwonly.append(kws.pop(name))
if not leftovers and kws:
msg = "got an unexpected keyword argument "
raise TypeError(msg % (kws.keys()[0]))
return [kws] + kwonly | Based on the snippet by Eric Snow
http://code.activestate.com/recipes/577940
SPDX-License-Identifier: MIT |
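A usage sketch of the recipe, emulating keyword-only arguments under Python 2:

def connect(host, **kws):
    # 'port' is required; 'timeout' defaults to 30; no leftovers allowed.
    kws, port, timeout = kwonly_args(kws, ['port'], [('timeout', 30)])
    return host, port, timeout

print(connect('example.com', port=8080))  # ('example.com', 8080, 30)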
def replace(dict,line):
words = line.split()
new_line = ""
for word in words:
fst = word[0]
last = word[-1]
if last == "," or last == ";" or last == ".":
clean_word = word[0:-1]
last = last + " "
elif last == "]":
clean_word = word[0:-1]
else:
clean_word = word
last = " "
if fst == "[":
clean_word = clean_word[1:]
else:
clean_word = clean_word
fst = ""
find = dict.get(clean_word)
if find == None:
new_line = new_line + fst + str(clean_word) + last
else:
new_line = new_line + fst + str(find) + last
return new_line | Find and replace the special words according to the dictionary.
Parameters
==========
dict : Dictionary
A dictionary derived from a YAML file, with source-language words as keys and target-language words as values.
line : String
The string to be processed. |
def _setup_piddir(self):
if self.pidfile is None:
return
piddir = os.path.dirname(self.pidfile)
if not os.path.isdir(piddir):
os.makedirs(piddir, 0o777 & ~self.umask)
os.chown(piddir, self.uid, self.gid) | Create the directory for the PID file if necessary. |
def wait(self, till=None):
waiter = Signal()
if self.waiting:
DEBUG and _Log.note("waiting with {{num}} others on {{name|quote}}", num=len(self.waiting), name=self.name, stack_depth=1)
self.waiting.insert(0, waiter)
else:
DEBUG and _Log.note("waiting by self on {{name|quote}}", name=self.name)
self.waiting = [waiter]
try:
self.lock.release()
DEBUG and _Log.note("out of lock {{name|quote}}", name=self.name)
(waiter | till).wait()
if DEBUG:
_Log.note("done minimum wait (for signal {{till|quote}})", till=till.name if till else "", name=self.name)
except Exception as e:
if not _Log:
_late_import()
_Log.warning("problem", cause=e)
finally:
self.lock.acquire()
DEBUG and _Log.note("re-acquired lock {{name|quote}}", name=self.name)
try:
self.waiting.remove(waiter)
DEBUG and _Log.note("removed own signal from {{name|quote}}", name=self.name)
except Exception:
pass
return bool(waiter) | THE ASSUMPTION IS wait() WILL ALWAYS RETURN WITH THE LOCK ACQUIRED
:param till: WHEN TO GIVE UP WAITING FOR ANOTHER THREAD TO SIGNAL
:return: True IF SIGNALED TO GO, False IF till WAS SIGNALED |
def nphase_border(im, include_diagonals=False):
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image contains a singleton axis: ' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')  # warning text reconstructed
ndim = len(np.shape(im))
if ndim not in [2, 3]:
raise NotImplementedError("Function only works for 2d and 3d images")
im = np.pad(im, pad_width=1, mode='edge')  # pad mode assumed
stack = _make_stack(im, include_diagonals)
stack.sort()
out = np.ones_like(im)
for k in range(np.shape(stack)[ndim])[1:]:
if ndim == 2:
mask = stack[:, :, k] != stack[:, :, k-1]
elif ndim == 3:
mask = stack[:, :, :, k] != stack[:, :, :, k-1]
out += mask
if ndim == 2:
return out[1:-1, 1:-1].copy()
else:
return out[1:-1, 1:-1, 1:-1].copy() | r'''
Identifies the voxels in regions that border *N* other regions.
Useful for finding triple-phase boundaries.
Parameters
----------
im : ND-array
An ND image of the porous material containing discrete values in the
pore space identifying different regions. e.g. the result of a
snow-partition
include_diagonals : boolean
When identifying bordering pixels (2D) and voxels (3D) include those
shifted along more than one axis
Returns
-------
image : ND-array
A copy of ``im`` with voxel values equal to the number of uniquely
different bordering values |
def check_rollout(edits_service, package_name, days):
edit = edits_service.insert(body={}, packageName=package_name).execute()
response = edits_service.tracks().get(editId=edit['id'], track='production', packageName=package_name).execute()  # track name assumed
releases = response['releases']
for release in releases:
if release['status'] == 'inProgress':
url = 'https://archive.mozilla.org/pub/mobile/releases/{}/SHA512SUMS'.format(release['name'])  # URL template lost in extraction; reconstructed
resp = requests.head(url)
if resp.status_code != 200:
if resp.status_code != 404:
logger.warning("Could not check %s: %s", url, resp.status_code)
continue
age = time.time() - calendar.timegm(eu.parsedate(resp.headers['Last-Modified']))
if age >= days * DAY:
yield release, age | Check if package_name has a release on staged rollout for too long |
def default_number_converter(number_str):
is_int = (number_str.startswith('-') and number_str[1:].isdigit()) or number_str.isdigit()
return int(number_str) if is_int else float(number_str) | Converts the string representation of a json number into its python object equivalent, an
int, long, float or whatever type suits. |
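For example:

print(default_number_converter('42'))     # 42 (int)
print(default_number_converter('-7'))     # -7 (int)
print(default_number_converter('3.14'))   # 3.14 (float)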
def train(self, data, target, **kwargs):
non_predictors = [i.replace(" ", "_").lower() for i in list(set(data[]))] + ["team", "next_year_wins"]
self.column_names = [l for l in list(data.columns) if l not in non_predictors]
results, folds = self.cross_validate(data, non_predictors, **kwargs)
self.gather_results(results, folds, data) | Used in the training phase. Override. |
def add_aliases(self_or_cls, **kwargs):
self_or_cls.aliases.update({v:k for k,v in kwargs.items()}) | Conveniently add new aliases as keyword arguments. For instance
you can add a new alias with add_aliases(short='Longer string') |
def get_gebouw_by_id(self, id):
def creator():
res = crab_gateway_request(
self.client, 'GetGebouwByIdentificatorGebouw', id
)
if res is None:
raise GatewayResourceNotFoundException()
return Gebouw(
res.IdentificatorGebouw,
res.AardGebouw,
res.StatusGebouw,
res.GeometriemethodeGebouw,
res.Geometrie,
Metadata(
res.BeginDatum,
res.BeginTijd,
self.get_bewerking(res.BeginBewerking),
self.get_organisatie(res.BeginOrganisatie)
)
)
if self.caches['short'].is_configured:  # cache name assumed
key = 'GetGebouwByIdentificatorGebouw#%s' % (id)
gebouw = self.caches['short'].get_or_create(key, creator)
else:
gebouw = creator()
gebouw.set_gateway(self)
return gebouw | Retrieve a `Gebouw` by the Id.
:param integer id: the Id of the `Gebouw`
:rtype: :class:`Gebouw` |
def _update_limits_from_api(self):
logger.debug("Querying account limits from API")  # original message lost in extraction
self.connect()
resp = self.conn.describe_account_limits()
for lim in resp['Limits']:
if lim['Name'] == 'classic-load-balancers':  # limit name assumed
self.limits['Classic load balancers']._set_api_limit(lim['Max'])
continue
logger.debug(
'API response contained unknown limit: %s', lim['Name']) | Call the service's API action to retrieve limit/quota information, and
update AwsLimit objects in ``self.limits`` with this information. |
async def load(cls, db, identifier=None, redis_key=None):
if not identifier and not redis_key:
raise InvalidQuery()
if redis_key is None:
redis_key = cls.make_key(identifier)
if await db.exists(redis_key):
data = await db.hgetall(redis_key)
kwargs = {}
for key_bin, value_bin in data.items():
key, value = key_bin, value_bin
column = getattr(cls, key, False)
if not column or (column.field_type == str):
kwargs[key] = value
elif column.field_type == datetime:
kwargs[key] = datetime.strptime(value, DATETIME_FORMAT)
else:
kwargs[key] = column.field_type(value)
kwargs['loading'] = True  # key name lost in extraction; assumed
return cls(**kwargs)
else:
logger.debug("No Redis key found: {}".format(redis_key))
return None | Load the object from redis. Use the identifier (colon-separated
composite keys or the primary key) or the redis_key. |
def from_lal_unit(lunit):
return reduce(operator.mul, (
units.Unit(str(LAL_UNIT_INDEX[i])) ** exp for
i, exp in enumerate(lunit.unitNumerator))) | Convert a `lal.Unit` into a `~astropy.units.Unit`
Parameters
----------
lunit : `lal.Unit`
the input unit
Returns
-------
unit : `~astropy.units.Unit`
the Astropy representation of the input
Raises
------
TypeError
if ``lunit`` cannot be converted to `lal.Unit`
ValueError
if Astropy doesn't understand the base units for the input |
def encode_binary_dict(array, buffers):
buffer_id = make_id()
buf = (dict(id=buffer_id), array.tobytes())
buffers.append(buf)
return {
'__buffer__': buffer_id,
'shape': array.shape,
'dtype': array.dtype.name,
'order': sys.byteorder
} | Send a numpy array as an unencoded binary buffer
The encoded format is a dict with the following structure:
.. code:: python
{
'__buffer__' : << an ID to locate the buffer >>,
'shape' : << array shape >>,
'dtype' : << dtype name >>,
'order' : << byte order at origin (little or big)>>
}
Args:
array (np.ndarray) : an array to encode
buffers (set) :
Set to add buffers to
**This is an "out" parameter**. The values it contains will be
modified in-place.
Returns:
dict |
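A usage sketch matching the documented format:

import numpy as np
buffers = []
arr = np.arange(6, dtype='float64').reshape(2, 3)
header = encode_binary_dict(arr, buffers)
# header -> {'__buffer__': <id>, 'shape': (2, 3), 'dtype': 'float64',
#            'order': 'little'}  (order depends on the host byte order)
# buffers now holds one (dict(id=<id>), <raw bytes>) tuple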
def get_assignments(self, site):
tools = self.get_tools(site)
assignment_tool_filter = [x.href for x in tools if x.name == 'Assignments']  # tool name assumed
if not assignment_tool_filter:
return []
assignment_tool_url = assignment_tool_filter[0].href
response = self._session.get(assignment_tool_url)
response.raise_for_status()
iframes = self._html_iface.get_iframes(response.text)
iframe_url = ''
for frame in iframes:
if frame['title'] == 'Assignments':  # frame keys assumed
iframe_url = frame['src']
if iframe_url == '':
print "WARNING: NO ASSIGNMENT IFRAMES FOUND"
response = self._session.get(iframe_url)
response.raise_for_status()
assignment_dict_list = self._html_iface.get_assignments(response.text)
return [TSquareAssignment(**x) for x in assignment_dict_list] | Gets a list of assignments associated with a site (class). Returns
a list of TSquareAssignment objects.
@param site (TSquareSite) - The site to use with the assignment query
@returns - A list of TSquareAssignment objects. May be an empty list if
the site has defined no assignments. |
def get_name_dictionary_extractor(name_trie):
return DictionaryExtractor()\
.set_trie(name_trie)\
.set_pre_filter(VALID_TOKEN_RE.match)\
.set_pre_process(lambda x: x.lower())\
.set_metadata({'extractor': 'name_dictionary'}) | Method for creating a default name dictionary extractor |
def get_tunnel_statistics_output_tunnel_stat_rx_bytes(self, **kwargs):
config = ET.Element("config")
get_tunnel_statistics = ET.Element("get_tunnel_statistics")
config = get_tunnel_statistics
output = ET.SubElement(get_tunnel_statistics, "output")
tunnel_stat = ET.SubElement(output, "tunnel-stat")
rx_bytes = ET.SubElement(tunnel_stat, "rx-bytes")
rx_bytes.text = kwargs.pop('rx_bytes')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def to_record(cls, attr_names, values):
try:
values = values._asdict()
except AttributeError:
pass
try:
return [cls.__to_sqlite_element(values.get(attr_name)) for attr_name in attr_names]
except AttributeError:
pass
if isinstance(values, (tuple, list)):
return [cls.__to_sqlite_element(value) for value in values]
raise ValueError("cannot convert from {} to list".format(type(values))) | Convert values to a record to be inserted into a database.
:param list attr_names:
List of attributes for the converting record.
:param values: Values to be converted.
:type values: |dict|/|namedtuple|/|list|/|tuple|
:raises ValueError: If the ``values`` is invalid. |
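A hedged usage sketch; the enclosing converter class name `RecordConvertor` is assumed:

from collections import namedtuple

Row = namedtuple('Row', ['name', 'age'])
record = RecordConvertor.to_record(['name', 'age'], Row('alice', 30))
# -> ['alice', 30] (after each value passes through __to_sqlite_element)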
def _get_pdm(cls, df, windows):
window = cls.get_only_one_positive_int(windows)
column_name = 'pdm_{}'.format(window)
um, dm = df['um'], df['dm']
df['pdm'] = np.where(um > dm, um, 0)
if window > 1:
pdm = df['pdm_{}_ema'.format(window)]  # smoothed +DM column name assumed
else:
pdm = df['pdm']
df[column_name] = pdm | +DM, positive directional moving
If window is not 1, calculate the SMMA of +DM
:param df: data
:param windows: range
:return: |
def do_termchar(self, args):
if not self.current:
print('There are no resources in use. Use the command "open".')  # message text assumed
return
args = args.strip()
if not args:
try:
charmap = {u'\r': 'CR', u'\n': 'LF', u'\r\n': 'CRLF', u'\0': 'NUL'}
chr = self.current.read_termination
if chr in charmap:
chr = charmap[chr]
chw = self.current.write_termination
if chw in charmap:
chw = charmap[chw]
print('Termchar read: {} write: {}'.format(chr, chw))
except Exception as e:
print(e)
else:
args = args.split(' ')
charmap = {'CR': u'\r', 'LF': u'\n', 'CRLF': u'\r\n', 'NUL': u'\0', 'None': None}
chr = args[0]
chw = args[0 if len(args) == 1 else 1]
if chr in charmap and chw in charmap:
try:
self.current.read_termination = charmap[chr]
self.current.write_termination = charmap[chw]
print('Done')
except Exception as e:
print(e)
else:
print('use CR, LF, CRLF, NUL or None to set termchar')
return | Get or set termination character for resource in use.
<termchar> can be one of: CR, LF, CRLF, NUL or None.
None is used to disable termination character
Get termination character:
termchar
Set termination character read or read+write:
termchar <termchar> [<termchar>] |
def draw_image(self, ax, image):
self.renderer.draw_image(imdata=utils.image_to_base64(image),
extent=image.get_extent(),
coordinates="data",
style={"alpha": image.get_alpha(),
"zorder": image.get_zorder()},
mplobj=image) | Process a matplotlib image object and call renderer.draw_image |
def add_index(self, field, value):
if field[-4:] not in ("_bin", "_int"):
raise RiakError("Riak 2i fields must end with either "
"'_bin' or '_int'.")
self.indexes.add((field, value))
return self._robject | add_index(field, value)
Tag this object with the specified field/value pair for
indexing.
:param field: The index field.
:type field: string
:param value: The index value.
:type value: string or integer
:rtype: :class:`RiakObject <riak.riak_object.RiakObject>` |
def tree_walk(cls, directory, tree):
results = []
dirs = [d for d in tree if d != FILE_MARKER]
files = tree[FILE_MARKER]
results.append((directory, dirs, files))
for d in dirs:
subdir = os.path.join(directory, d)
subtree = tree[d]
results.extend(cls.tree_walk(subdir, subtree))
return results | Walks a tree returned by `cls.list_to_tree` returning a list of
3-tuples as if from os.walk(). |
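For example, given the nested-dict tree shape this expects (POSIX paths; the class name is illustrative):

tree = {FILE_MARKER: ['README.md'],
        'src': {FILE_MARKER: ['main.py']}}
for triple in Walker.tree_walk('/project', tree):
    print(triple)
# ('/project', ['src'], ['README.md'])
# ('/project/src', [], ['main.py'])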
def xcorr(x, y=None, maxlags=None, norm=):
N = len(x)
if y is None:
y = x
assert len(x) == len(y), 'x and y must have the same length'
if maxlags is None:
maxlags = N-1
lags = np.arange(0, 2*N-1)
else:
assert maxlags <= N, 'maxlags must be less than data length'
lags = np.arange(N-maxlags-1, N+maxlags)
res = np.correlate(x, y, mode='full')
if norm == 'biased':
Nf = float(N)
res = res[lags] / float(N)
elif norm == 'unbiased':
res = res[lags] / (float(N)-abs(np.arange(-N+1, N)))[lags]
elif norm == 'coeff':
Nf = float(N)
rms = pylab_rms_flat(x) * pylab_rms_flat(y)
res = res[lags] / rms / Nf
else:
res = res[lags]
lags = np.arange(-maxlags, maxlags+1)
return res, lags | Cross-correlation using numpy.correlate
Estimates the cross-correlation (and autocorrelation) sequence of a random
process of length N. By default, there is no normalisation and the output
sequence of the cross-correlation has a length 2*N+1.
:param array x: first data array of length N
:param array y: second data array of length N. If not specified, computes the
autocorrelation.
:param int maxlags: compute cross correlation between [-maxlags:maxlags]
when maxlags is not specified, the range of lags is [-N+1:N-1].
:param str norm: normalisation in ['biased', 'unbiased', None, 'coeff']
The true cross-correlation sequence is
.. math:: r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m])
However, in practice, only a finite segment of one realization of the
infinite-length random process is available.
The correlation is estimated using numpy.correlate(x,y,'full').
Normalisation is handled by this function using the following cases:
* 'biased': Biased estimate of the cross-correlation function
* 'unbiased': Unbiased estimate of the cross-correlation function
* 'coeff': Normalizes the sequence so the autocorrelations at zero
lag is 1.0.
:return:
* a numpy.array containing the cross-correlation sequence (length 2*N-1)
* lags vector
.. note:: If x and y are not the same length, the shorter vector is
zero-padded to the length of the longer vector.
.. rubric:: Examples
.. doctest::
>>> from spectrum import xcorr
>>> x = [1,2,3,4,5]
>>> c, l = xcorr(x,x, maxlags=0, norm='biased')
>>> c
array([ 11.])
.. seealso:: :func:`CORRELATION`. |
def state(self):
if self.method in ['OPTIONS', 'DESCRIBE', 'SETUP', 'PLAY']:  # method names assumed from docstring
state = STATE_STARTING
elif self.method in ['KEEP-ALIVE']:
state = STATE_PLAYING
else:
state = STATE_STOPPED
_LOGGER.debug('%s RTSP session state %s', self.host, state)  # message text assumed
return state | Which state the session is in.
Starting - all messages needed to get stream started.
Playing - keep-alive messages every self.session_timeout. |
def provider_parser(subparser):
# flag names and help strings reconstructed; original literals lost in extraction
subparser.add_argument('--auth-account',
help='specify type of Hetzner account: Hetzner Robot or Hetzner konsoleH')
subparser.add_argument('--auth-username', help='specify username of Hetzner account')
subparser.add_argument('--auth-password', help='specify password of Hetzner account')
subparser.add_argument('--linked',
help='if "yes", uses linked CNAME as A|AAAA|TXT record name for edit actions; '
'only enabled if the record is specified as a raw FQDN identifier '
'\'type/name/content\'',
default=str('yes'),
choices=['yes', 'no'])
subparser.add_argument('--propagated',
help='wait until the record has publicly propagated after create|update actions',
default=str('yes'),
choices=['yes', 'no'])
subparser.add_argument('--latency',
help='specify latency in seconds between propagation checks',
default=int(30),
type=int) | Configure a provider parser for Hetzner |
def get_content(self):
filestream = compat.StringIO()
tableName, primKey = self.provider._split_path(self.path)
if primKey is not None:
conn = self.provider._init_connection()
listFields = self.provider._get_field_list(conn, tableName)
csvwriter = csv.DictWriter(filestream, listFields, extrasaction="ignore")
dictFields = {}
for field_name in listFields:
dictFields[field_name] = field_name
csvwriter.writerow(dictFields)
if primKey == "_ENTIRE_CONTENTS":
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
cursor.execute("SELECT * from " + self.provider._db + "." + tableName)
result_set = cursor.fetchall()
for row in result_set:
csvwriter.writerow(row)
cursor.close()
else:
row = self.provider._get_record_by_primary_key(conn, tableName, primKey)
if row is not None:
csvwriter.writerow(row)
conn.close()
filestream.seek(0)
return filestream | Open content as a stream for reading.
See DAVResource.get_content() |
def ec2_network_network_acl_id(self, lookup, default=None):
network_acl_id = EFAwsResolver.__CLIENTS["ec2"].describe_network_acls(Filters=[{
'Name': 'tag:Name',
'Values': [lookup]
}])
if len(network_acl_id["NetworkAcls"]) > 0:
return network_acl_id["NetworkAcls"][0]["NetworkAclId"]
else:
return default | Args:
lookup: the friendly name of the network ACL we are looking up
default: the optional value to return if lookup failed; returns None if not set
Returns:
the ID of the network ACL, or None if no match found |
def end(self):
if hasattr(curses, ):
curses.echo()
if hasattr(curses, ):
curses.nocbreak()
if hasattr(curses, ):
try:
curses.curs_set(1)
except Exception:
pass
curses.endwin() | Shutdown the curses window. |
def get_summary(list_all=[], **kwargs):
all_summary = []
for module in list_all:
summary = {
"module_name" : module[],
"show_all" : kwargs.get("show_all",True),
"project_name" : kwargs.get("proj_name","TestProject"),
"home_page" : kwargs.get("home_page",__about__.HOME_PAGE),
"start_time" : "",
"end_time" : "",
"duration_seconds" : "",
"total_case_num" : len(module["TestCases"]),
"pass_cases_num" : 0,
"fail_cases_num" : 0,
"details" : []
}
for case in module["TestCases"]:
case_detail = {}
case_detail["linkurl"] = "./caselogs/%s_%s.log" %(case["case_name"],case["exec_date"])
if case["status"].lower() == "pass":
summary["pass_cases_num"] += 1
case_detail["c_style"] = "tr_pass"
else:
summary["fail_cases_num"] += 1
case_detail["c_style"] = "tr_fail"
case_detail.update(case)
summary["details"].append(case_detail)
try:
st = module["TestCases"][0].get("start_at")
et = module["TestCases"][-1].get("end_at")
summary["start_time"] = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(st))
summary["end_time"] = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(et))
summary["duration_seconds"] = float("%.2f" %(et - st))
except Exception as _:
logger.log_warning("Will set and to ")
(summary["start_time"], summary["end_time"], summary["duration_seconds"]) = (None,None,None)
if summary["fail_cases_num"] > 0:
summary["dict_report"] = {"result":0,"message":"failure","pass":summary["pass_cases_num"],"fail":summary["fail_cases_num"]}
else:
summary["dict_report"] = {"result":1,"message":"success","pass":summary["pass_cases_num"],"fail":summary["fail_cases_num"]}
all_summary.append(summary)
return all_summary | summarize the report data
@param list_all: a list which save the report data
@param kwargs: such as
show_all: True/False report show all status cases
proj_name: project name
home_page: home page url |
def determine_type(filename):
ftype = magic.from_file(filename, mime=True).decode()
if ftype == 'application/pdf':  # mime mappings assumed; literals lost in extraction
ftype = 'pdf'
elif ftype == 'text/plain':
ftype = 'txt'
else:
ftype = ftype.split('/')[1]
return ftype | Determine the file type and return it. |
def get_posts(self, include_draft=False, filter_functions=None):
def posts_generator(path):
if os.path.isdir(path):
for file in os.listdir(path):
filename, ext = os.path.splitext(file)
format_name = get_standard_format_name(ext[1:])
if format_name is not None and re.match(
r'\d{4}-\d{2}-\d{2}-.+', filename):  # date-prefixed filename pattern assumed
post = Post()
post.format = format_name
post.meta, post.raw_content = FileStorage.read_file(
os.path.join(path, file))
post.rel_url = filename.replace('-', '/', 3) + '/'
post.unique_key = '/post/' + post.rel_url
yield post
posts_path = os.path.join(current_app.instance_path, 'posts')
result = filter(lambda p: include_draft or not p.is_draft,
posts_generator(posts_path))
result = self._filter_result(result, filter_functions)
return sorted(result, key=lambda p: p.created, reverse=True) | Get all posts from filesystem.
:param include_draft: return draft posts or not
:param filter_functions: filter to apply BEFORE result being sorted
:return: an iterable of Post objects (the first is the latest post) |
def finalize(self):
signature = self.signer.finalize()
sig_r, sig_s = decode_dss_signature(signature)
sig_b64 = encode_signature(sig_r, sig_s)
return sig_b64 | Get the base64-encoded signature itself.
Can only be called once. |
def emit(self, record):
level = record.levelno
if not FLAGS.is_parsed():
global _warn_preinit_stderr
if _warn_preinit_stderr:
sys.stderr.write(
'WARNING: Logging before flag parsing goes to stderr.\n')
_warn_preinit_stderr = False
self._log_to_stderr(record)
elif FLAGS['logtostderr'].value:
self._log_to_stderr(record)
else:
super(PythonHandler, self).emit(record)
stderr_threshold = converter.string_to_standard(
FLAGS['stderrthreshold'].value)
if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and
self.stream != sys.stderr):
self._log_to_stderr(record)
if _is_absl_fatal_record(record):
self.flush()
os.abort() | Prints a record out to some streams.
If FLAGS.logtostderr is set, it will print to sys.stderr ONLY.
If FLAGS.alsologtostderr is set, it will print to sys.stderr.
If FLAGS.logtostderr is not set, it will log to the stream
associated with the current thread.
Args:
record: logging.LogRecord, the record to emit. |
def title(self, gender: Optional[Gender] = None,
title_type: Optional[TitleType] = None) -> str:
gender_key = self._validate_enum(gender, Gender)
title_key = self._validate_enum(title_type, TitleType)
titles = self._data['title'][gender_key][title_key]
return self.random.choice(titles) | Generate a random title for name.
You can generate random prefix or suffix
for name using this method.
:param gender: The gender.
:param title_type: TitleType enum object.
:return: The title.
:raises NonEnumerableError: if gender or title_type in incorrect format.
:Example:
PhD. |
def forward(self, input, target):
jinput, input_is_table = Layer.check_input(input)
jtarget, target_is_table = Layer.check_input(target)
output = callBigDlFunc(self.bigdl_type,
"criterionForward",
self.value,
jinput,
input_is_table,
jtarget,
target_is_table)
return output | NB: It's for debug only, please use optimizer.optimize() in production.
Takes an input object, and computes the corresponding loss of the criterion,
compared with `target`
:param input: ndarray or list of ndarray
:param target: ndarray or list of ndarray
:return: value of loss |
def _wait_for_read_ready_or_timeout(self, timeout):
remaining_timeout = timeout
t0 = time.time()
while True:
try:
(rs, _, _) = select.select(
[self.in_stream.fileno()] + self.readers,
[], [], remaining_timeout)
if not rs:
return False, None
r = rs[0]
if r == self.in_stream.fileno():
return True, None
else:
os.read(r, 1024)
if self.queued_interrupting_events:
return False, self.queued_interrupting_events.pop(0)
elif remaining_timeout is not None:
remaining_timeout = max(0, t0 + timeout - time.time())
continue
else:
continue
except select.error:
if self.sigints:
return False, self.sigints.pop()
if remaining_timeout is not None:
remaining_timeout = max(timeout - (time.time() - t0), 0) | Returns tuple of whether stdin is ready to read and an event.
If an event is returned, that event is more pressing than reading
bytes on stdin to create a keyboard input event.
If stdin is ready, either there are bytes to read or a SIGTSTP
triggered by dsusp has been received |
def has_property(self, property_name):
if property_name in self.properties:
return True
elif property_name in self.entities:
return True
elif property_name in self.collections:
return True
else:
return False | Check if schema has property
:param property_name: str, name to check
:return: bool |
def _update_estimate_and_sampler(self, ell, ell_hat, weight, extra_info,
**kwargs):
stratum_idx = extra_info['stratum_idx']
self._BB_TP.update(ell*ell_hat, stratum_idx)
self._BB_PP.update(ell_hat, stratum_idx)
self._BB_P.update(ell, stratum_idx)
self._update_cov_model(strata_to_update = [stratum_idx])
self._update_estimates() | Update the BB models and the estimates |
def loadJSON(self, filename):
with open(filename, "r") as f:
self.merge(json.load(f))
return self | Adds the data from a JSON file. The file is expected to be in datapoint format::
d = DatapointArray().loadJSON("myfile.json") |
def get(id_, hwid, type_, unit, precision, as_json):
if id_ and (hwid or type_):
raise click.BadOptionUsage(
"If --id is given --hwid and --type are not allowed."
)
if id_:
try:
sensor = W1ThermSensor.get_available_sensors()[id_ - 1]
except IndexError:
raise click.BadOptionUsage(
"No sensor with id {0} available. "
"Use the ls command to show all available sensors.".format(id_)
)
else:
sensor = W1ThermSensor(type_, hwid)
if precision:
sensor.set_precision(precision, persist=False)
temperature = sensor.get_temperature(unit)
if as_json:
data = {
"hwid": sensor.id,
"type": sensor.type_name,
"temperature": temperature,
"unit": unit,
}
click.echo(json.dumps(data, indent=4, sort_keys=True))
else:
click.echo(
"Sensor {0} measured temperature: {1} {2}".format(
click.style(sensor.id, bold=True),
click.style(str(temperature), bold=True),
click.style(unit, bold=True),
)
) | Get temperature of a specific sensor |
def gep(self, ptr, indices, inbounds=False, name=''):
instr = instructions.GEPInstr(self.block, ptr, indices,
inbounds=inbounds, name=name)
self._insert(instr)
return instr | Compute effective address (getelementptr):
name = getelementptr ptr, <indices...> |
def cds_column_replace(source, data):
current_length = [len(v) for v in source.data.values() if isinstance(v, (list, np.ndarray))]
new_length = [len(v) for v in data.values() if isinstance(v, (list, np.ndarray))]
untouched = [k for k in source.data if k not in data]
return bool(untouched and current_length and new_length and current_length[0] != new_length[0]) | Determine if the CDS.data requires a full replacement or simply
needs to be updated. A replacement is required if untouched
columns are not the same length as the columns being updated. |
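A quick sketch with a stand-in for a Bokeh ColumnDataSource:

from types import SimpleNamespace
source = SimpleNamespace(data={'x': [1, 2, 3], 'y': [4, 5, 6]})
print(cds_column_replace(source, {'y': [7, 8]}))     # True: untouched 'x' no longer matches
print(cds_column_replace(source, {'y': [7, 8, 9]}))  # False: lengths agree, update suffices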
def dsort(fname, order, has_header=True, frow=0, ofname=None):
ofname = fname if ofname is None else ofname
obj = CsvFile(fname=fname, has_header=has_header, frow=frow)
obj.dsort(order)
obj.write(fname=ofname, header=has_header, append=False) | r"""
Sort file data.
:param fname: Name of the comma-separated values file to sort
:type fname: FileNameExists_
:param order: Sort order
:type order: :ref:`CsvColFilter`
:param has_header: Flag that indicates whether the comma-separated
values file to sort has column headers in its first line
(True) or not (False)
:type has_header: boolean
:param frow: First data row (starting from 1). If 0 the row where data
starts is auto-detected as the first row that has a number
(integer of float) in at least one of its columns
:type frow: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the sorted data. If None the sorting is
done "in place"
:type ofname: FileName_ or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.dsort.dsort
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (Argument \`frow\` is not valid)
* RuntimeError (Argument \`has_header\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]] |
def mark_entries(self, entries):
# If we don't have a redirect, set it as main entry
main_entry = entries[0]
main_location = self._get_location(main_entry)
if not main_location:
self._set_entry_type(main_entry, MAIN_ENTRY)
return
main_url = urllib.parse.urljoin(get_url(main_entry), main_location)
for entry in entries[1:]:
url = get_url(entry)
if url == main_url:
self._set_entry_type(entry, MAIN_ENTRY)
break
else:
self._set_entry_type(main_entry, MAIN_ENTRY) | Mark one entry as main entry and the rest as resource entry.
Main entry is the entry that contain response's body
of the requested URL. |
def build_genome_alignment_from_file(ga_path, ref_spec, idx_path=None,
verbose=False):
blocks = []
if (idx_path is not None):
bound_iter = functools.partial(genome_alignment_iterator,
reference_species=ref_spec)
hash_func = JustInTimeGenomeAlignmentBlock.build_hash
factory = IndexedFile(None, bound_iter, hash_func)
factory.read_index(idx_path, ga_path, verbose=verbose)
pind = None
for k in factory:
if verbose:
if pind is None:
total = len(factory)
pind = ProgressIndicator(totalToDo=total, messagePrefix="completed",
messageSuffix="building alignment blocks ")
pind.done += 1
pind.showProgress()
blocks.append(JustInTimeGenomeAlignmentBlock(factory, k))
else:
for b in genome_alignment_iterator(ga_path, ref_spec, verbose=verbose):
blocks.append(b)
return GenomeAlignment(blocks, verbose) | build a genome alignment by loading from a single MAF file.
:param ga_path: the path to the file to load.
:param ref_spec: which species in the MAF file is the reference?
:param idx_path: if provided, use this index to generate a just-in-time
genome alignment, instead of loading the file immediately. |
async def _send_report(self, status):
if len(self._notify) > 0:
asyncio.gather(*[coro(dict(status)) for coro in self._notify],
loop=self.loop) | Call all subscribed coroutines in _notify whenever a status
update occurs.
This method is a coroutine |
def reviews(self, last_item, filter_=None):
cmd = self._get_gerrit_cmd(last_item, filter_)
logger.debug("Getting reviews with command: %s", cmd)
raw_data = self.__execute(cmd)
raw_data = str(raw_data, "UTF-8")
return raw_data | Get the reviews starting from last_item. |
def run(args):
with warnings.catch_warnings():
warnings.simplefilter('ignore')  # filter action assumed
query = prepareQuery(args.query_file.read())
ds = Dataset()
res_indices_prev = set()
res_indices = set()
for f in args.graphs:
g = Graph(identifier=os.path.basename(f.name))
g.parse(data=f.read(), format='turtle')  # format assumed
ds.add_graph(g)
for data in read_by_chunk(args.input_file, int(args.chunk_size)):
g = Graph(identifier='chunk')  # identifier assumed
g.parse(data=data, format=args.input_type)
ds.add_graph(g)
res = ds.query(query)
dedup_res_graph = Graph()
if len(res) != 0:
for r in res:
tid = generate_index(r)
res_indices.add(tid)
if tid in res_indices_prev:
continue
dedup_res_graph.add(r)
if len(dedup_res_graph) > 0:
ret = dedup_res_graph.serialize(format=args.output_type, encoding='utf-8')
args.output_file.write(ret)
ds.remove_graph(g)
res_indices_prev = res_indices
res_indices = set() | Args:
args (argparse.Namespace) |
def _parse_tensor(self, indices=False):
if indices:
self.line = self._skip_lines(1)
tensor = np.zeros((3, 3))
for i in range(3):
tokens = self.line.split()
if indices:
tensor[i][0] = float(tokens[1])
tensor[i][1] = float(tokens[2])
tensor[i][2] = float(tokens[3])
else:
tensor[i][0] = float(tokens[0])
tensor[i][1] = float(tokens[1])
tensor[i][2] = float(tokens[2])
self.line = self._skip_lines(1)
return tensor | Parse a tensor. |
def save(df, path, data_paths):
size = _reset_df_and_get_size(df)
buffer = defaultdict(list)
with get_tqdm(total=size) as pbar:
for dp in df:
assert len(dp) == len(data_paths), "Datapoint has {} components!".format(len(dp))
for k, el in zip(data_paths, dp):
buffer[k].append(el)
pbar.update()
with h5py.File(path, 'w') as hf, get_tqdm(total=len(data_paths)) as pbar:
for data_path in data_paths:
hf.create_dataset(data_path, data=buffer[data_path])
pbar.update() | Args:
df (DataFlow): the DataFlow to serialize.
path (str): output hdf5 file.
data_paths (list[str]): list of h5 paths. It should have the same
length as each datapoint, and each path should correspond to one
component of the datapoint. |
def set_project_filenames(self, recent_files):
if (self.current_active_project
and self.is_valid_project(
self.current_active_project.root_path)):
self.current_active_project.set_recent_files(recent_files) | Set the list of open file names in a project |
def revoke(self):
now = timezone.now()
self.revoked = True
self.revoked_at = now
self.save() | * flag certificate as revoked
* fill in revoked_at DateTimeField |
def calcu0(self,E,Lz):
logu0= optimize.brent(_u0Eq,
args=(self._delta,self._pot,
E,Lz**2./2.))
return numpy.exp(logu0) | NAME:
calcu0
PURPOSE:
calculate the minimum of the u potential
INPUT:
E - energy
Lz - angular momentum
OUTPUT:
u0
HISTORY:
2012-11-29 - Written - Bovy (IAS) |
def open_state_machine(path=None, recent_opened_notification=False):
start_time = time.time()
if path is None:
if interface.open_folder_func is None:
logger.error("No function defined for opening a folder")
return
load_path = interface.open_folder_func("Please choose the folder of the state machine")
if load_path is None:
return
else:
load_path = path
if state_machine_manager.is_state_machine_open(load_path):
logger.info("State machine already open. Select state machine instance from path {0}.".format(load_path))
sm = state_machine_manager.get_open_state_machine_of_file_system_path(load_path)
gui_helper_state.gui_singletons.state_machine_manager_model.selected_state_machine_id = sm.state_machine_id
return state_machine_manager.get_open_state_machine_of_file_system_path(load_path)
state_machine = None
try:
state_machine = storage.load_state_machine_from_path(load_path)
state_machine_manager.add_state_machine(state_machine)
if recent_opened_notification:
global_runtime_config.update_recently_opened_state_machines_with(state_machine)
duration = time.time() - start_time
stat = state_machine.root_state.get_states_statistics(0)
logger.info("It took {0:.2}s to load {1} states with {2} hierarchy levels.".format(duration, stat[0], stat[1]))
except (AttributeError, ValueError, IOError) as e:
logger.error('Error while trying to open state machine: {0}'.format(e))  # message text assumed
return state_machine | Open a state machine from respective file system path
:param str path: file system path to the state machine
:param bool recent_opened_notification: flag indicating that this call should also update the list of recently opened state machines
:rtype rafcon.core.state_machine.StateMachine
:return: opened state machine |
def render_js_code(self, id_, *args, **kwargs):
if id_:
options = self.render_select2_options_code(
dict(self.get_options()), id_)
return mark_safe(self.html.format(id=id_, options=options))
return u'' | Render html container for Select2 widget with options. |
def _ensure_counter(self):
if not isinstance(self.sync_counter, self._SynchronizationManager):
self.sync_counter = self._SynchronizationManager() | Ensure the sync counter is a valid non-dummy object. |
def throttle(self, key, amount=1, rate=None, capacity=None,
exc_class=Throttled, **kwargs):
if not self.consume(key, amount, rate, capacity, **kwargs):
raise exc_class("Request of %d unit for %s exceeds capacity."
% (amount, key)) | Consume an amount for a given key, or raise a Throttled exception. |
def dump_tables_to_tskit(pop):
node_view = np.array(pop.tables.nodes, copy=True)
node_view['time'] -= node_view['time'].max()
node_view['time'][np.where(node_view['time'] != 0.0)[0]] *= -1.0
edge_view = np.array(pop.tables.edges, copy=False)
mut_view = np.array(pop.tables.mutations, copy=False)
tc = tskit.TableCollection(pop.tables.genome_length)
# The set_columns calls filling tc.nodes and tc.edges, and the head of the
# mutations call, were lost in extraction; only the tail survives:
tc.mutations.set_columns(site=mut_view['site'], node=mut_view['node'],
derived_state=derived_state,
derived_state_offset=derived_state_offset,
metadata=md,
metadata_offset=mdo)
return tc.tree_sequence() | Converts a fwdpy11.TableCollection to a
tskit.TreeSequence |
def uncompress_files(original, destination):
with zipfile.ZipFile(original) as zips:
extract_path = os.path.join(destination)
zips.extractall(extract_path) | Extract a zip archive from the original path into the destination path.
:type original: str
:param original: The location of zip file
:type destination: str
:param destination: The extract path |
def connect(self):
"Connects to the Redis server if not already connected"
if self._sock:
return
try:
sock = self._connect()
except socket.timeout:
raise TimeoutError("Timeout connecting to server")
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError(self._error_message(e))
self._sock = sock
self._selector = DefaultSelector(sock)
try:
self.on_connect()
except RedisError:
self.disconnect()
raise
for callback in self._connect_callbacks:
callback(self) | Connects to the Redis server if not already connected |
def can_cast_to(v: Literal, dt: str) -> bool:
return v.value is not None and Literal(str(v), datatype=dt).value is not None | 5.4.3 Datatype Constraints
Determine whether "a value of the lexical form of n can be cast to the target type v per
XPath Functions 3.1 section 19 Casting[xpath-functions]." |
def tag(self, alt='', use_size=None, **attrs):
if use_size is None:
if getattr(self, '_dimensions_cache', None):  # attribute name assumed
use_size = True
else:
try:
self.storage.path(self.name)
use_size = True
except NotImplementedError:
use_size = False
attrs['alt'] = alt
attrs['src'] = self.url
if use_size:
attrs.update(dict(width=self.width, height=self.height))
attrs = ' '.join(['%s="%s"' % (key, escape(value))
for key, value in sorted(attrs.items())])
return mark_safe('<img %s />' % attrs) | Return a standard XHTML ``<img ... />`` tag for this field.
:param alt: The ``alt=""`` text for the tag. Defaults to ``''``.
:param use_size: Whether to get the size of the thumbnail image for use
in the tag attributes. If ``None`` (default), the size will only
be used it if won't result in a remote file retrieval.
All other keyword parameters are added as (properly escaped) extra
attributes to the `img` tag. |
def exit_on_error(self, message, exit_code=1):
log = "I got an unrecoverable error. I have to exit."
if message:
log += "\n-----\nError message: %s" % message
print("Error message: %s" % message)
log += "-----\n"
log += "You can get help at https://github.com/Alignak-monitoring/alignak\n"
log += "If you think this is a bug, create a new issue including as much " \
"details as possible (version, configuration,...)"
if exit_code is not None:
exit(exit_code) | Log generic message when getting an error and exit
:param exit_code: if not None, exit with the provided value as exit code
:type exit_code: int
:param message: message for the exit reason
:type message: str
:return: None |
def assemble_cx():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode()
body = json.loads(response)
stmts_json = body.get('statements')
stmts = stmts_from_json(stmts_json)
ca = CxAssembler(stmts)
model_str = ca.make_model()
res = {'model': model_str}
return res | Assemble INDRA Statements and return CX network json. |
def get_nodes(self, request):
nodes = []
for shiny_app in ShinyApp.objects.all():
node = NavigationNode(
shiny_app.name,
reverse('shiny', args=(shiny_app.slug,)),  # URL pattern name assumed
shiny_app.slug
)
nodes.append(node)
return nodes | This method is used to build the menu tree. |
def reset( self ):
for btn in self.findChildren(QToolButton):
btn.close()
btn.setParent(None)
btn.deleteLater()
palette = self.palette()
unchecked = palette.color(palette.Button)
avg = (unchecked.red() + unchecked.green() + unchecked.blue()) / 3.0
if ( avg < 140 ):
checked = unchecked.lighter(115)
checked_clr = self.colorString(unchecked.lighter(120))
border_clr = self.colorString(unchecked.darker(140))
unchecked_clr = self.colorString(checked.lighter(140))
unchecked_clr_alt = self.colorString(checked.lighter(120))
checked_clr_alt = self.colorString(unchecked)
else:
checked = unchecked.lighter(120)
checked_clr = self.colorString(unchecked)
border_clr = self.colorString(unchecked.darker(160))
unchecked_clr = self.colorString(checked)
unchecked_clr_alt = self.colorString(checked.darker(130))
checked_clr_alt = self.colorString(unchecked.darker(120))
options = {}
options[] = 0
options[] = 0
options[] = 0
options[] = 0
options[] = border_clr
options[] = checked_clr
options[] = checked_clr_alt
options[] = unchecked_clr
options[] = unchecked_clr_alt
options[] = 1
options[] = 1
options[] = 1
options[] = 1
horiz = self.direction() in (QBoxLayout.LeftToRight,
QBoxLayout.RightToLeft)
if ( horiz ):
options[] = 0
options[] = 0
options[] = 0
options[] = 1
else:
options[] = 0
options[] = 0
options[] = 1
options[] = 1
actions = self.actionGroup().actions()
count = len(actions)
for i, action in enumerate(actions):
btn = QToolButton(self)
btn.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
btn.setDefaultAction(action)
self.layout().insertWidget(i, btn)
options[] = 1
options[] = 1
options[] = 1
options[] = 1
if ( horiz ):
options[] = self._padding
options[] = self._padding
else:
options[] = self._padding
options[] = self._padding
if ( not i ):
if ( horiz ):
options[] = self.cornerRadius()
options[] = self.cornerRadius()
options[] += self.cornerRadius() / 3.0
else:
options[] = self.cornerRadius()
options[] = self.cornerRadius()
options[] += self.cornerRadius() / 3.0
elif ( i == count - 1 ):
if ( horiz ):
options[] = self.cornerRadius()
options[] = self.cornerRadius()
options[] += self.cornerRadius() / 3.0
else:
options[] = self.cornerRadius()
options[] = self.cornerRadius()
options[] += self.cornerRadius() / 3.0
btn.setStyleSheet(TOOLBUTTON_STYLE % options)
btn.setAutoFillBackground(True) | Resets the user interface buttons for this widget. |
def multiget_slice(self, keys, column_parent, predicate, consistency_level):
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_multiget_slice(keys, column_parent, predicate, consistency_level)
return d | Performs a get_slice for column_parent and predicate for the given keys in parallel.
Parameters:
- keys
- column_parent
- predicate
- consistency_level |
def options(self, parser, env=None):
if env is None:
env = os.environ
parser.add_option(
'--sphinx-config',  # flag name assumed
help='Path to a Sphinx configuration file template',  # help text assumed
)
super(SphinxSearchPlugin, self).options(parser, env) | Sphinx config file that can optionally take the following python
template string arguments:
``database_name``
``database_password``
``database_username``
``database_host``
``database_port``
``sphinx_search_data_dir``
``searchd_log_dir`` |
def lock(self, lock_name, timeout=900):
try:
try:
lock = self.cache.lock
except AttributeError:
try:
lock = self.cache.client.lock
except AttributeError:
lock = self.cache._client.lock
have_lock = False
lock = lock(lock_name, timeout=timeout)
try:
have_lock = lock.acquire(blocking=True)
if have_lock:
yield
finally:
if have_lock:
lock.release()
except AttributeError:
have_lock = False
try:
while not have_lock:
have_lock = self.cache.add(lock_name, 'true', timeout)
if have_lock:
yield
finally:
if have_lock:
self.cache.delete(lock_name) | Attempt to use lock and unlock, which will work if the Cache is Redis,
but fall back to a memcached-compliant add/delete approach.
If the Jobtastic Cache isn't Redis or Memcache, or another product
with a compatible lock or add/delete API, then a custom locking function
will be required. However, Redis and Memcache are expected to account for
the vast majority of installations.
See:
- http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html
- http://celery.readthedocs.org/en/latest/tutorials/task-cookbook.html#ensuring-a-task-is-only-executed-one-at-a-time # NOQA |
def from_api_repr(cls, resource, client):
job_id, config_resource = cls._get_resource_config(resource)
config = CopyJobConfig.from_api_repr(config_resource)
copy_resource = config_resource["copy"]
destination = TableReference.from_api_repr(copy_resource["destinationTable"])
sources = []
source_configs = copy_resource.get("sourceTables")
if source_configs is None:
single = copy_resource.get("sourceTable")
if single is None:
raise KeyError("Resource missing / ")
source_configs = [single]
for source_config in source_configs:
table_ref = TableReference.from_api_repr(source_config)
sources.append(table_ref)
job = cls(job_id, sources, destination, client=client, job_config=config)
job._set_properties(resource)
return job | Factory: construct a job given its API representation
.. note:
This method assumes that the project found in the resource matches
the client's project.
:type resource: dict
:param resource: dataset job representation returned from the API
:type client: :class:`google.cloud.bigquery.client.Client`
:param client: Client which holds credentials and project
configuration for the dataset.
:rtype: :class:`google.cloud.bigquery.job.CopyJob`
:returns: Job parsed from ``resource``. |
def get_plugins_by_feature(features):
if not features:
return get_all_plugins()
plugins = PluginLoader.load_all().items()
names = set([f.__name__ for f in features])
return [e for e, plugin in plugins if names & set(plugin.__dict__.keys())] | Returns a list of plugin names where the plugins implement at least one of
the *features*. *features* must a list of Plugin methods, e.g.
[Plugin.postprocess_testrun, Plugin.postprocess_testjob] |
def unit_conversion(current, desired):
current = str(current).strip().lower()
desired = str(desired).strip().lower()
conversion = TO_INCH[current] / TO_INCH[desired]
return conversion | Calculate the conversion from one set of units to another.
Parameters
---------
current : str
Unit system values are in now (eg 'millimeters')
desired : str
Unit system we'd like values in (eg 'inches')
Returns
---------
conversion : float
Number to multiply by to put values into desired units |
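For example, with a TO_INCH table containing {'inches': 1.0, 'millimeters': 1.0 / 25.4}:

factor = unit_conversion('millimeters', 'inches')
print(factor)          # ~0.03937 (= (1/25.4) / 1.0)
print(250.0 * factor)  # 250 mm expressed in inches: ~9.84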
def factorize(self):
self.update_w()
if self._compute_h:
self.update_h()
self.W = self.mdl.W
self.H = self.mdl.H
self.ferr = np.zeros(1)
self.ferr[0] = self.mdl.frobenius_norm()
self._print_cur_status(' FN: ' + str(self.ferr[0])) | Do factorization s.t. data = dot(dot(data,beta),H), under the convexity constraint
beta >=0, sum(beta)=1, H >=0, sum(H)=1 |
def main(sleep_length=0.1):
log = logging.getLogger()
log.info('sleep_length=%s', sleep_length)  # message text lost in extraction
counter = 0
try:
while True:
log.info('counter=%d version=%s %s',  # message text lost in extraction
counter, _version.__version__, __version__)
counter += 1
time.sleep(sleep_length)
except KeyboardInterrupt:
log.info('got KeyboardInterrupt; exiting') | Log to stdout using python logging in a while loop |
def build_groups(self, tokens):
groups = {}
for token in tokens:
match_type = MatchType.start if token.group_end else MatchType.single
groups[token.group_start] = (token, match_type)
if token.group_end:
groups[token.group_end] = (token, MatchType.end)
return groups | Build dict of groups from list of tokens |
def keywords(text, cloud=None, batch=False, api_key=None, version=2, batch_size=None, **kwargs):
if kwargs.get("language", "english") != "english":
version = 1
url_params = {"batch": batch, "api_key": api_key, "version": version}
return api_handler(text, cloud=cloud, api="keywords", url_params=url_params, batch_size=batch_size, **kwargs) | Given input text, returns series of keywords and associated scores
Example usage:
.. code-block:: python
>>> import indicoio
>>> import numpy as np
>>> text = 'Monday: Delightful with mostly sunny skies. Highs in the low 70s.'
>>> keywords = indicoio.keywords(text, top_n=3)
>>> print "The keywords are: "+str(keywords.keys())
u'The keywords are ['delightful', 'highs', 'skies']
:param text: The text to be analyzed.
:type text: str or unicode
:rtype: Dictionary of feature score pairs |
def extract_name_from_job_arn(arn):
slash_pos = arn.find('/')
if slash_pos == -1:
raise ValueError("Cannot parse invalid ARN: %s" % arn)
return arn[(slash_pos + 1):] | Returns the name used in the API given a full ARN for a training job
or hyperparameter tuning job. |
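For example:

arn = 'arn:aws:sagemaker:us-east-1:123456789012:training-job/my-job'
print(extract_name_from_job_arn(arn))  # 'my-job'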
def libvlc_audio_set_mute(p_mi, status):
f = _Cfunctions.get('libvlc_audio_set_mute', None) or \
_Cfunction('libvlc_audio_set_mute', ((1,), (1,),), None,
None, MediaPlayer, ctypes.c_int)
return f(p_mi, status) | Set mute status.
@param p_mi: media player.
@param status: If status is true then mute, otherwise unmute @warning This function does not always work. If there are no active audio playback stream, the mute status might not be available. If digital pass-through (S/PDIF, HDMI...) is in use, muting may be unapplicable. Also some audio output plugins do not support muting at all. @note To force silent playback, disable all audio tracks. This is more efficient and reliable than mute. |