def serial(self):
asnint = libcrypto.X509_get_serialNumber(self.cert)
bio = Membio()
libcrypto.i2a_ASN1_INTEGER(bio.bio, asnint)
return int(str(bio), 16)
|
Serial number of certificate as integer
|
def list_opts():
for mod in load_conf_modules():
mod_opts = mod.list_opts()
if type(mod_opts) is list:
for single_mod_opts in mod_opts:
yield single_mod_opts[0], single_mod_opts[1]
else:
yield mod_opts[0], mod_opts[1]
|
List all conf modules opts.
Goes through all conf modules and yields their opts.
|
def open(self, path, delimiter=None, mode='r', buffering=-1, encoding=None, errors=None,
newline=None):
if not re.match('^[rbt]{1,3}$', mode):
raise ValueError('mode argument may only contain r, b, and t')
file_open = get_read_function(path, self.disable_compression)
file = file_open(path, mode=mode, buffering=buffering, encoding=encoding, errors=errors,
newline=newline)
if delimiter is None:
return self(file)
else:
return self(''.join(list(file)).split(delimiter))
|
Reads and parses input files as defined.
If delimiter is not None, then the file is read in bulk then split on it. If it is None
(the default), then the file is parsed as sequence of lines. The rest of the options are
passed directly to builtins.open, with the exception that write/append file modes are not
allowed.
>>> seq.open('examples/gear_list.txt').take(1)
[u'tent\\n']
:param path: path to file
:param delimiter: delimiter to split joined text on. if None, defaults to per line split
:param mode: file open mode
:param buffering: passed to builtins.open
:param encoding: passed to builtins.open
:param errors: passed to builtins.open
:param newline: passed to builtins.open
:return: output of file depending on options wrapped in a Sequence via seq
|
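A minimal, stdlib-only sketch of the two read modes described in the docstring above; the file contents are hypothetical and an in-memory stream stands in for a real file:
# Sketch of delimiter=None (per-line) versus delimiter-based bulk splitting.
import io

text = "tent\nstove\nlantern\n"

# delimiter=None: the file is treated as a sequence of lines.
lines = list(io.StringIO(text))            # ['tent\n', 'stove\n', 'lantern\n']

# delimiter='\n': the whole file is joined and then split on the delimiter.
joined = ''.join(list(io.StringIO(text)))
parts = joined.split('\n')                  # ['tent', 'stove', 'lantern', '']

print(lines)
print(parts)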
def source(self):
if len(self.sources) == 0:
raise ValueError("No source associated with %s" % self.__class__.__name__)
elif len(self.sources) > 1:
raise ValueError("Multiple sources for %s" % self.__class__.__name__)
return list(self.sources)[0]
|
Returns the single source name for a variant collection if it is unique,
otherwise raises an error.
|
def json_options_to_metadata(options, add_brackets=True):
try:
options = loads('{' + options + '}' if add_brackets else options)
return options
except ValueError:
return {}
|
Read metadata from its json representation
|
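A self-contained sketch of the bracket-wrapping behaviour, using only the standard library json module; the option strings are hypothetical:
# Sketch of the bracket-wrapping logic above (stdlib json only).
import json

def parse_options(options, add_brackets=True):
    try:
        return json.loads('{' + options + '}' if add_brackets else options)
    except ValueError:
        return {}

print(parse_options('"lines_to_next_cell": 2'))   # {'lines_to_next_cell': 2}
print(parse_options('not valid json'))            # {}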
def mixerfields(data, commdct):
objkey = "Connector:Mixer".upper()
fieldlists = splittermixerfieldlists(data, commdct, objkey)
return extractfields(data, commdct, objkey, fieldlists)
|
Get the mixer fields needed to diagram it.
|
def _preprocess(project_dict):
handlers = {
('archive',): _list_if_none,
('on-run-start',): _list_if_none_or_string,
('on-run-end',): _list_if_none_or_string,
}
for k in ('models', 'seeds'):
handlers[(k,)] = _dict_if_none
handlers[(k, 'vars')] = _dict_if_none
handlers[(k, 'pre-hook')] = _list_if_none_or_string
handlers[(k, 'post-hook')] = _list_if_none_or_string
handlers[('seeds', 'column_types')] = _dict_if_none
def converter(value, keypath):
if keypath in handlers:
handler = handlers[keypath]
return handler(value)
else:
return value
return deep_map(converter, project_dict)
|
Pre-process certain special keys to convert them from None values
into empty containers, and to turn strings into arrays of strings.
|
def default_depart(self, mdnode):
if mdnode.is_container():
fn_name = 'visit_{0}'.format(mdnode.t)
if not hasattr(self, fn_name):
warn("Container node skipped: type={0}".format(mdnode.t))
else:
self.current_node = self.current_node.parent
|
Default node depart handler
If there is a matching ``visit_<type>`` method for a container node,
then we should make sure to back up to its parent element when the node
is exited.
|
def delete_key(self, key_to_delete):
log = logging.getLogger(self.cls_logger + '.delete_key')
log.info('Attempting to delete key: {k}'.format(k=key_to_delete))
try:
self.s3client.delete_object(Bucket=self.bucket_name, Key=key_to_delete)
except ClientError:
_, ex, trace = sys.exc_info()
log.error('ClientError: Unable to delete key: {k}\n{e}'.format(k=key_to_delete, e=str(ex)))
return False
else:
log.info('Successfully deleted key: {k}'.format(k=key_to_delete))
return True
|
Deletes the specified key
:param key_to_delete: (str) S3 key to delete
:return: (bool) True if the key was deleted, False otherwise
|
def _cursor_forward(self, count=1):
self.x = min(self.size[1] - 1, self.x + count)
|
Moves cursor right count columns. Cursor stops at right margin.
|
def _byte_pad(data, bound=4):
bound = int(bound)
if len(data) % bound != 0:
pad = bytes(bound - (len(data) % bound))
result = bytes().join([data, pad])
assert (len(result) % bound) == 0
return result
return data
|
GLTF wants chunks aligned with 4-byte boundaries
so this function will add padding to the end of a
chunk of bytes so that it aligns with a specified
boundary size
Parameters
--------------
data : bytes
Data to be padded
bound : int
Length of desired boundary
Returns
--------------
padded : bytes
Result where: (len(padded) % bound) == 0
|
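A small stdlib-only sketch of the same 4-byte alignment arithmetic, handy for checking the padding length:
# Stdlib-only sketch of the alignment performed by _byte_pad above.
def byte_pad(data: bytes, bound: int = 4) -> bytes:
    remainder = len(data) % bound
    if remainder:
        data = data + bytes(bound - remainder)   # append zero bytes
    assert len(data) % bound == 0
    return data

print(byte_pad(b'abcde'))        # b'abcde\x00\x00\x00' -> length 8
print(len(byte_pad(b'abcd')))    # 4, already aligned so unchanged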
def select_from_drop_down_by_text(self, drop_down_locator, option_locator, option_text, params=None):
self.click(drop_down_locator, params['drop_down'] if params else None)
for option in self.get_present_elements(option_locator, params['option'] if params else None):
if self.get_text(option) == option_text:
self.click(option)
break
|
Select option from drop down widget using text.
:param drop_down_locator: locator tuple (if any, params needs to be in place) or WebElement instance
:param option_locator: locator tuple (if any, params needs to be in place)
:param option_text: text to base option selection on
:param params: Dictionary containing dictionary of params
:return: None
|
def deploy_clone_from_vm(self, si, logger, data_holder, vcenter_data_model, reservation_id, cancellation_context):
template_resource_model = data_holder.template_resource_model
return self._deploy_a_clone(si,
logger,
data_holder.app_name,
template_resource_model.vcenter_vm,
template_resource_model,
vcenter_data_model,
reservation_id,
cancellation_context)
|
Deploy Cloned VM From VM command; deploys a VM cloned from another VM.
:param cancellation_context:
:param reservation_id:
:param si:
:param logger:
:type data_holder:
:type vcenter_data_model:
:rtype DeployAppResult:
:return:
|
def subscriber_choice_control(self):
self.current.task_data['option'] = None
self.current.task_data['chosen_subscribers'], names = self.return_selected_form_items(
self.input['form']['SubscriberList'])
self.current.task_data[
'msg'] = "You should choose at least one subscriber for migration operation."
if self.current.task_data['chosen_subscribers']:
self.current.task_data['option'] = self.input['cmd']
del self.current.task_data['msg']
|
Controls the subscriber choice and generates an
error message if no subscriber is chosen.
|
def create_conversion_event(self, event_key, user_id, attributes, event_tags):
params = self._get_common_params(user_id, attributes)
conversion_params = self._get_required_params_for_conversion(event_key, event_tags)
params[self.EventParams.USERS][0][self.EventParams.SNAPSHOTS].append(conversion_params)
return Event(self.EVENTS_URL,
params,
http_verb=self.HTTP_VERB,
headers=self.HTTP_HEADERS)
|
Create conversion Event to be sent to the logging endpoint.
Args:
event_key: Key representing the event which needs to be recorded.
user_id: ID for user.
attributes: Dict representing user attributes and values.
event_tags: Dict representing metadata associated with the event.
Returns:
Event object encapsulating the conversion event.
|
def show(cobertura_file, format, output, source, source_prefix):
cobertura = Cobertura(cobertura_file, source=source)
Reporter = reporters[format]
reporter = Reporter(cobertura)
report = reporter.generate()
if not isinstance(report, bytes):
report = report.encode('utf-8')
isatty = True if output is None else output.isatty()
click.echo(report, file=output, nl=isatty)
|
show coverage summary of a Cobertura report
|
def ping(self):
msg_code = riak.pb.messages.MSG_CODE_PING_REQ
codec = self._get_codec(msg_code)
msg = codec.encode_ping()
resp_code, _ = self._request(msg, codec)
if resp_code == riak.pb.messages.MSG_CODE_PING_RESP:
return True
else:
return False
|
Ping the remote server
|
def _show_previous_blank_lines(block):
pblock = block.previous()
while (pblock.text().strip() == '' and
pblock.blockNumber() >= 0):
pblock.setVisible(True)
pblock = pblock.previous()
|
Show the blank lines that precede the block.
|
def set_time_zone(self, item):
i3s_time = item["full_text"].encode("UTF-8", "replace")
try:
i3s_time = i3s_time.decode()
except:
pass
parts = i3s_time.split()
i3s_datetime = " ".join(parts[:2])
if len(parts) < 3:
return True
else:
i3s_time_tz = parts[2]
date = datetime.strptime(i3s_datetime, TIME_FORMAT)
utcnow = datetime.utcnow()
delta = datetime(
date.year, date.month, date.day, date.hour, date.minute
) - datetime(utcnow.year, utcnow.month, utcnow.day, utcnow.hour, utcnow.minute)
try:
self.tz = Tz(i3s_time_tz, delta)
except ValueError:
return False
return True
|
Work out the time zone and create a shim tzinfo.
We return True if all is good or False if there was an issue and we
need to re-check the time zone. See issue #1375.
|
def join_resource_name(self, v):
d = self.dict
d['fragment'] = [v, None]
return MetapackResourceUrl(downloader=self._downloader, **d)
|
Return a MetapackResourceUrl that includes a reference to the resource. Returns a
MetapackResourceUrl, which will have a fragment
|
def import_from_netcdf(network, path, skip_time=False):
assert has_xarray, "xarray must be installed for netCDF support."
basename = os.path.basename(path) if isinstance(path, string_types) else None
with ImporterNetCDF(path=path) as importer:
_import_from_importer(network, importer, basename=basename,
skip_time=skip_time)
|
Import network data from netCDF file or xarray Dataset at `path`.
Parameters
----------
path : string|xr.Dataset
Path to netCDF dataset or instance of xarray Dataset
skip_time : bool, default False
Skip reading in time dependent attributes
|
def _cache_loc(self, path, saltenv='base', cachedir=None):
cachedir = self.get_cachedir(cachedir)
dest = salt.utils.path.join(cachedir,
'files',
saltenv,
path)
destdir = os.path.dirname(dest)
with salt.utils.files.set_umask(0o077):
if os.path.isfile(destdir):
os.remove(destdir)
try:
os.makedirs(destdir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
yield dest
|
Return the local location to cache the file, cache dirs will be made
|
def import_legislators(src):
logger.info("Importing Legislators From: {0}".format(src))
current = pd.read_csv("{0}/{1}/legislators-current.csv".format(
src, LEGISLATOR_DIR))
historic = pd.read_csv("{0}/{1}/legislators-historic.csv".format(
src, LEGISLATOR_DIR))
legislators = current.append(historic)
return legislators
|
Read the legislators from the csv files into a single Dataframe. Intended
for importing new data.
|
def training_set_multiplication(training_set, mult_queue):
logging.info("Multiply data...")
for algorithm in mult_queue:
new_training_set = []
for recording in training_set:
samples = algorithm(recording['handwriting'])
for sample in samples:
new_training_set.append({'id': recording['id'],
'is_in_testset': 0,
'formula_id': recording['formula_id'],
'handwriting': sample,
'formula_in_latex':
recording['formula_in_latex']})
training_set = new_training_set
return new_training_set
|
Multiply the training set by all methods listed in mult_queue.
Parameters
----------
training_set :
set of all recordings that will be used for training
mult_queue :
list of all algorithms that will take one recording and generate more
than one.
Returns
-------
multiple recordings
|
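A toy, self-contained sketch of the multiplication loop above; the "mirror" algorithm and the recording dicts are hypothetical stand-ins for real handwriting data:
# Toy sketch of the loop above with a hypothetical doubling algorithm.
def mirror(handwriting):
    return [handwriting, handwriting[::-1]]   # one recording -> two samples

training_set = [{'id': 1, 'formula_id': 7, 'is_in_testset': 0,
                 'formula_in_latex': 'x', 'handwriting': [1, 2, 3]}]

for algorithm in [mirror]:
    new_training_set = []
    for recording in training_set:
        for sample in algorithm(recording['handwriting']):
            new_training_set.append(dict(recording, handwriting=sample))
    training_set = new_training_set

print(len(training_set))   # 2 recordings after one doubling pass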
def create_unique_wcsname(fimg, extnum, wcsname):
wnames = list(wcsutil.altwcs.wcsnames(fimg, ext=extnum).values())
if wcsname not in wnames:
uniqname = wcsname
else:
rpatt = re.compile(wcsname + r'_\d')
index = 0
for wname in wnames:
rmatch = rpatt.match(wname)
if rmatch:
n = int(wname[wname.rfind('_')+1:])
if n > index: index = n
index += 1
uniqname = "%s_%d"%(wcsname,index)
return uniqname
|
This function evaluates whether the specified wcsname value has
already been used in this image. If so, it automatically modifies
the name with a simple version ID using wcsname_NNN format.
Parameters
----------
fimg : obj
PyFITS object of image with WCS information to be updated
extnum : int
Index of extension with WCS information to be updated
wcsname : str
Value of WCSNAME specified by user for labelling the new WCS
Returns
-------
uniqname : str
Unique WCSNAME value
|
def unpack(s):
header = IRHeader(*struct.unpack(_IR_FORMAT, s[:_IR_SIZE]))
s = s[_IR_SIZE:]
if header.flag > 0:
header = header._replace(label=np.frombuffer(s, np.float32, header.flag))
s = s[header.flag*4:]
return header, s
|
Unpack a MXImageRecord to string.
Parameters
----------
s : str
String buffer from ``MXRecordIO.read``.
Returns
-------
header : IRHeader
Header of the image record.
s : str
Unpacked string.
Examples
--------
>>> record = mx.recordio.MXRecordIO('test.rec', 'r')
>>> item = record.read()
>>> header, s = mx.recordio.unpack(item)
>>> header
HEADER(flag=0, label=14.0, id=20129312, id2=0)
|
def help_heading(self):
message = m.Heading(
tr('Help for {step_name}').format(step_name=self.step_name),
**SUBSECTION_STYLE)
return message
|
Helper method that returns just the header.
:returns: A heading object.
:rtype: safe.messaging.heading.Heading
|
def set_win_wallpaper(img):
if "x86" in os.environ["PROGRAMFILES"]:
ctypes.windll.user32.SystemParametersInfoW(20, 0, img, 3)
else:
ctypes.windll.user32.SystemParametersInfoA(20, 0, img, 3)
|
Set the wallpaper on Windows.
|
def refresh_fqdn_cache(force=False):
if not isinstance(force, bool):
raise CommandExecutionError("Force option must be boolean.")
if force:
query = {'type': 'op',
'cmd': '<request><system><fqdn><refresh><force>yes</force></refresh></fqdn></system></request>'}
else:
query = {'type': 'op', 'cmd': '<request><system><fqdn><refresh></refresh></fqdn></system></request>'}
return __proxy__['panos.call'](query)
|
Force refreshes all FQDNs used in rules.
force
Forces all fqdn refresh
CLI Example:
.. code-block:: bash
salt '*' panos.refresh_fqdn_cache
salt '*' panos.refresh_fqdn_cache force=True
|
def mbar_log_W_nk(u_kn, N_k, f_k):
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
log_denominator_n = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
logW = f_k - u_kn.T - log_denominator_n[:, np.newaxis]
return logW
|
Calculate the log weight matrix.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
logW_nk : np.ndarray, dtype='float', shape=(n_samples, n_states)
The normalized log weights.
Notes
-----
Equation (9) in JCP MBAR paper.
|
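A minimal numerical sketch of the same log-weight computation from equation (9), assuming numpy and scipy are available; the function above adds input validation on top of this, and the arrays below are toy values:
# Minimal sketch of the MBAR log-weight matrix (equation 9).
import numpy as np
from scipy.special import logsumexp

u_kn = np.array([[0.0, 1.0, 2.0],       # reduced potentials, shape (n_states, n_samples)
                 [1.0, 0.5, 0.0]])
N_k = np.array([2, 1])                   # samples drawn from each state
f_k = np.array([0.0, 0.3])               # reduced free energies

log_denominator_n = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
logW = f_k - u_kn.T - log_denominator_n[:, np.newaxis]
print(logW.shape)                        # (3, 2): (n_samples, n_states)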
def get_details(app_url=defaults.APP_URL):
url = '%s/environment' % app_url
response = requests.get(url)
if response.status_code == 200:
return response.json()
else:
raise JutException('Unable to retrieve environment details from %s, got %s: %s' %
(url, response.status_code, response.text))
|
Returns environment details for the specified app URL.
|
def scheme(name, bins, bin_method='quantiles'):
return {
'name': name,
'bins': bins,
'bin_method': (bin_method if isinstance(bins, int) else ''),
}
|
Return a custom scheme based on CARTOColors.
Args:
name (str): Name of a CARTOColor.
bins (int or iterable): If an `int`, the number of bins for classifying
data. CARTOColors have 7 bins max for quantitative data, and 11 max
for qualitative data. If `bins` is a `list`, it is the upper range
for classifying data. E.g., `bins` can be of the form ``(10, 20, 30,
40, 50)``.
bin_method (str, optional): One of methods in :obj:`BinMethod`.
Defaults to ``quantiles``. If `bins` is an iterable, then that is
the bin method that will be used and this will be ignored.
.. Warning::
Input types are particularly sensitive in this function, and little
feedback is given for errors. ``name`` and ``bin_method`` arguments
are case-sensitive.
|
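Assuming the scheme() function above is in scope, a quick sketch of the two forms `bins` can take (the CARTOColor name is illustrative):
# Quick sketch of the two `bins` forms, assuming scheme() from above.
print(scheme('Burg', 5))
# {'name': 'Burg', 'bins': 5, 'bin_method': 'quantiles'}

print(scheme('Burg', (10, 20, 30, 40, 50)))
# {'name': 'Burg', 'bins': (10, 20, 30, 40, 50), 'bin_method': ''}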
def stream_directory(directory,
recursive=False,
patterns='**',
chunk_size=default_chunk_size):
stream = DirectoryStream(directory,
recursive=recursive,
patterns=patterns,
chunk_size=chunk_size)
return stream.body(), stream.headers
|
Gets a buffered generator for streaming directories.
Returns a buffered generator which encodes a directory as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
directory : str
The filepath of the directory to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk
|
def get_layer_description_from_canvas(self, layer, purpose):
if not layer:
return ""
try:
keywords = self.keyword_io.read_keywords(layer)
if 'layer_purpose' not in keywords:
keywords = None
except (HashNotFoundError,
OperationalError,
NoKeywordsFoundError,
KeywordNotFoundError,
InvalidParameterError,
UnsupportedProviderError):
keywords = None
self.layer = layer
if purpose == layer_purpose_hazard['key']:
self.hazard_layer = layer
elif purpose == layer_purpose_exposure['key']:
self.exposure_layer = layer
else:
self.aggregation_layer = layer
if keywords and 'keyword_version' in keywords:
kw_ver = str(keywords['keyword_version'])
self.is_selected_layer_keywordless = (
not is_keyword_version_supported(kw_ver))
else:
self.is_selected_layer_keywordless = True
description = layer_description_html(layer, keywords)
return description
|
Obtain the description of a canvas layer selected by user.
:param layer: The QGIS layer.
:type layer: QgsMapLayer
:param purpose: The layer purpose of the layer to get the description.
:type purpose: string
:returns: description of the selected layer.
:rtype: string
|
def transition(trname='', field='', check=None, before=None, after=None):
if is_callable(trname):
raise ValueError(
"The @transition decorator should be called as "
"@transition(['transition_name'], **kwargs)")
if check or before or after:
warnings.warn(
"The use of check=, before= and after= in @transition decorators is "
"deprecated in favor of @transition_check, @before_transition and "
"@after_transition decorators.",
DeprecationWarning,
stacklevel=2)
return TransitionWrapper(trname, field=field, check=check, before=before, after=after)
|
Decorator to declare a function as a transition implementation.
|
def _show(self, message, indent=0, enable_verbose=True):
if enable_verbose:
print(" " * indent + message)
|
Message printer.
|
def move_to(x, y):
for b in _button_state:
if _button_state[b]:
e = Quartz.CGEventCreateMouseEvent(
None,
_button_mapping[b][3],
(x, y),
_button_mapping[b][0])
break
else:
e = Quartz.CGEventCreateMouseEvent(
None,
Quartz.kCGEventMouseMoved,
(x, y),
Quartz.kCGMouseButtonLeft)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, e)
|
Sets the mouse's location to the specified coordinates.
|
def _group(self, group_data):
if isinstance(group_data, dict):
xid = group_data.get('xid')
else:
xid = group_data.xid
if self.groups.get(xid) is not None:
group_data = self.groups.get(xid)
elif self.groups_shelf.get(xid) is not None:
group_data = self.groups_shelf.get(xid)
else:
self.groups[xid] = group_data
return group_data
|
Return previously stored group or new group.
Args:
group_data (dict|obj): A Group dict or an instance of a Group object.
Returns:
dict|obj: The new Group dict/object or the previously stored dict/object.
|
def format_variable_map(variable_map, join_lines=True):
rows = []
rows.append(("Key", "Variable", "Shape", "Type", "Collections", "Device"))
var_to_collections = _get_vars_to_collections(variable_map)
sort_key = lambda item: (item[0], item[1].name)
for key, var in sorted(variable_map_items(variable_map), key=sort_key):
shape = "x".join(str(dim) for dim in var.get_shape().as_list())
dtype = repr(var.dtype.base_dtype).replace("tf.", "")
coll = ", ".join(sorted(var_to_collections[var]))
rows.append((key, var.op.name, shape, dtype, coll, _format_device(var)))
return _format_table(rows, join_lines)
|
Takes a key-to-variable map and formats it as a table.
|
def reset_option(self, key, subkey):
if not self.open:
return
key, subkey = _lower_keys(key, subkey)
_entry_must_exist(self.gc, key, subkey)
df = self.gc[(self.gc["k1"] == key) & (self.gc["k2"] == subkey)]
if df["locked"].values[0]:
raise ValueError("{0}.{1} option is locked".format(key, subkey))
val = df["default"].values[0]
self.gc.loc[
(self.gc["k1"] == key) &
(self.gc["k2"] == subkey), "value"] = val
|
Resets a single option to the default values.
:param str key: First identifier of the option.
:param str subkey: Second identifier of the option.
:raise:
:NotRegisteredError: If ``key`` or ``subkey`` do not define any
option.
:ValueError: If the targeted option is locked.
|
def _check_split_list_validity(self):
if not (hasattr(self,"_splitListsSet") and (self._splitListsSet)):
return False
elif len(self) != self._splitListsLength:
return False
else:
return True
|
See _temporal_split_list above. This function checks if the current
split lists are still valid.
|
def placeholdit(
width,
height,
background_color="cccccc",
text_color="969696",
text=None,
random_background_color=False
):
url = get_placeholdit_url(
width,
height,
background_color=background_color,
text_color=text_color,
text=text,
)
return format_html('<img src="{}"/>', url)
|
Creates a placeholder image using placehold.it
Usage format:
{% placeholdit [width] [height] [background_color] [text_color] [text] %}
Example usage:
Default image at 250 square
{% placeholdit 250 %}
100 wide and 200 high
{% placeholdit 100 200 %}
Custom background and text colors
{% placeholdit 100 200 background_color='fff' text_color='000' %}
Custom text
{% placeholdit 100 200 text='Hello LA' %}
|
def do_state_tomography(preparation_program, nsamples, cxn, qubits=None, use_run=False):
return tomography._do_tomography(preparation_program, nsamples, cxn, qubits,
tomography.MAX_QUBITS_STATE_TOMO,
StateTomography, state_tomography_programs,
DEFAULT_STATE_TOMO_SETTINGS, use_run=use_run)
|
Method to perform both a QPU and QVM state tomography, and use the latter
as a reference to calculate the fidelity of the former.
:param Program preparation_program: Program to execute.
:param int nsamples: Number of samples to take for the program.
:param QVMConnection|QPUConnection cxn: Connection on which to run the program.
:param list qubits: List of qubits for the program to use in the tomography analysis.
:param bool use_run: If ``True``, append measurements on all qubits and use ``cxn.run``
instead of ``cxn.run_and_measure``.
:return: The state tomogram.
:rtype: StateTomography
|
def finalize(self):
if self.total_instances > 1:
print('{} of {} instances contained dead code.'
.format(self.dead_code_instances, self.total_instances))
|
Output the number of instances that contained dead code.
|
def max_version(self):
data = self.version_downloads
if not data:
return None, 0
return max(data.items(), key=lambda item: item[1])
|
Version with the most downloads.
:return: A tuple of the form (version, n_downloads)
|
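The selection reduces to a max() over a downloads dict; a stdlib-only sketch with hypothetical counts:
# Stdlib sketch of the selection above; the download counts are hypothetical.
version_downloads = {'1.0.0': 120, '1.1.0': 340, '2.0.0': 95}
version, n_downloads = max(version_downloads.items(), key=lambda item: item[1])
print(version, n_downloads)   # 1.1.0 340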
def ReadTriggers(self, collection_link, options=None):
if options is None:
options = {}
return self.QueryTriggers(collection_link, None, options)
|
Reads all triggers in a collection.
:param str collection_link:
The link to the document collection.
:param dict options:
The request options for the request.
:return:
Query Iterable of Triggers.
:rtype:
query_iterable.QueryIterable
|
def define_saver(exclude=None):
variables = []
exclude = exclude or []
exclude = [re.compile(regex) for regex in exclude]
for variable in tf.global_variables():
if any(regex.match(variable.name) for regex in exclude):
continue
variables.append(variable)
saver = tf.train.Saver(variables, keep_checkpoint_every_n_hours=5)
return saver
|
Create a saver for the variables we want to checkpoint.
Args:
exclude: List of regexes to match variable names to exclude.
Returns:
Saver object.
|
def _add_coverage_bedgraph_to_output(out, data):
out_file = "%s.coverage.bedgraph" % os.path.splitext(out["cns"])[0]
if utils.file_exists(out_file):
out["bedgraph"] = out_file
return out
bam_file = dd.get_align_bam(data)
bedtools = config_utils.get_program("bedtools", data["config"])
samtools = config_utils.get_program("samtools", data["config"])
cns_file = out["cns"]
bed_file = tempfile.NamedTemporaryFile(suffix=".bed", delete=False).name
with file_transaction(data, out_file) as tx_out_file:
cmd = ("sed 1d {cns_file} | cut -f1,2,3 > {bed_file}; "
"{samtools} view -b -L {bed_file} {bam_file} | "
"{bedtools} genomecov -bg -ibam - -g {bed_file} >"
"{tx_out_file}").format(**locals())
do.run(cmd, "CNVkit bedGraph conversion")
os.remove(bed_file)
out["bedgraph"] = out_file
return out
|
Add BedGraph representation of coverage to the output
|
def in_timezone(self, tz):
tz = pendulum._safe_timezone(tz)
return tz.convert(self, dst_rule=pendulum.POST_TRANSITION)
|
Set the instance's timezone from a string or object.
|
def basic_parse(response, buf_size=ijson.backend.BUFSIZE):
lexer = iter(IncrementalJsonParser.lexer(response, buf_size))
for value in ijson.backend.parse_value(lexer):
yield value
try:
next(lexer)
except StopIteration:
pass
else:
raise ijson.common.JSONError('Additional data')
|
Iterator yielding unprefixed events.
Parameters:
- response: a stream response from requests
|
def _left_zero_blocks(self, r):
if not self._include_off_diagonal:
return r
elif not self._upper:
return 0
elif self._include_diagonal:
return r
else:
return r + 1
|
Number of blocks with zeros from the left in block row `r`.
|
def is_compatible_with(self, other):
other = as_dtype(other)
return self._type_enum in (
other.as_datatype_enum,
other.base_dtype.as_datatype_enum,
)
|
Returns True if the `other` DType will be converted to this DType.
The conversion rules are as follows:
```python
DType(T) .is_compatible_with(DType(T)) == True
DType(T) .is_compatible_with(DType(T).as_ref) == True
DType(T).as_ref.is_compatible_with(DType(T)) == False
DType(T).as_ref.is_compatible_with(DType(T).as_ref) == True
```
Args:
other: A `DType` (or object that may be converted to a `DType`).
Returns:
True if a Tensor of the `other` `DType` will be implicitly converted to
this `DType`.
|
def set_rendering_intent(self, rendering_intent):
if rendering_intent not in (None,
PERCEPTUAL,
RELATIVE_COLORIMETRIC,
SATURATION,
ABSOLUTE_COLORIMETRIC):
raise FormatError('Unknown rendering intent')
self.rendering_intent = rendering_intent
|
Set rendering intent variant for sRGB chunk
|
def proxy_protocol(self, error='raise', default=None, limit=None, authenticate=False):
if error not in ('raise', 'unread'):
raise ValueError('error="{0}" is not "raise" or "unread"'.format(error))
if not isinstance(self.request, SocketBuffer):
self.request = SocketBuffer(self.request)
if default == 'peer':
default = ProxyInfo(
self.client_address[0], self.client_address[1],
self.client_address[0], self.client_address[1],
)
try:
line = read_line(
self.request.sock,
self.request.buf,
limit=limit,
)
except exc.ReadError:
if error == 'raise':
raise
return default
try:
info = parse_line(line)
except exc.ParseError:
if error == 'raise':
raise
self.request.unread(line)
return default
if authenticate and not self.proxy_authenticate(info):
logger.info('authentication failed - %s', info)
return default
return info
|
Parses, and optionally authenticates, proxy protocol information from
request. Note that ``self.request`` is wrapped by ``SocketBuffer``.
:param error:
How read (``exc.ReadError``) and parse (``exc.ParseError``) errors
are handled. One of:
- "raise" to propagate.
- "unread" to suppress exceptions and unread back to socket.
:param default:
What to return when no ``ProxyInfo`` was found. Only meaningful
with error "unread".
:param limit:
Maximum number of bytes to read when probing request for
``ProxyInfo``.
:returns: Parsed ``ProxyInfo`` instance or **default** if none found.
|
def _define_range(self, sequences):
sequence_count = 0
total_sequence = 0
for record in SeqIO.parse(open(sequences), 'fasta'):
total_sequence+=1
sequence_count+=len(record.seq)
max_range = (sequence_count/total_sequence)*1.5
return max_range
|
define_range - define the maximum range within which two hits in a db
search can be linked. This is defined as 1.5X the average length of all
reads in the database.
Parameters
----------
sequences : str
A path to the sequences in FASTA format. This fasta file is assumed
to be in the correct format. i.e. headers start with '>'
Returns
-------
max_range : int
As described above, 1.5X the size of the average length of genes
within the database
|
def parser_factory(fake_args=None):
parser = ArgumentParser(description='aomi')
subparsers = parser.add_subparsers(dest='operation',
help='Specify the data '
' or extraction operation')
extract_file_args(subparsers)
environment_args(subparsers)
aws_env_args(subparsers)
seed_args(subparsers)
render_args(subparsers)
diff_args(subparsers)
freeze_args(subparsers)
thaw_args(subparsers)
template_args(subparsers)
password_args(subparsers)
token_args(subparsers)
help_args(subparsers)
export_args(subparsers)
if fake_args is None:
return parser, parser.parse_args()
return parser, parser.parse_args(fake_args)
|
Return a properly configured ArgumentParser.
|
def get_whitelist_page(self, page_number=None, page_size=None):
params = {
'pageNumber': page_number,
'pageSize': page_size
}
resp = self._client.get("whitelist", params=params)
return Page.from_dict(resp.json(), content_type=Indicator)
|
Gets a paginated list of indicators that the user's company has whitelisted.
:param int page_number: the page number to get.
:param int page_size: the size of the page to be returned.
:return: A |Page| of |Indicator| objects.
|
def _cmp_date(self):
dates = sorted(val for val in self.kw.values()
if isinstance(val, CalendarDate))
if dates:
return dates[0]
return CalendarDate()
|
Returns Calendar date used for comparison.
Use the earliest date out of all CalendarDates in this instance,
or some date in the future if there are no CalendarDates (e.g.
when Date is a phrase).
|
def model_base(bind_label=None, info=None):
Model = type('Model', (BaseModel,), {'__odm_abstract__': True})
info = {}
Model.__table_args__ = table_args(info=info)
if bind_label:
info['bind_label'] = bind_label
return Model
|
Create a base declarative class
|
def sparql_query(self, query, flush=None, limit=None):
return self.find_statements(query, language='sparql', type='tuples',
flush=flush, limit=limit)
|
Run a Sparql query.
:param query: sparql query string
:rtype: list of dictionary
|
def query_symbol(self, asset: str) -> str:
contract_address = self.get_asset_address(asset)
method = 'symbol'
invoke_code = build_native_invoke_code(contract_address, b'\x00', method, bytearray())
tx = Transaction(0, 0xd1, int(time()), 0, 0, None, invoke_code, bytearray(), list())
response = self.__sdk.rpc.send_raw_transaction_pre_exec(tx)
symbol = ContractDataParser.to_utf8_str(response['Result'])
return symbol
|
This interface is used to query the asset's symbol of ONT or ONG.
:param asset: a string which is used to indicate which asset's symbol we want to get.
:return: asset's symbol in the form of string.
|
def getStartdatetime(self):
return datetime(self.startdate_year, self.startdate_month, self.startdate_day,
self.starttime_hour, self.starttime_minute, self.starttime_second)
|
Returns the date and starttime as datetime object
Parameters
----------
None
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getStartdatetime()
datetime.datetime(2011, 4, 4, 12, 57, 2)
>>> f._close()
>>> del f
|
def paths(self, destination_account, destination_amount, source_account, destination_asset_code,
destination_asset_issuer=None):
destination_asset = Asset(destination_asset_code, destination_asset_issuer)
destination_asset_params = {
'destination_asset_type': destination_asset.type,
'destination_asset_code': None if destination_asset.is_native() else destination_asset.code,
'destination_asset_issuer': destination_asset.issuer
}
endpoint = '/paths'
params = self.__query_params(destination_account=destination_account,
source_account=source_account,
destination_amount=destination_amount,
**destination_asset_params
)
return self.query(endpoint, params)
|
Load a list of assets available to the source account id and find
any payment paths from those source assets to the desired
destination asset.
See the below docs for more information on required and optional
parameters for further specifying your search.
`GET /paths
<https://www.stellar.org/developers/horizon/reference/endpoints/path-finding.html>`_
:param str destination_account: The destination account that any returned path should use.
:param str destination_amount: The amount, denominated in the destination asset,
that any returned path should be able to satisfy.
:param str source_account: The sender's account id. Any returned path must use a source that the sender can hold.
:param str destination_asset_code: The asset code for the destination.
:param destination_asset_issuer: The asset issuer for the destination, if it is a native asset, let it be `None`.
:type destination_asset_issuer: str, None
:return: A list of paths that can be used to complete a payment based
on a given query.
:rtype: dict
|
def annotate_gemini(data, retriever=None):
r = dd.get_variation_resources(data)
return all([r.get(k) and objectstore.file_exists_or_remote(r[k]) for k in ["exac", "gnomad_exome"]])
|
Annotate with population calls if the data is installed.
|
def _get_default_router(self, routers, router_name=None):
if router_name is None:
for router in routers:
if router['id'] is not None:
return router['id']
else:
for router in routers:
if router['hostname'] == router_name:
return router['id']
raise SoftLayer.SoftLayerError("Could not find valid default router")
|
Returns the default router for ordering a dedicated host.
|
def postags(self):
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(POSTAG)
|
The list of word part-of-speech tags.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
|
def check_existing_results(self, benchmark):
if os.path.exists(benchmark.log_folder):
sys.exit('Output directory {0} already exists, will not overwrite existing results.'.format(benchmark.log_folder))
if os.path.exists(benchmark.log_zip):
sys.exit('Output archive {0} already exists, will not overwrite existing results.'.format(benchmark.log_zip))
|
Check and abort if the target directory for the benchmark results
already exists in order to avoid overwriting results.
|
def compare(self, compare_recipe, suffix='_compare'):
assert isinstance(compare_recipe, Recipe)
assert isinstance(suffix, basestring)
self.compare_recipe.append(compare_recipe)
self.suffix.append(suffix)
self.dirty = True
return self.recipe
|
Adds a comparison recipe to a base recipe.
|
def dump(props, output):
def escape(token):
return re.sub(r'([=:\s])', r'\\\1', token)
def write(out):
for k, v in props.items():
out.write('%s=%s\n' % (escape(str(k)), escape(str(v))))
if hasattr(output, 'write') and callable(output.write):
write(output)
elif isinstance(output, six.string_types):
with open(output, 'w+') as out:
write(out)
else:
raise TypeError('Can only dump data to a path or a writable object, given: %s' % output)
|
Dumps a dict of properties to the specified open stream or file path.
:API: public
|
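A self-contained sketch of the escaping and dumping behaviour, writing to an in-memory stream instead of a file; the property values are hypothetical:
# Self-contained sketch of the escaping/dumping behaviour above.
import io
import re

def escape(token):
    return re.sub(r'([=:\s])', r'\\\1', token)

props = {'java.home': '/usr/lib/jvm', 'key with space': 'a=b'}
out = io.StringIO()
for k, v in props.items():
    out.write('%s=%s\n' % (escape(str(k)), escape(str(v))))

print(out.getvalue())
# java.home=/usr/lib/jvm
# key\ with\ space=a\=b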
def to_list(self):
output = []
for i in range(1, len(self.elements), 2):
output.append(self.elements[i])
return output
|
Converts the vector to an array of the elements within the vector
|
def assert_trigger(self, session, protocol):
try:
return self.sessions[session].assert_trigger(protocol)
except KeyError:
return constants.StatusCode.error_invalid_object
|
Asserts software or hardware trigger.
Corresponds to viAssertTrigger function of the VISA library.
:param session: Unique logical identifier to a session.
:param protocol: Trigger protocol to use during assertion. (Constants.PROT*)
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode`
|
def extract_facts(rule):
def _extract_facts(ce):
if isinstance(ce, Fact):
yield ce
elif isinstance(ce, TEST):
pass
else:
for e in ce:
yield from _extract_facts(e)
return set(_extract_facts(rule))
|
Given a rule, return a set containing all rule LHS facts.
|
def render_subgraph(self, ontol, nodes, **args):
subont = ontol.subontology(nodes, **args)
return self.render(subont, **args)
|
Render a `ontology` object after inducing a subgraph
|
def get_post(self, slug):
cache_key = self.get_cache_key(post_slug=slug)
content = cache.get(cache_key)
if not content:
post = Post.objects.get(slug=slug)
content = self._format(post)
cache_duration = conf.GOSCALE_CACHE_DURATION if post else 1
cache.set(cache_key, content, cache_duration)
return content
|
This method returns a single post by slug
|
def make_blastdb(self):
db = os.path.splitext(self.formattedprimers)[0]
nhr = '{db}.nhr'.format(db=db)
if not os.path.isfile(str(nhr)):
command = 'makeblastdb -in {primerfile} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {outfile}'\
.format(primerfile=self.formattedprimers,
outfile=db)
run_subprocess(command)
|
Create a BLAST database of the primer file
|
def to_bytes(s, encoding="utf8"):
if PY_VERSION == 2:
b = bytes(s)
elif PY_VERSION == 3:
b = bytes(s, encoding)
else:
raise ValueError("Is Python 4 out already?")
return b
|
Converts str s to bytes
|
def mmi_to_delimited_file(self, force_flag=True):
LOGGER.debug('mmi_to_delimited_text requested.')
csv_path = os.path.join(
self.output_dir, 'mmi.csv')
if os.path.exists(csv_path) and force_flag is not True:
return csv_path
csv_file = open(csv_path, 'w')
csv_file.write(self.mmi_to_delimited_text())
csv_file.close()
csvt_path = os.path.join(
self.output_dir, self.output_basename + '.csvt')
csvt_file = open(csvt_path, 'w')
csvt_file.write('"Real","Real","Real"')
csvt_file.close()
return csv_path
|
Save mmi_data to delimited text file suitable for gdal_grid.
The output file will be of the same format as strings returned from
:func:`mmi_to_delimited_text`.
:param force_flag: Whether to force the regeneration of the output
file. Defaults to True.
:type force_flag: bool
:returns: The absolute file system path to the delimited text file.
:rtype: str
.. note:: An accompanying .csvt will be created which gdal uses to
determine field types. The csvt will contain the following string:
"Real","Real","Real". These types will be used in other conversion
operations. For example to convert the csv to a shp you would do::
ogr2ogr -select mmi -a_srs EPSG:4326 mmi.shp mmi.vrt mmi
|
def updateActiveMarkupClass(self):
previousMarkupClass = self.activeMarkupClass
self.activeMarkupClass = find_markup_class_by_name(globalSettings.defaultMarkup)
if self._fileName:
markupClass = get_markup_for_file_name(
self._fileName, return_class=True)
if markupClass:
self.activeMarkupClass = markupClass
if self.activeMarkupClass != previousMarkupClass:
self.highlighter.docType = self.activeMarkupClass.name if self.activeMarkupClass else None
self.highlighter.rehighlight()
self.activeMarkupChanged.emit()
self.triggerPreviewUpdate()
|
Update the active markup class based on the default class and
the current filename. If the active markup class changes, the
highlighter is rerun on the input text, the markup object of
this tab is replaced with one of the new class and the
activeMarkupChanged signal is emitted.
|
def copy_assets(self, path='assets'):
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug('copying %r to %r', fullpath, copy_to)
shutil.copyfile(fullpath, copy_to)
|
Copy assets into the destination directory.
|
def delete_user(self, id):
self.assert_has_permission('scim.write')
uri = self.uri + '/Users/%s' % id
headers = self._get_headers()
logging.debug("URI=" + str(uri))
logging.debug("HEADERS=" + str(headers))
response = self.session.delete(uri, headers=headers)
logging.debug("STATUS=" + str(response.status_code))
if response.status_code == 200:
return response
else:
logging.error(response.content)
response.raise_for_status()
|
Delete user with given id.
|
def iter_sources(self):
for src_id in xrange(self.get_source_count()):
yield src_id, self.get_source_name(src_id)
|
Iterates over all source names and IDs.
|
def _realwavelets(s_freq, freqs, dur, width):
x = arange(-dur / 2, dur / 2, 1 / s_freq)
wavelets = empty((len(freqs), len(x)))
g = exp(-(pi * x ** 2) / width ** 2)
for i, one_freq in enumerate(freqs):
y = cos(2 * pi * x * one_freq)
wavelets[i, :] = y * g
return wavelets
|
Create real wavelets, for UCSD.
Parameters
----------
s_freq : int
sampling frequency
freqs : ndarray
vector with frequencies of interest
dur : float
duration of the wavelets in s
width : float
parameter controlling gaussian shape
Returns
-------
ndarray
wavelets
|
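A self-contained numpy sketch of the same construction (a cosine carrier multiplied by a Gaussian envelope, one row per frequency); the parameter values are toy inputs:
# Numpy sketch of the real-wavelet construction above.
import numpy as np

s_freq, dur, width = 256, 1.0, 0.5           # toy parameters
freqs = np.array([5.0, 10.0, 20.0])

x = np.arange(-dur / 2, dur / 2, 1 / s_freq)
g = np.exp(-(np.pi * x ** 2) / width ** 2)   # Gaussian envelope
wavelets = np.array([np.cos(2 * np.pi * x * f) * g for f in freqs])

print(wavelets.shape)   # (3, 256): (n_freqs, n_samples)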
def klucher(surface_tilt, surface_azimuth, dhi, ghi, solar_zenith,
solar_azimuth):
cos_tt = aoi_projection(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
cos_tt = np.maximum(cos_tt, 0)
F = 1 - ((dhi / ghi) ** 2)
try:
F.fillna(0, inplace=True)
except AttributeError:
F = np.where(np.isnan(F), 0, F)
term1 = 0.5 * (1 + tools.cosd(surface_tilt))
term2 = 1 + F * (tools.sind(0.5 * surface_tilt) ** 3)
term3 = 1 + F * (cos_tt ** 2) * (tools.sind(solar_zenith) ** 3)
sky_diffuse = dhi * term1 * term2 * term3
return sky_diffuse
|
Determine diffuse irradiance from the sky on a tilted surface
using Klucher's 1979 model
.. math::
I_{d} = DHI \frac{1 + \cos\beta}{2} (1 + F' \sin^3(\beta/2))
(1 + F' \cos^2\theta\sin^3\theta_z)
where
.. math::
F' = 1 - (I_{d0} / GHI)^2
Klucher's 1979 model determines the diffuse irradiance from the sky
(ground reflected irradiance is not included in this algorithm) on a
tilted surface using the surface tilt angle, surface azimuth angle,
diffuse horizontal irradiance, direct normal irradiance, global
horizontal irradiance, extraterrestrial irradiance, sun zenith
angle, and sun azimuth angle.
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. surface_tilt must be >=0
and <=180. The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth : numeric
Surface azimuth angles in decimal degrees. surface_azimuth must
be >=0 and <=360. The Azimuth convention is defined as degrees
east of north (e.g. North = 0, South=180 East = 90, West = 270).
dhi : numeric
Diffuse horizontal irradiance in W/m^2. DHI must be >=0.
ghi : numeric
Global horizontal irradiance in W/m^2. GHI must be >=0.
solar_zenith : numeric
Apparent (refraction-corrected) zenith angles in decimal
degrees. solar_zenith must be >=0 and <=180.
solar_azimuth : numeric
Sun azimuth angles in decimal degrees. solar_azimuth must be >=0
and <=360. The Azimuth convention is defined as degrees east of
north (e.g. North = 0, East = 90, West = 270).
Returns
-------
diffuse : numeric
The sky diffuse component of the solar radiation.
References
----------
[1] Loutzenhiser P.G. et. al. "Empirical validation of models to compute
solar irradiance on inclined surfaces for building energy simulation"
2007, Solar Energy vol. 81. pp. 254-267
[2] Klucher, T.M., 1979. Evaluation of models to predict insolation on
tilted surfaces. Solar Energy 23 (2), 111-114.
|
def toggle_badge(self, kind):
badge = self.get_badge(kind)
if badge:
return self.remove_badge(kind)
else:
return self.add_badge(kind)
|
Toggle a badge given its kind.
|
def _cache_get_for_dn(self, dn: str) -> Dict[str, bytes]:
self._do_with_retry(
lambda obj: obj.search(
dn,
'(objectclass=*)',
ldap3.BASE,
attributes=['*', '+']))
results = self._obj.response
if len(results) < 1:
raise NoSuchObject("No results finding current value")
if len(results) > 1:
raise RuntimeError("Too many results finding current value")
return results[0]['raw_attributes']
|
Object state is cached. When an update is required the update will be
simulated on this cache, so that rollback information can be correct.
This function retrieves the cached data.
|
def is_won(grid):
"Did the latest move win the game?"
p, q = grid
return any(way == (way & q) for way in ways_to_win)
|
Did the latest move win the game?
|
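A self-contained sketch with a concrete ways_to_win bitmask table for a 3x3 board; the cell numbering and the meaning of q are assumptions made for illustration:
# Bitmask win check on a 3x3 board. Cells are numbered 0..8
# left-to-right, top-to-bottom; bit i of a mask marks cell i.
ways_to_win = (
    0b000000111, 0b000111000, 0b111000000,   # rows
    0b001001001, 0b010010010, 0b100100100,   # columns
    0b100010001, 0b001010100,                # diagonals
)

def is_won(grid):
    p, q = grid     # q: cells held by the player who just moved (assumption)
    return any(way == (way & q) for way in ways_to_win)

q = 0b000000111 | 0b000001000        # top row plus one extra cell
print(is_won((0, q)))                # True: the top row is fully covered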
def get_id_head(self):
id_head = None
for target_node in self:
if target_node.is_head():
id_head = target_node.get_id()
break
return id_head
|
Returns the id of the target that is set as "head"
@rtype: string
@return: the target id (or None) of the head target
|
def assess_content(member,file_filter):
member_path = member.name.replace('.','',1)
if len(member_path) == 0:
return False
if "skip_files" in file_filter:
if member_path in file_filter['skip_files']:
return False
if "assess_content" in file_filter:
if member_path in file_filter['assess_content']:
return True
return False
|
Determine if the filter wants the file to be read for content.
In the case of yes, we would then want to add the content to the
hash and not the file object.
|
def scale_samples(params, bounds):
b = np.array(bounds)
lower_bounds = b[:, 0]
upper_bounds = b[:, 1]
if np.any(lower_bounds >= upper_bounds):
raise ValueError("Bounds are not legal")
np.add(np.multiply(params,
(upper_bounds - lower_bounds),
out=params),
lower_bounds,
out=params)
|
Rescale samples in 0-to-1 range to arbitrary bounds
Arguments
---------
bounds : list
list of lists of dimensions `num_params`-by-2
params : numpy.ndarray
numpy array of dimensions `num_params`-by-:math:`N`,
where :math:`N` is the number of samples
|
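A small numpy sketch of the rescaling described above for two parameters; the sample values and bounds are toy inputs:
# Numpy sketch of rescaling unit-interval samples to arbitrary bounds.
import numpy as np

params = np.array([[0.0, 0.5],
                   [1.0, 0.25]])          # samples in the unit hypercube
bounds = [[10, 20], [-1, 1]]              # per-parameter [lower, upper]

b = np.array(bounds, dtype=float)
lower, upper = b[:, 0], b[:, 1]
params = params * (upper - lower) + lower
print(params)
# [[10.   0. ]
#  [20.  -0.5]]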
def get_duration_metadata(self):
metadata = dict(self._mdata['duration'])
metadata.update({'existing_duration_values': self._my_map['duration']})
return Metadata(**metadata)
|
Gets the metadata for the assessment duration.
return: (osid.Metadata) - metadata for the duration
*compliance: mandatory -- This method must be implemented.*
|
def _in(ins):
output = _16bit_oper(ins.quad[1])
output.append('ld b, h')
output.append('ld c, l')
output.append('in a, (c)')
output.append('push af')
return output
|
Translates IN to asm.
|
def encrypt(self, s, mac_bytes=10):
if isinstance(s, six.text_type):
raise ValueError(
"Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
out = BytesIO()
with self.encrypt_to(out, mac_bytes) as f:
f.write(s)
return out.getvalue()
|
Encrypt `s' for this pubkey.
|
def _metadata_is_invalid(cls, fact):
return any(isinstance(token, URIRef) and ' ' in token
for token in fact)
|
Determines if the fact is not well formed.
|
def post(self, path, auth=None, **kwargs):
return self._check_ok(self._post(path, auth=auth, **kwargs))
|
Manually make a POST request.
:param str path: relative url of the request (e.g. `/users/username`)
:param auth.Authentication auth: authentication object
:param kwargs dict: Extra arguments for the request, as supported by the
`requests <http://docs.python-requests.org/>`_ library.
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
|
def add_context_action(self, action):
self.main_tab_widget.context_actions.append(action)
for child_splitter in self.child_splitters:
child_splitter.add_context_action(action)
|
Adds a custom context menu action
:param action: action to add.
|
def rolling_window_sequences(X, index, window_size, target_size, target_column):
out_X = list()
out_y = list()
X_index = list()
y_index = list()
target = X[:, target_column]
for start in range(len(X) - window_size - target_size + 1):
end = start + window_size
out_X.append(X[start:end])
out_y.append(target[end:end + target_size])
X_index.append(index[start])
y_index.append(index[end])
return np.asarray(out_X), np.asarray(out_y), np.asarray(X_index), np.asarray(y_index)
|
Create rolling window sequences out of timeseries data.
|
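A tiny numpy sketch showing how the windows and targets line up for a single-column series; the series and window sizes are toy values:
# Numpy sketch of the rolling-window construction above.
import numpy as np

X = np.arange(6).reshape(-1, 1)          # values 0..5, one feature column
index = np.arange(6)                     # e.g. timestamps
window_size, target_size, target_column = 3, 1, 0

out_X, out_y, X_index, y_index = [], [], [], []
target = X[:, target_column]
for start in range(len(X) - window_size - target_size + 1):
    end = start + window_size
    out_X.append(X[start:end])
    out_y.append(target[end:end + target_size])
    X_index.append(index[start])
    y_index.append(index[end])

print(np.asarray(out_X).shape)   # (3, 3, 1): three windows of length 3
print(np.asarray(out_y))         # [[3] [4] [5]]: the value after each window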
def redirect_logging(tqdm_obj, logger=logging.getLogger()):
assert(len(logger.handlers) == 1)
prev_handler = logger.handlers[0]
logger.removeHandler(prev_handler)
tqdm_handler = TqdmLoggingHandler(tqdm_obj)
if prev_handler.formatter is not None:
tqdm_handler.setFormatter(prev_handler.formatter)
logger.addHandler(tqdm_handler)
try:
yield
finally:
logger.removeHandler(tqdm_handler)
logger.addHandler(prev_handler)
|
Context manager to redirect logging to a TqdmLoggingHandler object and then restore the original.
|
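The handler-swap pattern above can be sketched with the standard library alone; a plain StreamHandler stands in for TqdmLoggingHandler so the example stays self-contained:
# Stdlib-only sketch of the handler-swap pattern used by redirect_logging.
import contextlib
import logging
import sys

@contextlib.contextmanager
def redirect_logging(new_handler, logger=logging.getLogger()):
    assert len(logger.handlers) == 1
    prev_handler = logger.handlers[0]
    logger.removeHandler(prev_handler)
    if prev_handler.formatter is not None:
        new_handler.setFormatter(prev_handler.formatter)
    logger.addHandler(new_handler)
    try:
        yield
    finally:
        logger.removeHandler(new_handler)
        logger.addHandler(prev_handler)

logging.basicConfig(level=logging.INFO)   # installs the single root handler
with redirect_logging(logging.StreamHandler(sys.stdout)):
    logging.info("logged through the temporary handler")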
def horizon_dashboard_nav(context):
if 'request' not in context:
return {}
dashboard = context['request'].horizon['dashboard']
panel_groups = dashboard.get_panel_groups()
non_empty_groups = []
for group in panel_groups.values():
allowed_panels = []
for panel in group:
if (callable(panel.nav) and panel.nav(context) and
panel.can_access(context)):
allowed_panels.append(panel)
elif (not callable(panel.nav) and panel.nav and
panel.can_access(context)):
allowed_panels.append(panel)
if allowed_panels:
if group.name is None:
non_empty_groups.append((dashboard.name, allowed_panels))
else:
non_empty_groups.append((group.name, allowed_panels))
return {'components': OrderedDict(non_empty_groups),
'user': context['request'].user,
'current': context['request'].horizon['panel'].slug,
'request': context['request']}
|
Generates sub-navigation entries for the current dashboard.
|
def lookup_function(val):
"Look-up and return a pretty-printer that can print va."
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
type = type.unqualified().strip_typedefs()
typename = type.tag
if typename is None:
return None
for function in pretty_printers_dict:
if function.search(typename):
return pretty_printers_dict[function](val)
return None
|
Look-up and return a pretty-printer that can print val.
|
def start(self):
self.parse_opt()
self.parse_cfg()
if self.options.browse or self.options.browse_big or self.options.progress:
self.browse()
raise SystemExit
paramlist = []
for exp in self.cfgparser.sections():
if not self.options.experiments or exp in self.options.experiments:
params = self.items_to_params(self.cfgparser.items(exp))
params['name'] = exp
paramlist.append(params)
self.do_experiment(paramlist)
|
starts the experiments as given in the config file.
|