| code | docstring |
|---|---|
def _to_DOM(self):
root_node = ET.Element("forecast")
interval_node = ET.SubElement(root_node, "interval")
interval_node.text = self._interval
reception_time_node = ET.SubElement(root_node, "reception_time")
reception_time_node.text = str(self._reception_time)
root_node.append(self._location._to_DOM())
weathers_node = ET.SubElement(root_node, "weathers")
for weather in self:
weathers_node.append(weather._to_DOM())
return root_node | Dumps object data to a fully traversable DOM representation of the
object.
:returns: a ``xml.etree.Element`` object |
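A DOM built this way can be serialized to XML text with the standard library; a minimal sketch (the `forecast`/`interval` element names mirror the method above):

```python
import xml.etree.ElementTree as ET

# Build a tiny tree the way _to_DOM does, then dump it to a string.
root = ET.Element("forecast")
interval = ET.SubElement(root, "interval")
interval.text = "daily"
print(ET.tostring(root, encoding="unicode"))
# <forecast><interval>daily</interval></forecast>
```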
def value(self):
return ''.join(map(str, self.evaluate(self.trigger.user))) | Return the current evaluation of a condition statement |
def get_monophyletic(self, values, target_attr):
if type(values) != set:
values = set(values)
n2values = self.get_cached_content(store_attr=target_attr)
is_monophyletic = lambda node: n2values[node] == values
for match in self.iter_leaves(is_leaf_fn=is_monophyletic):
if is_monophyletic(match):
yield match | Returns a list of nodes matching the provided monophyly
criteria. For a node to be considered a match, all
`target_attr` values within the node, and exclusively them,
should be grouped.
:param values: a set of values for which monophyly is
expected.
:param target_attr: node attribute being used to check
monophyly (i.e. species for species trees, names for gene
family trees). |
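A hedged usage sketch, assuming the `ete3` package (where this method lives) and a hypothetical `species` attribute tagged onto each leaf:

```python
from ete3 import Tree

t = Tree("((((Human,Chimp),Gorilla),Orang),Gibbon);", format=9)
# Tag each leaf with a hypothetical 'species' attribute.
for leaf in t:
    leaf.add_feature("species", "gibbon" if leaf.name == "Gibbon" else "hominid")
# Yield the deepest nodes whose leaves carry exactly {'hominid'}.
for node in t.get_monophyletic(values=["hominid"], target_attr="species"):
    print(node.get_leaf_names())  # ['Human', 'Chimp', 'Gorilla', 'Orang']
```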
def is_fraction(value,
minimum = None,
maximum = None,
**kwargs):
try:
value = validators.fraction(value,
minimum = minimum,
maximum = maximum,
**kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True | Indicate whether ``value`` is a :class:`Fraction <python:fractions.Fraction>`.
:param value: The value to evaluate.
:param minimum: If supplied, will make sure that ``value`` is greater than or
equal to this value.
:type minimum: numeric
:param maximum: If supplied, will make sure that ``value`` is less than or
equal to this value.
:type maximum: numeric
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator |
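A short usage sketch of the checker, assuming the validator behaves as documented:

```python
from fractions import Fraction

is_fraction(Fraction(1, 2))                          # True
is_fraction(Fraction(3, 4), minimum=Fraction(1, 2))  # True: 3/4 >= 1/2
is_fraction("not a fraction")                        # False: validator raises, caught above
```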
def call_function(self, c, i):
callable_ = self.__stack[-1-i.arg]
args = tuple(self.__stack[len(self.__stack) - i.arg:])
self._print('CALL_FUNCTION')
self._print('callable:', callable_)
self._print('argc:', i.arg)
self._print('args:', args)
self.call_callbacks('CALL_FUNCTION', callable_, *args)
if isinstance(callable_, FunctionType):
ret = callable_(*args)
elif callable_ is builtins.__build_class__:
ret = self.build_class(callable_, args)
elif callable_ is builtins.globals:
ret = self.builtins_globals()
else:
ret = callable_(*args)
self.pop(1 + i.arg)
self.__stack.append(ret) | Implement the CALL_FUNCTION_ operation.
.. _CALL_FUNCTION: https://docs.python.org/3/library/dis.html#opcode-CALL_FUNCTION |
def filter_rank_genes_groups(adata, key=None, groupby=None, use_raw=True, log=True,
key_added='rank_genes_groups_filtered',
min_in_group_fraction=0.25, min_fold_change=2,
max_out_group_fraction=0.5):
if key is None:
key = 'rank_genes_groups'
if groupby is None:
groupby = str(adata.uns[key]['params']['groupby'])
gene_names = pd.DataFrame(adata.uns[key]['names'])
fraction_in_cluster_matrix = pd.DataFrame(np.zeros(gene_names.shape), columns=gene_names.columns,
index=gene_names.index)
fold_change_matrix = pd.DataFrame(np.zeros(gene_names.shape), columns=gene_names.columns, index=gene_names.index)
fraction_out_cluster_matrix = pd.DataFrame(np.zeros(gene_names.shape), columns=gene_names.columns,
index=gene_names.index)
logg.info("Filtering genes using: min_in_group_fraction: {} "
"min_fold_change: {}, max_out_group_fraction: {}".format(min_in_group_fraction, min_fold_change,
max_out_group_fraction))
from ..plotting._anndata import _prepare_dataframe
for cluster in gene_names.columns:
var_names = gene_names[cluster].values
adata.obs['__is_in_cluster__'] = pd.Categorical(adata.obs[groupby] == cluster)
categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby='__is_in_cluster__', use_raw=use_raw)
mean_obs = obs_tidy.groupby(level=0).mean()
obs_bool = obs_tidy.astype(bool)
fraction_obs = obs_bool.groupby(level=0).sum() / obs_bool.groupby(level=0).count()
fraction_in_cluster_matrix.loc[:, cluster] = fraction_obs.loc[True].values
fraction_out_cluster_matrix.loc[:, cluster] = fraction_obs.loc[False].values
if log:
fold_change_matrix.loc[:, cluster] = (np.exp(mean_obs.loc[True]) / np.exp(mean_obs.loc[False])).values
else:
fold_change_matrix.loc[:, cluster] = (mean_obs.loc[True] / mean_obs.loc[False]).values
adata.obs.drop(columns='__is_in_cluster__')
gene_names = gene_names[(fraction_in_cluster_matrix > min_in_group_fraction) &
(fraction_out_cluster_matrix < max_out_group_fraction) &
(fold_change_matrix > min_fold_change)]
adata.uns[key_added] = adata.uns[key].copy()
adata.uns[key_added]['names'] = gene_names.to_records(index=False) | Filters out genes based on fold change and fraction of genes expressing the gene within and outside the `groupby` categories.
See :func:`~scanpy.tl.rank_genes_groups`.
Results are stored in `adata.uns[key_added]` (default: 'rank_genes_groups_filtered').
To preserve the original structure of adata.uns['rank_genes_groups'], filtered genes
are set to `NaN`.
Parameters
----------
adata: :class:`~anndata.AnnData`
key
groupby
use_raw
log : if true, it means that the values to work with are in log scale
key_added
min_in_group_fraction
min_fold_change
max_out_group_fraction
Returns
-------
Same output as :ref:`scanpy.tl.rank_genes_groups` but with filtered genes names set to
`nan`
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.tl.rank_genes_groups(adata, 'bulk_labels', method='wilcoxon')
>>> sc.tl.filter_rank_genes_groups(adata, min_fold_change=3)
>>> # visualize results
>>> sc.pl.rank_genes_groups(adata, key='rank_genes_groups_filtered')
>>> # visualize results using dotplot
>>> sc.pl.rank_genes_groups_dotplot(adata, key='rank_genes_groups_filtered') |
def add(self, value):
hash(value)
self.redis.sadd(self.key, self._pickle(value)) | Add element *value* to the set. |
def setWorker(self, *args, **kwargs):
try:
la = self.LAUNCHING_ARGUMENTS(*args, **kwargs)
except TypeError:
scoop.logger.error(("addWorker failed to convert args %s and kwargs %s "
"to namedtuple (requires %s arguments (names %s))") %
(args, kwargs, len(self.LAUNCHING_ARGUMENTS._fields),
self.LAUNCHING_ARGUMENTS._fields))
raise
self.workersArguments = la | Add a worker assignation
Arguments and order to pass are defined in LAUNCHING_ARGUMENTS
Using named args is advised. |
def emg_linear_envelope(emg, sampling_rate=1000, freqs=[10, 400], lfreq=4):
emg = emg_tkeo(emg)
if np.size(freqs) == 2:
b, a = scipy.signal.butter(2, np.array(freqs)/(sampling_rate/2.), btype='bandpass')
emg = scipy.signal.filtfilt(b, a, emg)
if np.size(lfreq) == 1:
envelope = abs(emg)
b, a = scipy.signal.butter(2, np.array(lfreq)/(sampling_rate/2.), btype='low')
envelope = scipy.signal.filtfilt(b, a, envelope)
return (envelope) | r"""Calculate the linear envelope of a signal.
Parameters
----------
emg : array
raw EMG signal.
sampling_rate : int
Sampling rate (samples/second).
freqs : list [fc_h, fc_l], optional
cutoff frequencies for the band-pass filter (in Hz).
lfreq : number, optional
cutoff frequency for the low-pass filter (in Hz).
Returns
-------
envelope : array
linear envelope of the signal.
Notes
-----
*Authors*
- Marcos Duarte
*See Also*
See this notebook [1]_.
References
----------
.. [1] https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb |
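A hedged usage sketch with a synthetic signal (assumes `numpy`, `scipy`, and the module's `emg_tkeo` helper; the envelope of an amplitude-modulated noise burst should follow the modulation):

```python
import numpy as np

sampling_rate = 1000
t = np.arange(0, 2, 1.0 / sampling_rate)
# Noise burst whose amplitude rises and falls, mimicking a contraction.
emg = np.random.randn(t.size) * np.sin(np.pi * t / 2)
envelope = emg_linear_envelope(emg, sampling_rate=sampling_rate,
                               freqs=[10, 400], lfreq=4)
```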
def extract_value_from_output(canary, split_offset, kal_out):
retval = ""
while retval == "":
for line in kal_out.splitlines():
if canary in line:
retval = str(line.split()[split_offset])
if retval == "":
retval = None
return retval | Return value parsed from output.
Args:
canary(str): This string must exist in the target line.
split_offset(int): Split offset for target value in string.
kal_out(str): Output from kal. |
def set_chksum(self, doc, chk_sum):
if chk_sum:
doc.ext_document_references[-1].check_sum = checksum.Algorithm(
'SHA1', chk_sum)
else:
raise SPDXValueError('ExternalDocumentRef::Checksum') | Sets the external document reference's check sum, if not already set.
chk_sum - The checksum value in the form of a string. |
def fix_varscan_output(line, normal_name="", tumor_name=""):
line = line.strip()
tofix = ("##INFO=<ID=SSC", "##FORMAT=<ID=FREQ")
if line.startswith("##"):
if line.startswith(tofix):
line = line.replace('Number=1,Type=String',
'Number=1,Type=Float')
return line
line = line.split("\t")
if line[0].startswith("#CHROM"):
if tumor_name and normal_name:
mapping = {"NORMAL": normal_name, "TUMOR": tumor_name}
base_header = line[:9]
old_samples = line[9:]
if len(old_samples) == 0:
return "\t".join(line)
samples = [mapping[sample_name] for sample_name in old_samples]
assert len(old_samples) == len(samples)
return "\t".join(base_header + samples)
else:
return "\t".join(line)
try:
REF, ALT = line[3:5]
except ValueError:
return "\t".join(line)
def _normalize_freq(line, sample_i):
ft_parts = line[8].split(":")
dat = line[sample_i].split(":")
line[3] = REF
line[4] = ALT
return "\t".join(line) | Fix a varscan VCF line.
Fixes the ALT column and also fixes floating point values
output as strings to be Floats: FREQ, SSC.
This function was contributed by Sean Davis <sdavis2@mail.nih.gov>,
with minor modifications by Luca Beltrame <luca.beltrame@marionegri.it>. |
def _convert_to_json(self, response):
try:
return response.json()
except ValueError:
logging.warning('Unable to parse JSON from {0}; response body: {1}'.format(
response.request.url, response.text,
))
return None | Converts response to JSON.
If the response cannot be converted to JSON then `None` is returned.
Args:
response - An object of type `requests.models.Response`
Returns:
Response in JSON format if the response can be converted to JSON. `None` otherwise. |
def _GetLink(self):
if self._link is None:
self._link = ''
if self.entry_type != definitions.FILE_ENTRY_TYPE_LINK:
return self._link
cpio_archive_file = self._file_system.GetCPIOArchiveFile()
link_data = cpio_archive_file.ReadDataAtOffset(
self._cpio_archive_file_entry.data_offset,
self._cpio_archive_file_entry.data_size)
self._link = link_data.decode()
return self._link | Retrieves the link.
Returns:
str: full path of the linked file entry. |
def download_file(save_path, file_url):
r = requests.get(file_url)
with open(save_path, 'wb') as f:
f.write(r.content)
return save_path | Download file from http url link |
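For large files, a streamed variant avoids holding the whole body in memory; a sketch using the same `requests` API (the function name is hypothetical):

```python
import requests

def download_file_streamed(save_path, file_url, chunk_size=8192):
    # Stream the response body to disk chunk by chunk.
    with requests.get(file_url, stream=True) as r:
        r.raise_for_status()
        with open(save_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
    return save_path
```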
def refresh_token(self):
self.token = self.oauth.refresh_token(self.access_token_url, refresh_token=self.get_refresh_token())
self.access_token = self.token.get("access_token") | Refreshing the current expired access token |
def render_local_template(service_name, environment, repo_root, template_file):
cmd = '{0}/render_template {1} {2}'.format(repo_root, template_file, environment)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
stderr = indentify('stderr:\n{0}'.format(stderr))
stdout = indentify('stdout:\n{0}'.format(stdout))
raise Exception(
'Failed to render template for {0} in {1}:\n{2}\n{3}'.format(service_name, environment, stderr, stdout))
logger.debug('Rendered template %s for environment %s', template_file, environment)
r = re.match(r".*(^{.*^})$", stdout, re.MULTILINE | re.DOTALL)
return jsonify(json.loads(r.group(1))) | Render a given service's template for a given environment and return it |
def get_cookies(self):
ret = self.Network_getAllCookies()
assert 'result' in ret, "No return value in function response!"
assert 'cookies' in ret['result'], "No 'cookies' key in function response"
cookies = []
for raw_cookie in ret['result']['cookies']:
baked_cookie = http.cookiejar.Cookie(
rfc2109 = False
)
cookies.append(baked_cookie)
return cookies | Retrieve the cookies from the remote browser.
Return value is a list of http.cookiejar.Cookie() instances.
These can be directly used with the various http.cookiejar.XXXCookieJar
cookie management classes. |
def _webfinger(provider, request, **kwargs):
params = urlparse.parse_qs(request)
if params["rel"][0] == OIC_ISSUER:
wf = WebFinger()
return Response(wf.response(params["resource"][0], provider.baseurl),
headers=[("Content-Type", "application/jrd+json")])
else:
return BadRequest("Incorrect webfinger.") | Handle webfinger requests. |
def tofits(self, filename, overwrite=True, velocity=True,
optical=True, bitpix=-32, minpix=1, maxpix=-1):
return self._tofits(filename, overwrite, velocity, optical,
bitpix, minpix, maxpix) | Write the image to a file in FITS format.
`filename`
FITS file name
`overwrite`
If False, an exception is raised if the new image file already exists.
Default is True.
`velocity`
By default a velocity primary spectral axis is written if possible.
`optical`
If writing a velocity, use the optical definition
(otherwise use radio).
`bitpix`
can be set to -32 (float) or 16 (short) only. When `bitpix` is
16 it will write BSCALE and BZERO into the FITS file. If minPix
`minpix` and `maxpix`
are used to determine BSCALE and BZERO if `bitpix=16`.
If `minpix` is greater than `maxpix` (which is the default),
the minimum and maximum pixel values will be determined from the ddta.
Oherwise the supplied values will be used and pixels outside that
range will be clipped to the minimum and maximum pixel values.
Note that this truncation does not occur for `bitpix=-32`. |
def register_watcher(self, event_type, callback, register_timeout=None):
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=[event_type]),
timeout=register_timeout) | Register a callback for a given event type. |
async def _start_payloads(self, nursery):
with self._lock:
for coroutine in self._payloads:
nursery.start_soon(coroutine)
self._payloads.clear()
await trio.sleep(0) | Start all queued payloads |
def dump(u, *args, **kwargs):
return dict(
id=u.id,
email=u.email,
password=u.password,
password_salt=u.password_salt,
note=u.note,
full_name=u.full_name if hasattr(u, 'full_name') else '{0} {1}'.format(
u.given_names, u.family_name),
settings=u.settings,
nickname=u.nickname,
last_login=dt2iso_or_empty(u.last_login)) | Dump the users as a list of dictionaries.
:param u: User to be dumped.
:type u: `invenio.modules.accounts.models.User [Invenio2.x]` or namedtuple.
:returns: User serialized to dictionary.
:rtype: dict |
def parse_html(html):
paragraphs = re.split("</?p[^>]*>", html)
paragraphs = [re.split("<br */?>", p) for p in paragraphs if p]
return [[get_text(l) for l in p] for p in paragraphs] | Attempt to convert html to plain text while keeping line breaks.
Returns a list of paragraphs, each being a list of lines. |
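A usage sketch (assumes a `get_text` helper, as used above, that strips any remaining tags from a line):

```python
html = "<p>First line<br/>second line</p><p>New paragraph</p>"
parse_html(html)
# -> [['First line', 'second line'], ['New paragraph']]
```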
def cwt_coefficients(x, param):
calculated_cwt = {}
res = []
indices = []
for parameter_combination in param:
widths = parameter_combination["widths"]
w = parameter_combination["w"]
coeff = parameter_combination["coeff"]
if widths not in calculated_cwt:
calculated_cwt[widths] = cwt(x, ricker, widths)
calculated_cwt_for_widths = calculated_cwt[widths]
indices += ["widths_{}__coeff_{}__w_{}".format(widths, coeff, w)]
i = widths.index(w)
if calculated_cwt_for_widths.shape[1] <= coeff:
res += [np.NaN]
else:
res += [calculated_cwt_for_widths[i, coeff]]
return zip(indices, res) | Calculates a Continuous wavelet transform for the Ricker wavelet, also known as the "Mexican hat wavelet" which is
defined by
.. math::
\\frac{2}{\\sqrt{3a} \\pi^{\\frac{1}{4}}} (1 - \\frac{x^2}{a^2}) exp(-\\frac{x^2}{2a^2})
where :math:`a` is the width parameter of the wavelet function.
This feature calculator takes three different parameters: widths, coeff and w. The feature calculator takes all the
different widths arrays and then calculates the cwt one time for each different width array. Then the values for the
different coefficient for coeff and width w are returned. (For each dic in param one feature is returned)
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"widths":x, "coeff": y, "w": z} with x array of int and y,z int
:type param: list
:return: the different feature values
:return type: pandas.Series |
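A usage sketch matching the documented parameter layout. Note that each `widths` entry must be a tuple rather than a list, since the function caches results in a dict keyed by the widths value:

```python
import numpy as np

x = np.sin(np.linspace(0, 10 * np.pi, 500))
param = [{"widths": (2, 5, 10), "coeff": 0, "w": 5},
         {"widths": (2, 5, 10), "coeff": 3, "w": 2}]
for name, value in cwt_coefficients(x, param):
    print(name, value)  # e.g. widths_(2, 5, 10)__coeff_0__w_5 ...
```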
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None):
if six.PY2:
rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func)
else:
rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo)
if extra is None:
extra = dict()
extra.update({"pid": os.getpid(), "uid": os.getuid(), "gid": os.getgid(), "ppid": os.getppid()})
for key in extra:
rv.__dict__[key] = extra[key]
return rv | Override default logger to allow overriding of internal attributes. |
def _disambiguate_pos(self, terms, pos):
candidate_map = {term: wn.synsets(term, pos=pos)[:3] for term in terms}
concepts = set(c for cons in candidate_map.values() for c in cons)
concepts = list(concepts)
sim_mat = self._similarity_matrix(concepts)
map = {}
for term, cons in candidate_map.items():
if not cons:
continue
scores = []
for con in cons:
i = concepts.index(con)
scores_ = []
for term_, cons_ in candidate_map.items():
if term == term_ or not cons_:
continue
cons_idx = [concepts.index(c) for c in cons_]
top_sim = max(sim_mat[i,cons_idx])
scores_.append(top_sim)
scores.append(sum(scores_))
best_idx = np.argmax(scores)
map[term] = cons[best_idx]
return map | Disambiguates a list of tokens of a given PoS. |
def create(cls, pid_value, **kwargs):
return super(DataCiteProvider, cls).create(
pid_value=pid_value, **kwargs) | Create a new record identifier.
For more information about parameters,
see :meth:`invenio_pidstore.providers.BaseProvider.create`.
:param pid_value: Persistent identifier value.
:params **kwargs: See
:meth:`invenio_pidstore.providers.base.BaseProvider.create` extra
parameters.
:returns: A :class:`invenio_pidstore.providers.DataCiteProvider`
instance. |
def disable_svc_freshness_check(self, service):
if service.check_freshness:
service.modified_attributes |= DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value
service.check_freshness = False
self.send_an_element(service.get_update_status_brok()) | Disable freshness check for a service
Format of the line that triggers function call::
DISABLE_SERVICE_FRESHNESS_CHECK;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None |
def _simple_cmd(cmd, app, url='', timeout=180):
try:
opts = {
'app': app,
'path': ls(url)[app]['path']
}
return '\n'.join(_wget(cmd, opts, url, timeout=timeout)['stdout'])
except Exception:
return 'No such application: {0}'.format(app) | Simple command wrapper for commands that need only a path option |
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def check_last_ip(app):
return LastIpCheck(app, conf)
return check_last_ip | Returns a WSGI filter app for use with paste.deploy. |
def set(self, value, *keys):
element = self
if keys is None:
return self
if '.' in keys[0]:
keys = keys[0].split(".")
nested_str = ''.join(["['{0}']".format(x) for x in keys])
try:
ast.literal_eval(str(value))
converted = str(value)
except ValueError:
converted = "'" + str(value) + "'"
exec("self" + nested_str + "=" + converted)
return element | Sets the dict of the information as read from the yaml file. To access
the file safely, you can use the keys in the order of the access.
Example: set("{'project':{'fg82':[i0-i10]}}", "provisioner","policy")
will set the value of config["provisioner"]["policy"] in the yaml file; if
it does not exist, an error will be printed saying that the value does not
exist. Alternatively you can use the . notation e.g.
set("{'project':{'fg82':[i0-i10]}}", "provisioner.policy") |
def update_status(self, progress):
update_interval = 0.2
now = datetime.datetime.now()
if not self._last_progress_update is None and now-self._last_progress_update < datetime.timedelta(seconds=update_interval):
return
self._last_progress_update = now
self.progressBar.setValue(progress)
script = self.current_script
if progress:
remaining_time = str(datetime.timedelta(seconds=script.remaining_time.seconds))
self.lbl_time_estimate.setText('time remaining: {:s}'.format(remaining_time))
if str(self.tabWidget.tabText(self.tabWidget.currentIndex())).lower() in ('scripts', 'monitor'):
self.plot_script(script) | waits for a signal emitted from a thread and updates the gui
Args:
progress: progress value (percent) emitted by the running script thread
Returns: |
def render(self, is_unicode=False, pretty_print=False):
for f in self.graph.xml_filters:
self.root = f(self.root)
args = {'encoding': 'utf-8'}
svg = b''
if etree.lxml:
args['pretty_print'] = pretty_print
if not self.graph.disable_xml_declaration:
svg = b"<?xml version='1.0' encoding='utf-8'?>\n"
if not self.graph.disable_xml_declaration:
svg += b'\n'.join([
etree.tostring(pi, **args)
for pi in self.processing_instructions
])
svg += etree.tostring(self.root, **args)
if self.graph.disable_xml_declaration or is_unicode:
svg = svg.decode('utf-8')
return svg | Last thing to do before rendering |
def dump(thing, query, from_date, file_prefix, chunk_size, limit, thing_flags):
init_app_context()
file_prefix = file_prefix if file_prefix else '{0}_dump'.format(thing)
kwargs = dict((f.strip().replace('-', '_'), True) for f in thing_flags)
try:
thing_func = collect_things_entry_points()[thing]
except KeyError:
click.Abort('{0} is not in the list of available things: {1}'.format(
thing, collect_things_entry_points()))
click.echo("Querying {0}...".format(thing))
count, items = thing_func.get(query, from_date, limit=limit, **kwargs)
progress_i = 0
click.echo("Dumping {0}...".format(thing))
with click.progressbar(length=count) as bar:
for i, chunk_ids in enumerate(grouper(items, chunk_size)):
with open('{0}_{1}.json'.format(file_prefix, i), 'w') as fp:
fp.write("[\n")
for _id in chunk_ids:
try:
json.dump(
thing_func.dump(_id, from_date, **kwargs),
fp,
default=set_serializer
)
fp.write(",")
except Exception as e:
click.secho("Failed dump {0} {1} ({2})".format(
thing, _id, e.message), fg='red')
progress_i += 1
bar.update(progress_i)
fp.seek(fp.tell()-1)
fp.write("\n]") | Dump data from Invenio legacy. |
def create_cloudwatch_log_event(app_name, env, region, rules):
session = boto3.Session(profile_name=env, region_name=region)
cloudwatch_client = session.client('logs')
log_group = rules.get('log_group')
filter_name = rules.get('filter_name')
filter_pattern = rules.get('filter_pattern')
if not log_group:
LOG.critical('Log group is required and no "log_group" is defined!')
raise InvalidEventConfiguration('Log group is required and no "log_group" is defined!')
if not filter_name:
LOG.critical('Filter name is required and no "filter_name" is defined!')
raise InvalidEventConfiguration('Filter name is required and no "filter_name" is defined!')
if filter_pattern is None:
LOG.critical('Filter pattern is required and no "filter_pattern" is defined!')
raise InvalidEventConfiguration('Filter pattern is required and no "filter_pattern" is defined!')
lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)
statement_id = '{0}_cloudwatchlog_{1}'.format(app_name, filter_name.replace(" ", "_"))
principal = 'logs.{0}.amazonaws.com'.format(region)
account_id = get_env_credential(env=env)['accountId']
source_arn = "arn:aws:logs:{0}:{1}:log-group:{2}:*".format(region, account_id, log_group)
add_lambda_permissions(
function=lambda_alias_arn,
statement_id=statement_id,
action='lambda:InvokeFunction',
principal=principal,
source_arn=source_arn,
env=env,
region=region)
cloudwatch_client.put_subscription_filter(
logGroupName=log_group, filterName=filter_name, filterPattern=filter_pattern, destinationArn=lambda_alias_arn)
LOG.info("Created Cloudwatch log event with filter: %s", filter_pattern) | Create cloudwatch log event for lambda from rules.
Args:
app_name (str): name of the lambda function
env (str): Environment/Account for lambda function
region (str): AWS region of the lambda function
rules (str): Trigger rules from the settings |
def update_factor(self, name, body):
url = self._url('factors/{0}'.format(name))
return self.client.put(url, data=body) | Update Guardian factor
Useful to enable / disable factor
Args:
name (str): Either push-notification or sms
body (dict): Attributes to modify.
See: https://auth0.com/docs/api/management/v2#!/Guardian/put_factors_by_name |
def make_hash(o):
if type(o) == DictProxyType:
o2 = {}
for k, v in o.items():
if not k.startswith("__"):
o2[k] = v
o = o2
if isinstance(o, (set, tuple, list)):
return tuple([make_hash(e) for e in o])
elif not isinstance(o, dict):
return hash(o)
new_o = copy.deepcopy(o)
for k, v in new_o.items():
new_o[k] = make_hash(v)
return hash(tuple(frozenset(sorted(new_o.items())))) | r"""
Makes a hash from a dictionary, list, tuple or set to any level, that
contains only other hashable types (including any lists, tuples, sets, and
dictionaries). In the case where other kinds of objects (like classes) need
to be hashed, pass in a collection of object attributes that are pertinent.
For example, a class can be hashed in this fashion:
make_hash([cls.__dict__, cls.__name__])
A function can be hashed like so:
make_hash([fn.__dict__, fn.__code__])
References:
http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary |
def create_loadbalancer(self, datacenter_id, loadbalancer):
data = json.dumps(self._create_loadbalancer_dict(loadbalancer))
response = self._perform_request(
url='/datacenters/%s/loadbalancers' % datacenter_id,
method='POST',
data=data)
return response | Creates a load balancer within the specified data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param loadbalancer: The load balancer object to be created.
:type loadbalancer: ``dict`` |
def siphashx24(message, key=b'', encoder=nacl.encoding.HexEncoder):
digest = _sip_hashx(message, key)
return encoder.encode(digest) | Computes a keyed MAC of ``message`` using the 128 bit variant of the
siphash-2-4 construction.
:param message: The message to hash.
:type message: bytes
:param key: the message authentication key for the siphash MAC construct
:type key: bytes(:const:`SIPHASHX_KEYBYTES`)
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes(:const:`SIPHASHX_BYTES`)
.. versionadded:: 1.2 |
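A usage sketch, assuming this is PyNaCl's `nacl.hash.siphashx24`; the key must be exactly 16 bytes:

```python
import nacl.hash

key = b"0123456789abcdef"  # exactly 16 bytes (SIPHASHX_KEYBYTES)
tag = nacl.hash.siphashx24(b"a message", key=key)
print(tag)  # hex-encoded 128-bit authenticator
```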
def additive_noise(stream, key='X', scale=1e-1):
for data in stream:
noise_shape = data[key].shape
noise = scale * np.random.randn(*noise_shape)
data[key] = data[key] + noise
yield data | Add noise to a data stream.
Parameters
----------
stream : iterable
A stream that yields data objects.
key : string, default='X'
Name of the field to add noise.
scale : float, default=0.1
Scale factor for gaussian noise.
Yields
------
data : dict
Updated data objects in the stream. |
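A usage sketch with a toy stream of dicts shaped like the ones described above:

```python
import numpy as np

def toy_stream(n=3):
    # Yield minimal data objects with an 'X' field.
    for _ in range(n):
        yield {"X": np.zeros((2, 2))}

for data in additive_noise(toy_stream(), key="X", scale=0.1):
    print(data["X"])  # zeros plus small gaussian noise
```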
def get_path_matching(name):
p = os.path.join(os.path.expanduser("~"), name)
if not os.path.isdir(p):
p = None
drive, folders = os.path.splitdrive(os.getcwd())
folders = folders.split(os.sep)
folders.insert(0, os.sep)
if name in folders:
p = os.path.join(drive, *folders[: folders.index(name) + 1])
return p | Get path matching a name.
Parameters
----------
name : string
Name to search for.
Returns
-------
string
Full filepath. |
def tree_type_checker(*ref):
ref = tuple(ref)
if NeuriteType.all in ref:
def check_tree_type(_):
return True
else:
def check_tree_type(tree):
return tree.type in ref
return check_tree_type | Tree type checker functor
Returns:
Functor that takes a tree, and returns true if that tree matches any of
NeuriteTypes in ref
Ex:
>>> from neurom.core.types import NeuriteType, tree_type_checker
>>> tree_filter = tree_type_checker(NeuriteType.axon, NeuriteType.basal_dendrite)
>>> nrn.i_neurites(tree.isegment, tree_filter=tree_filter) |
def _compile_bus_injection(self):
string = ''
self.bus_injection = compile(eval(string), '<string>', 'eval') | Impose injections on buses |
def DataIsInteger(self):
return self.data_type in (
definitions.REG_DWORD, definitions.REG_DWORD_BIG_ENDIAN,
definitions.REG_QWORD) | Determines, based on the data type, if the data is an integer.
The data types considered integers are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN),
REG_DWORD_BIG_ENDIAN and REG_QWORD.
Returns:
bool: True if the data is an integer, False otherwise. |
async def send_request(self, connection: Connection, payment_handle: int):
if not hasattr(Credential.send_request, "cb"):
self.logger.debug("vcx_credential_send_request: Creating callback")
Credential.send_request.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32))
c_credential_handle = c_uint32(self.handle)
c_connection_handle = c_uint32(connection.handle)
c_payment = c_uint32(payment_handle)
await do_call('vcx_credential_send_request',
c_credential_handle,
c_connection_handle,
c_payment,
Credential.send_request.cb) | Approves the credential offer and submits a credential request. The result will be a credential stored in the prover's wallet.
:param connection: connection to submit request from
:param payment_handle: currently unused
:return:
Example:
connection = await Connection.create(source_id)
await connection.connect(phone_number)
credential = await Credential.create(source_id, offer)
await credential.send_request(connection, 0) |
def is_username(string, minlen=1, maxlen=15):
if string:
string = string.strip()
return username_re.match(string) and (minlen <= len(string) <= maxlen)
return False | Determines whether the @string pattern is username-like
@string: #str being tested
@minlen: minimum required username length
@maxlen: maximum username length
-> #bool |
def properties(lines):
results = {}
for i, line in enumerate(lines):
type_ = line[3:6]
if type_ not in ["CHG", "RAD", "ISO"]:
continue
count = int(line[6:9])
results[type_] = []
for j in range(count):
idx = int(line[10 + j * 8: 13 + j * 8])
val = int(line[14 + j * 8: 17 + j * 8])
results[type_].append((idx, val))
return results | Parse properties block
Returns:
dict: {property_type: (atom_index, value)} |
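A usage sketch with an MDL molfile-style charge line; the parser reads the three-letter code at columns 4-6 and fixed-width index/value pairs after the count:

```python
lines = ["M  CHG  2   1   1   2  -1"]  # atom 1: charge +1, atom 2: charge -1
properties(lines)
# -> {'CHG': [(1, 1), (2, -1)]}
```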
def get_album(self, object_id, relation=None, **kwargs):
return self.get_object("album", object_id, relation=relation, **kwargs) | Get the album with the provided id
:returns: an :class:`~deezer.resources.Album` object |
def _tobinarray_really(self, start, end, pad, size):
if pad is None:
pad = self.padding
bin = array('B')
if self._buf == {} and None in (start, end):
return bin
if size is not None and size <= 0:
raise ValueError("tobinarray: wrong value for size")
start, end = self._get_start_end(start, end, size)
for i in range_g(start, end+1):
bin.append(self._buf.get(i, pad))
return bin | Return binary array. |
def get_single_header(headers, key):
raw_headers = headers.getRawHeaders(key)
if raw_headers is None:
return None
header, _ = cgi.parse_header(raw_headers[-1])
return header | Get a single value for the given key out of the given set of headers.
:param twisted.web.http_headers.Headers headers:
The set of headers in which to look for the header value
:param str key:
The header key |
def get_l2cap_options (sock):
s = sock.getsockopt (SOL_L2CAP, L2CAP_OPTIONS, 12)
options = list( struct.unpack ("HHHBBBH", s))
return options | get_l2cap_options (sock, mtu)
Gets L2CAP options for the specified L2CAP socket.
Options are: omtu, imtu, flush_to, mode, fcs, max_tx, txwin_size. |
def untracable(object):
@functools.wraps(object)
def untracable_wrapper(*args, **kwargs):
return object(*args, **kwargs)
set_untracable(untracable_wrapper)
return untracable_wrapper | Marks decorated object as non tracable.
:param object: Object to decorate.
:type object: object
:return: Object.
:rtype: object |
def _check_frames(self, frames, fill_value):
if self.seekable():
remaining_frames = self.frames - self.tell()
if frames < 0 or (frames > remaining_frames and
fill_value is None):
frames = remaining_frames
elif frames < 0:
raise ValueError("frames must be specified for non-seekable files")
return frames | Reduce frames to no more than are available in the file. |
def get_safe_type(self):
product_type = self.product_id.split('_')[1]
if product_type.startswith('MSIL'):
return EsaSafeType.COMPACT_TYPE
if product_type in ['OPER', 'USER']:
return EsaSafeType.OLD_TYPE
raise ValueError('Unrecognized product type of product id {}'.format(self.product_id)) | Determines the type of ESA product.
In 2016 ESA changed structure and naming of data. Therefore the class must
distinguish between old product type and compact (new) product type.
:return: type of ESA product
:rtype: constants.EsaSafeType
:raises: ValueError |
def fastp_read_n_plot(self):
data_labels, pdata = self.filter_pconfig_pdata_subplots(self.fastp_n_content_data, 'Base Content: N')
pconfig = {
'id': 'fastp-seq-content-n-plot',
'title': 'Fastp: Read N Content',
'xlab': 'Read Position',
'ylab': 'Percentage of N bases',
'ymax': 100,
'yMinRange': 5,
'ymin': 0,
'xDecimals': False,
'yLabelFormat': '{value}%',
'tt_label': '{point.x}: {point.y:.2f}%',
'data_labels': data_labels
}
return linegraph.plot(pdata, pconfig) | Make the read N content plot for Fastp |
def add_layer_to_canvas(layer, name):
if qgis_version() >= 21800:
layer.setName(name)
else:
layer.setLayerName(name)
QgsProject.instance().addMapLayer(layer, False) | Helper method to add layer to QGIS.
:param layer: The layer.
:type layer: QgsMapLayer
:param name: Layer name.
:type name: str |
def bufsize_validator(kwargs):
invalid = []
in_ob = kwargs.get("in", None)
out_ob = kwargs.get("out", None)
in_buf = kwargs.get("in_bufsize", None)
out_buf = kwargs.get("out_bufsize", None)
in_no_buf = ob_is_tty(in_ob) or ob_is_pipe(in_ob)
out_no_buf = ob_is_tty(out_ob) or ob_is_pipe(out_ob)
err = "Can't specify an {target} bufsize if the {target} target is a pipe or TTY"
if in_no_buf and in_buf is not None:
invalid.append((("in", "in_bufsize"), err.format(target="in")))
if out_no_buf and out_buf is not None:
invalid.append((("out", "out_bufsize"), err.format(target="out")))
return invalid | a validator to prevent a user from saying that they want custom
buffering when they're using an in/out object that will be os.dup'd to the
process, and has its own buffering. an example is a pipe or a tty. it
doesn't make sense to tell them to have a custom buffering, since the os
controls this. |
def send(self, message):
res = jsonex_request(self.server_url + '/send'.lstrip('/'), {'message': message})
msg = res['message']
for k, v in msg.__dict__.items():
setattr(message, k, v)
return message | Send a message by forwarding it to the server
:param message: Message
:type message: smsframework.data.OutgoingMessage
:rtype: smsframework.data.OutgoingMessage
:raise Exception: any exception reported by the other side
:raise urllib2.URLError: Connection error |
def _pprint(self, cycle=False, flat=False, annotate=False, onlychanged=True, level=1, tab='   '):
(kwargs, pos_args, infix_operator, extra_params) = self._pprint_args
(br, indent) = ('' if flat else '\n', '' if flat else tab * level)
prettify = lambda x: isinstance(x, PrettyPrinted) and not flat
pretty = lambda x: x._pprint(flat=flat, level=level+1) if prettify(x) else repr(x)
params = dict(self.get_param_values())
show_lexsort = getattr(self, '_lexorder', None) is not None
modified = [k for (k,v) in self.get_param_values(onlychanged=onlychanged)]
pkwargs = [(k, params[k]) for k in kwargs if (k in modified)] + list(extra_params.items())
arg_list = [(k,params[k]) for k in pos_args] + pkwargs
lines = []
if annotate:
len_ckeys, len_vkeys = len(self.constant_keys), len(self.varying_keys)
info_triple = (len(self),
', %d constant key(s)' % len_ckeys if len_ckeys else '',
', %d varying key(s)' % len_vkeys if len_vkeys else '')
annotation = '# == %d items%s%s ==\n' % info_triple
lines = [annotation]
if show_lexsort: lines.append('(')
if cycle:
lines.append('%s(...)' % self.__class__.__name__)
elif infix_operator:
level = level - 1
triple = (pretty(params[pos_args[0]]), infix_operator, pretty(params[pos_args[1]]))
lines.append('%s %s %s' % triple)
else:
lines.append('%s(' % self.__class__.__name__)
for (k,v) in arg_list:
lines.append('%s%s=%s,' % (br+indent, k, pretty(v)))
lines.append('')
lines = lines[:-1] + [br+(tab*(level-1))+')']
if show_lexsort:
lines.append(').sort(%s)' % ', '.join(repr(el) for el in self._lexorder))
return ''.join(lines) | Pretty printer that prints only the modified keywords and
generates flat representations (for repr) and optionally
annotates the top of the repr with a comment. |
def next_unused_name_in_group(grp, length):
fmt = '%0{0}x'.format(length)
name = fmt % random.getrandbits(length * 4)
while name in grp:
name = fmt % random.getrandbits(length * 4)
return name | Gives a name that isn't used in a Group.
Generates a name of the desired length that is not a Dataset or
Group in the given group. Note, if length is not large enough and
`grp` is full enough, there may be no available names meaning that
this function will hang.
Parameters
----------
grp : h5py.Group or h5py.File
The HDF5 Group (or File if at '/') to generate an unused name
in.
length : int
Number of characters the name should be.
Returns
-------
name : str
A name that isn't already an existing Dataset or Group in
`grp`. |
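A usage sketch, assuming `h5py`; an 8-character name encodes 32 random bits:

```python
import h5py

with h5py.File("example.h5", "w") as f:
    f.create_group("abc")
    name = next_unused_name_in_group(f, 8)
    f.create_group(name)  # cannot collide with 'abc'
```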
def objectnames_conesearch(racenter,
declcenter,
searchradiusarcsec,
simbad_mirror='simbad',
returnformat='csv',
forcefetch=False,
cachedir='~/.astrobase/simbad-cache',
verbose=True,
timeout=10.0,
refresh=2.0,
maxtimeout=90.0,
maxtries=1,
complete_query_later=True):
query = (
"select a.oid, a.ra, a.dec, a.main_id, a.otype_txt, "
"a.coo_bibcode, a.nbref, b.ids as all_ids, "
"(DISTANCE(POINT(, a.ra, a.dec), "
"POINT(, {ra_center:.5f}, {decl_center:.5f})))*3600.0 "
"AS dist_arcsec "
"from basic a join ids b on a.oid = b.oidref where "
"CONTAINS(POINT(,a.ra, a.dec),"
"CIRCLE(,{ra_center:.5f},{decl_center:.5f},"
"{search_radius:.6f}))=1 "
"ORDER by dist_arcsec asc "
)
formatted_query = query.format(ra_center=racenter,
decl_center=declcenter,
search_radius=searchradiusarcsec/3600.0)
return tap_query(formatted_query,
simbad_mirror=simbad_mirror,
returnformat=returnformat,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout,
maxtries=maxtries,
complete_query_later=complete_query_later) | This queries the SIMBAD TAP service for a list of object names near the
coords. This is effectively a "reverse" name resolver (i.e. this does the
opposite of SESAME).
Parameters
----------
racenter,declcenter : float
The cone-search center coordinates in decimal degrees
searchradiusarcsec : float
The radius in arcseconds to search around the center coordinates.
simbad_mirror : str
This is the key used to select a SIMBAD mirror from the
`SIMBAD_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
returnformat : {'csv','votable','json'}
The returned file format to request from the GAIA catalog service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table} |
def _is_modification_or_activity(feature):
if not (isinstance(feature, _bp('ModificationFeature')) or \
isinstance(feature, _bpimpl('ModificationFeature'))):
return None
mf_type = feature.getModificationType()
if mf_type is None:
return None
mf_type_terms = mf_type.getTerm().toArray()
for term in mf_type_terms:
if term in ('residue modification, active',
'residue modification, inactive',
'opening of a channel', 'closing of a channel'):
return 'activity'
return 'modification' | Return True if the feature is a modification |
def _get_current_deployment_label(self):
deploymentId = self._get_current_deployment_id()
deployment = __salt__[](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get()
if deployment:
return deployment.get()
return None | Helper method to find the deployment label that the stage_name is currently associated with. |
def get_processes(sort_by_name=True):
if sort_by_name:
return sorted(
_list_processes(),
key=cmp_to_key(
lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))
),
)
else:
return sorted(
_list_processes(),
key=cmp_to_key(
lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))
),
) | Retrieve a list of processes sorted by name.
Args:
sort_by_name (bool): Sort the list by name or by process ID's.
Returns:
list of (int, str) or list of (int, str, str): List of process id,
process name and optional cmdline tuples. |
def list(self, name, platform='', genre=''):
data_list = self.db.get_data(self.list_path, name=name,
platform=platform, genre=genre)
data_list = data_list.get('Data') or {}
games = data_list.get('Game') or []
return [self._build_item(**i) for i in games] | The name argument is required for this method as per the API
server specification. This method also provides the platform and genre
optional arguments as filters. |
def tail(self, path, tail_length=1024, append=False):
if not path:
raise InvalidInputException("tail: no path given")
block_size = self.serverdefaults()['blockSize']
if tail_length > block_size:
raise InvalidInputException("tail: currently supports length up to the block size (%d)" % (block_size,))
if tail_length <= 0:
raise InvalidInputException("tail: tail_length cannot be less than or equal to zero")
processor = lambda path, node: self._handle_tail(path, node, tail_length, append)
for item in self._find_items([path], processor, include_toplevel=True,
include_children=False, recurse=False):
if item:
yield item | Show the end of the file - default 1KB, supports up to the Hadoop block size.
:param path: Path to read
:type path: string
:param tail_length: The length to read from the end of the file - default 1KB, up to block size.
:type tail_length: int
:param append: Currently not implemented
:type append: bool
:returns: a generator that yields strings |
def date_range_builder(self, start='2013-02-11', end=None):
if not end:
end = time.strftime('%Y-%m-%d')
return 'acquisitionDate:[%s+TO+%s]' % (start, end) | Builds date range query.
:param start:
Date string. format: YYYY-MM-DD
:type start:
String
:param end:
date string. format: YYYY-MM-DD
:type end:
String
:returns:
String |
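Assuming the query format reconstructed above, a call sketch (the `Search` owner class is hypothetical):

```python
searcher = Search()  # hypothetical class that owns date_range_builder
searcher.date_range_builder(start='2014-01-01', end='2014-06-30')
# -> 'acquisitionDate:[2014-01-01+TO+2014-06-30]'
```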
def _delete_from_indices(self, pipeline):
s = Set(self.key()['_indices'])
z = Set(self.key()['_zindices'])
for index in s.members:
pipeline.srem(index, self.id)
for index in z.members:
pipeline.zrem(index, self.id)
pipeline.delete(s.key)
pipeline.delete(z.key) | Deletes the object's id from the sets(indices) it has been added
to and removes its list of indices (used for housekeeping). |
def save(filename, obj, overwrite=False, create_directories=False):
if isinstance(filename, six.string_types):
filename = os.path.expanduser(filename)
if not overwrite and os.path.exists(filename):
raise ValueError('save would overwrite existing file: %s' % filename)
if create_directories:
dname = os.path.dirname(os.path.realpath(filename))
if not os.path.isdir(dname): os.makedirs(dname)
with open(filename, 'wb') as f:
_save_stream(f, obj)
else:
_save_stream(filename, obj)
return filename | pimms.save(filename, obj) attempts to pickle the given object obj in the filename (or stream,
if given). An error is raised when this cannot be accomplished; the first argument is always
returned; though if the argument is a filename, it may be a different string that refers to
the same file.
The save/load protocol uses pickle for all saving/loading except when the object is a numpy
object, in which case it is written using obj.tofile(). The save function writes meta-data
into the file so cannot simply be unpickled, but must be loaded using the pimms.load()
function. Fundamentally, however, if an object can be pickled, it can be saved/loaded.
Options:
* overwrite (False) The optional parameter overwrite indicates whether an error should be
raised before opening the file if the file already exists.
* create_directories (False) The optional parameter create_directories indicates whether the
function should attempt to create the directories in which the filename exists if they do
not already exist. |
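A round-trip sketch, assuming the `pimms` package whose `load` counterpart (named in the docstring) reads files written by `save`:

```python
import pimms

data = {"alpha": [1, 2, 3], "beta": "hello"}
pimms.save("/tmp/example.pimms", data, overwrite=True, create_directories=True)
restored = pimms.load("/tmp/example.pimms")
assert restored == data
```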
def _zforce(self,R,z,phi=0.,t=0.):
l,n = bovy_coords.Rz_to_lambdanu (R,z,ac=self._ac,Delta=self._Delta)
jac = bovy_coords.Rz_to_lambdanu_jac(R,z, Delta=self._Delta)
dldz = jac[0,1]
dndz = jac[1,1]
return - (dldz * self._lderiv(l,n) + \
dndz * self._nderiv(l,n)) | NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2015-02-13 - Written - Trick (MPIA) |
def file_transfer_protocol_send(self, target_network, target_system, target_component, payload, force_mavlink1=False):
return self.send(self.file_transfer_protocol_encode(target_network, target_system, target_component, payload), force_mavlink1=force_mavlink1) | File transfer message
target_network : Network ID (0 for broadcast) (uint8_t)
target_system : System ID (0 for broadcast) (uint8_t)
target_component : Component ID (0 for broadcast) (uint8_t)
payload : Variable length payload. The length is defined by the remaining message length when subtracting the header and other fields. The entire content of this block is opaque unless you understand the encoding message_type. The particular encoding used can be extension specific and might not always be documented as part of the mavlink specification. (uint8_t) |
def ssh(self, enable=True, comment=None):
self.make_request(
NodeCommandFailed,
method='update',
resource='ssh',
params={'enable': enable, 'comment': comment}) | Enable or disable SSH
:param bool enable: enable or disable SSH daemon
:param str comment: optional comment for audit
:raises NodeCommandFailed: cannot enable SSH daemon
:return: None |
def get_open_clinvar_submission(self, user_id, institute_id):
LOG.info("Retrieving an open clinvar submission for user '%s' and institute '%s'", user_id, institute_id)
query = dict(user_id=user_id, institute_id=institute_id, status='open')
submission = self.clinvar_submission_collection.find_one(query)
if submission is None:
submission_id = self.create_submission(user_id, institute_id)
submission = self.clinvar_submission_collection.find_one({'_id': submission_id})
return submission | Retrieve the database id of an open clinvar submission for a user and institute,
if none is available then create a new submission and return it
Args:
user_id(str): a user ID
institute_id(str): an institute ID
Returns:
submission(obj) : an open clinvar submission object |
def slides(self):
sldIdLst = self._element.get_or_add_sldIdLst()
self.part.rename_slide_parts([sldId.rId for sldId in sldIdLst])
return Slides(sldIdLst, self) | |Slides| object containing the slides in this presentation. |
def total_memory():
with file('/proc/meminfo', 'r') as f:
for line in f:
words = line.split()
if words[0].upper() == 'MEMTOTAL:':
return int(words[1]) * 1024
raise IOError('MemTotal entry not found in /proc/meminfo') | Returns the amount of memory available for use.
The memory is obtained from MemTotal entry in /proc/meminfo.
Notes
=====
This function is not very useful and not very portable. |
def get(self, block=1, delay=None):
if _debug: IOQueue._debug("get block=%r delay=%r", block, delay)
if not block and not self.notempty.isSet():
if _debug: IOQueue._debug(" - not blocking and empty")
return None
if delay:
self.notempty.wait(delay)
if not self.notempty.isSet():
return None
else:
self.notempty.wait()
priority, iocb = self.queue[0]
del self.queue[0]
iocb.ioQueue = None
qlen = len(self.queue)
if not qlen:
self.notempty.clear()
return iocb | Get a request from a queue, optionally block until a request
is available. |
def validate(user_input, ret_errs=False, print_errs=False):
errs = run_validator(user_input)
passed = len(errs) == 0
if print_errs:
for err in errs:
print(err)
if ret_errs:
return passed, errs
return passed | Wrapper for run_validator function that returns True if the user_input
contains a valid STIX pattern or False otherwise. The error messages may
also be returned or printed based upon the ret_errs and print_errs arg
values. |
def size(args):
p = OptionParser(size.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
total_size = total_numrecords = 0
for f in args:
cur_size = cur_numrecords = 0
for rec in iter_fastq(f):
if not rec:
break
cur_numrecords += 1
cur_size += len(rec)
print(" ".join(str(x) for x in \
(op.basename(f), cur_numrecords, cur_size)))
total_numrecords += cur_numrecords
total_size += cur_size
if len(args) > 1:
print(" ".join(str(x) for x in \
("Total", total_numrecords, total_size))) | %prog size fastqfile
Find the total base pairs in a list of fastq files |
def receive_empty(self, message):
logger.debug("receive_empty - " + str(message))
try:
host, port = message.source
except AttributeError:
return
key_mid = str_append_hash(host, port, message.mid)
key_mid_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.mid)
key_token = str_append_hash(host, port, message.token)
key_token_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.token)
if key_mid in list(self._transactions.keys()):
transaction = self._transactions[key_mid]
elif key_token in self._transactions_token:
transaction = self._transactions_token[key_token]
elif key_mid_multicast in list(self._transactions.keys()):
transaction = self._transactions[key_mid_multicast]
elif key_token_multicast in self._transactions_token:
transaction = self._transactions_token[key_token_multicast]
else:
logger.warning("Un-Matched incoming empty message " + str(host) + ":" + str(port))
return None
if message.type == defines.Types["ACK"]:
if not transaction.request.acknowledged:
transaction.request.acknowledged = True
elif (transaction.response is not None) and (not transaction.response.acknowledged):
transaction.response.acknowledged = True
elif message.type == defines.Types["RST"]:
if not transaction.request.acknowledged:
transaction.request.rejected = True
elif not transaction.response.acknowledged:
transaction.response.rejected = True
elif message.type == defines.Types["CON"]:
logger.debug("Implicit ACK on received CON for waiting transaction")
transaction.request.acknowledged = True
else:
logger.warning("Unhandled message type...")
if transaction.retransmit_stop is not None:
transaction.retransmit_stop.set()
return transaction | Pair ACKs with requests.
:type message: Message
:param message: the received message
:rtype : Transaction
:return: the transaction to which the message belongs to |
def validate(cls, code, prefix):
reasons = []
pieces = code.split('-')
n = len(pieces)
if n > 3:
reasons.append('{}invalid geo code "{}"'.format(prefix, code))
elif n > 0 and pieces[0] not in geo_data:
reasons.append(
'{}unknown continent code "{}"'.format(prefix, code))
elif n > 1 and pieces[1] not in geo_data[pieces[0]]:
reasons.append('{}unknown country code "{}"'.format(prefix, code))
elif n > 2 and \
pieces[2] not in geo_data[pieces[0]][pieces[1]]['provinces']:
reasons.append('{}unknown province code "{}"'.format(prefix, code))
return reasons | Validates an octoDNS geo code making sure that it is a valid and
corresponding:
* continent
* continent & country
* continent, country, & province |
def to_csv(self, file: str, opts: dict = None) -> str:
opts = opts if opts is not None else {}
ll = self._is_valid()
if ll:
if not self.sas.batch:
print(ll['LOG'])
else:
return ll
else:
return self.sas.write_csv(file, self.table, self.libref, self.dsopts, opts) | This method will export a SAS Data Set to a file in CSV format.
:param file: the OS filesystem path of the file to be created (exported from this SAS Data Set)
:return: |
def add_items(self, items, index_items):
count_templates = 0
count_items = 0
generated_items = []
for item in items:
if item.is_tpl():
self.add_template(item)
count_templates = count_templates + 1
else:
new_items = self.add_item(item, index_items)
count_items = count_items + max(1, len(new_items))
if new_items:
generated_items.extend(new_items)
if count_templates:
logger.info("Created %d template(s)", count_templates)
if count_items:
logger.info("Created %d %s(s)", count_items, self.inner_class.my_type) | Add items to the templates list if the item is a template, else add it to the items list
:param items: items list to add
:type items: alignak.objects.item.Items
:param index_items: Flag indicating if the items should be indexed on the fly.
:type index_items: bool
:return: None |
def write(self, chunk: Union[str, bytes, dict]) -> None:
if self._finished:
raise RuntimeError("Cannot write() after finish()")
if not isinstance(chunk, (bytes, unicode_type, dict)):
message = "write() only accepts bytes, unicode, and dict objects"
if isinstance(chunk, list):
message += (
". Lists not accepted for security reasons; see "
+ "http://www.tornadoweb.org/en/stable/web.html
)
raise TypeError(message)
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
chunk = utf8(chunk)
self._write_buffer.append(chunk) | Writes the given chunk to the output buffer.
To write the output to the network, use the `flush()` method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
``set_header`` *after* calling ``write()``).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
https://github.com/facebook/tornado/issues/1009 |
def lookup_subclass(cls, d):
try:
typeid = d["typeid"]
except KeyError:
raise FieldError("typeid not present in keys %s" % list(d))
subclass = cls._subcls_lookup.get(typeid, None)
if not subclass:
raise FieldError("'%s' not a valid typeid" % typeid)
else:
return subclass | Look up a class based on a serialized dictionary containing a typeid
Args:
d (dict): Dictionary with key "typeid"
Returns:
Serializable subclass |
def accuracy(self, outputs):
output = outputs[self.output_name]
predict = TT.argmax(output, axis=-1)
correct = TT.eq(predict, self._target)
acc = correct.mean()
if self._weights is not None:
acc = (self._weights * correct).sum() / self._weights.sum()
return acc | Build a Theano expression for computing the accuracy of graph output.
Parameters
----------
outputs : dict of Theano expressions
A dictionary mapping network output names to Theano expressions
representing the outputs of a computation graph.
Returns
-------
acc : Theano expression
A Theano expression representing the accuracy of the output compared
to the target data. |
def memoize(func):
func._cache_dict = {}
@wraps(func)
def _inner(*args, **kwargs):
return _get_memoized_value(func, args, kwargs)
return _inner | Decorator that stores function results in a dictionary to be used on the
next time that the same arguments were informed. |
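A usage sketch; assuming the module's `_get_memoized_value` helper keys the cache on the call arguments, the wrapped function runs once per distinct argument set:

```python
import time

@memoize
def slow_square(n):
    time.sleep(0.5)  # stand-in for an expensive computation
    return n * n

slow_square(4)  # computed, takes ~0.5 s
slow_square(4)  # served from func._cache_dict instantly
```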
def get_topic_sha3(event_block):
sig = ""
sig += event_block["name"]
if not event_block["inputs"]:
sig += "()"
return sig
sig += "("
for input in event_block["inputs"]:
sig += input["type"]
sig += ","
sig = sig[:-1]
sig += ")"
return sig | takes an event block and returns a signature for sha3 hashing
:param event_block:
:return: |
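A usage sketch with a minimal ABI-style event block; hashing the returned signature with keccak-256 yields the log topic:

```python
event_block = {
    "name": "Transfer",
    "inputs": [{"type": "address"}, {"type": "address"}, {"type": "uint256"}],
}
get_topic_sha3(event_block)
# -> 'Transfer(address,address,uint256)'
```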
def get_symbol_size(version, scale=1, border=None):
if border is None:
border = get_default_border_size(version)
dim = version * 4 + 17 if version > 0 else (version + 4) * 2 + 9
dim += 2 * border
dim *= scale
return dim, dim | \
Returns the symbol size (width x height) with the provided border and
scaling factor.
:param int version: A version constant.
:param scale: Indicates the size of a single module (default: 1).
The size of a module depends on the used output format; i.e.
in a PNG context, a scaling factor of 2 indicates that a module
has a size of 2 x 2 pixel. Some outputs (i.e. SVG) accept
floating point values.
:type scale: int or float
:param int border: The border size or ``None`` to specify the
default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
:rtype: tuple (width, height) |
def parse_domain_users(domain_users_file, domain_groups_file):
with open(domain_users_file) as f:
users = json.loads(f.read())
domain_groups = {}
if domain_groups_file:
with open(domain_groups_file) as f:
groups = json.loads(f.read())
for group in groups:
sid = get_field(group, 'objectSid')
domain_groups[int(sid.split('-')[-1])] = get_field(group, 'cn')
user_search = UserSearch()
count = 0
total = len(users)
print_notification("Importing {} users".format(total))
for entry in users:
result = parse_user(entry, domain_groups)
user = user_search.id_to_object(result['username'])  # key names assumed throughout
user.name = result['name']
user.domain.append(result['domain'])
user.description = result['description']
user.groups.extend(result['groups'])
user.flags.extend(result['flags'])
user.sid = result['sid']
user.add_tag("domaindump")
user.save()
count += 1
sys.stdout.write('\r')
sys.stdout.write("[{}/{}]".format(count, total))
sys.stdout.flush()
sys.stdout.write('\n')
return count | Parses the domain users and groups files. |
def run(self):
self.count += 1
print('run %d' % (self.count))  # assumed message text
if self.count < 3:
raise RuntimeError()
return True | Increments counter and raises an exception for first two runs. |
def char_style(self, style):
styleset = {'normal': 0,
'outline': 1,
'shadow': 2,
'outlineshadow': 3
}
if style in styleset:
# the command byte after ESC is an assumption; the original literal was lost
self.send(chr(27) + 'q' + chr(styleset[style]))
else:
raise RuntimeError('Invalid character style') | Sets the character style.
Args:
style: The desired character style. Choose from 'normal', 'outline', 'shadow', and 'outlineshadow'
Returns:
None
Raises:
RuntimeError: Invalid character style |
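Usage is then straightforward, assuming `printer` is an instance of the enclosing class:

    printer.char_style('outline')  # sends ESC + command byte + chr(1)
    printer.char_style('bold')     # raises RuntimeError: invalid character style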
def get_response_signer(self):
if not hasattr(self, "response_signer"):
self.response_signer = V2ResponseSigner(self.digest, orig=self)
return self.response_signer | Returns the response signer for this version of the signature. |
def visc_rad_kap_sc(T,rho,X):
kappa = 0.2*(1.+X)
nu_rad = 6.88e-26*(T**4/(kappa*rho**2))
return nu_rad | Radiative viscosity (Thomas, 1930) for e- scattering opacity
Parameters
----------
X : float
H mass fraction
T : float
temperature in K
rho : float
density in cgs
Returns
-------
nu : float
radiative diffusivity in [cm**2/s]
Examples
--------
>>> In [1]: import astronomy as ast
>>> In [2]: l = 100*1.e5 # 100km
>>> In [3]: v = 1.e5 # typical velocity
>>> In [4]: T = 90.e6 # temperature
>>> In [5]: X = 0.001 # H mass fraction
>>> In [6]: rho = 100. # density
>>> In [7]: nu = ast.visc_rad_kap_sc(T,rho,X)
>>> In [8]: Re=v*l/nu
>>> In [9]: print("Re_rad = "+str('%g'%Re))
>>> Re_rad = 4.43512e+08
Notes
-----
Eqn. 14' in Schatzman, 1977, assume electron scattering opacity
kappa_sc = 0.2*(1+X), Kippenhahn (2nd edn, Eqn 17.2) |
def list_images(self, repository_name, registry_id=None):
repository = None
found = False
if repository_name in self.repositories:
repository = self.repositories[repository_name]
if registry_id:
if repository.registry_id == registry_id:
found = True
else:
found = True
if not found:
raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID)
images = []
for image in repository.images:
images.append(image)
return images | maxResults and filtering not implemented |
def trim_docstring(docstring):
if not docstring:
return ''
lines = docstring.expandtabs().splitlines()
# Remainder of the PEP 257 trim algorithm, condensed; the source entry truncates here.
indent = min((len(l) - len(l.lstrip()) for l in lines[1:] if l.strip()), default=0)
trimmed = [lines[0].strip()] + [l[indent:].rstrip() for l in lines[1:]]
return '\n'.join(trimmed).strip('\n') | Taken from http://www.python.org/dev/peps/pep-0257/ |
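A quick check of the behaviour, per the PEP 257 specification:

    doc = """Summary line.

            Indented detail.
        """
    trim_docstring(doc)  # -> 'Summary line.\n\nIndented detail.'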
def list_namespaced_pod(self, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_pod_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_pod_with_http_info(namespace, **kwargs)
return data | list or watch objects of kind Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_pod(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1PodList
If the method is called asynchronously,
returns the request thread. |
def get_request_date(cls, req):
date = None
for header in ['x-amz-date', 'Date']:
if header not in req.headers:
continue
try:
date_str = cls.parse_date(req.headers[header])
except DateFormatError:
continue
try:
date = datetime.datetime.strptime(date_str, '%Y-%m-%d').date()  # assumes parse_date normalises to ISO dates
except ValueError:
continue
else:
break
return date | Try to pull a date from the request by looking first at the
x-amz-date header, and if that's not present then the Date header.
Return a datetime.date object, or None if neither date header
is found or is in a recognisable format.
req -- a requests PreparedRequest object |
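A hedged illustration with the requests library; the enclosing class is called `AWS4Auth` here only for the example, and the result assumes the ISO date format noted above:

    import requests

    req = requests.Request('GET', 'https://example.com/',
                           headers={'x-amz-date': '2015-02-25'}).prepare()
    AWS4Auth.get_request_date(req)  # -> datetime.date(2015, 2, 25)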
def find_methods(self, classname=".*", methodname=".*", descriptor=".*",
accessflags=".*", no_external=False):
for cname, c in self.classes.items():
if re.match(classname, cname):
for m in c.get_methods():
z = m.get_method()
if no_external and isinstance(z, ExternalMethod):
continue
if re.match(methodname, z.get_name()) and \
re.match(descriptor, z.get_descriptor()) and \
re.match(accessflags, z.get_access_flags_string()):
yield m | Find a method by name using regular expression.
This method will return all MethodClassAnalysis objects, which match the
classname, methodname, descriptor and accessflags of the method.
:param classname: regular expression for the classname
:param methodname: regular expression for the method name
:param descriptor: regular expression for the descriptor
:param accessflags: regular expression for the accessflags
:param no_external: Remove external method from the output (default False)
:rtype: generator of `MethodClassAnalysis` |
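Typical usage, androguard-style; `dx` stands in for an Analysis instance and is illustrative:

    for m in dx.find_methods(classname='Lcom/example/.*',
                             methodname='onCreate',
                             no_external=True):
        print(m.get_method().get_name())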
def set_used_labels(self, labels):
mask = np.zeros(self.cl_.size, dtype=bool)
for l in labels:
mask = np.logical_or(mask, self.cl_ == l)
self.trial_mask_ = mask
return self | Specify which trials to use in subsequent analysis steps.
This function masks trials based on their class labels.
Parameters
----------
labels : list of class labels
Marks all trials that have a label that is in the `labels` list for further processing.
Returns
-------
self : Workspace
The Workspace object. |
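The mask built here is what `np.in1d` computes in one call; usage with a hypothetical workspace `ws`:

    ws.set_used_labels(['left', 'right'])
    # ws.trial_mask_ == np.in1d(ws.cl_, ['left', 'right'])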