Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars)
---|---|---
389,100 | def _build_action_bound_constraints_table(self):
self.action_lower_bound_constraints = {}
self.action_upper_bound_constraints = {}
for name, preconds in self.local_action_preconditions.items():
for precond in preconds:
expr_type = precond.etype
expr_args = precond.args
bounds_expr = None
if expr_type == ('aggregation', 'forall'):
inner_expr = expr_args[1]
if inner_expr.etype[0] == 'relational':
bounds_expr = inner_expr
elif expr_type[0] == 'relational':
bounds_expr = precond
if bounds_expr:
bound = self._extract_lower_bound(name, bounds_expr)
if bound is not None:
self.action_lower_bound_constraints[name] = bound
else:
bound = self._extract_upper_bound(name, bounds_expr)
if bound is not None:
self.action_upper_bound_constraints[name] = bound | Builds the lower and upper action bound constraint expressions. |
389,101 | def insert(self, key, value, ttl=0, format=None, persist_to=0, replicate_to=0):
return _Base.insert(self, key, value, ttl=ttl, format=format,
persist_to=persist_to, replicate_to=replicate_to) | Store an object in Couchbase unless it already exists.
Follows the same conventions as :meth:`upsert` but the value is
stored only if it does not exist already. Conversely, the value
is not stored if the key already exists.
Notably missing from this method is the `cas` parameter; this is
because `insert` will only succeed if a key does not already
exist on the server (and thus can have no CAS).
:raise: :exc:`.KeyExistsError` if the key already exists
.. seealso:: :meth:`upsert`, :meth:`insert_multi` |
389,102 | def cluster(self, method, **kwargs):
from eqcorrscan.utils import clustering
tribes = []
func = getattr(clustering, method)
if method in ['space_cluster', 'space_time_cluster']:
cat = Catalog([t.event for t in self.templates])
groups = func(cat, **kwargs)
for group in groups:
new_tribe = Tribe()
for event in group:
new_tribe.templates.extend([t for t in self.templates
if t.event == event])
tribes.append(new_tribe)
return tribes | Cluster the tribe.
Cluster templates within a tribe: returns multiple tribes each of
which could be stacked.
:type method: str
:param method:
Method of stacking, see :mod:`eqcorrscan.utils.clustering`
:return: List of tribes.
.. rubric:: Example |
389,103 | def get_box_files(self, box_key):
uri = '/'.join([self.api_uri,
self.boxes_suffix,
box_key,
self.files_suffix
])
return self._req('get', uri) | Gets the file infos in a single box.
Args:
box_key: key for the box
return (status code, list of file info dicts) |
389,104 | def get_reply_visibility(self, status_dict):
visibility = ("public", "unlisted", "private", "direct")
default_visibility = visibility.index(self.default_visibility)
status_visibility = visibility.index(status_dict["visibility"])
return visibility[max(default_visibility, status_visibility)] | Given a status dict, return the visibility that should be used.
This behaves like Mastodon does by default. |
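The max-of-two-indices rule above is easy to check in isolation. A minimal standalone sketch (the `VISIBILITY` tuple mirrors the one in the method; the helper name is hypothetical):

```python
VISIBILITY = ("public", "unlisted", "private", "direct")

def reply_visibility(default: str, status: str) -> str:
    # The reply uses whichever visibility is more restrictive
    # (i.e. the higher index in the ordered tuple).
    return VISIBILITY[max(VISIBILITY.index(default), VISIBILITY.index(status))]

assert reply_visibility("unlisted", "direct") == "direct"
assert reply_visibility("private", "public") == "private"
```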
389,105 | def has_args():
no_args_syntax =
args_syntax = no_args_syntax +
args, no_args = [(-1,-1)], [(-1,-1)]
for i, line in enumerate(Overload.traceback_lines()):
if args_syntax in line:
args.append((i, line.find(args_syntax)))
if no_args_syntax in line:
no_args.append((i, line.find(no_args_syntax)))
args, no_args = max(args), max(no_args)
if sum(args)+sum(no_args) == -4:
return False
return args >= no_args | returns true if the decorator invocation
had arguments passed to it before being
sent a function to decorate |
389,106 | def getoptS(X, Y, M_E, E):
n, r = X.shape
C = np.dot(np.dot(X.T, M_E), Y)
C = C.flatten()
A = np.zeros((r * r, r * r))
for i in range(r):
for j in range(r):
ind = j * r + i
temp = np.dot(
np.dot(X.T, np.dot(X[:, i, None], Y[:, j, None].T) * E), Y)
A[:, ind] = temp.flatten()
S = np.linalg.solve(A, C)
return np.reshape(S, (r, r)).T | Find Sopt given X, Y |
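A hypothetical usage sketch of `getoptS`: given orthonormal factors X and Y and a masked observation matrix, the least-squares solve recovers the r-by-r core matrix. All data below is synthetic, and `numpy` is assumed imported as `np` as in the row above:

```python
import numpy as np

rng = np.random.RandomState(0)
n, m, r = 20, 15, 3
X = np.linalg.qr(rng.randn(n, r))[0]       # orthonormal row factor
Y = np.linalg.qr(rng.randn(m, r))[0]       # orthonormal column factor
E = (rng.rand(n, m) < 0.5).astype(float)   # observation mask
M_E = (X @ rng.randn(r, r) @ Y.T) * E      # masked low-rank observations

S = getoptS(X, Y, M_E, E)
# The reconstruction should match the observed entries almost exactly.
print(np.linalg.norm((X @ S @ Y.T) * E - M_E))
```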
389,107 | def clear_plot(self):
self.tab_plot.clear()
self.tab_plot.draw()
self.save_plot.set_enabled(False) | Clear plot display. |
389,108 | def get_score(self, fmap='', importance_type='weight'):
if getattr(self, 'booster', None) is not None and self.booster not in {'gbtree', 'dart'}:
raise ValueError(
'Feature importance is not defined for Booster type {}'.format(self.booster))
allowed_importance_types = ['weight', 'gain', 'cover', 'total_gain', 'total_cover']
if importance_type not in allowed_importance_types:
msg = ("importance_type mismatch, got '{}', expected one of " +
repr(allowed_importance_types))
raise ValueError(msg.format(importance_type))
if importance_type == 'weight':
trees = self.get_dump(fmap, with_stats=False)
fmap = {}
for tree in trees:
for line in tree.split('\n'):
arr = line.split('[')
if len(arr) == 1:
continue
fid = arr[1].split(']')[0].split('<')[0]
if fid not in fmap:
fmap[fid] = 1
else:
fmap[fid] += 1
return fmap
average_over_splits = True
if importance_type == 'total_gain':
importance_type = 'gain'
average_over_splits = False
elif importance_type == 'total_cover':
importance_type = 'cover'
average_over_splits = False
trees = self.get_dump(fmap, with_stats=True)
importance_type += '='
fmap = {}
gmap = {}
for tree in trees:
for line in tree.split('\n'):
arr = line.split('[')
if len(arr) == 1:
continue
fid = arr[1].split(']')
g = float(fid[1].split(importance_type)[1].split(',')[0])
fid = fid[0].split('<')[0]
if fid not in fmap:
fmap[fid] = 1
gmap[fid] = g
else:
fmap[fid] += 1
gmap[fid] += g
if average_over_splits:
for fid in gmap:
gmap[fid] = gmap[fid] / fmap[fid]
return gmap | Get feature importance of each feature.
Importance type can be defined as:
* 'weight': the number of times a feature is used to split the data across all trees.
* 'gain': the average gain across all splits the feature is used in.
* 'cover': the average coverage across all splits the feature is used in.
* 'total_gain': the total gain across all splits the feature is used in.
* 'total_cover': the total coverage across all splits the feature is used in.
.. note:: Feature importance is defined only for tree boosters
Feature importance is only defined when the decision tree model is chosen as base
learner (`booster=gbtree`). It is not defined for other base learner types, such
as linear learners (`booster=gblinear`).
Parameters
----------
fmap: str (optional)
The name of feature map file.
importance_type: str, default 'weight'
One of the importance types defined above. |
389,109 | def power_down(self):
GPIO.output(self._pd_sck, False)
GPIO.output(self._pd_sck, True)
time.sleep(0.01)
return True | turn off the HX711
:return: always True
:rtype: bool |
389,110 | def qop(self):
def on_update(header_set):
if not header_set and 'qop' in self:
del self['qop']
elif header_set:
self['qop'] = header_set.to_header()
return parse_set_header(self.get('qop'), on_update) | Indicates what "quality of protection" the client has applied to
the message for HTTP digest auth. |
389,111 | def kmer_counter(seq, k=4):
if isinstance(seq, basestring):
return Counter(generate_kmers(seq, k)) | Return a sequence of all the unique substrings (k-mer or q-gram) within a short (<128 symbol) string
Used for algorithms like UniqTag for genome unique identifier locality sensitive hashing.
jellyfish is a C implementation of k-mer counting
If seq is a string generate a sequence of k-mer string
If seq is a sequence of strings then generate a sequence of generators or sequences of k-mer strings
If seq is a sequence of sequences of strings generate a sequence of sequence of generators ...
Default k = 4 because that's the length of a gene base-pair?
>>> kmer_counter('AGATAGATAGACACAGAAATGGGACCACAC') == Counter({'ACAC': 2, 'ATAG': 2, 'CACA': 2,
... 'TAGA': 2, 'AGAT': 2, 'GATA': 2, 'AGAC': 1, 'ACAG': 1, 'AGAA': 1, 'AAAT': 1, 'TGGG': 1, 'ATGG': 1,
... 'ACCA': 1, 'GGAC': 1, 'CCAC': 1, 'CAGA': 1, 'GAAA': 1, 'GGGA': 1, 'GACA': 1, 'GACC': 1, 'AATG': 1})
True |
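The row above calls a `generate_kmers` helper that is not shown. A plausible minimal sketch of its assumed behaviour (a simple sliding window):

```python
from collections import Counter

def generate_kmers(seq, k=4):
    # Slide a window of width k across the string, yielding each substring.
    for i in range(len(seq) - k + 1):
        yield seq[i:i + k]

assert Counter(generate_kmers('AGATAG', 4)) == Counter({'AGAT': 1, 'GATA': 1, 'ATAG': 1})
```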
389,112 | def _allocate_address(self, instance, network_ids):
with OpenStackCloudProvider.__node_start_lock:
try:
free_ips = [ip for ip in self.nova_client.floating_ips.list() if not ip.fixed_ip]
if not free_ips:
free_ips.append(self.nova_client.floating_ips.create())
except AttributeError:
free_ips = [ip for ip in
self.neutron_client.list_floatingips(fixed_ip_address='')['floatingips']
if ip['fixed_ip_address'] is None]
if not free_ips:
for network_id in network_ids:
try:
allocated_ip = self.neutron_client.create_floatingip(
{'floatingip': {'floating_network_id': network_id}})
except BadNeutronRequest as err:
log.debug(
"Failed allocating floating IP on network %s: %s",
network_id, err)
if allocated_ip:
free_ips.append(allocated_ip)
break
else:
continue
if free_ips:
ip = free_ips.pop()
else:
raise RuntimeError(
"Could not allocate floating IP for VM {0}"
.format(instance.id))
instance.add_floating_ip(ip)
return ip.ip | Allocates a floating/public ip address to the given instance.
:param instance: instance to assign address to
:param list network_ids: List of IDs (as strings) of networks where to
request allocation of the floating IP.
:return: public ip address |
389,113 | def DeleteDatabase(self, database_link, options=None):
if options is None:
options = {}
path = base.GetPathFromLink(database_link)
database_id = base.GetResourceIdOrFullNameFromLink(database_link)
return self.DeleteResource(path,
'dbs',
database_id,
None,
options) | Deletes a database.
:param str database_link:
The link to the database.
:param dict options:
The request options for the request.
:return:
The deleted Database.
:rtype:
dict |
389,114 | def get_masters(ppgraph):
masters = {}
for protein, peps in ppgraph.items():
ismaster = True
peps = set(peps)
multimaster = set()
for subprotein, subpeps in ppgraph.items():
if protein == subprotein:
continue
if peps.issubset(subpeps):
if peps.union(subpeps) > peps:
ismaster = False
break
elif peps.intersection(subpeps) == peps:
multimaster.update({protein, subprotein})
if not ismaster:
continue
elif multimaster:
premaster = sorted(list(multimaster))[0]
else:
premaster = protein
for pep in peps:
try:
masters[pep].add(premaster)
except KeyError:
masters[pep] = {premaster}
return masters | From a protein-peptide graph dictionary (keys proteins,
values peptides), return master proteins aka those which
have no proteins whose peptides are supersets of them.
If shared master proteins are found, report only the first,
we will sort the whole proteingroup later anyway. In that
case, the master reported here may be temporary. |
389,115 | def construct_codons_dict(alphabet_file = None):
c_symbol = line.split(':', 1)[0].strip(''.join(protected_symbols))
if symbol in codons_dict.keys():
print symbol + " is already used as a symbol for codons: "
print codons_dict[symbol]
continue
elif not len(symbol) == 1:
print symbol + " is not a single character and cannot be used as a symbol"
continue
elif symbol in protected_symbols:
print symbol + " is a protected character"
continue
current_codon_collection = set()
for x in expanded_alphabet[symbol]:
if x in codons_dict.keys():
current_codon_collection = current_codon_collection.union(codons_dict[x])
elif x.upper() in codons:
current_codon_collection.add(x.upper())
elif len(x) == 0:
continue
else:
continue
codons_dict[symbol] = list(current_codon_collection)
return codons_dict | Generate the codons_dict dictionary, mapping allowed symbols to codons.
syntax of custom alphabet_files:
char: list,of,amino,acids,or,codons,separated,by,commas
Parameters
----------
alphabet_file : str
File name for a custom alphabet definition. If no file is provided, the
default alphabet is used, i.e. standard amino acids, undetermined amino
acids (B, J, X, and Z), and single codon symbols.
Returns
-------
codons_dict : dict
Dictionary, keyed by the allowed 'amino acid' symbols with the values
being lists of codons corresponding to the symbol. |
389,116 | def singleton(*args, **kwargs):
def decorator(cls: type) -> Callable[[], object]:
if issubclass(type(cls), _SingletonMetaClassBase):
raise TypeError()
box = _Box()
factory = None
lock = Lock()
def metaclass_call(_):
if box.value is None:
with lock:
if box.value is None:
instance = cls(*args, **kwargs)
instance.__class__ = factory
box.value = (instance, )
return box.value[0]
def _is_init(*_):
return box.value is not None
SingletonMetaClass = type('SingletonMetaClass', (type(cls), _SingletonMetaClassBase), {
'__slots__': (),
'__call__': metaclass_call
})
factory = SingletonMetaClass(cls.__name__, (cls, ), {
'__slots__': (),
'is_init': _is_init
})
})
return update_wrapper(factory, cls, updated=())
return decorator | a lazy init singleton pattern.
usage:
``` py
@singleton()
class X: ...
```
`args` and `kwargs` will pass to ctor of `X` as args. |
389,117 | def get_icloud_folder_location():
yosemite_icloud_path = '~/Library/Mobile Documents/com~apple~CloudDocs/'
icloud_home = os.path.expanduser(yosemite_icloud_path)
if not os.path.isdir(icloud_home):
error('Unable to find your iCloud Drive =(')
return str(icloud_home) | Try to locate the iCloud Drive folder.
Returns:
(str) Full path to the iCloud Drive folder. |
389,118 | def unassigned(data, as_json=False):
no_subusers = set()
if not isinstance(data, list):
return format_ret(no_subusers, as_json=as_json)
for current in data:
num_subusers = len(current["subusers"])
if num_subusers == 0:
current_ip = current["ip"]
no_subusers.add(current_ip)
ret_val = format_ret(no_subusers, as_json=as_json)
return ret_val | https://sendgrid.com/docs/API_Reference/api_v3.html#ip-addresses
The /ips rest endpoint returns information about the IP addresses
and the usernames assigned to an IP
unassigned returns a listing of the IP addresses that are allocated
but have 0 users assigned
data (response.body from sg.client.ips.get())
as_json False -> get list of dicts
True -> get json object
example:
sg = sendgrid.SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
params = {
'subuser': 'test_string',
'ip': 'test_string',
'limit': 1,
'exclude_whitelabels':
'true', 'offset': 1
}
response = sg.client.ips.get(query_params=params)
if response.status_code == 201:
data = response.body
unused = unassigned(data) |
389,119 | def count_objects_by_tags(self, metric, scraper_config):
config = self.object_count_params[metric.name]
metric_name = "{}.{}".format(scraper_config[], config[])
object_counter = Counter()
for sample in metric.samples:
tags = [
self._label_to_tag(l, sample[self.SAMPLE_LABELS], scraper_config) for l in config['allowed_labels']
] + scraper_config['custom_tags']
object_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
for tags, count in iteritems(object_counter):
self.gauge(metric_name, count, tags=list(tags)) | Count objects by whitelisted tags and submit counts as gauges. |
389,120 | def _set_copy(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=copy.copy, is_container=, presence=False, yang_name="copy", rest_name="copy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__copy = t
if hasattr(self, ):
self._set() | Setter method for copy, mapped from YANG variable /copy (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_copy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_copy() directly. |
389,121 | def has_subdirectories(path, include, exclude, show_all):
try:
return len( listdir(path, include, exclude,
show_all, folders_only=True) ) > 1
except (IOError, OSError):
return False | Return True if path has subdirectories |
389,122 | def rvs(self, size=1, param=None):
if param is not None:
dtype = [(param, float)]
else:
dtype = [(p, float) for p in self.params]
arr = numpy.zeros(size, dtype=dtype)
for (p,_) in dtype:
log_low = numpy.log10(self._bounds[p][0])
log_high = numpy.log10(self._bounds[p][1])
arr[p] = 10.0**(numpy.random.uniform(log_low, log_high, size=size))
return arr | Gives a set of random values drawn from this distribution.
Parameters
----------
size : {1, int}
The number of values to generate; default is 1.
param : {None, string}
If provided, will just return values for the given parameter.
Otherwise, returns random values for each parameter.
Returns
-------
structured array
The random values in a numpy structured array. If a param was
specified, the array will only have an element corresponding to the
given parameter. Otherwise, the array will have an element for each
parameter in self's params. |
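For illustration, the log-space draw in `rvs` can be reproduced standalone: sample uniformly between the log10 bounds and exponentiate, which gives roughly equal probability mass per decade:

```python
import numpy as np

low, high = 1e-3, 1e2
samples = 10.0 ** np.random.uniform(np.log10(low), np.log10(high), size=10000)
assert low <= samples.min() and samples.max() <= high
# A plain uniform draw over [1e-3, 1e2] would put ~99% of samples above 1.0;
# this draw spreads them evenly across the five decades.
```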
389,123 | def gifs_categories_category_get(self, api_key, category, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.gifs_categories_category_get_with_http_info(api_key, category, **kwargs)
else:
(data) = self.gifs_categories_category_get_with_http_info(api_key, category, **kwargs)
return data | Category Tags Endpoint.
Returns a list of tags for a given category. NOTE `limit` and `offset` must both be set; otherwise they're ignored.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.gifs_categories_category_get(api_key, category, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str api_key: Giphy API Key. (required)
:param str category: Filters results by category. (required)
:param int limit: The maximum number of records to return.
:param int offset: An optional results offset. Defaults to 0.
:return: InlineResponse2004
If the method is called asynchronously,
returns the request thread. |
389,124 | def get_hash(self):
if self.__index_hash:
return self.__index_hash
key = self.request.method
key += URLHelper.get_protocol(self.request.url)
key += URLHelper.get_subdomain(self.request.url)
key += URLHelper.get_hostname(self.request.url)
key += URLHelper.get_tld(self.request.url)
key += URLHelper.get_path(self.request.url)
key += str(URLHelper.get_ordered_params(self.request.url))
if self.request.data is not None:
key += str(self.request.data.keys())
self.__index_hash = key
return self.__index_hash | Generate and return the dict index hash of the given queue item.
Note:
Cookies should not be included in the hash calculation because
otherwise requests are crawled multiple times with e.g. different
session keys, causing infinite crawling recursion.
Note:
At this moment the keys do not actually get hashed since it works perfectly without and
since hashing the keys requires us to built hash collision management.
Returns:
str: The hash of the given queue item. |
389,125 | def page_strip(page, versioned):
page.pop('ResponseMetadata', None)
contents_key = versioned and 'Versions' or 'Contents'
contents = page.get(contents_key, ())
if versioned:
keys = []
for k in contents:
if k['IsLatest']:
keys.append((k['Key'], k['VersionId'], True))
else:
keys.append((k['Key'], k['VersionId']))
return keys
else:
return [k['Key'] for k in contents]
if not contents:
return page
for k in contents:
k.pop('Owner', None)
k.pop('LastModified', None)
k.pop('ETag', None)
k.pop('StorageClass', None)
k.pop('Size', None)
return page | Remove bits in content results to minimize memory utilization.
TODO: evolve this to a key filter on metadata, like date |
389,126 | def _wrapinstance(ptr, base=None):
assert isinstance(ptr, long), "Argument must be of type <long>"
assert (base is None) or issubclass(base, Qt.QtCore.QObject), (
"Argument must be of type <QObject>")
if Qt.IsPyQt4 or Qt.IsPyQt5:
func = getattr(Qt, "_sip").wrapinstance
elif Qt.IsPySide2:
func = getattr(Qt, "_shiboken2").wrapInstance
elif Qt.IsPySide:
func = getattr(Qt, "_shiboken").wrapInstance
else:
raise AttributeError(" has no attribute ")
if base is None:
q_object = func(long(ptr), Qt.QtCore.QObject)
meta_object = q_object.metaObject()
class_name = meta_object.className()
super_class_name = meta_object.superClass().className()
if hasattr(Qt.QtWidgets, class_name):
base = getattr(Qt.QtWidgets, class_name)
elif hasattr(Qt.QtWidgets, super_class_name):
base = getattr(Qt.QtWidgets, super_class_name)
else:
base = Qt.QtCore.QObject
return func(long(ptr), base) | Enable implicit cast of pointer to most suitable class
This behaviour is available in sip per default.
Based on http://nathanhorne.com/pyqtpyside-wrap-instance
Usage:
This mechanism kicks in under these circumstances.
1. Qt.py is using PySide 1 or 2.
2. A `base` argument is not provided.
See :func:`QtCompat.wrapInstance()`
Arguments:
ptr (long): Pointer to QObject in memory
base (QObject, optional): Base class to wrap with. Defaults to QObject,
which should handle anything. |
389,127 | def _init_client():
global client, path_prefix
if client is not None:
return
etcd_kwargs = {
'host': __opts__.get('etcd.host', '127.0.0.1'),
'port': __opts__.get('etcd.port', 2379),
'protocol': __opts__.get('etcd.protocol', 'http'),
'allow_reconnect': __opts__.get('etcd.allow_reconnect', True),
'allow_redirect': __opts__.get('etcd.allow_redirect', False),
'srv_domain': __opts__.get('etcd.srv_domain', None),
'read_timeout': __opts__.get('etcd.read_timeout', 60),
'username': __opts__.get('etcd.username', None),
'password': __opts__.get('etcd.password', None),
'cert': __opts__.get('etcd.cert', None),
'ca_cert': __opts__.get('etcd.ca_cert', None),
}
path_prefix = __opts__.get('etcd.path_prefix', _DEFAULT_PATH_PREFIX)
if path_prefix != "":
path_prefix = '/{0}'.format(path_prefix.strip('/'))
log.info("etcd: Setting up client with params: %r", etcd_kwargs)
client = etcd.Client(**etcd_kwargs)
try:
client.read(path_prefix)
except etcd.EtcdKeyNotFound:
log.info("etcd: Creating dir %r", path_prefix)
client.write(path_prefix, None, dir=True) | Setup client and init datastore. |
389,128 | def logtrick_minimizer(minimizer):
@wraps(minimizer)
def new_minimizer(fun, x0, jac=True, bounds=None, **minimizer_kwargs):
if bounds is None:
return minimizer(fun, x0, jac=jac, bounds=bounds,
**minimizer_kwargs)
logx, expx, gradx, bounds = _logtrick_gen(bounds)
if callable(jac):
def new_jac(x, *fargs, **fkwargs):
return gradx(jac(expx(x), *fargs, **fkwargs), x)
else:
new_jac = jac
if (not callable(jac)) and bool(jac):
def new_fun(x, *fargs, **fkwargs):
o, g = fun(expx(x), *fargs, **fkwargs)
return o, gradx(g, x)
else:
def new_fun(x, *fargs, **fkwargs):
return fun(expx(x), *fargs, **fkwargs)
result = minimizer(new_fun, logx(x0), jac=new_jac, bounds=bounds,
**minimizer_kwargs)
result['x'] = expx(result['x'])
return result
return new_minimizer | r"""
Log-Trick decorator for optimizers.
This decorator implements the "log trick" for optimizing positive bounded
variables. It will apply this trick for any variables that correspond to a
Positive() bound.
Examples
--------
>>> from scipy.optimize import minimize as sp_min
>>> from ..btypes import Bound, Positive
Here is an example where we may want to enforce a particular parameter or
parameters to be strictly greater than zero,
>>> def cost(w, lambda_):
... sq_norm = w.T.dot(w)
... return .5 * lambda_ * sq_norm, lambda_ * w
Now let's enforce that the `w` are positive,
>>> bounds = [Positive(), Positive(), Positive()]
>>> new_min = logtrick_minimizer(sp_min)
Initial values
>>> w_0 = np.array([.5, .1, .2])
>>> lambda_0 = .25
>>> res = new_min(cost, w_0, args=(lambda_0,), bounds=bounds,
... method='L-BFGS-B', jac=True)
>>> res.x >= 0
array([ True, True, True], dtype=bool)
Note
----
This decorator only works on unstructured optimizers. However, it can be
use with structured_minimizer, so long as it is the inner wrapper. |
389,129 | def get_path(self):
md5_hash = hashlib.md5(self.task_id.encode()).hexdigest()
logger.debug(, md5_hash, self.task_id)
return os.path.join(self.temp_dir, str(self.unique.value), md5_hash) | Returns a temporary file path based on an MD5 hash generated from the task's name and its arguments |
389,130 | def diri(table):
t = []
for i in table:
a = [j + 1 for j in i]
t.append(np.ndarray.tolist(np.random.mtrand.dirichlet(a)))
return t | from SparCC - "randomly draw from the corresponding posterior
Dirichlet distribution with a uniform prior" |
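A short usage sketch of `diri` (assuming `numpy` is imported as `np`, as the row requires): each input row of counts yields one posterior draw of proportions that sums to 1:

```python
table = [[0, 3, 7], [5, 5, 0]]
draws = diri(table)
for row in draws:
    assert abs(sum(row) - 1.0) < 1e-9  # each draw is a probability vector
```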
389,131 | def _Build(self, storage_file):
self._index = {}
for event_tag in storage_file.GetEventTags():
self.SetEventTag(event_tag) | Builds the event tag index.
Args:
storage_file (BaseStorageFile): storage file. |
389,132 | def process_tls(self, data, name):
ret = []
try:
lines = [x.strip() for x in data.split('\n')]
for idx, line in enumerate(lines):
if line == '':
continue
sub = self.process_host(line, name, idx)
if sub is not None:
ret.append(sub)
except Exception as e:
logger.error('Error processing TLS data %s: %s' % (name, e))
self.roca.trace_logger.log(e)
return ret | Remote TLS processing - one address:port per line
:param data:
:param name:
:return: |
389,133 | def get_sdk_dir(self):
try:
return self._sdk_dir
except AttributeError:
sdk_dir = self.find_sdk_dir()
self._sdk_dir = sdk_dir
return sdk_dir | Return the MSSSDK given the version string. |
389,134 | def fetch(bank, key):
c_key = '{0}/{1}'.format(bank, key)
try:
_, value = api.kv.get(c_key)
if value is None:
return {}
return __context__['serial'].loads(value['Value'])
except Exception as exc:
raise SaltCacheError(
'There was an error reading the key, {0}: {1}'.format(
c_key, exc
)
) | Fetch a key value. |
389,135 | def pluralize(data_type):
known = {
u"address": u"addresses",
u"company": u"companies"
}
if data_type in known.keys():
return known[data_type]
else:
return u"%ss" % data_type | adds s to the data type or the correct english plural form |
389,136 | def __get_pid_by_scanning(self):
dwProcessId = None
dwThreadId = self.get_tid()
with win32.CreateToolhelp32Snapshot(win32.TH32CS_SNAPTHREAD) as hSnapshot:
te = win32.Thread32First(hSnapshot)
while te is not None:
if te.th32ThreadID == dwThreadId:
dwProcessId = te.th32OwnerProcessID
break
te = win32.Thread32Next(hSnapshot)
if dwProcessId is None:
msg = "Cannot find thread ID %d in any process" % dwThreadId
raise RuntimeError(msg)
return dwProcessId | Internally used by get_pid(). |
389,137 | def create_app(config_name):
app = Flask(__name__)
app.config.from_object(CONFIG[config_name])
BOOTSTRAP.init_app(app)
from flask_seguro.controllers.main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app | Factory Function |
389,138 | async def promote_chat_member(self, chat_id: typing.Union[base.Integer, base.String],
user_id: base.Integer,
can_change_info: typing.Union[base.Boolean, None] = None,
can_post_messages: typing.Union[base.Boolean, None] = None,
can_edit_messages: typing.Union[base.Boolean, None] = None,
can_delete_messages: typing.Union[base.Boolean, None] = None,
can_invite_users: typing.Union[base.Boolean, None] = None,
can_restrict_members: typing.Union[base.Boolean, None] = None,
can_pin_messages: typing.Union[base.Boolean, None] = None,
can_promote_members: typing.Union[base.Boolean, None] = None) -> base.Boolean:
payload = generate_payload(**locals())
result = await self.request(api.Methods.PROMOTE_CHAT_MEMBER, payload)
return result | Use this method to promote or demote a user in a supergroup or a channel.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Pass False for all boolean parameters to demote a user.
Source: https://core.telegram.org/bots/api#promotechatmember
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param user_id: Unique identifier of the target user
:type user_id: :obj:`base.Integer`
:param can_change_info: Pass True, if the administrator can change chat title, photo and other settings
:type can_change_info: :obj:`typing.Union[base.Boolean, None]`
:param can_post_messages: Pass True, if the administrator can create channel posts, channels only
:type can_post_messages: :obj:`typing.Union[base.Boolean, None]`
:param can_edit_messages: Pass True, if the administrator can edit messages of other users, channels only
:type can_edit_messages: :obj:`typing.Union[base.Boolean, None]`
:param can_delete_messages: Pass True, if the administrator can delete messages of other users
:type can_delete_messages: :obj:`typing.Union[base.Boolean, None]`
:param can_invite_users: Pass True, if the administrator can invite new users to the chat
:type can_invite_users: :obj:`typing.Union[base.Boolean, None]`
:param can_restrict_members: Pass True, if the administrator can restrict, ban or unban chat members
:type can_restrict_members: :obj:`typing.Union[base.Boolean, None]`
:param can_pin_messages: Pass True, if the administrator can pin messages, supergroups only
:type can_pin_messages: :obj:`typing.Union[base.Boolean, None]`
:param can_promote_members: Pass True, if the administrator can add new administrators
with a subset of his own privileges or demote administrators that he has promoted,
directly or indirectly (promoted by administrators that were appointed by him)
:type can_promote_members: :obj:`typing.Union[base.Boolean, None]`
:return: Returns True on success
:rtype: :obj:`base.Boolean` |
389,139 | def copy(
ctx,
opts,
owner_repo_package,
destination,
skip_errors,
wait_interval,
no_wait_for_sync,
sync_attempts,
):
owner, source, slug = owner_repo_package
click.echo(
"Copying %(slug)s package from %(source)s to %(dest)s ... "
% {
"slug": click.style(slug, bold=True),
"source": click.style(source, bold=True),
"dest": click.style(destination, bold=True),
},
nl=False,
)
context_msg = "Failed to copy package!"
with handle_api_exceptions(
ctx, opts=opts, context_msg=context_msg, reraise_on_error=skip_errors
):
with maybe_spinner(opts):
_, new_slug = copy_package(
owner=owner, repo=source, identifier=slug, destination=destination
)
click.secho("OK", fg="green")
if no_wait_for_sync:
return
wait_for_package_sync(
ctx=ctx,
opts=opts,
owner=owner,
repo=destination,
slug=new_slug,
wait_interval=wait_interval,
skip_errors=skip_errors,
attempts=sync_attempts,
) | Copy a package to another repository.
This requires appropriate permissions for both the source
repository/package and the destination repository.
- OWNER/REPO/PACKAGE: Specify the OWNER namespace (i.e. user or org), the
REPO name where the package is stored, and the PACKAGE name (slug) of the
package itself. All separated by a slash.
Example: 'your-org/awesome-repo/better-pkg'.
- DEST: Specify the DEST (destination) repository to copy the package to.
This *must* be in the same namespace as the source repository.
Example: 'other-repo'
Full CLI example:
$ cloudsmith cp your-org/awesome-repo/better-pkg other-repo |
389,140 | def _has_fileno(stream):
try:
stream.fileno()
except (AttributeError, OSError, IOError, io.UnsupportedOperation):
return False
return True | Returns whether the stream object seems to have a working fileno()
Tells whether _redirect_stderr is likely to work.
Parameters
----------
stream : IO stream object
Returns
-------
has_fileno : bool
True if stream.fileno() exists and doesn't raise OSError or
UnsupportedOperation |
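A quick illustration of the distinction `_has_fileno` draws (assuming a normal interpreter where stdout is backed by an OS file descriptor):

```python
import io
import sys

assert _has_fileno(sys.__stdout__)     # real file: fileno() returns an fd
assert not _has_fileno(io.StringIO())  # in-memory: fileno() raises UnsupportedOperation
```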
389,141 | def log_interp1d(self, xx, yy, kind='linear'):
logx = np.log10(xx)
logy = np.log10(yy)
lin_interp = interp1d(logx, logy, kind=kind)
log_interp = lambda zz: np.power(10.0, lin_interp(np.log10(zz)))
return log_interp | Performs a log space 1d interpolation.
:param xx: the x values.
:param yy: the y values.
:param kind: the type of interpolation to apply (as per scipy interp1d)
:return: the interpolation function. |
389,142 | def protocol_version_to_kmip_version(value):
if not isinstance(value, ProtocolVersion):
return None
if value.major == 1:
if value.minor == 0:
return enums.KMIPVersion.KMIP_1_0
elif value.minor == 1:
return enums.KMIPVersion.KMIP_1_1
elif value.minor == 2:
return enums.KMIPVersion.KMIP_1_2
elif value.minor == 3:
return enums.KMIPVersion.KMIP_1_3
elif value.minor == 4:
return enums.KMIPVersion.KMIP_1_4
else:
return None
else:
return None | Convert a ProtocolVersion struct to its KMIPVersion enumeration equivalent.
Args:
value (ProtocolVersion): A ProtocolVersion struct to be converted into
a KMIPVersion enumeration.
Returns:
KMIPVersion: The enumeration equivalent of the struct. If the struct
cannot be converted to a valid enumeration, None is returned. |
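A hypothetical usage sketch, assuming a `ProtocolVersion` struct exposing the `major`/`minor` attributes the function reads (the constructor signature is an assumption):

```python
v = ProtocolVersion(major=1, minor=2)  # hypothetical constructor call
assert protocol_version_to_kmip_version(v) == enums.KMIPVersion.KMIP_1_2
assert protocol_version_to_kmip_version("1.2") is None  # wrong type -> None
assert protocol_version_to_kmip_version(ProtocolVersion(major=2, minor=0)) is None  # unmapped
```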
389,143 | def plot_element_profile(self, element, comp, show_label_index=None,
xlim=5):
plt = pretty_plot(12, 8)
pd = self._pd
evolution = pd.get_element_profile(element, comp)
num_atoms = evolution[0]["reaction"].reactants[0].num_atoms
element_energy = evolution[0]['chempot']
for i, d in enumerate(evolution):
v = -(d["chempot"] - element_energy)
print ("index= %s, -\u0394\u03BC=%.4f(eV)," % (i, v), d["reaction"])
if i != 0:
plt.plot([x2, x2], [y1, d["evolution"] / num_atoms],
, linewidth=2.5)
x1 = v
y1 = d["evolution"] / num_atoms
if i != len(evolution) - 1:
x2 = - (evolution[i + 1]["chempot"] - element_energy)
else:
x2 = 5.0
if show_label_index is not None and i in show_label_index:
products = [re.sub(r"(\d+)", r"$_{\1}$", p.reduced_formula)
for p in d["reaction"].products
if p.reduced_formula != element.symbol]
plt.annotate(", ".join(products), xy=(v + 0.05, y1 + 0.05),
fontsize=24, color='r')
plt.plot([x1, x2], [y1, y1], 'r', linewidth=3)
else:
plt.plot([x1, x2], [y1, y1], 'k', linewidth=2.5)
plt.xlim((0, xlim))
plt.xlabel("-$\\Delta{\\mu}$ (eV)")
plt.ylabel("Uptake per atom")
return plt | Draw the element profile plot for a composition varying different
chemical potential of an element.
X value is the negative value of the chemical potential reference to
elemental chemical potential. For example, if choose Element("Li"),
X= -(µLi-µLi0), which corresponds to the voltage versus metal anode.
Y values represent for the number of element uptake in this composition
(unit: per atom). All reactions are printed to help choosing the
profile steps you want to show label in the plot.
Args:
element (Element): An element of which the chemical potential is
considered. It also must be in the phase diagram.
comp (Composition): A composition.
show_label_index (list of integers): The labels for reaction products
you want to show in the plot. Default to None (not showing any
annotation for reaction products). For the profile steps you want
to show the labels, just add it to the show_label_index. The
profile step counts from zero. For example, you can set
show_label_index=[0, 2, 5] to label profile step 0,2,5.
xlim (float): The max x value. x value is from 0 to xlim. Default to
5 eV.
Returns:
Plot of element profile evolution by varying the chemical potential
of an element. |
389,144 | def create_presentation(self):
if not self.overwrite and os.path.exists(self.output):
raise ConversionError("File %s already exist and --overwrite not specified" % self.output)
video = self.download_video()
raw_slides = self.download_slides()
png_slides = self._convert_slides(raw_slides)
frame_pattern = self._prepare_frames(png_slides)
return self._assemble(video, frame_pattern) | Create the presentation.
The audio track is mixed with the slides. The resulting file is saved as self.output
DownloadError is raised if some resources cannot be fetched.
ConversionError is raised if the final video cannot be created. |
389,145 | async def getStickerSet(self, name):
p = _strip(locals())
return await self._api_request('getStickerSet', _rectify(p)) | See: https://core.telegram.org/bots/api#getstickerset |
389,146 | def fromDatetime(klass, dtime):
self = klass.__new__(klass)
if dtime.tzinfo is not None:
self._time = dtime.astimezone(FixedOffset(0, 0)).replace(tzinfo=None)
else:
self._time = dtime
self.resolution = datetime.timedelta.resolution
return self | Return a new Time instance from a datetime.datetime instance.
If the datetime instance does not have an associated timezone, it is
assumed to be UTC. |
389,147 | def fit(self, Z, **fit_params):
Zt, fit_params = self._pre_transform(Z, **fit_params)
self.steps[-1][-1].fit(Zt, **fit_params)
Zt.unpersist()
return self | Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
Z : ArrayRDD, TupleRDD or DictRDD
Input data in blocked distributed format.
Returns
-------
self : SparkPipeline |
389,148 | def request(self, method, url, params=None, **aio_kwargs):
oparams = {
'oauth_consumer_key': self.consumer_key,
'oauth_nonce': sha1(str(RANDOM()).encode()).hexdigest(),
'oauth_signature_method': self.signature.name,
'oauth_timestamp': str(int(time.time())),
'oauth_version': self.version,
}
oparams.update(params or {})
if self.oauth_token:
oparams['oauth_token'] = self.oauth_token
url = self._get_url(url)
if urlsplit(url).query:
raise ValueError(
'Request URL should not contain a query string')
oparams['oauth_signature'] = self.signature.sign(
self.consumer_secret, method, url,
oauth_token_secret=self.oauth_token_secret, **oparams)
self.logger.debug("%s %s", url, oparams)
return self._request(method, url, params=oparams, **aio_kwargs) | Make a request to provider. |
389,149 | def heading_title(self):
art_title = self.article.root.xpath('./front/article-meta/title-group/article-title')[0]
article_title = deepcopy(art_title)
article_title.tag = 'h1'
article_title.attrib['id'] = 'title'
article_title.attrib['class'] = 'article-title'
return article_title | Makes the Article Title for the Heading.
Metadata element, content derived from FrontMatter |
389,150 | def create_new_dispatch(self, dispatch):
self._validate_uuid(dispatch.dispatch_id)
url = "/notification/v1/dispatch"
post_response = NWS_DAO().postURL(
url, self._write_headers(), self._json_body(dispatch.json_data()))
if post_response.status != 200:
raise DataFailureException(
url, post_response.status, post_response.data)
return post_response.status | Create a new dispatch
:param dispatch:
is the new dispatch that the client wants to create |
389,151 | def _get_function_name(self, fn, default="None"):
if fn is None:
fn_name = default
else:
fn_name = fn.__name__
return fn_name | Return name of function, using default value if function not defined |
389,152 | def _get_ssh_client(self):
return ipa_utils.get_ssh_client(
self.instance_ip,
self.ssh_private_key_file,
self.ssh_user,
timeout=self.timeout
) | Return a new or existing SSH client for given ip. |
389,153 | def default(self, request, exception):
self.log(format_exc())
try:
url = repr(request.url)
except AttributeError:
url = "unknown"
response_message = "Exception occurred while handling uri: %s"
logger.exception(response_message, url)
if issubclass(type(exception), SanicException):
return text(
"Error: {}".format(exception),
status=getattr(exception, "status_code", 500),
headers=getattr(exception, "headers", dict()),
)
elif self.debug:
html_output = self._render_traceback_html(exception, request)
return html(html_output, status=500)
else:
return html(INTERNAL_SERVER_ERROR_HTML, status=500) | Provide a default behavior for the objects of :class:`ErrorHandler`.
If a developer chooses to extent the :class:`ErrorHandler` they can
provide a custom implementation for this method to behave in a way
they see fit.
:param request: Incoming request
:param exception: Exception object
:type request: :class:`sanic.request.Request`
:type exception: :class:`sanic.exceptions.SanicException` or
:class:`Exception`
:return: |
389,154 | def _nth_of_quarter(self, nth, day_of_week):
if nth == 1:
return self.first_of("quarter", day_of_week)
dt = self.replace(self.year, self.quarter * 3, 1)
last_month = dt.month
year = dt.year
dt = dt.first_of("quarter")
for i in range(nth - (1 if dt.day_of_week == day_of_week else 0)):
dt = dt.next(day_of_week)
if last_month < dt.month or year != dt.year:
return False
return self.set(self.year, dt.month, dt.day) | Modify to the given occurrence of a given day of the week
in the current quarter. If the calculated occurrence is outside,
the scope of the current quarter, then return False and no
modifications are made. Use the supplied consts
to indicate the desired day_of_week, ex. pendulum.MONDAY.
:type nth: int
:type day_of_week: int or None
:rtype: Date |
389,155 | def add(self, element, multiplicity=1):
if multiplicity < 1:
raise ValueError("Multiplicity must be positive")
self._elements[element] += multiplicity
self._total += multiplicity | Adds an element to the multiset.
>>> ms = Multiset()
>>> ms.add('a')
>>> sorted(ms)
['a']
An optional multiplicity can be specified to define how many of the element are added:
>>> ms.add('b', 2)
>>> sorted(ms)
['a', 'b', 'b']
This extends the :meth:`MutableSet.add` signature to allow specifying the multiplicity.
Args:
element:
The element to add to the multiset.
multiplicity:
The multiplicity i.e. count of elements to add. |
389,156 | def _unpack_basis_label_or_index(self, label_or_index):
self._check_basis_label_type(label_or_index)
if isinstance(label_or_index, str):
label = label_or_index
try:
ind = self.basis_labels.index(label)
except ValueError:
raise ValueError(
"%r is not one of the basis labels %r"
% (label, self.basis_labels))
elif isinstance(label_or_index, int):
ind = label_or_index
if ind < 0:
raise ValueError("Index %d must be >= 0" % ind)
if self.has_basis:
if ind >= self.dimension:
raise ValueError(
"Index %s must be < the dimension %d of Hilbert "
"space %s" % (ind, self.dimension, self))
label = self.basis_labels[label_or_index]
else:
label = str(label_or_index)
elif isinstance(label_or_index, SymbolicLabelBase):
label = label_or_index
try:
ind = label_or_index.fock_index
except AttributeError:
raise TypeError(
"label_or_index must define a fock_index attribute in "
"order to be used for identifying a level in a Hilbert "
"space")
else:
raise TypeError(
"label_or_index must be an int or str, or SymbolicLabelBase, "
"not %s" % type(label_or_index))
return label, ind | return tuple (label, ind) from `label_or_index`
If `label_or_int` is a :class:`.SymbolicLabelBase` sub-instance, it
will be stored in the `label` attribute, and the `ind` attribute will
return the value of the label's :attr:`.FockIndex.fock_index`
attribute. No checks are performed for symbolic labels.
:meth:`_check_basis_label_type` is called on `label_or_index`.
Raises:
ValueError: if `label_or_index` is a :class:`str` referencing an
invalid basis state; or, if `label_or_index` is an :class:`int`
< 0 or >= the dimension of the Hilbert space
BasisNotSetError: if `label_or_index` is a :class:`str`, but the
Hilbert space has no defined basis
TypeError: if `label_or_int` is not a :class:`str`, :class:`int`,
or :class:`.SymbolicLabelBase`, or more generally whatever
types are allowed through the `_basis_label_types` attribute of
the Hilbert space. |
389,157 | def p_primary_expr_no_brace_4(self, p):
if isinstance(p[2], self.asttypes.GroupingOp):
p[0] = p[2]
else:
p[0] = self.asttypes.GroupingOp(expr=p[2])
p[0].setpos(p) | primary_expr_no_brace : LPAREN expr RPAREN |
389,158 | def get_accent_string(string):
accents = list(filter(lambda accent: accent != Accent.NONE,
map(get_accent_char, string)))
return accents[-1] if accents else Accent.NONE | Get the first accent from the right of a string. |
389,159 | def is_modified(self):
if len(self.__modified_data__) or len(self.__deleted_fields__):
return True
for value in self.__original_data__.values():
try:
if value.is_modified():
return True
except AttributeError:
pass
return False | Returns whether model is modified or not |
389,160 | def ajax_preview(request, **kwargs):
data = {
"html": render_to_string("pinax/blog/_preview.html", {
"content": parse(request.POST.get("markup"))
})
}
return JsonResponse(data) | Currently only supports markdown |
389,161 | def from_array(self, coeffs, r0, errors=None, normalization=,
csphase=1, lmax=None, copy=True):
if _np.iscomplexobj(coeffs):
raise TypeError('The input array must be real.')
if type(normalization) != str:
raise ValueError('The normalization must be a string. '
'Input type was {:s}'.format(str(type(normalization))))
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', "
"or 'unnorm'. Input value was {:s}."
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be either 1 or -1. Input value was {:s}."
.format(repr(csphase))
)
if errors is not None:
if coeffs.shape != errors.shape:
raise ValueError(
"The shape of coeffs and errors must be the same."
"Shape of coeffs = {:s}, shape of errors = {:s}"
.format(repr(coeffs.shape), repr(errors.shape))
)
lmaxin = coeffs.shape[1] - 1
if lmax is None:
lmax = lmaxin
else:
if lmax > lmaxin:
lmax = lmaxin
if normalization.lower() == 'unnorm' and lmax > 85:
_warnings.warn("Calculations using unnormalized coefficients "
"are stable only for degrees less than or equal "
"to 85. lmax for the coefficients will be set to "
"85. Input value was {:d}.".format(lmax),
category=RuntimeWarning)
lmax = 85
if errors is not None:
clm = SHMagRealCoeffs(coeffs[:, 0:lmax+1, 0:lmax+1], r0=r0,
errors=errors[:, 0:lmax+1, 0:lmax+1],
normalization=normalization.lower(),
csphase=csphase, copy=copy)
else:
clm = SHMagRealCoeffs(coeffs[:, 0:lmax+1, 0:lmax+1], r0=r0,
normalization=normalization.lower(),
csphase=csphase, copy=copy)
return clm | Initialize the class with spherical harmonic coefficients from an input
array.
Usage
-----
x = SHMagCoeffs.from_array(array, r0, [errors, normalization, csphase,
lmax, copy])
Returns
-------
x : SHMagCoeffs class instance.
Parameters
----------
array : ndarray, shape (2, lmaxin+1, lmaxin+1).
The input spherical harmonic coefficients.
r0 : float
The reference radius of the spherical harmonic coefficients.
errors : ndarray, optional, default = None
The uncertainties of the spherical harmonic coefficients.
normalization : str, optional, default = 'schmidt'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
lmax : int, optional, default = None
The maximum spherical harmonic degree to include in the returned
class instance. This must be less than or equal to lmaxin.
copy : bool, optional, default = True
If True, make a copy of array when initializing the class instance.
If False, initialize the class instance with a reference to array.
Notes
-----
The coefficients in the input array are assumed to have units of nT. |
389,162 | def is_complete(self):
return all(p.name in self.values for p in self.parameters if p.required) | Do all required parameters have values? |
389,163 | def get_assessment_part_item_session(self, *args, **kwargs):
if not self.supports_assessment_part_lookup():
raise errors.Unimplemented()
if self._proxy_in_args(*args, **kwargs):
raise errors.InvalidArgument()
return sessions.AssessmentPartItemSession(runtime=self._runtime) | Gets the ``OsidSession`` associated with the assessment part item service.
return: (osid.assessment.authoring.AssessmentPartItemSession)
- an ``AssessmentPartItemSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_assessment_part_item()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_part_lookup()`` is ``true``.* |
389,164 | def create_list_stories(
list_id_stories, number_of_stories, shuffle, max_threads
):
list_stories = []
with ThreadPoolExecutor(max_workers=max_threads) as executor:
futures = {
executor.submit(get_story, new)
for new in list_id_stories[:number_of_stories]
}
for future in tqdm(
as_completed(futures),
desc='Getting stories',
unit='story',
):
list_stories.append(future.result())
if shuffle:
random.shuffle(list_stories)
return list_stories | Show in a formatted way the stories for each item of the list. |
389,165 | def GetAll(alias=None,location=None,session=None):
if not alias: alias = clc.v2.Account.GetAlias(session=session)
policies = []
policy_resp = clc.v2.API.Call('GET', 'antiAffinityPolicies/%s' % alias, {}, session=session)
for k in policy_resp:
r_val = policy_resp[k]
for r in r_val:
if r.get('location'):
if location and r['location'].lower()!=location.lower(): continue
servers = [obj['id'] for obj in r['links'] if obj['rel'] == "server"]
policies.append(AntiAffinity(id=r['id'],name=r['name'],location=r['location'],servers=servers,session=session))
return(policies) | Gets a list of anti-affinity policies within a given account.
https://t3n.zendesk.com/entries/44657214-Get-Anti-Affinity-Policies
>>> clc.v2.AntiAffinity.GetAll()
[<clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65e910>, <clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65ec90>] |
389,166 | def conversations_replies(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("conversations.replies", http_verb="GET", params=kwargs) | Retrieve a thread of messages posted to a conversation
Args:
channel (str): Conversation ID to fetch thread from. e.g. 'C1234567890'
ts (str): Unique identifier of a thread's parent message. e.g. '1234567890.123456' |
389,167 | def _find_value(key, *args):
for arg in args:
v = _get_value(arg, key)
if v is not None:
return v | Find a value for 'key' in any of the objects given as 'args' |
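`_get_value` is not shown in this row; a plausible minimal version supporting both dict lookup and attribute access (an assumption, not the library's actual helper):

```python
def _get_value(obj, key):
    # Dicts are queried by key, everything else by attribute.
    if isinstance(obj, dict):
        return obj.get(key)
    return getattr(obj, key, None)

assert _find_value('name', {}, {'name': 'a'}, {'name': 'b'}) == 'a'  # first match wins
assert _find_value('missing', {'name': 'a'}) is None
```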
389,168 | def initialize(self):
mkdir_p(self.archive_path)
mkdir_p(self.bin_path)
mkdir_p(self.codebase_path)
mkdir_p(self.input_basepath) | Create the laboratory directories. |
389,169 | def propose_value(self, value):
if self.proposed_value is None:
self.proposed_value = value
if self.leader:
self.current_accept_msg = Accept(self.network_uid, self.proposal_id, value)
return self.current_accept_msg | Sets the proposal value for this node iff this node is not already aware of
a previous proposal value. If the node additionally believes itself to be
the current leader, an Accept message will be returned |
389,170 | def handle_exc(exc):
err = ERRORS_TABLE.get(exc.pgcode)
if err:
abort(exceptions.InvalidQueryParams(**{
'detail': err,
'parameter': 'filter',
}))
abort(exceptions.DatabaseUnavailable) | Given a database exception determine how to fail
Attempt to lookup a known error & abort on a meaningful
error. Otherwise issue a generic DatabaseUnavailable exception.
:param exc: psycopg2 exception |
389,171 | def is_unwrapped(f):
try:
g = look_up(object_name(f))
return g != f and unwrap(g) == f
except (AttributeError, TypeError, ImportError):
return False | If `f` was imported and then unwrapped, this function might return True.
.. |is_unwrapped| replace:: :py:func:`is_unwrapped` |
389,172 | def put_metadata(self, key, value, namespace='default'):
entity = self.get_trace_entity()
if entity and entity.sampled:
entity.put_metadata(key, value, namespace) | Add metadata to the current active trace entity.
Metadata is not indexed but can be later retrieved
by BatchGetTraces API.
:param str namespace: optional. Default namespace is `default`.
It must be a string and prefix `AWS.` is reserved.
:param str key: metadata key under specified namespace
:param object value: any object that can be serialized into JSON string |
389,173 | def to_json(self):
res = dict()
res['count'] = self.count
res['messages'] = self.messages
res['forced'] = self.forced
res['keyboard'] = self.keyboard
res['entities'] = list()
for item in self.entities:
res['entities'].append(item.to_json())
res['forced_message'] = self.forced_message
return res | Serialize object to json dict
:return: dict |
389,174 | def page_url(self) -> str:
url = self.attributes['fullurl']
assert isinstance(url, str)
return url | (:class:`str`) The canonical url of the page. |
389,175 | def main():
opts = docopt(__doc__, version="cast 0.1")
cast = pychromecast.PyChromecast(CHROMECAST_HOST)
ramp = cast.get_protocol(pychromecast.PROTOCOL_RAMP)
time.sleep(SLEEP_TIME)
if ramp is None:
print
return 1
if opts['next']:
ramp.next()
elif opts['pause']:
ramp.pause()
elif opts['play']:
ramp.play()
elif opts['playpause']:
ramp.playpause()
elif opts['seek']:
ramp.seek(opts['<time>'])
elif opts['rewind']:
ramp.rewind()
elif opts['status']:
_status_command(cast, ramp)
elif opts['volume']:
_volume_command(ramp, opts['<value>'])
time.sleep(SLEEP_TIME) | Read the options given on the command line and do the required actions.
This method is used in the entry_point `cast`. |
389,176 | def batch(self, timelimit=None):
from .launcher import BatchLauncher
prev_dir = os.path.join(*self.workdir.split(os.path.sep)[:-1])
prev_dir = os.path.join(os.path.sep, prev_dir)
workdir = os.path.join(prev_dir, os.path.basename(self.workdir) + "_batch")
return BatchLauncher(workdir=workdir, flows=self).submit(timelimit=timelimit) | Run the flow in batch mode, return exit status of the job script.
Requires a manager.yml file and a batch_adapter adapter.
Args:
timelimit: Time limit (int with seconds or string with time given with the slurm convention:
"days-hours:minutes:seconds"). If timelimit is None, the default value specified in the
`batch_adapter` entry of `manager.yml` is used. |
389,177 | def describe_change_set(awsclient, change_set_name, stack_name):
client = awsclient.get_client('cloudformation')
status = None
while status not in ['CREATE_COMPLETE', 'FAILED']:
response = client.describe_change_set(
ChangeSetName=change_set_name,
StackName=stack_name)
status = response['Status']
if status == 'FAILED':
print(response['StatusReason'])
elif status == 'CREATE_COMPLETE':
for change in response['Changes']:
print(json2table(change['ResourceChange']))
This needs to run create_change_set first.
:param awsclient:
:param change_set_name:
:param stack_name: |
389,178 | def rpc_get_usages(self, filename, source, offset):
line, column = pos_to_linecol(source, offset)
uses = run_with_debug(jedi, 'usages',
source=source, line=line, column=column,
path=filename, encoding='utf-8')
if uses is None:
return None
result = []
for use in uses:
if use.module_path == filename:
offset = linecol_to_pos(source, use.line, use.column)
elif use.module_path is not None:
with open(use.module_path) as f:
text = f.read()
offset = linecol_to_pos(text, use.line, use.column)
result.append({"name": use.name,
"filename": use.module_path,
"offset": offset})
return result | Return the uses of the symbol at offset.
Returns a list of occurrences of the symbol, as dicts with the
fields name, filename, and offset. |
389,179 | def birch(args):
p = OptionParser(birch.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x6")
if len(args) != 2:
sys.exit(not p.print_help())
seqids, layout = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
K = Karyotype(fig, root, seqids, layout)
L = K.layout
xs = .79
dt = dict(rectangle=False, circle=False)
coords = {}
coords["Amborella"] = (xs, L[0].y)
coords["Vitis"] = (xs, L[1].y)
coords["Prunus"] = (xs, L[2].y)
coords["Betula"] = (xs, L[3].y)
coords["Populus"] = (xs, L[4].y)
coords["Arabidopsis"] = (xs, L[5].y)
coords["fabids"] = join_nodes(root, coords, "Prunus", "Betula", xs, **dt)
coords["malvids"] = join_nodes(root, coords, \
"Populus", "Arabidopsis", xs, **dt)
coords["rosids"] = join_nodes(root, coords, "fabids", "malvids", xs, **dt)
coords["eudicots"] = join_nodes(root, coords, "rosids", "Vitis", xs, **dt)
coords["angiosperm"] = join_nodes(root, coords, \
"eudicots", "Amborella", xs, **dt)
branch_length(root, coords["Amborella"], coords["angiosperm"], ">160.0")
branch_length(root, coords["eudicots"], coords["angiosperm"],
">78.2", va="top")
branch_length(root, coords["Vitis"], coords["eudicots"], "138.5")
branch_length(root, coords["rosids"], coords["eudicots"],
"19.8", va="top")
branch_length(root, coords["Prunus"], coords["fabids"],
"104.2", ha="right", va="top")
branch_length(root, coords["Arabidopsis"], coords["malvids"],
"110.2", va="top")
branch_length(root, coords["fabids"], coords["rosids"],
"19.8", ha="right", va="top")
branch_length(root, coords["malvids"], coords["rosids"],
"8.5", va="top")
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "birch"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | %prog birch seqids layout
Plot birch macro-synteny, with an embedded phylogenetic tree to the right. |
389,180 | def p_DictionaryMember(p):
p[0] = model.DictionaryMember(type=p[1], name=p[2], default=p[3]) | DictionaryMember : Type IDENTIFIER Default ";" |
389,181 | def url(self):
return urlresolvers.reverse(
"admin:%s_%s_change" % (self.content_type.app_label,
self.content_type.model),
args = (self.get_object().uid,)) | Return the admin url of the object. |
389,182 | def _reprJSON(self):
return {'__MzmlPrecursor__': (self.spectrumRef, self.activation,
self.isolationWindow, self.selectedIonList
)
} | Returns a JSON serializable represenation of a ``MzmlPrecursor``
class instance. Use :func:`maspy.core.MzmlPrecursor._fromJSON()` to
generate a new ``MzmlPrecursor`` instance from the return value.
:returns: a JSON serializable python object |
389,183 | def _fast_read(self, infile):
infile.seek(0)
return(int(infile.read().decode().strip())) | Function for fast reading from sensor files. |
389,184 | def load_directory(self, top_path, followlinks):
for dir_name, child_dirs, child_files in os.walk(top_path, followlinks=followlinks):
for child_filename in child_files:
if child_filename == DDS_IGNORE_FILENAME:
pattern_lines = self._read_non_empty_lines(dir_name, child_filename)
self.add_patterns(dir_name, pattern_lines) | Traverse top_path directory and save patterns in any .ddsignore files found.
:param top_path: str: directory name we should traverse looking for ignore files
:param followlinks: boolean: should we traverse symbolic links |
389,185 | def tokens2ids(tokens: Iterable[str], vocab: Dict[str, int]) -> List[int]:
return [vocab.get(w, vocab[C.UNK_SYMBOL]) for w in tokens] | Returns sequence of integer ids given a sequence of tokens and vocab.
:param tokens: List of string tokens.
:param vocab: Vocabulary (containing UNK symbol).
:return: List of word ids. |
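A toy example of the lookup (here a literal `'<unk>'` stands in for `C.UNK_SYMBOL`, whose real value lives in sockeye's constants module):

```python
vocab = {'<unk>': 0, 'the': 1, 'cat': 2}
ids = [vocab.get(w, vocab['<unk>']) for w in ['the', 'dog', 'cat']]
assert ids == [1, 0, 2]  # 'dog' is out of vocabulary and maps to the UNK id
```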
389,186 | def run_npm(self):
for name, version in npm_dependencies.items():
dep_name = '%s@%s' % (name, version)
self.run_cmd(['npm', 'install', '-g', dep_name])
389,187 | def add_members(self, users=None, role=TeamRoles.MEMBER):
if role and role not in TeamRoles.values():
raise IllegalArgumentError("role should be one of `TeamRoles` {}, got '{}'".format(TeamRoles.values(),
role))
if not users or not isinstance(users, (list, tuple, set)):
raise IllegalArgumentError("users should be a list of user_ids or `User` objects, got '{}'".
format(users))
update_dict = dict(role=role)
if all(isinstance(user, int) for user in users):
update_dict['members'] = users
elif all(isinstance(user, User) for user in users):
update_dict['members'] = [user.id for user in users]
else:
raise IllegalArgumentError("All users should be a list of user_ids or `User` objects, got '{}'".
format(users))
self._update('team_add_members', team_id=self.id, update_dict=update_dict)
:param members: list of members, either `User` objects or usernames
:type members: List of `User` or List of pk
:param role: (optional) role of the users to add (default `TeamRoles.MEMBER`)
:type role: basestring
:raises IllegalArgumentError: when providing incorrect roles
Example
-------
>>> my_team = client.team(name='My own team')
>>> other_user = client.users(name='That other person')
>>> myself = client.users(name='myself')
>>> my_team.add_members([myself], role=TeamRoles.MANAGER)
>>> my_team.add_members([other_user], role=TeamRoles.MEMBER) |
389,188 | def single_violation(self, column=None, value=None, **kwargs):
return self._resolve_call('PCS_SINGLE_EVENT_VIOLATION', column,
value, **kwargs) | A single event violation is a one-time event that occurred on a fixed
date, and is associated with one permitted facility.
>>> PCS().single_violation('single_event_viol_date', '16-MAR-01') |
389,189 | def _add_genetic_models(self, variant_obj, info_dict):
genetic_models_entry = info_dict.get('GeneticModels')
if genetic_models_entry:
genetic_models = []
for family_annotation in genetic_models_entry.split(','):
for genetic_model in family_annotation.split(':')[-1].split('|'):
genetic_models.append(genetic_model)
logger.debug("Updating genetic models to: {0}".format(
', '.join(genetic_models)))
variant_obj.genetic_models = genetic_models | Add the genetic models found
Args:
variant_obj (puzzle.models.Variant)
info_dict (dict): A info dictionary |
389,190 | def _prep_acl_for_compare(ACL):
ret = copy.deepcopy(ACL)
ret['Owner'] = _normalize_user(ret['Owner'])
for item in ret.get('Grants', ()):
item['Grantee'] = _normalize_user(item.get('Grantee'))
return ret | Prepares the ACL returned from the AWS API for comparison with a given one. |
389,191 | def handle(client_message, handle_event_imap_invalidation=None, handle_event_imap_batch_invalidation=None, to_object=None):
message_type = client_message.get_message_type()
if message_type == EVENT_IMAPINVALIDATION and handle_event_imap_invalidation is not None:
key = None
if not client_message.read_bool():
key = client_message.read_data()
handle_event_imap_invalidation(key=key)
if message_type == EVENT_IMAPBATCHINVALIDATION and handle_event_imap_batch_invalidation is not None:
keys_size = client_message.read_int()
keys = []
for _ in range(0, keys_size):
keys_item = client_message.read_data()
keys.append(keys_item)
handle_event_imap_batch_invalidation(keys=keys) | Event handler |
389,192 | def apply_operation(self, symmop):
def operate_site(site):
new_cart = symmop.operate(site.coords)
return Site(site.species, new_cart,
properties=site.properties)
self._sites = [operate_site(s) for s in self._sites] | Apply a symmetry operation to the molecule.
Args:
symmop (SymmOp): Symmetry operation to apply. |
389,193 | def handle_label_relation(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
subject_node_dsl = self.ensure_node(tokens[SUBJECT])
description = tokens[OBJECT]
if self.graph.has_node_description(subject_node_dsl):
raise RelabelWarning(
line_number=self.get_line_number(),
line=line,
position=position,
node=self.graph.node,
old_label=self.graph.get_node_description(subject_node_dsl),
new_label=description
)
self.graph.set_node_description(subject_node_dsl, description)
return tokens | Handle statements like ``p(X) label "Label for X"``.
:raises: RelabelWarning |
389,194 | def _validate_bag(self, bag, **kwargs):
failed = None
try:
bag.validate(**kwargs)
except BagValidationError as e:
failed = e
if failed:
raise BagValidationError("%s" % failed) | Validate BagIt (checksums, payload.oxum etc) |
389,195 | def _process_added_port_event(self, port_name):
LOG.info("Hyper-V VM vNIC added: %s", port_name)
self._added_ports.add(port_name) | Callback for added ports. |
389,196 | def parse_tweet(raw_tweet, source, now=None):
if now is None:
now = datetime.now(timezone.utc)
raw_created_at, text = raw_tweet.split("\t", 1)
created_at = parse_iso8601(raw_created_at)
if created_at > now:
raise ValueError("Tweet is from the future")
return Tweet(click.unstyle(text.strip()), created_at, source) | Parses a single raw tweet line from a twtxt file
and returns a :class:`Tweet` object.
:param str raw_tweet: a single raw tweet line
:param Source source: the source of the given tweet
:param Datetime now: the current datetime
:returns: the parsed tweet
:rtype: Tweet |
389,197 | def attention_lm_moe_large():
hparams = attention_lm_moe_base()
hparams.num_hidden_layers = 5
hparams.moe_layers = "3"
hparams.hidden_size = 1024
hparams.num_heads = 16
hparams.filter_size = 4096
hparams.moe_hidden_sizes = "4096"
hparams.moe_num_experts = 128
hparams.layer_prepostprocess_dropout = 0.2
return hparams | Large model for distributed training.
Over 1B parameters, so requires multi-gpu training due to memory
requirements.
on lm1b_32k:
After 45K steps on 8 GPUs (synchronous):
eval_log_ppl_per_token = 3.18
eval_ppl_per_word = exp(1.107893 * eval_log_ppl_per_token) = 33.9
Returns:
an hparams object. |
389,198 | def _get_app_version(self, app_config):
base_name = app_config.__module__.split('.')[0]
module = __import__(base_name)
return getattr(module, '__version__', None) | Some plugins ship multiple applications and extensions.
However all of them have the same version, because they are released together.
That's why only-top level module is used to fetch version information. |
389,199 | def get_portchannel_info_by_intf_output_lacp_actor_brcd_state(self, **kwargs):
config = ET.Element("config")
get_portchannel_info_by_intf = ET.Element("get_portchannel_info_by_intf")
config = get_portchannel_info_by_intf
output = ET.SubElement(get_portchannel_info_by_intf, "output")
lacp = ET.SubElement(output, "lacp")
actor_brcd_state = ET.SubElement(lacp, "actor-brcd-state")
actor_brcd_state.text = kwargs.pop('actor_brcd_state')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |