code | docstring |
|---|---|
def load(self, *relations):
if len(self.items) > 0:
query = self.first().new_query().with_(*relations)
self._set_items(query.eager_load_relations(self.items))
return self | Load a set of relationships onto the collection. |
def merge_graphs(self, other_docgraph, verbose=False):
if hasattr(self, 'merged_rootnodes'):
self.merged_rootnodes.append(other_docgraph.root)
else:
self.merged_rootnodes = [other_docgraph.root]
rename_tokens(other_docgraph, self, verbose=verbose)
self.add_nodes_from(other_docgraph.nodes(data=True))
for node_id, node_attrs in other_docgraph.nodes(data=True):
if istoken(other_docgraph, node_id) and \
self.ns+':token' not in self.node[node_id]:
self.node[node_id].update({self.ns+':token': other_docgraph.get_token(node_id)})
self.add_edges_from(other_docgraph.edges(data=True))
if other_docgraph.name and not self.name:
self.name = other_docgraph.name
if other_docgraph.tokens and not self.tokens:
self.tokens = other_docgraph.tokens
if other_docgraph.sentences and not self.sentences:
self.sentences = other_docgraph.sentences
self.merge_rootnodes(other_docgraph) | Merges another document graph into the current one, thereby adding all
the necessary nodes and edges (with attributes, layers etc.).
NOTE: This will only work if both graphs have exactly the same
tokenization. |
def consecutive_ones_property(sets, universe=None):
if universe is None:
universe = set()
for S in sets:
universe |= set(S)
tree = PQ_tree(universe)
try:
for S in sets:
tree.reduce(S)
return tree.border()
except IsNotC1P:
return None | Check the consecutive ones property.
:param list sets: is a list of subsets of the ground set.
:param universe: is the set of all elements,
by default it is the union of the given sets
:returns: a list of the ordered ground set where
every given set is consecutive,
or None if there is no solution.
:complexity: O(len(universe) * len(sets))
:disclaimer: an optimal implementation would have complexity
O(len(universe) + len(sets) + sum(map(len, sets))),
and there are more recent easier algorithms for this problem. |
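A quick usage sketch, assuming `consecutive_ones_property` and its `PQ_tree` machinery are importable as defined above; the sample set families are made up:

```python
# Sets {1,2}, {2,3}, {3,4} can be made consecutive by ordering 1,2,3,4.
print(consecutive_ones_property([{1, 2}, {2, 3}, {3, 4}]))
# -> an ordering such as [1, 2, 3, 4]

# Five distinct pairs would all need to be adjacent, but a linear order
# of four elements has only three adjacent pairs, so there is no solution.
print(consecutive_ones_property([{1, 2}, {3, 4}, {1, 3}, {2, 4}, {1, 4}]))
# -> None
```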
def mkvirtualenv():
# NOTE: several string literals in this function were lost in extraction;
# plausible values are filled in below and flagged where guessed.
root = '/'.join([deployment_root(), 'env'])
path = '/'.join([root, env.project_fullname])
dirs_created = []
if env.verbosity:
print env.host, 'creating virtualenv', path
if not exists(root): dirs_created += mkdirs(root)
with cd(root):
run(' '.join(["virtualenv", env.project_fullname]))
with cd(path):
dirs_created += mkdirs('egg_cache')  # directory name assumed
sudo('chown %s:www-data egg_cache' % env.user)  # command assumed
sudo('chmod g+w egg_cache')  # command assumed
run(''.join(["echo cd ", path, "/project/sitesettings > bin/postactivate"]))  # content partially reconstructed
sudo('chmod +x bin/postactivate')  # command assumed
out = State(' '.join([env.host, 'virtualenv created:', path]))
out.object = dirs_created + ['bin', 'lib', 'include']
out.failed = False
return out | Create the virtualenv project environment |
def print_dictionary(self, d, h, n, nl=False):
if d in h:
return "{}..."
h.append(d)
s = []
if nl:
s.append("\n")
s.append(self.indent(n))
s.append("{")
for item in d.items():
s.append("\n")
s.append(self.indent(n+1))
if isinstance(item[1], (list,tuple)):
s.append(tostr(item[0]))
s.append("[]")
else:
s.append(tostr(item[0]))
s.append(" = ")
s.append(self.process(item[1], h, n, True))
s.append("\n")
s.append(self.indent(n))
s.append("}")
h.pop()
return "".join(s) | Print complex using the specified indent (n) and newline (nl). |
def save_predefined(self, predefined, client=None):
predefined = self.validate_predefined(predefined)
self._save(None, predefined, client) | Save this ACL for the current bucket using a predefined ACL.
If :attr:`user_project` is set, bills the API request to that project.
:type predefined: str
:param predefined: An identifier for a predefined ACL. Must be one
of the keys in :attr:`PREDEFINED_JSON_ACLS`
or :attr:`PREDEFINED_XML_ACLS` (which will be
aliased to the corresponding JSON name).
If passed, `acl` must be None.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the ACL's parent. |
def from_sample_rate(sample_rate, n_bands, always_even=False):
fb = FrequencyBand(0, sample_rate.nyquist)
return LinearScale(fb, n_bands, always_even=always_even) | Return a :class:`~zounds.spectral.LinearScale` instance whose upper
frequency bound is informed by the nyquist frequency of the sample rate.
Args:
sample_rate (SamplingRate): the sample rate whose nyquist frequency
will serve as the upper frequency bound of this scale
n_bands (int): the number of evenly-spaced frequency bands |
def serialize(self, queryset, **options):
self.options = options
self.stream = options.pop("stream", six.StringIO())
self.selected_fields = options.pop("fields", None)
self.use_natural_keys = options.pop("use_natural_keys", False)
if self.use_natural_keys and RemovedInDjango19Warning is not None:
warnings.warn("``use_natural_keys`` is deprecated; use ``use_natural_foreign_keys`` instead.",
RemovedInDjango19Warning)
self.use_natural_foreign_keys = options.pop('use_natural_foreign_keys', False) or self.use_natural_keys
self.use_natural_primary_keys = options.pop('use_natural_primary_keys', False)
self.start_serialization()
self.first = True
for obj in queryset:
self.start_object(obj)
concrete_model = obj._meta.concrete_model
for field in concrete_model._meta.fields:
if field.serialize:
if field.remote_field is None:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_field(obj, field)
else:
if self.selected_fields is None or field.attname[:-3] in self.selected_fields:
self.handle_fk_field(obj, field)
for field in concrete_model._meta.many_to_many:
if field.serialize:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_m2m_field(obj, field)
self.end_object(obj)
if self.first:
self.first = False
self.end_serialization()
return self.getvalue() | Serialize a queryset. |
def from_image(cls, image):
(w, h) = image.size
if w > 512:
ratio = 512. / w
h = int(h * ratio)
image = image.resize((512, h), Image.ANTIALIAS)
if image.mode != '1':
image = image.convert('1')
pixels = np.array(list(image.getdata())).reshape(h, w)
extra_rows = int(math.ceil(h / 24)) * 24 - h
extra_pixels = np.ones((extra_rows, w), dtype=bool)
pixels = np.vstack((pixels, extra_pixels))
h += extra_rows
nb_stripes = h // 24
pixels = pixels.reshape(nb_stripes, 24, w).swapaxes(1, 2).reshape(-1, 8)
nh = int(w / 256)
nl = w % 256
data = []
pixels = np.invert(np.packbits(pixels))
stripes = np.split(pixels, nb_stripes)
for stripe in stripes:
data.extend([
ESC,
42,
33,
nl,
nh])
data.extend(stripe)
data.extend([
27,
74,
48])
height = h * 2
return cls(data, height) | Create a PrintableImage from a PIL Image
:param image: a PIL Image
:return: |
def _maybe_localize_point(ts, is_none, is_not_none, freq, tz):
if isinstance(freq, Tick) or freq is None:
localize_args = {'tz': tz, 'ambiguous': False}
else:
localize_args = {'tz': None}
if is_none is None and is_not_none is not None:
ts = ts.tz_localize(**localize_args)
return ts | Localize a start or end Timestamp to the timezone of the corresponding
start or end Timestamp
Parameters
----------
ts : start or end Timestamp to potentially localize
is_none : argument that should be None
is_not_none : argument that should not be None
freq : Tick, DateOffset, or None
tz : str, timezone object or None
Returns
-------
ts : Timestamp |
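The `ambiguous=False` in the Tick/None branch matters at DST transitions; a standalone pandas illustration of that localization choice:

```python
import pandas as pd

# 01:30 occurs twice in US/Eastern on 2024-11-03 (clocks fall back).
# ambiguous=False resolves the wall time to the non-DST (second) offset.
ts = pd.Timestamp("2024-11-03 01:30")
print(ts.tz_localize("US/Eastern", ambiguous=False))
# 2024-11-03 01:30:00-05:00
```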
def download(date_array, tag, sat_id, data_path, user=None, password=None):
import ftplib
from ftplib import FTP
import sys
ftp = FTP('ftp.gfz-potsdam.de')  # host assumed; Kp index data is hosted by GFZ Potsdam
ftp.login()
ftp.cwd('/pub/home/obs/kp-ap/tab')  # directory assumed
for date in date_array:
fname = 'kp{year:02d}{month:02d}.tab'  # filename pattern assumed from the format call below
fname = fname.format(year=(date.year - date.year//100*100), month=date.month)
local_fname = fname
saved_fname = os.path.join(data_path, local_fname)
try:
print('Downloading file for ' + date.strftime('%x'))
sys.stdout.flush()
ftp.retrbinary('RETR ' + fname, open(saved_fname, 'wb').write)
except ftplib.error_perm as exception:
if str(exception.args[0]).split(" ", 1)[0] != '550':
raise
else:
os.remove(saved_fname)
print('File not available for ' + date.strftime('%x'))
ftp.close()
return | Routine to download Kp index data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '1min' and '5min'.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
Returns
--------
Void : (NoneType)
data downloaded to disk, if available.
Notes
-----
Called by pysat. Not intended for direct use by user. |
def on_set(self, key, value):
if self._on_set is not None:
self._on_set(key, value) | Callback called on successful set. Uses function from __init__. |
def __send_handle_get_request(self, handle, indices=None):
resp = self.__handlesystemconnector.send_handle_get_request(handle, indices)
return resp
return resp | Send a HTTP GET request to the handle server to read either an entire
handle or to some specified values from a handle record, using the
requests module.
:param handle: The handle.
:param indices: Optional. A list of indices to retrieve. Defaults to
None (i.e. the entire handle record is retrieved). The list can contain
integers or strings.
:return: The server's response. |
def b_spline_basis(x, edge_knots, n_splines=20, spline_order=3, sparse=True,
periodic=True, verbose=True):
if np.ravel(x).ndim != 1:
raise ValueError('Data must be 1-D, but found {}'
.format(np.ravel(x).ndim))
if (n_splines < 1) or not isinstance(n_splines, numbers.Integral):
raise ValueError('n_splines must be int >= 1')
if (spline_order < 0) or not isinstance(spline_order, numbers.Integral):
raise ValueError('spline_order must be int >= 0')
if n_splines < spline_order + 1:
raise ValueError('n_splines must be >= spline_order + 1. '
'found: n_splines = {} and spline_order = {}'
.format(n_splines, spline_order))
if n_splines == 0 and verbose:
warnings.warn('Requested 1 spline. This is equivalent to '
'fitting an intercept', stacklevel=2)
n_splines += spline_order * periodic
edge_knots = np.sort(deepcopy(edge_knots))
offset = edge_knots[0]
scale = edge_knots[-1] - edge_knots[0]
if scale == 0:
scale = 1
boundary_knots = np.linspace(0, 1, 1 + n_splines - spline_order)
diff = np.diff(boundary_knots[:2])[0]
x = (np.ravel(deepcopy(x)) - offset) / scale
if periodic:
x = x % (1 + 1e-9)
x = np.r_[x, 0., 1.]
x_extrapolte_l = (x < 0)
x_extrapolte_r = (x > 1)
x_interpolate = ~(x_extrapolte_r + x_extrapolte_l)
x = np.atleast_2d(x).T
n = len(x)
aug = np.arange(1, spline_order + 1) * diff
aug_knots = np.r_[-aug[::-1],
boundary_knots,
1 + aug]
aug_knots[-1] += 1e-9
bases = (x >= aug_knots[:-1]).astype(np.int) * \
(x < aug_knots[1:]).astype(np.int)
bases[-1] = bases[-2][::-1]
maxi = len(aug_knots) - 1
for m in range(2, spline_order + 2):
maxi -= 1
num = (x - aug_knots[:maxi])
num *= bases[:, :maxi]
denom = aug_knots[m-1 : maxi+m-1] - aug_knots[:maxi]
left = num/denom
num = (aug_knots[m : maxi+m] - x) * bases[:, 1:maxi+1]
denom = aug_knots[m:maxi+m] - aug_knots[1 : maxi+1]
right = num/denom
prev_bases = bases[-2:]
bases = left + right
if periodic and spline_order > 0:
bases[:, :spline_order] = np.max([bases[:, :spline_order],
bases[:, -spline_order:]],
axis=0)
bases = bases[:, :-spline_order]
if (any(x_extrapolte_r) or any(x_extrapolte_l)) and spline_order>0:
bases[~x_interpolate] = 0.
denom = (aug_knots[spline_order:-1] - aug_knots[: -spline_order - 1])
left = prev_bases[:, :-1] / denom
denom = (aug_knots[spline_order+1:] - aug_knots[1: -spline_order])
right = prev_bases[:, 1:] / denom
grads = (spline_order) * (left - right)
if any(x_extrapolte_l):
val = grads[0] * x[x_extrapolte_l] + bases[-2]
bases[x_extrapolte_l] = val
if any(x_extrapolte_r):
val = grads[1] * (x[x_extrapolte_r] - 1) + bases[-1]
bases[x_extrapolte_r] = val
bases = bases[:-2]
if sparse:
return sp.sparse.csc_matrix(bases)
return bases | tool to generate b-spline basis using vectorized De Boor recursion
the basis functions extrapolate linearly past the end-knots.
Parameters
----------
x : array-like, with ndims == 1.
edge_knots : array-like contaning locations of the 2 edge knots.
n_splines : int. number of splines to generate. must be >= spline_order+1
default: 20
spline_order : int. order of spline basis to create
default: 3
sparse : boolean. whether to return a sparse basis matrix or not.
default: True
verbose : bool, default: True
whether to print warnings
Returns
-------
basis : sparse csc matrix or array containing b-spline basis functions
with shape (len(x), n_splines) |
def _next(self, possible_solution):
is_complete = (len(possible_solution) == len(self._vars))
if is_complete:
self._solutions_seen += 1
if self.satisfies_constraints(possible_solution):
yield dict(possible_solution)
else:
if self.is_feasible(possible_solution):
for s in self.derived_solutions(possible_solution):
for solution in self._next(s):
yield solution | Where the magic happens. Produces a generator that returns all solutions given
a base solution to start searching. |
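A self-contained sketch of the same generator-based backtracking pattern, with illustrative names (not the class's actual API):

```python
# Extend a partial assignment one variable at a time and yield complete,
# constraint-satisfying assignments.
def solve(variables, domains, is_feasible, assignment=()):
    if len(assignment) == len(variables):
        yield dict(assignment)
        return
    var = variables[len(assignment)]
    for value in domains[var]:
        candidate = assignment + ((var, value),)
        if is_feasible(dict(candidate)):       # prune infeasible branches early
            yield from solve(variables, domains, is_feasible, candidate)

# All (x, y) with x != y:
pairs = solve(("x", "y"), {"x": [1, 2], "y": [1, 2]},
              lambda a: len(set(a.values())) == len(a))
print(list(pairs))  # [{'x': 1, 'y': 2}, {'x': 2, 'y': 1}]
```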
def mimetype_params(self):
def on_update(d):
self.headers["Content-Type"] = dump_options_header(self.mimetype, d)
d = parse_options_header(self.headers.get("content-type", ""))[1]
return CallbackDict(d, on_update) | The mimetype parameters as dict. For example if the
content type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.5 |
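A usage sketch with Werkzeug, where this property lives on the request and response wrappers:

```python
from werkzeug.wrappers import Response

resp = Response("hi", content_type="text/html; charset=utf-8")
print(resp.mimetype_params["charset"])       # utf-8
resp.mimetype_params["charset"] = "latin1"   # on_update rewrites the header
print(resp.headers["Content-Type"])          # text/html; charset=latin1
```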
def set_is_polling(polling, host=None, core_name=None):
ret = _get_return_dict()
# NOTE: the guard logic here was lost in extraction; a minimal
# reconstruction follows. Only slave minions may call this function.
if _is_master() and host is None:
errors = ['solr.set_is_polling can only be called by "slave" minions']
return ret.update({'success': False, 'errors': errors})
cmd = 'enablepoll' if polling else 'disablepoll'
resp = _replication_request(cmd, host=host, core_name=core_name)
return resp | SLAVE CALL
Prevent the slaves from polling the master for updates.
polling : boolean
True will enable polling. False will disable it.
host : str (None)
The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.set_is_polling False |
def getparent(self, profile):
assert self.parent
for inputtemplate in profile.input:
if inputtemplate == self.parent:
return inputtemplate
raise Exception("Parent InputTemplate not found!") | Resolve a parent ID |
def to_call_agraph(self):
A = nx.nx_agraph.to_agraph(self.call_graph)
A.graph_attr.update({"dpi": 227, "fontsize": 20, "fontname": "Menlo"})
A.node_attr.update(
{"shape": "rectangle", "color": "
)
A.edge_attr.update({"color": "
return A | Build a PyGraphviz AGraph object corresponding to a call graph of
functions. |
def aesCCM(key, key_handle, nonce, data, decrypt=False):
if decrypt:
(data, saved_mac) = _split_data(data, len(data) - pyhsm.defines.YSM_AEAD_MAC_SIZE)
nonce = pyhsm.util.input_validate_nonce(nonce, pad = True)
mac = _cbc_mac(key, key_handle, nonce, len(data))
counter = _ctr_counter(key_handle, nonce, value = 0)
ctr_aes = AES.new(key, AES.MODE_CTR, counter = counter.next)
out = []
while data:
(thisblock, data) = _split_data(data, pyhsm.defines.YSM_BLOCK_SIZE)
if decrypt:
aes_out = ctr_aes.decrypt(thisblock)
mac.update(aes_out)
else:
mac.update(thisblock)
aes_out = ctr_aes.encrypt(thisblock)
out.append(aes_out)
counter.value = 0
mac.finalize(counter.pack())
if decrypt:
if mac.get() != saved_mac:
raise pyhsm.exception.YHSM_Error()
else:
out.append(mac.get())
return ''.join(out) | Function implementing YubiHSM AEAD encrypt/decrypt in software. |
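The function above hand-rolls CCM from CTR mode plus CBC-MAC. For comparison, a library-level AES-CCM round trip with PyCryptodome (not the YubiHSM wire format), using an 8-byte tag like YSM_AEAD_MAC_SIZE:

```python
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

key = get_random_bytes(16)
nonce = get_random_bytes(11)           # CCM nonces may be 7-13 bytes

cipher = AES.new(key, AES.MODE_CCM, nonce=nonce, mac_len=8)
ciphertext, tag = cipher.encrypt_and_digest(b"attack at dawn")

decipher = AES.new(key, AES.MODE_CCM, nonce=nonce, mac_len=8)
plaintext = decipher.decrypt_and_verify(ciphertext, tag)  # raises ValueError on tampering
print(plaintext)
```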
def restore_course(self, courseid, backup):
self.wipe_course(courseid)
filepath = os.path.join(self.backup_dir, courseid, backup + ".zip")
with zipfile.ZipFile(filepath, "r") as zipf:
aggregations = bson.json_util.loads(zipf.read("aggregations.json").decode("utf-8"))
if len(aggregations) > 0:
self.database.aggregations.insert(aggregations)
user_tasks = bson.json_util.loads(zipf.read("user_tasks.json").decode("utf-8"))
if len(user_tasks) > 0:
self.database.user_tasks.insert(user_tasks)
submissions = bson.json_util.loads(zipf.read("submissions.json").decode("utf-8"))
for submission in submissions:
for key in ["input", "archive"]:
if key in submission and type(submission[key]) == bson.objectid.ObjectId:
submission[key] = self.submission_manager.get_gridfs().put(zipf.read(key + "/" + str(submission[key]) + ".data"))
if len(submissions) > 0:
self.database.submissions.insert(submissions)
self._logger.info("Course %s restored from backup directory.", courseid) | Restores a course of given courseid to a date specified in backup (format : YYYYMMDD.HHMMSS) |
def _new_percolator(spec, search_pattern):
if spec and search_pattern:
query = query_string_parser(search_pattern=search_pattern).to_dict()
for index in current_search.mappings.keys():
percolator_doc_type = _get_percolator_doc_type(index)
_create_percolator_mapping(index, percolator_doc_type)
current_search_client.index(
index=index, doc_type=percolator_doc_type,
id='oaiset-{}'.format(spec),
body={'query': query}
) | Create new percolator associated with the new set. |
def ListHunts(context=None):
items = context.SendIteratorRequest("ListHunts", hunt_pb2.ApiListHuntsArgs())
return utils.MapItemsIterator(lambda data: Hunt(data=data, context=context),
items) | List all GRR hunts. |
def depth_renderbuffer(self, size, *, samples=0) -> 'Renderbuffer':
res = Renderbuffer.__new__(Renderbuffer)
res.mglo, res._glo = self.mglo.depth_renderbuffer(size, samples)
res._size = size
res._components = 1
res._samples = samples
res._dtype = 'f4'
res._depth = True
res.ctx = self
res.extra = None
return res | :py:class:`Renderbuffer` objects are OpenGL objects that contain images.
They are created and used specifically with :py:class:`Framebuffer` objects.
Args:
size (tuple): The width and height of the renderbuffer.
Keyword Args:
samples (int): The number of samples. Value 0 means no multisample format.
Returns:
:py:class:`Renderbuffer` object |
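A hedged usage sketch with moderngl (requires a working OpenGL driver); sizes are arbitrary:

```python
import moderngl

ctx = moderngl.create_standalone_context()
color = ctx.renderbuffer((256, 256))          # color attachment
depth = ctx.depth_renderbuffer((256, 256))    # the method documented above
fbo = ctx.framebuffer(color_attachments=[color], depth_attachment=depth)
fbo.use()  # subsequent draws render into the offscreen framebuffer
```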
def download(self, itemID, savePath):
if os.path.isdir(savePath) == False:
os.makedirs(savePath)
url = self._url + "/%s/download" % itemID
params = {
}
if len(params.keys()):
url = url + "?%s" % urlencode(params)
return self._get(url=url,
param_dict=params,
out_folder=savePath,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | downloads an item to local disk
Inputs:
itemID - unique id of item to download
savePath - folder to save the file in |
def _get_vnet(self, adapter_number):
vnet = "ethernet{}.vnet".format(adapter_number)
if vnet not in self._vmx_pairs:
raise VMwareError("vnet {} not in VMX file".format(vnet))
return vnet | Return the vnet will use in ubridge |
def assert_boolean_false(expr, msg_fmt="{msg}"):
if expr is not False:
msg = "{!r} is not False".format(expr)
fail(msg_fmt.format(msg=msg, expr=expr)) | Fail the test unless the expression is the constant False.
>>> assert_boolean_false(False)
>>> assert_boolean_false(0)
Traceback (most recent call last):
...
AssertionError: 0 is not False
The following msg_fmt arguments are supported:
* msg - the default error message
* expr - tested expression |
def template(page=None, layout=None, **kwargs):
pkey = "_template_extends__"
def decorator(f):
if inspect.isclass(f):
layout_ = layout or page
extends = kwargs.pop("extends", None)
if extends and hasattr(extends, pkey):
items = dict(getattr(extends, pkey))
if "layout" in items:
layout_ = items.pop("layout")
for k, v in items.items():
kwargs.setdefault(k, v)
if not layout_:
layout_ = "layout.html"
kwargs.setdefault("brand_name", "")
kwargs["layout"] = layout_
setattr(f, pkey, kwargs)
setattr(f, "base_layout", kwargs.get("layout"))
f.g(TEMPLATE_CONTEXT=kwargs)
return f
else:
@functools.wraps(f)
def wrap(*args2, **kwargs2):
response = f(*args2, **kwargs2)
if isinstance(response, dict) or response is None:
response = response or {}
if page:
response.setdefault("template_", page)
if layout:
response.setdefault("layout_", layout)
for k, v in kwargs.items():
response.setdefault(k, v)
return response
return wrap
return decorator | Decorator to change the view template and layout.
It works on both View classes and view methods.
** on a class:
only $layout is applied; everything else is passed to the kwargs.
Used as the first argument, it will be the layout.
:first arg or $layout: The layout to use for that view
:param layout: The layout to use for that view
:param kwargs:
get pass to the TEMPLATE_CONTEXT
** on methods that return a dict
page or layout are optional
:param page: The html page
:param layout: The layout to use for that view
:param kwargs:
get pass to the view as k/V
** on other methods returning any other type, it doesn't apply
:return: |
def make_urls_hyperlinks(text: str) -> str:
find_url = r"\]])
)
<a href="\1">\1</a>([.\w\-]+@(\w[\w\-]+\.)+[\w\-]+).t need escaping inside square brackets
replace_email = r
text = re.sub(find_url, replace_url, text)
text = re.sub(find_email, replace_email, text)
return text | Adds hyperlinks to text that appears to contain URLs.
See
- http://stackoverflow.com/questions/1071191
- ... except that double-replaces everything; e.g. try with
``text = "me@somewhere.com me@somewhere.com"``
- http://stackp.online.fr/?p=19 |
def update_edge_todo(self, elev_fn, dem_proc):
for key in self.edges[elev_fn].keys():
self.edges[elev_fn][key].set_data('todo', data=dem_proc.edge_todo) | Update the todo edge data based on the elev filename |
def _get_sector(self, channel, nlines, ncols):
if self._is_vis(channel):
margin = 100
sectors_ref = self.vis_sectors
else:
margin = 50
sectors_ref = self.ir_sectors
for (nlines_ref, ncols_ref), sector in sectors_ref.items():
if np.fabs(ncols - ncols_ref) < margin and \
np.fabs(nlines - nlines_ref) < margin:
return sector
return UNKNOWN_SECTOR | Determine which sector was scanned |
def bootstrap_files(self):
bootstrap_file_yamls = [
abspath(join(self.default_template_path, self.bootstrap_files_yaml)),
abspath(join(self.custom_template_path, self.bootstrap_files_yaml))]
bootstrap_files = dict()
if self.upload_authorized_keys:
bootstrap_files['authorized_keys'] = BootstrapFile(self, 'authorized_keys', **{
# keys and values below were lost in extraction; bsdploy-style defaults assumed
'directory': '/mnt/root/.ssh',
'directory_mode': '0600',
'mode': '0600',
'fallback': [
'~/.ssh/identity.pub',
'~/.ssh/id_rsa.pub',
'~/.ssh/id_dsa.pub',
'~/.ssh/id_ecdsa.pub',
]})
for bootstrap_file_yaml in bootstrap_file_yamls:
if not exists(bootstrap_file_yaml):
continue
with open(bootstrap_file_yaml) as f:
info = yaml.load(f, Loader=SafeLoader)
if info is None:
continue
for k, v in info.items():
bootstrap_files[k] = BootstrapFile(self, k, **v)
for bf in bootstrap_files.values():
if not exists(bf.local) and bf.raw_fallback:
if not bf.existing_fallback:
print("Found no public key in %s, you have to create manually" % (expanduser(), bf.local))
sys.exit(1)
print("The file is missing." % bf.local)
for path in bf.existing_fallback:
yes = env.instance.config.get(, False)
if yes or yesno("Should we generate it using the key in ?" % path):
if not exists(bf.expected_path):
os.mkdir(bf.expected_path)
with open(bf.local, ) as out:
with open(path, ) as f:
out.write(f.read())
break
else:
sys.exit(1)
if not bf.check():
print("Failed to validate %s." % bf.local)  # message assumed
sys.exit(1)
packages_path = join(self.download_path, 'packages')
if exists(packages_path):
for dirpath, dirnames, filenames in os.walk(packages_path):
path = dirpath.split(packages_path)[1][1:]
for filename in filenames:
if not filename.endswith('.txz'):
continue
bootstrap_files[join(path, filename)] = BootstrapFile(
self, join(path, filename), **dict(
local=join(packages_path, join(path, filename)),
remote=join('/mnt/var/cache/pkg/All', filename),  # remote path assumed
encrypted=False))
if self.ssh_keys is not None:
for ssh_key_name, ssh_key_options in list(self.ssh_keys):
ssh_key = join(self.custom_template_path, ssh_key_name)
if exists(ssh_key):
pub_key_name = '%s.pub' % ssh_key_name
pub_key = '%s.pub' % ssh_key
if not exists(pub_key):
print("Public key %s for %s missing." % (pub_key, ssh_key))
sys.exit(1)
bootstrap_files[ssh_key_name] = BootstrapFile(
self, ssh_key_name, **dict(
local=ssh_key,
remote='/mnt/etc/ssh/%s' % ssh_key_name,  # remote path assumed
mode=0600))
bootstrap_files[pub_key_name] = BootstrapFile(
self, pub_key_name, **dict(
local=pub_key,
remote='/mnt/etc/ssh/%s' % pub_key_name,  # remote path assumed
mode=0644))
if hasattr(env.instance, 'get_vault_lib'):
vaultlib = env.instance.get_vault_lib()
for bf in bootstrap_files.values():
if bf.encrypted is None and exists(bf.local):
with open(bf.local) as f:
data = f.read()
bf.info['encrypted'] = vaultlib.is_encrypted(data)
return bootstrap_files | we need some files to bootstrap the FreeBSD installation.
Some...
- need to be provided by the user (i.e. authorized_keys)
- others have some (sensible) defaults (i.e. rc.conf)
- some can be downloaded via URL (e.g. http://pkg.freebsd.org/freebsd:10:x86:64/latest/Latest/pkg.txz)
For those which can be downloaded we check the downloads directory. if the file exists there
(and if the checksum matches TODO!) we will upload it to the host. If not, we will fetch the file
from the given URL from the host.
For files that cannot be downloaded (authorized_keys, rc.conf etc.) we allow the user to provide their
own version in a ``bootstrap-files`` folder. The location of this folder can either be explicitly provided
via the ``bootstrap-files`` key in the host definition of the config file or it defaults to ``deployment/bootstrap-files``.
User provided files can be rendered as Jinja2 templates, by providing ``use_jinja: True`` in the YAML file.
They will be rendered with the instance configuration dictionary as context.
If the file is not found there, we revert to the default
files that are part of bsdploy. If the file cannot be found there either, we error out, except for
authorized_keys, for which we fall back to ``~/.ssh/identity.pub``. |
def get_rnn_cells(self) -> List[mx.rnn.BaseRNNCell]:
return self.forward_rnn.get_rnn_cells() + self.reverse_rnn.get_rnn_cells() | Returns a list of RNNCells used by this encoder. |
def _zero_based_index(self, onebased: Union[int, str]) -> int:
result = int(onebased)
if result > 0:
result -= 1
return result | Convert a one-based index to a zero-based index. |
def build_result(data):
more = {}
for key, value in data.items():
if key != 'result':  # key name lost in extraction; 'result' assumed from the docstring
newnode = value
else:
newnode = {}
for el in value:
nkey, nvalue = process_node(el)
newnode[nkey] = nvalue
more[key] = newnode
return more | Create a dictionary with the contents of result.json |
def _findSwipl():
platform = sys.platform[:3]
if platform == "win":
(path, swiHome) = _findSwiplWin()
elif platform in ("lin", "cyg"):
(path, swiHome) = _findSwiplLin()
elif platform == "dar":
(path, swiHome) = _findSwiplDar()
if path is None:
(path, swiHome) = _findSwiplMacOSHome()
else:
(path, swiHome) = _findSwiplLin()
if path is None:
raise ImportError(
)
else:
return (path, swiHome) | This function makes a big effort to find the path to the SWI-Prolog shared
library. Since this is both OS dependent and installation dependent, we may
not always succeed. If we do, we return a name/path that can be used by
CDLL(). Otherwise we raise an exception.
:return: Tuple. First element is the name or path to the library that can be
used by CDLL. Second element is the path where the SWI-Prolog resource
file may be found (this is needed in some Linuxes)
:rtype: Tuple of strings
:raises ImportError: If we cannot guess the name of the library |
def deploy(self, ids):
url = build_uri_with_ids('api/v3/vip-request/deploy/%s/', ids)  # URI template assumed
return super(ApiVipRequest, self).post(url) | Method to deploy vip's
:param ids: List containing the ids of the vips to be deployed on equipment
:return: None |
def add_layer(self, element, layer):
assert isinstance(layer, str), "Layers must be strings!"
if isinstance(element, tuple):
assert len(element) == 2
assert all(isinstance(node, (str, int)) for node in element)
source_id, target_id = element
edges = self.edge[source_id][target_id]
for edge in edges:
existing_layers = edges[edge]['layers']
existing_layers.add(layer)
edges[edge]['layers'] = existing_layers
if isinstance(element, (str, int)):
existing_layers = self.node[element]['layers']
existing_layers.add(layer)
self.node[element]['layers'] = existing_layers
Parameters
----------
element : str, int, (str/int, str/int)
the ID of a node or edge (source node ID, target node ID)
layer : str
the layer that the element shall be added to |
def set_headers(self, headers):
self.request["headers"].update(self._input_object(headers))
return self.request["headers"] | *Sets new request headers or updates the existing.*
``headers``: The headers to add or update as a JSON object or a
dictionary.
*Examples*
| `Set Headers` | { "authorization": "Basic QWxhZGRpbjpPcGVuU2VzYW1"} |
| `Set Headers` | { "Accept-Encoding": "identity"} |
| `Set Headers` | ${auth_dict} | |
def _worst_case_generation(self, worst_case_scale_factors, modes):
self.timeseries.generation_fluctuating = pd.DataFrame(
{'solar': [worst_case_scale_factors[
'{}_feedin_pv'.format(mode)] for mode in modes],
'wind': [worst_case_scale_factors[
'{}_feedin_wind'.format(mode)] for mode in modes]},
index=self.timeseries.timeindex)
self.timeseries.generation_dispatchable = pd.DataFrame(
{'other': [worst_case_scale_factors[
'{}_feedin_other'.format(mode)] for mode in modes]},
index=self.timeseries.timeindex) | Define worst case generation time series for fluctuating and
dispatchable generators.
Parameters
----------
worst_case_scale_factors : dict
Scale factors defined in config file 'config_timeseries.cfg'.
Scale factors describe actual power to nominal power ratio of in
worst-case scenarios.
modes : list
List with worst-cases to generate time series for. Can be
'feedin_case', 'load_case' or both. |
def is_all_field_none(self):
if self._uuid is not None:
return False
if self._created is not None:
return False
if self._updated is not None:
return False
if self._attachment is not None:
return False
return True | :rtype: bool |
def get(self, key, default=None, remote=False):
if not remote:
return super(CouchDB, self).get(key, default)
db = self._DATABASE_CLASS(self, key)
if db.exists():
super(CouchDB, self).__setitem__(key, db)
return db
return default | Overrides dictionary get behavior to retrieve database objects with
support for returning a default. If remote=True then a remote
request is made to retrieve the database from the remote server,
otherwise the client's locally cached database object is returned.
:param str key: Database name used to retrieve the database object.
:param str default: Default database name. Defaults to None.
:param bool remote: Dictates whether the locally cached
database is returned or a remote request is made to retrieve
the database from the server. Defaults to False.
:returns: Database object |
def read_csv(filename, delimiter=",", skip=0, guess_type=True, has_header=True, use_types={}):
with open(filename, 'r') as f:
if has_header:
header = f.readline().strip().split(delimiter)
else:
header = None
for i in range(skip):
f.readline()
for line in csv.DictReader(f, delimiter=delimiter, fieldnames=header):
if use_types:
yield apply_types(use_types, guess_type, line)
elif guess_type:
yield dmap(determine_type, line)
else:
yield line | Read a CSV file
Usage
-----
>>> data = read_csv(filename, delimiter=delimiter, skip=skip,
guess_type=guess_type, has_header=True, use_types={})
# Use specific types
>>> types = {"sepal.length": int, "petal.width": float}
>>> data = read_csv(filename, guess_type=guess_type, use_types=types)
keywords
:has_header:
Determine whether the file has a header or not |
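A runnable sketch, assuming the helpers referenced above (`apply_types`, `dmap`, `determine_type`) are in scope; the CSV content is made up:

```python
with open("flowers.csv", "w") as f:
    f.write("sepal.length,petal.width\n5,0.2\n6,0.4\n")

types = {"sepal.length": int, "petal.width": float}
for row in read_csv("flowers.csv", use_types=types):
    print(row)  # e.g. {'sepal.length': 5, 'petal.width': 0.2}
```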
def _gatherLookupIndexes(gpos):
kernFeatureIndexes = [index for index, featureRecord in enumerate(gpos.FeatureList.FeatureRecord) if featureRecord.FeatureTag == "kern"]
scriptKernFeatureIndexes = {}
for scriptRecord in gpos.ScriptList.ScriptRecord:
script = scriptRecord.ScriptTag
thisScriptKernFeatureIndexes = []
defaultLangSysRecord = scriptRecord.Script.DefaultLangSys
if defaultLangSysRecord is not None:
f = []
for featureIndex in defaultLangSysRecord.FeatureIndex:
if featureIndex not in kernFeatureIndexes:
continue
f.append(featureIndex)
if f:
thisScriptKernFeatureIndexes.append((None, f))
if scriptRecord.Script.LangSysRecord is not None:
for langSysRecord in scriptRecord.Script.LangSysRecord:
langSys = langSysRecord.LangSysTag
f = []
for featureIndex in langSysRecord.LangSys.FeatureIndex:
if featureIndex not in kernFeatureIndexes:
continue
f.append(featureIndex)
if f:
thisScriptKernFeatureIndexes.append((langSys, f))
scriptKernFeatureIndexes[script] = thisScriptKernFeatureIndexes
scriptLookupIndexes = {}
for script, featureDefinitions in scriptKernFeatureIndexes.items():
lookupIndexes = scriptLookupIndexes[script] = []
for language, featureIndexes in featureDefinitions:
for featureIndex in featureIndexes:
featureRecord = gpos.FeatureList.FeatureRecord[featureIndex]
for lookupIndex in featureRecord.Feature.LookupListIndex:
if lookupIndex not in lookupIndexes:
lookupIndexes.append(lookupIndex)
return scriptLookupIndexes | Gather a mapping of script to lookup indexes
referenced by the kern feature for each script.
Returns a dictionary of this structure:
{
"latn" : [0],
"DFLT" : [0]
} |
def build(self, builder):
params = dict(
Namespace=self.namespace,
Name=self.name,
Value=self.value,
TransactionType=self.transaction_type,
)
builder.start("mdsol:Attribute", params)
builder.end("mdsol:Attribute") | Build XML by appending to builder |
def get_vnetwork_vswitches_input_last_rcvd_instance(self, **kwargs):
config = ET.Element("config")
get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
config = get_vnetwork_vswitches
input = ET.SubElement(get_vnetwork_vswitches, "input")
last_rcvd_instance = ET.SubElement(input, "last-rcvd-instance")
last_rcvd_instance.text = kwargs.pop('last_rcvd_instance')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def add_output_opt(self, opt, out):
self.add_opt(opt, out._dax_repr())
self._add_output(out) | Add an option that determines an output |
def histogram_equalize(self, use_bands, **kwargs):
data = self._read(self[use_bands,...], **kwargs)
data = np.rollaxis(data.astype(np.float32), 0, 3)
flattened = data.flatten()
if 0 in data:
masked = np.ma.masked_values(data, 0).compressed()
image_histogram, bin_edges = np.histogram(masked, 256)
else:
image_histogram, bin_edges = np.histogram(flattened, 256)
bins = (bin_edges[:-1] + bin_edges[1:]) / 2.0
cdf = image_histogram.cumsum()
cdf = cdf / float(cdf[-1])
image_equalized = np.interp(flattened, bins, cdf).reshape(data.shape)
if 'stretch' in kwargs or 'gamma' in kwargs:  # keyword names assumed; literals lost in extraction
return self._histogram_stretch(image_equalized, **kwargs)
else:
return image_equalized | Equalize the histogram and normalize the value range
Equalization is on all three bands, not per-band |
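A self-contained illustration of the same equalization math on a synthetic single-band array (no masking and no stretch kwargs):

```python
import numpy as np

data = np.random.gamma(2.0, 2.0, (64, 64)).astype(np.float32)

# Build the empirical CDF over 256 bins and map pixel values through it.
hist, bin_edges = np.histogram(data.flatten(), 256)
bins = (bin_edges[:-1] + bin_edges[1:]) / 2.0
cdf = hist.cumsum() / float(hist.sum())
equalized = np.interp(data.flatten(), bins, cdf).reshape(data.shape)

print(equalized.min(), equalized.max())  # values now spread across ~[0, 1]
```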
def require_axis(f):
@wraps(f)
def _wrapper(self, *args, **kwargs):
if None in (self.axis, self.sel_axis):
raise ValueError('%(func_name)s requires the node %(node)s '
'to have an axis and a sel_axis function' %
dict(func_name=f.__name__, node=repr(self)))
return f(self, *args, **kwargs)
return _wrapper | Check if the object of the function has axis and sel_axis members |
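A minimal sketch of the guarded-method pattern the decorator enforces; the `Node` class here is illustrative, not the library's:

```python
class Node:
    def __init__(self, axis=None, sel_axis=None):
        self.axis = axis
        self.sel_axis = sel_axis

    @require_axis
    def split_axis(self):
        return self.axis

print(Node(axis=0, sel_axis=lambda prev: prev).split_axis())  # 0
# Node().split_axis() raises ValueError: axis/sel_axis are unset
```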
def set_feature_transform(self, mode='polynomial', degree=1):
if self.status != 'load_train_data':
print("Please load train data first.")
return self.train_X
self.feature_transform_mode = mode
self.feature_transform_degree = degree
self.train_X = self.train_X[:, 1:]
self.train_X = utility.DatasetLoader.feature_transform(
self.train_X,
self.feature_transform_mode,
self.feature_transform_degree
)
return self.train_X | Transform data feature to high level |
def _parse_config_file(self, cfg_files):
cfg_handler = configparser.ConfigParser(interpolation=None)
if not cfg_handler.read(map(str, cfg_files)):
return
self._parse_global_section(cfg_handler)
self._parse_vcs_section(cfg_handler)
self._parse_file_section(cfg_handler) | Parse config file (ini) and set properties
:return: |
def query(self, expr, **kwargs):
columns = self.columns
def query_builder(df, **kwargs):
df = df.copy()
df.index = pandas.RangeIndex(len(df))
df.columns = columns
df.query(expr, inplace=True, **kwargs)
df.columns = pandas.RangeIndex(len(df.columns))
return df
func = self._prepare_method(query_builder, **kwargs)
new_data = self._map_across_full_axis(1, func)
new_index = self.compute_index(0, new_data, True)
return self.__constructor__(new_data, new_index, self.columns, self.dtypes) | Query columns of the DataManager with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
DataManager containing the rows where the boolean expression is satisfied. |
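The wrapped behavior matches pandas' own `DataFrame.query`; a plain-pandas illustration:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
print(df.query("a > 1 and b < 6"))
#    a  b
# 1  2  5
```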
def newton_refine_curve(curve, point, s, new_s):
if NO_IMAGES:
return
ax = curve.plot(256)
ax.plot(point[0, :], point[1, :], marker="H")
wrong_points = curve.evaluate_multi(np.asfortranarray([s, new_s]))
ax.plot(
wrong_points[0, [0]],
wrong_points[1, [0]],
color="black",
linestyle="None",
marker="o",
)
ax.plot(
wrong_points[0, [1]],
wrong_points[1, [1]],
color="black",
linestyle="None",
marker="o",
markeredgewidth=1,
markerfacecolor="None",
)
ax.axis("scaled")
ax.set_xlim(-0.125, 3.125)
ax.set_ylim(-0.125, 1.375)
save_image(ax.figure, "newton_refine_curve.png") | Image for :func:`._curve_helpers.newton_refine` docstring. |
def arg_bool(name, default=False):
v = request.args.get(name, '')
if not len(v):
return default
return v in BOOL_TRUISH | Fetch a query argument, as a boolean. |
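A hedged usage sketch: `BOOL_TRUISH` is defined elsewhere in the module, so a plausible definition is supplied; `arg_bool` reads Flask's global `request`, so a test request context is used:

```python
from flask import Flask

BOOL_TRUISH = ("true", "1", "yes", "on", "t", "y")  # plausible definition

app = Flask(__name__)
with app.test_request_context("/?debug=yes"):
    print(arg_bool("debug"))          # True
    print(arg_bool("missing", True))  # True (default is used)
```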
def readmol(path, as_string=False):
supported_formats = ['pdb']  # assumed; the original format list was lost in extraction
if "\r" in path and as_string:
path = path.replace('\r', '')
for sformat in supported_formats:
obc = pybel.ob.OBConversion()
obc.SetInFormat(sformat)
write_message("Detected {} as format. Trying to read file with OpenBabel...\n".format(sformat), mtype=)
if as_string:
try:
mymol = pybel.readstring(sformat, path)
except IOError:
sysexit(4, 'Input molecule string could not be read.')  # message assumed
else:
read_file = pybel.readfile(format=sformat, filename=path, opt={"s": None})
try:
mymol = next(read_file)
except StopIteration:
sysexit(4, 'Input molecule file could not be read.')  # message assumed
write_message("Molecule successfully read.\n", mtype='info')  # mtype assumed
mymol.OBMol.PerceiveBondOrders()
return mymol, sformat
sysexit(4, 'No supported file format detected.') | Reads the given molecule file and returns the corresponding Pybel molecule as well as the input file type.
In contrast to the standard Pybel implementation, the file is closed properly. |
def _sync_content_metadata(self, serialized_data, http_method):
try:
status_code, response_body = getattr(self, '_' + http_method)(
urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.course_api_path),
serialized_data,
self.CONTENT_PROVIDER_SCOPE
)
except requests.exceptions.RequestException as exc:
raise ClientError(
'DegreedAPIClient request failed: {error} {message}'.format(
error=exc.__class__.__name__,
message=str(exc)
)
)
if status_code >= 400:
raise ClientError(
'DegreedAPIClient request failed with status {status_code}: {message}'.format(
status_code=status_code,
message=response_body
)
) | Synchronize content metadata using the Degreed course content API.
Args:
serialized_data: JSON-encoded object containing content metadata.
http_method: The HTTP method to use for the API request.
Raises:
ClientError: If Degreed API request fails. |
def delete_suffixes(self, word):
length = len(word)
# NOTE: the concrete suffix literals were lost in extraction; they are
# language-specific endings that may only be removed inside R2.
suffixes = []
for suffix in suffixes:
if word.endswith(suffix) and self.r2 <= (length - len(suffix)):
word = word[:-len(suffix)]
return word
# A further three-character suffix was removed only when the preceding
# character belonged to a given set; both literals were also lost.
return word | Delete some very common suffixes. |
def gen_checkbox_list(sig_dic):
# NOTE: the HTML fragments and dict keys in this function were lost in
# extraction; placeholder markup and key names are assumed below.
view_zuoxiang = '<div class="checkbox-list">'
dic_tmp = sig_dic['dic']
for key in dic_tmp.keys():
tmp_str = '<input type="checkbox" value="{0}" name="{1}">{2}'.format(key, sig_dic['en'], dic_tmp[key])
view_zuoxiang += tmp_str
view_zuoxiang += '</div>'
return view_zuoxiang | For generating list-view HTML for CHECKBOX fields,
one entry per item. |
def zeros(dur=None):
if dur is None or (isinf(dur) and dur > 0):
while True:
yield 0.0
for x in xrange(int(.5 + dur)):
yield 0.0 | Zeros/zeroes stream generator.
You may sum your endless stream by this to enforce an end to it.
Parameters
----------
dur :
Duration, in number of samples; endless if not given.
Returns
-------
Stream that repeats "0.0" during a given time duration (if any) or
endlessly. |
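Usage sketch (the generator uses `xrange`, so run under Python 2 or alias `xrange = range` first):

```python
print(list(zeros(4)))   # [0.0, 0.0, 0.0, 0.0]

# zeros() with no argument yields 0.0 forever; take a slice instead:
from itertools import islice
print(list(islice(zeros(), 3)))  # [0.0, 0.0, 0.0]
```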
def independentlinear60(display=False):
old_seed = np.random.seed()
np.random.seed(0)
N = 1000
M = 60
beta = np.zeros(M)
beta[0:30:3] = 1
f = lambda X: np.matmul(X, beta)
X_start = np.random.randn(N, M)
X = X_start - X_start.mean(0)
y = f(X) + np.random.randn(N) * 1e-2
np.random.seed(old_seed)
return pd.DataFrame(X), y | A simulated dataset with 60 independent features, 10 of which carry a linear signal. |
def find_indices(lst, element):
result = []
offset = -1
while True:
try:
offset = lst.index(element, offset+1)
except ValueError:
return result
result.append(offset) | Returns the indices for all occurrences of 'element' in 'lst'.
Args:
lst (list): List to search.
element: Element to find.
Returns:
list: List of indices. |
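Usage sketch:

```python
print(find_indices([1, 2, 1, 3, 1], 1))   # [0, 2, 4]
print(find_indices(["a", "b"], "z"))      # []
```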