code stringlengths 75–104k | docstring stringlengths 1–46.9k | text stringlengths 164–112k |
---|---|---|
def load(self, *relations):
"""
Load a set of relationships onto the collection.
"""
if len(self.items) > 0:
query = self.first().new_query().with_(*relations)
self._set_items(query.eager_load_relations(self.items))
return self | Load a set of relationships onto the collection. | Below is the instruction that describes the task:
### Input:
Load a set of relationships onto the collection.
### Response:
def load(self, *relations):
"""
Load a set of relationships onto the collection.
"""
if len(self.items) > 0:
query = self.first().new_query().with_(*relations)
self._set_items(query.eager_load_relations(self.items))
return self |
def merge_graphs(self, other_docgraph, verbose=False):
"""
Merges another document graph into the current one, thereby adding all
the necessary nodes and edges (with attributes, layers etc.).
NOTE: This will only work if both graphs have exactly the same
tokenization.
"""
# keep track of all merged/old root nodes in case we need to
# delete them or their attributes (e.g. 'metadata')
if hasattr(self, 'merged_rootnodes'):
self.merged_rootnodes.append(other_docgraph.root)
else:
self.merged_rootnodes = [other_docgraph.root]
# renaming the tokens of the other graph to match this one
rename_tokens(other_docgraph, self, verbose=verbose)
self.add_nodes_from(other_docgraph.nodes(data=True))
# copy token node attributes to the current namespace
for node_id, node_attrs in other_docgraph.nodes(data=True):
if istoken(other_docgraph, node_id) and \
self.ns+':token' not in self.node[node_id]:
self.node[node_id].update({self.ns+':token': other_docgraph.get_token(node_id)})
self.add_edges_from(other_docgraph.edges(data=True))
# workaround for issues #89 and #96
# copy the token node IDs / sentence node IDs from the other graph,
# if this graph doesn't have such lists, yet
if other_docgraph.name and not self.name:
self.name = other_docgraph.name
if other_docgraph.tokens and not self.tokens:
self.tokens = other_docgraph.tokens
if other_docgraph.sentences and not self.sentences:
self.sentences = other_docgraph.sentences
# there should be no dangling, unused root nodes in a merged graph
self.merge_rootnodes(other_docgraph) | Merges another document graph into the current one, thereby adding all
the necessary nodes and edges (with attributes, layers etc.).
NOTE: This will only work if both graphs have exactly the same
tokenization. | Below is the instruction that describes the task:
### Input:
Merges another document graph into the current one, thereby adding all
the necessary nodes and edges (with attributes, layers etc.).
NOTE: This will only work if both graphs have exactly the same
tokenization.
### Response:
def merge_graphs(self, other_docgraph, verbose=False):
"""
Merges another document graph into the current one, thereby adding all
the necessary nodes and edges (with attributes, layers etc.).
NOTE: This will only work if both graphs have exactly the same
tokenization.
"""
# keep track of all merged/old root nodes in case we need to
# delete them or their attributes (e.g. 'metadata')
if hasattr(self, 'merged_rootnodes'):
self.merged_rootnodes.append(other_docgraph.root)
else:
self.merged_rootnodes = [other_docgraph.root]
# renaming the tokens of the other graph to match this one
rename_tokens(other_docgraph, self, verbose=verbose)
self.add_nodes_from(other_docgraph.nodes(data=True))
# copy token node attributes to the current namespace
for node_id, node_attrs in other_docgraph.nodes(data=True):
if istoken(other_docgraph, node_id) and \
self.ns+':token' not in self.node[node_id]:
self.node[node_id].update({self.ns+':token': other_docgraph.get_token(node_id)})
self.add_edges_from(other_docgraph.edges(data=True))
# workaround for issues #89 and #96
# copy the token node IDs / sentence node IDs from the other graph,
# if this graph doesn't have such lists, yet
if other_docgraph.name and not self.name:
self.name = other_docgraph.name
if other_docgraph.tokens and not self.tokens:
self.tokens = other_docgraph.tokens
if other_docgraph.sentences and not self.sentences:
self.sentences = other_docgraph.sentences
# there should be no dangling, unused root nodes in a merged graph
self.merge_rootnodes(other_docgraph) |
def consecutive_ones_property(sets, universe=None):
""" Check the consecutive ones property.
:param list sets: is a list of subsets of the ground set.
:param groundset: is the set of all elements,
by default it is the union of the given sets
:returns: returns a list of the ordered ground set where
every given set is consecutive,
or None if there is no solution.
:complexity: O(len(groundset) * len(sets))
:disclaimer: an optimal implementation would have complexity
O(len(groundset) + len(sets) + sum(map(len,sets))),
and there are more recent easier algorithms for this problem.
"""
if universe is None:
universe = set()
for S in sets:
universe |= set(S)
tree = PQ_tree(universe)
try:
for S in sets:
tree.reduce(S)
return tree.border()
except IsNotC1P:
return None | Check the consecutive ones property.
:param list sets: is a list of subsets of the ground set.
:param groundset: is the set of all elements,
by default it is the union of the given sets
:returns: returns a list of the ordered ground set where
every given set is consecutive,
or None if there is no solution.
:complexity: O(len(groundset) * len(sets))
:disclaimer: an optimal implementation would have complexity
O(len(groundset) + len(sets) + sum(map(len,sets))),
and there are more recent easier algorithms for this problem. | Below is the instruction that describes the task:
### Input:
Check the consecutive ones property.
:param list sets: is a list of subsets of the ground set.
:param groundset: is the set of all elements,
by default it is the union of the given sets
:returns: returns a list of the ordered ground set where
every given set is consecutive,
or None if there is no solution.
:complexity: O(len(groundset) * len(sets))
:disclaimer: an optimal implementation would have complexity
O(len(groundset) + len(sets) + sum(map(len,sets))),
and there are more recent easier algorithms for this problem.
### Response:
def consecutive_ones_property(sets, universe=None):
""" Check the consecutive ones property.
:param list sets: is a list of subsets of the ground set.
:param groundset: is the set of all elements,
by default it is the union of the given sets
:returns: returns a list of the ordered ground set where
every given set is consecutive,
or None if there is no solution.
:complexity: O(len(groundset) * len(sets))
:disclaimer: an optimal implementation would have complexity
O(len(groundset) + len(sets) + sum(map(len,sets))),
and there are more recent easier algorithms for this problem.
"""
if universe is None:
universe = set()
for S in sets:
universe |= set(S)
tree = PQ_tree(universe)
try:
for S in sets:
tree.reduce(S)
return tree.border()
except IsNotC1P:
return None |
def mkvirtualenv():
"""
Create the virtualenv project environment
"""
root = '/'.join([deployment_root(),'env'])
path = '/'.join([root,env.project_fullname])
dirs_created = []
if env.verbosity:
print env.host,'CREATING VIRTUALENV', path
if not exists(root): dirs_created += mkdirs(root)
with cd(root):
run(' '.join(["virtualenv",env.project_fullname]))
with cd(path):
dirs_created += mkdirs('egg_cache')
sudo('chown -R %s:www-data egg_cache'% env.user)
sudo('chmod -R g+w egg_cache')
run(''.join(["echo 'cd ",path,'/','project','/',env.project_package_name,'/sitesettings',"' > bin/postactivate"]))
sudo('chmod ugo+rwx bin/postactivate')
#Create a state
out = State(' '.join([env.host,'virtualenv',path,'created']))
out.object = dirs_created + ['bin','lib','include']
out.failed = False
return out | Create the virtualenv project environment | Below is the instruction that describes the task:
### Input:
Create the virtualenv project environment
### Response:
def mkvirtualenv():
"""
Create the virtualenv project environment
"""
root = '/'.join([deployment_root(),'env'])
path = '/'.join([root,env.project_fullname])
dirs_created = []
if env.verbosity:
print env.host,'CREATING VIRTUALENV', path
if not exists(root): dirs_created += mkdirs(root)
with cd(root):
run(' '.join(["virtualenv",env.project_fullname]))
with cd(path):
dirs_created += mkdirs('egg_cache')
sudo('chown -R %s:www-data egg_cache'% env.user)
sudo('chmod -R g+w egg_cache')
run(''.join(["echo 'cd ",path,'/','project','/',env.project_package_name,'/sitesettings',"' > bin/postactivate"]))
sudo('chmod ugo+rwx bin/postactivate')
#Create a state
out = State(' '.join([env.host,'virtualenv',path,'created']))
out.object = dirs_created + ['bin','lib','include']
out.failed = False
return out |
def print_dictionary(self, d, h, n, nl=False):
"""Print complex using the specified indent (n) and newline (nl)."""
if d in h:
return "{}..."
h.append(d)
s = []
if nl:
s.append("\n")
s.append(self.indent(n))
s.append("{")
for item in d.items():
s.append("\n")
s.append(self.indent(n+1))
if isinstance(item[1], (list,tuple)):
s.append(tostr(item[0]))
s.append("[]")
else:
s.append(tostr(item[0]))
s.append(" = ")
s.append(self.process(item[1], h, n, True))
s.append("\n")
s.append(self.indent(n))
s.append("}")
h.pop()
return "".join(s) | Print complex using the specified indent (n) and newline (nl). | Below is the instruction that describes the task:
### Input:
Print complex using the specified indent (n) and newline (nl).
### Response:
def print_dictionary(self, d, h, n, nl=False):
"""Print complex using the specified indent (n) and newline (nl)."""
if d in h:
return "{}..."
h.append(d)
s = []
if nl:
s.append("\n")
s.append(self.indent(n))
s.append("{")
for item in d.items():
s.append("\n")
s.append(self.indent(n+1))
if isinstance(item[1], (list,tuple)):
s.append(tostr(item[0]))
s.append("[]")
else:
s.append(tostr(item[0]))
s.append(" = ")
s.append(self.process(item[1], h, n, True))
s.append("\n")
s.append(self.indent(n))
s.append("}")
h.pop()
return "".join(s) |
def save_predefined(self, predefined, client=None):
"""Save this ACL for the current bucket using a predefined ACL.
If :attr:`user_project` is set, bills the API request to that project.
:type predefined: str
:param predefined: An identifier for a predefined ACL. Must be one
of the keys in :attr:`PREDEFINED_JSON_ACLS`
or :attr:`PREDEFINED_XML_ACLS` (which will be
aliased to the corresponding JSON name).
If passed, `acl` must be None.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the ACL's parent.
"""
predefined = self.validate_predefined(predefined)
self._save(None, predefined, client) | Save this ACL for the current bucket using a predefined ACL.
If :attr:`user_project` is set, bills the API request to that project.
:type predefined: str
:param predefined: An identifier for a predefined ACL. Must be one
of the keys in :attr:`PREDEFINED_JSON_ACLS`
or :attr:`PREDEFINED_XML_ACLS` (which will be
aliased to the corresponding JSON name).
If passed, `acl` must be None.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the ACL's parent. | Below is the instruction that describes the task:
### Input:
Save this ACL for the current bucket using a predefined ACL.
If :attr:`user_project` is set, bills the API request to that project.
:type predefined: str
:param predefined: An identifier for a predefined ACL. Must be one
of the keys in :attr:`PREDEFINED_JSON_ACLS`
or :attr:`PREDEFINED_XML_ACLS` (which will be
aliased to the corresponding JSON name).
If passed, `acl` must be None.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the ACL's parent.
### Response:
def save_predefined(self, predefined, client=None):
"""Save this ACL for the current bucket using a predefined ACL.
If :attr:`user_project` is set, bills the API request to that project.
:type predefined: str
:param predefined: An identifier for a predefined ACL. Must be one
of the keys in :attr:`PREDEFINED_JSON_ACLS`
or :attr:`PREDEFINED_XML_ACLS` (which will be
aliased to the corresponding JSON name).
If passed, `acl` must be None.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the ACL's parent.
"""
predefined = self.validate_predefined(predefined)
self._save(None, predefined, client) |
def from_sample_rate(sample_rate, n_bands, always_even=False):
"""
Return a :class:`~zounds.spectral.LinearScale` instance whose upper
frequency bound is informed by the nyquist frequency of the sample rate.
Args:
sample_rate (SamplingRate): the sample rate whose nyquist frequency
will serve as the upper frequency bound of this scale
n_bands (int): the number of evenly-spaced frequency bands
"""
fb = FrequencyBand(0, sample_rate.nyquist)
return LinearScale(fb, n_bands, always_even=always_even) | Return a :class:`~zounds.spectral.LinearScale` instance whose upper
frequency bound is informed by the nyquist frequency of the sample rate.
Args:
sample_rate (SamplingRate): the sample rate whose nyquist frequency
will serve as the upper frequency bound of this scale
n_bands (int): the number of evenly-spaced frequency bands | Below is the instruction that describes the task:
### Input:
Return a :class:`~zounds.spectral.LinearScale` instance whose upper
frequency bound is informed by the nyquist frequency of the sample rate.
Args:
sample_rate (SamplingRate): the sample rate whose nyquist frequency
will serve as the upper frequency bound of this scale
n_bands (int): the number of evenly-spaced frequency bands
### Response:
def from_sample_rate(sample_rate, n_bands, always_even=False):
"""
Return a :class:`~zounds.spectral.LinearScale` instance whose upper
frequency bound is informed by the nyquist frequency of the sample rate.
Args:
sample_rate (SamplingRate): the sample rate whose nyquist frequency
will serve as the upper frequency bound of this scale
n_bands (int): the number of evenly-spaced frequency bands
"""
fb = FrequencyBand(0, sample_rate.nyquist)
return LinearScale(fb, n_bands, always_even=always_even) |
def serialize(self, queryset, **options):
"""
Serialize a queryset.
"""
self.options = options
self.stream = options.pop("stream", six.StringIO())
self.selected_fields = options.pop("fields", None)
self.use_natural_keys = options.pop("use_natural_keys", False)
if self.use_natural_keys and RemovedInDjango19Warning is not None:
warnings.warn("``use_natural_keys`` is deprecated; use ``use_natural_foreign_keys`` instead.",
RemovedInDjango19Warning)
self.use_natural_foreign_keys = options.pop('use_natural_foreign_keys', False) or self.use_natural_keys
self.use_natural_primary_keys = options.pop('use_natural_primary_keys', False)
self.start_serialization()
self.first = True
for obj in queryset:
self.start_object(obj)
# Use the concrete parent class' _meta instead of the object's _meta
# This is to avoid local_fields problems for proxy models. Refs #17717.
concrete_model = obj._meta.concrete_model
# only one change local_fields -> fields for supporting nested models
for field in concrete_model._meta.fields:
if field.serialize:
if field.remote_field is None:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_field(obj, field)
else:
if self.selected_fields is None or field.attname[:-3] in self.selected_fields:
self.handle_fk_field(obj, field)
for field in concrete_model._meta.many_to_many:
if field.serialize:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_m2m_field(obj, field)
self.end_object(obj)
if self.first:
self.first = False
self.end_serialization()
return self.getvalue() | Serialize a queryset. | Below is the instruction that describes the task:
### Input:
Serialize a queryset.
### Response:
def serialize(self, queryset, **options):
"""
Serialize a queryset.
"""
self.options = options
self.stream = options.pop("stream", six.StringIO())
self.selected_fields = options.pop("fields", None)
self.use_natural_keys = options.pop("use_natural_keys", False)
if self.use_natural_keys and RemovedInDjango19Warning is not None:
warnings.warn("``use_natural_keys`` is deprecated; use ``use_natural_foreign_keys`` instead.",
RemovedInDjango19Warning)
self.use_natural_foreign_keys = options.pop('use_natural_foreign_keys', False) or self.use_natural_keys
self.use_natural_primary_keys = options.pop('use_natural_primary_keys', False)
self.start_serialization()
self.first = True
for obj in queryset:
self.start_object(obj)
# Use the concrete parent class' _meta instead of the object's _meta
# This is to avoid local_fields problems for proxy models. Refs #17717.
concrete_model = obj._meta.concrete_model
# only one change local_fields -> fields for supporting nested models
for field in concrete_model._meta.fields:
if field.serialize:
if field.remote_field is None:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_field(obj, field)
else:
if self.selected_fields is None or field.attname[:-3] in self.selected_fields:
self.handle_fk_field(obj, field)
for field in concrete_model._meta.many_to_many:
if field.serialize:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_m2m_field(obj, field)
self.end_object(obj)
if self.first:
self.first = False
self.end_serialization()
return self.getvalue() |
def from_image(cls, image):
"""
Create a PrintableImage from a PIL Image
:param image: a PIL Image
:return:
"""
(w, h) = image.size
# Thermal paper is 512 pixels wide
if w > 512:
ratio = 512. / w
h = int(h * ratio)
image = image.resize((512, h), Image.ANTIALIAS)
if image.mode != '1':
image = image.convert('1')
pixels = np.array(list(image.getdata())).reshape(h, w)
# Add white pixels so that image fits into bytes
extra_rows = int(math.ceil(h / 24)) * 24 - h
extra_pixels = np.ones((extra_rows, w), dtype=bool)
pixels = np.vstack((pixels, extra_pixels))
h += extra_rows
nb_stripes = h / 24
pixels = pixels.reshape(nb_stripes, 24, w).swapaxes(1, 2).reshape(-1, 8)
nh = int(w / 256)
nl = w % 256
data = []
pixels = np.invert(np.packbits(pixels))
stripes = np.split(pixels, nb_stripes)
for stripe in stripes:
data.extend([
ESC,
42, # *
33, # double density mode
nl,
nh])
data.extend(stripe)
data.extend([
27, # ESC
74, # J
48])
# account for double density mode
height = h * 2
return cls(data, height) | Create a PrintableImage from a PIL Image
:param image: a PIL Image
:return: | Below is the instruction that describes the task:
### Input:
Create a PrintableImage from a PIL Image
:param image: a PIL Image
:return:
### Response:
def from_image(cls, image):
"""
Create a PrintableImage from a PIL Image
:param image: a PIL Image
:return:
"""
(w, h) = image.size
# Thermal paper is 512 pixels wide
if w > 512:
ratio = 512. / w
h = int(h * ratio)
image = image.resize((512, h), Image.ANTIALIAS)
if image.mode != '1':
image = image.convert('1')
pixels = np.array(list(image.getdata())).reshape(h, w)
# Add white pixels so that image fits into bytes
extra_rows = int(math.ceil(h / 24)) * 24 - h
extra_pixels = np.ones((extra_rows, w), dtype=bool)
pixels = np.vstack((pixels, extra_pixels))
h += extra_rows
nb_stripes = h / 24
pixels = pixels.reshape(nb_stripes, 24, w).swapaxes(1, 2).reshape(-1, 8)
nh = int(w / 256)
nl = w % 256
data = []
pixels = np.invert(np.packbits(pixels))
stripes = np.split(pixels, nb_stripes)
for stripe in stripes:
data.extend([
ESC,
42, # *
33, # double density mode
nl,
nh])
data.extend(stripe)
data.extend([
27, # ESC
74, # J
48])
# account for double density mode
height = h * 2
return cls(data, height) |
def _maybe_localize_point(ts, is_none, is_not_none, freq, tz):
"""
Localize a start or end Timestamp to the timezone of the corresponding
start or end Timestamp
Parameters
----------
ts : start or end Timestamp to potentially localize
is_none : argument that should be None
is_not_none : argument that should not be None
freq : Tick, DateOffset, or None
tz : str, timezone object or None
Returns
-------
ts : Timestamp
"""
# Make sure start and end are timezone localized if:
# 1) freq = a Timedelta-like frequency (Tick)
# 2) freq = None i.e. generating a linspaced range
if isinstance(freq, Tick) or freq is None:
localize_args = {'tz': tz, 'ambiguous': False}
else:
localize_args = {'tz': None}
if is_none is None and is_not_none is not None:
ts = ts.tz_localize(**localize_args)
return ts | Localize a start or end Timestamp to the timezone of the corresponding
start or end Timestamp
Parameters
----------
ts : start or end Timestamp to potentially localize
is_none : argument that should be None
is_not_none : argument that should not be None
freq : Tick, DateOffset, or None
tz : str, timezone object or None
Returns
-------
ts : Timestamp | Below is the instruction that describes the task:
### Input:
Localize a start or end Timestamp to the timezone of the corresponding
start or end Timestamp
Parameters
----------
ts : start or end Timestamp to potentially localize
is_none : argument that should be None
is_not_none : argument that should not be None
freq : Tick, DateOffset, or None
tz : str, timezone object or None
Returns
-------
ts : Timestamp
### Response:
def _maybe_localize_point(ts, is_none, is_not_none, freq, tz):
"""
Localize a start or end Timestamp to the timezone of the corresponding
start or end Timestamp
Parameters
----------
ts : start or end Timestamp to potentially localize
is_none : argument that should be None
is_not_none : argument that should not be None
freq : Tick, DateOffset, or None
tz : str, timezone object or None
Returns
-------
ts : Timestamp
"""
# Make sure start and end are timezone localized if:
# 1) freq = a Timedelta-like frequency (Tick)
# 2) freq = None i.e. generating a linspaced range
if isinstance(freq, Tick) or freq is None:
localize_args = {'tz': tz, 'ambiguous': False}
else:
localize_args = {'tz': None}
if is_none is None and is_not_none is not None:
ts = ts.tz_localize(**localize_args)
return ts |
def download(date_array, tag, sat_id, data_path, user=None, password=None):
"""Routine to download Kp index data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '1min' and '5min'.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
Returns
--------
Void : (NoneType)
data downloaded to disk, if available.
Notes
-----
Called by pysat. Not intended for direct use by user.
"""
import ftplib
from ftplib import FTP
import sys
ftp = FTP('ftp.gfz-potsdam.de') # connect to host, default port
ftp.login() # user anonymous, passwd anonymous@
ftp.cwd('/pub/home/obs/kp-ap/tab')
for date in date_array:
fname = 'kp{year:02d}{month:02d}.tab'
fname = fname.format(year=(date.year - date.year//100*100), month=date.month)
local_fname = fname
saved_fname = os.path.join(data_path,local_fname)
try:
print('Downloading file for '+date.strftime('%D'))
sys.stdout.flush()
ftp.retrbinary('RETR '+fname, open(saved_fname,'wb').write)
except ftplib.error_perm as exception:
# if exception[0][0:3] != '550':
if str(exception.args[0]).split(" ", 1)[0] != '550':
raise
else:
os.remove(saved_fname)
print('File not available for '+date.strftime('%D'))
ftp.close()
return | Routine to download Kp index data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '1min' and '5min'.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
Returns
--------
Void : (NoneType)
data downloaded to disk, if available.
Notes
-----
Called by pysat. Not intended for direct use by user. | Below is the instruction that describes the task:
### Input:
Routine to download Kp index data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '1min' and '5min'.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
Returns
--------
Void : (NoneType)
data downloaded to disk, if available.
Notes
-----
Called by pysat. Not intended for direct use by user.
### Response:
def download(date_array, tag, sat_id, data_path, user=None, password=None):
"""Routine to download Kp index data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '1min' and '5min'.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
Returns
--------
Void : (NoneType)
data downloaded to disk, if available.
Notes
-----
Called by pysat. Not intended for direct use by user.
"""
import ftplib
from ftplib import FTP
import sys
ftp = FTP('ftp.gfz-potsdam.de') # connect to host, default port
ftp.login() # user anonymous, passwd anonymous@
ftp.cwd('/pub/home/obs/kp-ap/tab')
for date in date_array:
fname = 'kp{year:02d}{month:02d}.tab'
fname = fname.format(year=(date.year - date.year//100*100), month=date.month)
local_fname = fname
saved_fname = os.path.join(data_path,local_fname)
try:
print('Downloading file for '+date.strftime('%D'))
sys.stdout.flush()
ftp.retrbinary('RETR '+fname, open(saved_fname,'wb').write)
except ftplib.error_perm as exception:
# if exception[0][0:3] != '550':
if str(exception.args[0]).split(" ", 1)[0] != '550':
raise
else:
os.remove(saved_fname)
print('File not available for '+date.strftime('%D'))
ftp.close()
return |
def on_set(self, key, value):
"""Callback called on successful set. Uses function from __init__."""
if self._on_set is not None:
self._on_set(key, value) | Callback called on successful set. Uses function from __init__. | Below is the instruction that describes the task:
### Input:
Callback called on successful set. Uses function from __init__.
### Response:
def on_set(self, key, value):
"""Callback called on successful set. Uses function from __init__."""
if self._on_set is not None:
self._on_set(key, value) |
def __send_handle_get_request(self, handle, indices=None):
'''
Send a HTTP GET request to the handle server to read either an entire
handle or to some specified values from a handle record, using the
requests module.
:param handle: The handle.
:param indices: Optional. A list of indices to delete. Defaults to
None (i.e. the entire handle is deleted.). The list can contain
integers or strings.
:return: The server's response.
'''
resp = self.__handlesystemconnector.send_handle_get_request(handle, indices)
return resp | Send a HTTP GET request to the handle server to read either an entire
handle or to some specified values from a handle record, using the
requests module.
:param handle: The handle.
:param indices: Optional. A list of indices to delete. Defaults to
None (i.e. the entire handle is deleted.). The list can contain
integers or strings.
:return: The server's response. | Below is the instruction that describes the task:
### Input:
Send a HTTP GET request to the handle server to read either an entire
handle or to some specified values from a handle record, using the
requests module.
:param handle: The handle.
:param indices: Optional. A list of indices to delete. Defaults to
None (i.e. the entire handle is deleted.). The list can contain
integers or strings.
:return: The server's response.
### Response:
def __send_handle_get_request(self, handle, indices=None):
'''
Send a HTTP GET request to the handle server to read either an entire
handle or to some specified values from a handle record, using the
requests module.
:param handle: The handle.
:param indices: Optional. A list of indices to delete. Defaults to
None (i.e. the entire handle is deleted.). The list can contain
integers or strings.
:return: The server's response.
'''
resp = self.__handlesystemconnector.send_handle_get_request(handle, indices)
return resp |
def b_spline_basis(x, edge_knots, n_splines=20, spline_order=3, sparse=True,
periodic=True, verbose=True):
"""
tool to generate b-spline basis using vectorized De Boor recursion
the basis functions extrapolate linearly past the end-knots.
Parameters
----------
x : array-like, with ndims == 1.
edge_knots : array-like containing locations of the 2 edge knots.
n_splines : int. number of splines to generate. must be >= spline_order+1
default: 20
spline_order : int. order of spline basis to create
default: 3
sparse : boolean. whether to return a sparse basis matrix or not.
default: True
verbose : bool, default: True
whether to print warnings
Returns
-------
basis : sparse csc matrix or array containing b-spline basis functions
with shape (len(x), n_splines)
"""
if np.ravel(x).ndim != 1:
raise ValueError('Data must be 1-D, but found {}'\
.format(np.ravel(x).ndim))
if (n_splines < 1) or not isinstance(n_splines, numbers.Integral):
raise ValueError('n_splines must be int >= 1')
if (spline_order < 0) or not isinstance(spline_order, numbers.Integral):
raise ValueError('spline_order must be int >= 1')
if n_splines < spline_order + 1:
raise ValueError('n_splines must be >= spline_order + 1. '\
'found: n_splines = {} and spline_order = {}'\
.format(n_splines, spline_order))
if n_splines == 0 and verbose:
warnings.warn('Requested 1 spline. This is equivalent to '\
'fitting an intercept', stacklevel=2)
n_splines += spline_order * periodic
# rescale edge_knots to [0,1], and generate boundary knots
edge_knots = np.sort(deepcopy(edge_knots))
offset = edge_knots[0]
scale = edge_knots[-1] - edge_knots[0]
if scale == 0:
scale = 1
boundary_knots = np.linspace(0, 1, 1 + n_splines - spline_order)
diff = np.diff(boundary_knots[:2])[0]
# rescale x as well
x = (np.ravel(deepcopy(x)) - offset) / scale
# wrap periodic values
if periodic:
x = x % (1 + 1e-9)
# append 0 and 1 in order to get derivatives for extrapolation
x = np.r_[x, 0., 1.]
# determine extrapolation indices
x_extrapolte_l = (x < 0)
x_extrapolte_r = (x > 1)
x_interpolate = ~(x_extrapolte_r + x_extrapolte_l)
# formatting
x = np.atleast_2d(x).T
n = len(x)
# augment knots
aug = np.arange(1, spline_order + 1) * diff
aug_knots = np.r_[-aug[::-1],
boundary_knots,
1 + aug]
aug_knots[-1] += 1e-9 # want last knot inclusive
# prepare Haar Basis
bases = (x >= aug_knots[:-1]).astype(np.int) * \
(x < aug_knots[1:]).astype(np.int)
bases[-1] = bases[-2][::-1] # force symmetric bases at 0 and 1
# do recursion from Hastie et al. vectorized
maxi = len(aug_knots) - 1
for m in range(2, spline_order + 2):
maxi -= 1
# left sub-basis
num = (x - aug_knots[:maxi])
num *= bases[:, :maxi]
denom = aug_knots[m-1 : maxi+m-1] - aug_knots[:maxi]
left = num/denom
# right sub-basis
num = (aug_knots[m : maxi+m] - x) * bases[:, 1:maxi+1]
denom = aug_knots[m:maxi+m] - aug_knots[1 : maxi+1]
right = num/denom
# track previous bases and update
prev_bases = bases[-2:]
bases = left + right
if periodic and spline_order > 0:
# make spline domain periodic
bases[:, :spline_order] = np.max([bases[:, :spline_order],
bases[:, -spline_order:]],
axis=0)
# remove extra splines used only for ensuring correct domain
bases = bases[:, :-spline_order]
# extrapolate
# since we have repeated end-knots, only the last 2 basis functions are
# non-zero at the end-knots, and they have equal and opposite gradient.
if (any(x_extrapolte_r) or any(x_extrapolte_l)) and spline_order>0:
bases[~x_interpolate] = 0.
denom = (aug_knots[spline_order:-1] - aug_knots[: -spline_order - 1])
left = prev_bases[:, :-1] / denom
denom = (aug_knots[spline_order+1:] - aug_knots[1: -spline_order])
right = prev_bases[:, 1:] / denom
grads = (spline_order) * (left - right)
if any(x_extrapolte_l):
val = grads[0] * x[x_extrapolte_l] + bases[-2]
bases[x_extrapolte_l] = val
if any(x_extrapolte_r):
val = grads[1] * (x[x_extrapolte_r] - 1) + bases[-1]
bases[x_extrapolte_r] = val
# get rid of the added values at 0, and 1
bases = bases[:-2]
if sparse:
return sp.sparse.csc_matrix(bases)
return bases | tool to generate b-spline basis using vectorized De Boor recursion
the basis functions extrapolate linearly past the end-knots.
Parameters
----------
x : array-like, with ndims == 1.
edge_knots : array-like containing locations of the 2 edge knots.
n_splines : int. number of splines to generate. must be >= spline_order+1
default: 20
spline_order : int. order of spline basis to create
default: 3
sparse : boolean. whether to return a sparse basis matrix or not.
default: True
verbose : bool, default: True
whether to print warnings
Returns
-------
basis : sparse csc matrix or array containing b-spline basis functions
with shape (len(x), n_splines) | Below is the instruction that describes the task:
### Input:
tool to generate b-spline basis using vectorized De Boor recursion
the basis functions extrapolate linearly past the end-knots.
Parameters
----------
x : array-like, with ndims == 1.
edge_knots : array-like containing locations of the 2 edge knots.
n_splines : int. number of splines to generate. must be >= spline_order+1
default: 20
spline_order : int. order of spline basis to create
default: 3
sparse : boolean. whether to return a sparse basis matrix or not.
default: True
verbose : bool, default: True
whether to print warnings
Returns
-------
basis : sparse csc matrix or array containing b-spline basis functions
with shape (len(x), n_splines)
### Response:
def b_spline_basis(x, edge_knots, n_splines=20, spline_order=3, sparse=True,
periodic=True, verbose=True):
"""
tool to generate b-spline basis using vectorized De Boor recursion
the basis functions extrapolate linearly past the end-knots.
Parameters
----------
x : array-like, with ndims == 1.
edge_knots : array-like containing locations of the 2 edge knots.
n_splines : int. number of splines to generate. must be >= spline_order+1
default: 20
spline_order : int. order of spline basis to create
default: 3
sparse : boolean. whether to return a sparse basis matrix or not.
default: True
verbose : bool, default: True
whether to print warnings
Returns
-------
basis : sparse csc matrix or array containing b-spline basis functions
with shape (len(x), n_splines)
"""
if np.ravel(x).ndim != 1:
raise ValueError('Data must be 1-D, but found {}'\
.format(np.ravel(x).ndim))
if (n_splines < 1) or not isinstance(n_splines, numbers.Integral):
raise ValueError('n_splines must be int >= 1')
if (spline_order < 0) or not isinstance(spline_order, numbers.Integral):
raise ValueError('spline_order must be int >= 1')
if n_splines < spline_order + 1:
raise ValueError('n_splines must be >= spline_order + 1. '\
'found: n_splines = {} and spline_order = {}'\
.format(n_splines, spline_order))
if n_splines == 0 and verbose:
warnings.warn('Requested 1 spline. This is equivalent to '\
'fitting an intercept', stacklevel=2)
n_splines += spline_order * periodic
# rescale edge_knots to [0,1], and generate boundary knots
edge_knots = np.sort(deepcopy(edge_knots))
offset = edge_knots[0]
scale = edge_knots[-1] - edge_knots[0]
if scale == 0:
scale = 1
boundary_knots = np.linspace(0, 1, 1 + n_splines - spline_order)
diff = np.diff(boundary_knots[:2])[0]
# rescale x as well
x = (np.ravel(deepcopy(x)) - offset) / scale
# wrap periodic values
if periodic:
x = x % (1 + 1e-9)
# append 0 and 1 in order to get derivatives for extrapolation
x = np.r_[x, 0., 1.]
# determine extrapolation indices
x_extrapolte_l = (x < 0)
x_extrapolte_r = (x > 1)
x_interpolate = ~(x_extrapolte_r + x_extrapolte_l)
# formatting
x = np.atleast_2d(x).T
n = len(x)
# augment knots
aug = np.arange(1, spline_order + 1) * diff
aug_knots = np.r_[-aug[::-1],
boundary_knots,
1 + aug]
aug_knots[-1] += 1e-9 # want last knot inclusive
# prepare Haar Basis
bases = (x >= aug_knots[:-1]).astype(np.int) * \
(x < aug_knots[1:]).astype(np.int)
bases[-1] = bases[-2][::-1] # force symmetric bases at 0 and 1
# do recursion from Hastie et al. vectorized
maxi = len(aug_knots) - 1
for m in range(2, spline_order + 2):
maxi -= 1
# left sub-basis
num = (x - aug_knots[:maxi])
num *= bases[:, :maxi]
denom = aug_knots[m-1 : maxi+m-1] - aug_knots[:maxi]
left = num/denom
# right sub-basis
num = (aug_knots[m : maxi+m] - x) * bases[:, 1:maxi+1]
denom = aug_knots[m:maxi+m] - aug_knots[1 : maxi+1]
right = num/denom
# track previous bases and update
prev_bases = bases[-2:]
bases = left + right
if periodic and spline_order > 0:
# make spline domain periodic
bases[:, :spline_order] = np.max([bases[:, :spline_order],
bases[:, -spline_order:]],
axis=0)
# remove extra splines used only for ensuring correct domain
bases = bases[:, :-spline_order]
# extrapolate
# since we have repeated end-knots, only the last 2 basis functions are
# non-zero at the end-knots, and they have equal and opposite gradient.
if (any(x_extrapolte_r) or any(x_extrapolte_l)) and spline_order>0:
bases[~x_interpolate] = 0.
denom = (aug_knots[spline_order:-1] - aug_knots[: -spline_order - 1])
left = prev_bases[:, :-1] / denom
denom = (aug_knots[spline_order+1:] - aug_knots[1: -spline_order])
right = prev_bases[:, 1:] / denom
grads = (spline_order) * (left - right)
if any(x_extrapolte_l):
val = grads[0] * x[x_extrapolte_l] + bases[-2]
bases[x_extrapolte_l] = val
if any(x_extrapolte_r):
val = grads[1] * (x[x_extrapolte_r] - 1) + bases[-1]
bases[x_extrapolte_r] = val
# get rid of the added values at 0, and 1
bases = bases[:-2]
if sparse:
return sp.sparse.csc_matrix(bases)
return bases |
def _next(self, possible_solution):
"""Where the magic happens. Produces a generator that returns all solutions given
a base solution to start searching.
"""
# bail out if we have seen it already. See __iter__ to where seen is initially set.
# A complete solution has all its variables set to a particular value.
is_complete = (len(possible_solution) == len(self._vars))
if is_complete:
self._solutions_seen += 1
if self.satisfies_constraints(possible_solution):
yield dict(possible_solution)
else:
if self.is_feasible(possible_solution):
for s in self.derived_solutions(possible_solution):
for solution in self._next(s):
yield solution | Where the magic happens. Produces a generator that returns all solutions given
a base solution to start searching. | Below is the instruction that describes the task:
### Input:
Where the magic happens. Produces a generator that returns all solutions given
a base solution to start searching.
### Response:
def _next(self, possible_solution):
"""Where the magic happens. Produces a generator that returns all solutions given
a base solution to start searching.
"""
# bail out if we have seen it already. See __iter__ to where seen is initially set.
# A complete solution has all its variables set to a particular value.
is_complete = (len(possible_solution) == len(self._vars))
if is_complete:
self._solutions_seen += 1
if self.satisfies_constraints(possible_solution):
yield dict(possible_solution)
else:
if self.is_feasible(possible_solution):
for s in self.derived_solutions(possible_solution):
for solution in self._next(s):
yield solution |
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the
content type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.5
"""
def on_update(d):
self.headers["Content-Type"] = dump_options_header(self.mimetype, d)
d = parse_options_header(self.headers.get("content-type", ""))[1]
return CallbackDict(d, on_update) | The mimetype parameters as dict. For example if the
content type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.5 | Below is the instruction that describes the task:
### Input:
The mimetype parameters as dict. For example if the
content type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.5
### Response:
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the
content type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.5
"""
def on_update(d):
self.headers["Content-Type"] = dump_options_header(self.mimetype, d)
d = parse_options_header(self.headers.get("content-type", ""))[1]
return CallbackDict(d, on_update) |
def set_is_polling(polling, host=None, core_name=None):
'''
SLAVE CALL
Prevent the slaves from polling the master for updates.
polling : boolean
True will enable polling. False will disable it.
host : str (None)
The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.set_is_polling False
'''
ret = _get_return_dict()
# since only slaves can call this let's check the config:
if _is_master() and _get_none_or_value(host) is None:
err = ['solr.set_is_polling can only be called by "slave" minions']
return ret.update({'success': False, 'errors': err})
cmd = "enablepoll" if polling else "disapblepoll"
if _get_none_or_value(core_name) is None and _check_for_cores():
success = True
for name in __opts__['solr.cores']:
resp = set_is_polling(cmd, host=host, core_name=name)
if not resp['success']:
success = False
data = {name: {'data': resp['data']}}
ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
return ret
else:
resp = _replication_request(cmd, host=host, core_name=core_name)
return resp | SLAVE CALL
Prevent the slaves from polling the master for updates.
polling : boolean
True will enable polling. False will disable it.
host : str (None)
The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.set_is_polling False | Below is the instruction that describes the task:
### Input:
SLAVE CALL
Prevent the slaves from polling the master for updates.
polling : boolean
True will enable polling. False will disable it.
host : str (None)
The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.set_is_polling False
### Response:
def set_is_polling(polling, host=None, core_name=None):
'''
SLAVE CALL
Prevent the slaves from polling the master for updates.
polling : boolean
True will enable polling. False will disable it.
host : str (None)
The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.set_is_polling False
'''
ret = _get_return_dict()
# since only slaves can call this let's check the config:
if _is_master() and _get_none_or_value(host) is None:
err = ['solr.set_is_polling can only be called by "slave" minions']
return ret.update({'success': False, 'errors': err})
cmd = "enablepoll" if polling else "disapblepoll"
if _get_none_or_value(core_name) is None and _check_for_cores():
success = True
for name in __opts__['solr.cores']:
resp = set_is_polling(cmd, host=host, core_name=name)
if not resp['success']:
success = False
data = {name: {'data': resp['data']}}
ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
return ret
else:
resp = _replication_request(cmd, host=host, core_name=core_name)
return resp |
def getparent(self, profile):
"""Resolve a parent ID"""
assert self.parent
for inputtemplate in profile.input:
if inputtemplate == self.parent:
return inputtemplate
raise Exception("Parent InputTemplate '"+self.parent+"' not found!") | Resolve a parent ID | Below is the instruction that describes the task:
### Input:
Resolve a parent ID
### Response:
def getparent(self, profile):
"""Resolve a parent ID"""
assert self.parent
for inputtemplate in profile.input:
if inputtemplate == self.parent:
return inputtemplate
raise Exception("Parent InputTemplate '"+self.parent+"' not found!") |
def to_call_agraph(self):
""" Build a PyGraphviz AGraph object corresponding to a call graph of
functions. """
A = nx.nx_agraph.to_agraph(self.call_graph)
A.graph_attr.update({"dpi": 227, "fontsize": 20, "fontname": "Menlo"})
A.node_attr.update(
{"shape": "rectangle", "color": "#650021", "style": "rounded"}
)
A.edge_attr.update({"color": "#650021", "arrowsize": 0.5})
return A | Build a PyGraphviz AGraph object corresponding to a call graph of
functions. | Below is the instruction that describes the task:
### Input:
Build a PyGraphviz AGraph object corresponding to a call graph of
functions.
### Response:
def to_call_agraph(self):
""" Build a PyGraphviz AGraph object corresponding to a call graph of
functions. """
A = nx.nx_agraph.to_agraph(self.call_graph)
A.graph_attr.update({"dpi": 227, "fontsize": 20, "fontname": "Menlo"})
A.node_attr.update(
{"shape": "rectangle", "color": "#650021", "style": "rounded"}
)
A.edge_attr.update({"color": "#650021", "arrowsize": 0.5})
return A |
def aesCCM(key, key_handle, nonce, data, decrypt=False):
"""
Function implementing YubiHSM AEAD encrypt/decrypt in software.
"""
if decrypt:
(data, saved_mac) = _split_data(data, len(data) - pyhsm.defines.YSM_AEAD_MAC_SIZE)
nonce = pyhsm.util.input_validate_nonce(nonce, pad = True)
mac = _cbc_mac(key, key_handle, nonce, len(data))
counter = _ctr_counter(key_handle, nonce, value = 0)
ctr_aes = AES.new(key, AES.MODE_CTR, counter = counter.next)
out = []
while data:
(thisblock, data) = _split_data(data, pyhsm.defines.YSM_BLOCK_SIZE)
# encrypt/decrypt and CBC MAC
if decrypt:
aes_out = ctr_aes.decrypt(thisblock)
mac.update(aes_out)
else:
mac.update(thisblock)
aes_out = ctr_aes.encrypt(thisblock)
out.append(aes_out)
# Finalize MAC
counter.value = 0
mac.finalize(counter.pack())
if decrypt:
if mac.get() != saved_mac:
raise pyhsm.exception.YHSM_Error('AEAD integrity check failed')
else:
out.append(mac.get())
return ''.join(out) | Function implementing YubiHSM AEAD encrypt/decrypt in software. | Below is the instruction that describes the task:
### Input:
Function implementing YubiHSM AEAD encrypt/decrypt in software.
### Response:
def aesCCM(key, key_handle, nonce, data, decrypt=False):
"""
Function implementing YubiHSM AEAD encrypt/decrypt in software.
"""
if decrypt:
(data, saved_mac) = _split_data(data, len(data) - pyhsm.defines.YSM_AEAD_MAC_SIZE)
nonce = pyhsm.util.input_validate_nonce(nonce, pad = True)
mac = _cbc_mac(key, key_handle, nonce, len(data))
counter = _ctr_counter(key_handle, nonce, value = 0)
ctr_aes = AES.new(key, AES.MODE_CTR, counter = counter.next)
out = []
while data:
(thisblock, data) = _split_data(data, pyhsm.defines.YSM_BLOCK_SIZE)
# encrypt/decrypt and CBC MAC
if decrypt:
aes_out = ctr_aes.decrypt(thisblock)
mac.update(aes_out)
else:
mac.update(thisblock)
aes_out = ctr_aes.encrypt(thisblock)
out.append(aes_out)
# Finalize MAC
counter.value = 0
mac.finalize(counter.pack())
if decrypt:
if mac.get() != saved_mac:
raise pyhsm.exception.YHSM_Error('AEAD integrity check failed')
else:
out.append(mac.get())
return ''.join(out) |
def restore_course(self, courseid, backup):
""" Restores a course of given courseid to a date specified in backup (format : YYYYMMDD.HHMMSS) """
self.wipe_course(courseid)
filepath = os.path.join(self.backup_dir, courseid, backup + ".zip")
with zipfile.ZipFile(filepath, "r") as zipf:
aggregations = bson.json_util.loads(zipf.read("aggregations.json").decode("utf-8"))
if len(aggregations) > 0:
self.database.aggregations.insert(aggregations)
user_tasks = bson.json_util.loads(zipf.read("user_tasks.json").decode("utf-8"))
if len(user_tasks) > 0:
self.database.user_tasks.insert(user_tasks)
submissions = bson.json_util.loads(zipf.read("submissions.json").decode("utf-8"))
for submission in submissions:
for key in ["input", "archive"]:
if key in submission and type(submission[key]) == bson.objectid.ObjectId:
submission[key] = self.submission_manager.get_gridfs().put(zipf.read(key + "/" + str(submission[key]) + ".data"))
if len(submissions) > 0:
self.database.submissions.insert(submissions)
self._logger.info("Course %s restored from backup directory.", courseid) | Restores a course of given courseid to a date specified in backup (format : YYYYMMDD.HHMMSS) | Below is the instruction that describes the task:
### Input:
Restores a course of given courseid to a date specified in backup (format : YYYYMMDD.HHMMSS)
### Response:
def restore_course(self, courseid, backup):
""" Restores a course of given courseid to a date specified in backup (format : YYYYMMDD.HHMMSS) """
self.wipe_course(courseid)
filepath = os.path.join(self.backup_dir, courseid, backup + ".zip")
with zipfile.ZipFile(filepath, "r") as zipf:
aggregations = bson.json_util.loads(zipf.read("aggregations.json").decode("utf-8"))
if len(aggregations) > 0:
self.database.aggregations.insert(aggregations)
user_tasks = bson.json_util.loads(zipf.read("user_tasks.json").decode("utf-8"))
if len(user_tasks) > 0:
self.database.user_tasks.insert(user_tasks)
submissions = bson.json_util.loads(zipf.read("submissions.json").decode("utf-8"))
for submission in submissions:
for key in ["input", "archive"]:
if key in submission and type(submission[key]) == bson.objectid.ObjectId:
submission[key] = self.submission_manager.get_gridfs().put(zipf.read(key + "/" + str(submission[key]) + ".data"))
if len(submissions) > 0:
self.database.submissions.insert(submissions)
self._logger.info("Course %s restored from backup directory.", courseid) |
def _new_percolator(spec, search_pattern):
"""Create new percolator associated with the new set."""
if spec and search_pattern:
query = query_string_parser(search_pattern=search_pattern).to_dict()
for index in current_search.mappings.keys():
# Create the percolator doc_type in the existing index for >= ES5
# TODO: Consider doing this only once in app initialization
percolator_doc_type = _get_percolator_doc_type(index)
_create_percolator_mapping(index, percolator_doc_type)
current_search_client.index(
index=index, doc_type=percolator_doc_type,
id='oaiset-{}'.format(spec),
body={'query': query}
) | Create new percolator associated with the new set. | Below is the instruction that describes the task:
### Input:
Create new percolator associated with the new set.
### Response:
def _new_percolator(spec, search_pattern):
"""Create new percolator associated with the new set."""
if spec and search_pattern:
query = query_string_parser(search_pattern=search_pattern).to_dict()
for index in current_search.mappings.keys():
# Create the percolator doc_type in the existing index for >= ES5
# TODO: Consider doing this only once in app initialization
percolator_doc_type = _get_percolator_doc_type(index)
_create_percolator_mapping(index, percolator_doc_type)
current_search_client.index(
index=index, doc_type=percolator_doc_type,
id='oaiset-{}'.format(spec),
body={'query': query}
) |
def ListHunts(context=None):
"""List all GRR hunts."""
items = context.SendIteratorRequest("ListHunts", hunt_pb2.ApiListHuntsArgs())
return utils.MapItemsIterator(lambda data: Hunt(data=data, context=context),
items) | List all GRR hunts. | Below is the instruction that describes the task:
### Input:
List all GRR hunts.
### Response:
def ListHunts(context=None):
"""List all GRR hunts."""
items = context.SendIteratorRequest("ListHunts", hunt_pb2.ApiListHuntsArgs())
return utils.MapItemsIterator(lambda data: Hunt(data=data, context=context),
items) |
def depth_renderbuffer(self, size, *, samples=0) -> 'Renderbuffer':
'''
:py:class:`Renderbuffer` objects are OpenGL objects that contain images.
They are created and used specifically with :py:class:`Framebuffer` objects.
Args:
size (tuple): The width and height of the renderbuffer.
Keyword Args:
samples (int): The number of samples. Value 0 means no multisample format.
Returns:
:py:class:`Renderbuffer` object
'''
res = Renderbuffer.__new__(Renderbuffer)
res.mglo, res._glo = self.mglo.depth_renderbuffer(size, samples)
res._size = size
res._components = 1
res._samples = samples
res._dtype = 'f4'
res._depth = True
res.ctx = self
res.extra = None
return res | :py:class:`Renderbuffer` objects are OpenGL objects that contain images.
They are created and used specifically with :py:class:`Framebuffer` objects.
Args:
size (tuple): The width and height of the renderbuffer.
Keyword Args:
samples (int): The number of samples. Value 0 means no multisample format.
Returns:
:py:class:`Renderbuffer` object | Below is the instruction that describes the task:
### Input:
:py:class:`Renderbuffer` objects are OpenGL objects that contain images.
They are created and used specifically with :py:class:`Framebuffer` objects.
Args:
size (tuple): The width and height of the renderbuffer.
Keyword Args:
samples (int): The number of samples. Value 0 means no multisample format.
Returns:
:py:class:`Renderbuffer` object
### Response:
def depth_renderbuffer(self, size, *, samples=0) -> 'Renderbuffer':
'''
:py:class:`Renderbuffer` objects are OpenGL objects that contain images.
They are created and used specifically with :py:class:`Framebuffer` objects.
Args:
size (tuple): The width and height of the renderbuffer.
Keyword Args:
samples (int): The number of samples. Value 0 means no multisample format.
Returns:
:py:class:`Renderbuffer` object
'''
res = Renderbuffer.__new__(Renderbuffer)
res.mglo, res._glo = self.mglo.depth_renderbuffer(size, samples)
res._size = size
res._components = 1
res._samples = samples
res._dtype = 'f4'
res._depth = True
res.ctx = self
res.extra = None
return res |
def download(self, itemID, savePath):
"""
downloads an item to local disk
Inputs:
itemID - unique id of item to download
savePath - folder to save the file in
"""
if os.path.isdir(savePath) == False:
os.makedirs(savePath)
url = self._url + "/%s/download" % itemID
params = {
}
if len(params.keys()):
url = url + "?%s" % urlencode(params)
return self._get(url=url,
param_dict=params,
out_folder=savePath,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | downloads an item to local disk
Inputs:
itemID - unique id of item to download
savePath - folder to save the file in | Below is the instruction that describes the task:
### Input:
downloads an item to local disk
Inputs:
itemID - unique id of item to download
savePath - folder to save the file in
### Response:
def download(self, itemID, savePath):
"""
downloads an item to local disk
Inputs:
itemID - unique id of item to download
savePath - folder to save the file in
"""
if os.path.isdir(savePath) == False:
os.makedirs(savePath)
url = self._url + "/%s/download" % itemID
params = {
}
if len(params.keys()):
url = url + "?%s" % urlencode(params)
return self._get(url=url,
param_dict=params,
out_folder=savePath,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) |
def _get_vnet(self, adapter_number):
"""
Return the vnet will use in ubridge
"""
vnet = "ethernet{}.vnet".format(adapter_number)
if vnet not in self._vmx_pairs:
raise VMwareError("vnet {} not in VMX file".format(vnet))
return vnet | Return the vnet will use in ubridge | Below is the instruction that describes the task:
### Input:
Return the vnet will use in ubridge
### Response:
def _get_vnet(self, adapter_number):
"""
Return the vnet will use in ubridge
"""
vnet = "ethernet{}.vnet".format(adapter_number)
if vnet not in self._vmx_pairs:
raise VMwareError("vnet {} not in VMX file".format(vnet))
return vnet |
def assert_boolean_false(expr, msg_fmt="{msg}"):
"""Fail the test unless the expression is the constant False.
>>> assert_boolean_false(False)
>>> assert_boolean_false(0)
Traceback (most recent call last):
...
AssertionError: 0 is not False
The following msg_fmt arguments are supported:
* msg - the default error message
* expr - tested expression
"""
if expr is not False:
msg = "{!r} is not False".format(expr)
fail(msg_fmt.format(msg=msg, expr=expr)) | Fail the test unless the expression is the constant False.
>>> assert_boolean_false(False)
>>> assert_boolean_false(0)
Traceback (most recent call last):
...
AssertionError: 0 is not False
The following msg_fmt arguments are supported:
* msg - the default error message
* expr - tested expression | Below is the the instruction that describes the task:
### Input:
Fail the test unless the expression is the constant False.
>>> assert_boolean_false(False)
>>> assert_boolean_false(0)
Traceback (most recent call last):
...
AssertionError: 0 is not False
The following msg_fmt arguments are supported:
* msg - the default error message
* expr - tested expression
### Response:
def assert_boolean_false(expr, msg_fmt="{msg}"):
"""Fail the test unless the expression is the constant False.
>>> assert_boolean_false(False)
>>> assert_boolean_false(0)
Traceback (most recent call last):
...
AssertionError: 0 is not False
The following msg_fmt arguments are supported:
* msg - the default error message
* expr - tested expression
"""
if expr is not False:
msg = "{!r} is not False".format(expr)
fail(msg_fmt.format(msg=msg, expr=expr)) |
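A quick illustration of the msg_fmt hook, assuming fail() raises AssertionError as it does in the asserts-style module this function belongs to:

try:
    assert_boolean_false(0, msg_fmt="expected the constant False, got {expr!r}")
except AssertionError as error:
    print(error)                       # expected the constant False, got 0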
def template(page=None, layout=None, **kwargs):
"""
Decorator to change the view template and layout.
It works on both View class and view methods
on class
only $layout is applied, everything else will be passed to the kwargs
Using as first argument, it will be the layout.
:first arg or $layout: The layout to use for that view
:param layout: The layout to use for that view
:param kwargs:
        gets passed to the TEMPLATE_CONTEXT
** on method that return a dict
page or layout are optional
:param page: The html page
:param layout: The layout to use for that view
:param kwargs:
        gets passed to the view as k/V
** on other methods that return other type, it doesn't apply
:return:
"""
pkey = "_template_extends__"
def decorator(f):
if inspect.isclass(f):
layout_ = layout or page
extends = kwargs.pop("extends", None)
if extends and hasattr(extends, pkey):
items = getattr(extends, pkey).items()
if "layout" in items:
layout_ = items.pop("layout")
for k, v in items:
kwargs.setdefault(k, v)
if not layout_:
layout_ = "layout.html"
kwargs.setdefault("brand_name", "")
kwargs["layout"] = layout_
setattr(f, pkey, kwargs)
setattr(f, "base_layout", kwargs.get("layout"))
f.g(TEMPLATE_CONTEXT=kwargs)
return f
else:
@functools.wraps(f)
def wrap(*args2, **kwargs2):
response = f(*args2, **kwargs2)
if isinstance(response, dict) or response is None:
response = response or {}
if page:
response.setdefault("template_", page)
if layout:
response.setdefault("layout_", layout)
for k, v in kwargs.items():
response.setdefault(k, v)
return response
return wrap
return decorator | Decorator to change the view template and layout.
It works on both View class and view methods
on class
only $layout is applied, everything else will be passed to the kwargs
Using as first argument, it will be the layout.
:first arg or $layout: The layout to use for that view
:param layout: The layout to use for that view
:param kwargs:
        gets passed to the TEMPLATE_CONTEXT
** on method that return a dict
page or layout are optional
:param page: The html page
:param layout: The layout to use for that view
:param kwargs:
        gets passed to the view as k/V
** on other methods that return other type, it doesn't apply
:return: | Below is the the instruction that describes the task:
### Input:
Decorator to change the view template and layout.
It works on both View class and view methods
on class
only $layout is applied, everything else will be passed to the kwargs
Using as first argument, it will be the layout.
:first arg or $layout: The layout to use for that view
:param layout: The layout to use for that view
:param kwargs:
        gets passed to the TEMPLATE_CONTEXT
** on method that return a dict
page or layout are optional
:param page: The html page
:param layout: The layout to use for that view
:param kwargs:
        gets passed to the view as k/V
** on other methods that return other type, it doesn't apply
:return:
### Response:
def template(page=None, layout=None, **kwargs):
"""
Decorator to change the view template and layout.
It works on both View class and view methods
on class
only $layout is applied, everything else will be passed to the kwargs
Using as first argument, it will be the layout.
:first arg or $layout: The layout to use for that view
:param layout: The layout to use for that view
:param kwargs:
        gets passed to the TEMPLATE_CONTEXT
** on method that return a dict
page or layout are optional
:param page: The html page
:param layout: The layout to use for that view
:param kwargs:
        gets passed to the view as k/V
** on other methods that return other type, it doesn't apply
:return:
"""
pkey = "_template_extends__"
def decorator(f):
if inspect.isclass(f):
layout_ = layout or page
extends = kwargs.pop("extends", None)
if extends and hasattr(extends, pkey):
items = getattr(extends, pkey).items()
if "layout" in items:
layout_ = items.pop("layout")
for k, v in items:
kwargs.setdefault(k, v)
if not layout_:
layout_ = "layout.html"
kwargs.setdefault("brand_name", "")
kwargs["layout"] = layout_
setattr(f, pkey, kwargs)
setattr(f, "base_layout", kwargs.get("layout"))
f.g(TEMPLATE_CONTEXT=kwargs)
return f
else:
@functools.wraps(f)
def wrap(*args2, **kwargs2):
response = f(*args2, **kwargs2)
if isinstance(response, dict) or response is None:
response = response or {}
if page:
response.setdefault("template_", page)
if layout:
response.setdefault("layout_", layout)
for k, v in kwargs.items():
response.setdefault(k, v)
return response
return wrap
return decorator |
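A hedged sketch of applying the decorator above; the View base class and its g() hook are stand-ins for the original framework, which is not shown here:

import functools, inspect              # required by the decorator above

class View(object):                    # stand-in: the real framework supplies this base class
    @classmethod
    def g(cls, **kwargs):              # the class branch calls f.g(TEMPLATE_CONTEXT=...)
        cls.template_context = kwargs

@template(layout="layout.html", brand_name="Demo")
class ArticleView(View):
    @template(page="articles/show.html")
    def show(self, article_id):
        return {"article_id": article_id}

print(ArticleView.base_layout)         # layout.html
print(ArticleView().show(7))           # {'article_id': 7, 'template_': 'articles/show.html'}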
def make_urls_hyperlinks(text: str) -> str:
"""
Adds hyperlinks to text that appears to contain URLs.
See
- http://stackoverflow.com/questions/1071191
- ... except that double-replaces everything; e.g. try with
``text = "me@somewhere.com me@somewhere.com"``
- http://stackp.online.fr/?p=19
"""
find_url = r'''
(?x)( # verbose identify URLs within text
(http|ftp|gopher) # make sure we find a resource type
:// # ...needs to be followed by colon-slash-slash
(\w+[:.]?){2,} # at least two domain groups, e.g. (gnosis.)(cx)
(/?| # could be just the domain name (maybe w/ slash)
[^ \n\r"]+ # or stuff then space, newline, tab, quote
[\w/]) # resource name ends in alphanumeric or slash
(?=[\s\.,>)'"\]]) # assert: followed by white or clause ending
) # end of match group
'''
replace_url = r'<a href="\1">\1</a>'
find_email = re.compile(r'([.\w\-]+@(\w[\w\-]+\.)+[\w\-]+)')
# '.' doesn't need escaping inside square brackets
# https://stackoverflow.com/questions/10397968/escape-dot-in-a-regex-range
replace_email = r'<a href="mailto:\1">\1</a>'
text = re.sub(find_url, replace_url, text)
text = re.sub(find_email, replace_email, text)
return text | Adds hyperlinks to text that appears to contain URLs.
See
- http://stackoverflow.com/questions/1071191
- ... except that double-replaces everything; e.g. try with
``text = "me@somewhere.com me@somewhere.com"``
- http://stackp.online.fr/?p=19 | Below is the the instruction that describes the task:
### Input:
Adds hyperlinks to text that appears to contain URLs.
See
- http://stackoverflow.com/questions/1071191
- ... except that double-replaces everything; e.g. try with
``text = "me@somewhere.com me@somewhere.com"``
- http://stackp.online.fr/?p=19
### Response:
def make_urls_hyperlinks(text: str) -> str:
"""
Adds hyperlinks to text that appears to contain URLs.
See
- http://stackoverflow.com/questions/1071191
- ... except that double-replaces everything; e.g. try with
``text = "me@somewhere.com me@somewhere.com"``
- http://stackp.online.fr/?p=19
"""
find_url = r'''
(?x)( # verbose identify URLs within text
(http|ftp|gopher) # make sure we find a resource type
:// # ...needs to be followed by colon-slash-slash
(\w+[:.]?){2,} # at least two domain groups, e.g. (gnosis.)(cx)
(/?| # could be just the domain name (maybe w/ slash)
[^ \n\r"]+ # or stuff then space, newline, tab, quote
[\w/]) # resource name ends in alphanumeric or slash
(?=[\s\.,>)'"\]]) # assert: followed by white or clause ending
) # end of match group
'''
replace_url = r'<a href="\1">\1</a>'
find_email = re.compile(r'([.\w\-]+@(\w[\w\-]+\.)+[\w\-]+)')
# '.' doesn't need escaping inside square brackets
# https://stackoverflow.com/questions/10397968/escape-dot-in-a-regex-range
replace_email = r'<a href="mailto:\1">\1</a>'
text = re.sub(find_url, replace_url, text)
text = re.sub(find_email, replace_email, text)
return text |
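A quick usage sketch (the function relies on the re module being imported in its module):

import re                              # needed by the function above

text = "Docs live at http://example.org/guide and you can mail admin@example.org with questions."
print(make_urls_hyperlinks(text))      # both the URL and the e-mail address come back wrapped in <a href=...> anchors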
def update_edge_todo(self, elev_fn, dem_proc):
"""
Can figure out how to update the todo based on the elev filename
"""
for key in self.edges[elev_fn].keys():
self.edges[elev_fn][key].set_data('todo', data=dem_proc.edge_todo) | Can figure out how to update the todo based on the elev filename | Below is the the instruction that describes the task:
### Input:
Can figure out how to update the todo based on the elev filename
### Response:
def update_edge_todo(self, elev_fn, dem_proc):
"""
Can figure out how to update the todo based on the elev filename
"""
for key in self.edges[elev_fn].keys():
self.edges[elev_fn][key].set_data('todo', data=dem_proc.edge_todo) |
def _get_sector(self, channel, nlines, ncols):
"""Determine which sector was scanned"""
if self._is_vis(channel):
margin = 100
sectors_ref = self.vis_sectors
else:
margin = 50
sectors_ref = self.ir_sectors
for (nlines_ref, ncols_ref), sector in sectors_ref.items():
if np.fabs(ncols - ncols_ref) < margin and \
np.fabs(nlines - nlines_ref) < margin:
return sector
return UNKNOWN_SECTOR | Determine which sector was scanned | Below is the the instruction that describes the task:
### Input:
Determine which sector was scanned
### Response:
def _get_sector(self, channel, nlines, ncols):
"""Determine which sector was scanned"""
if self._is_vis(channel):
margin = 100
sectors_ref = self.vis_sectors
else:
margin = 50
sectors_ref = self.ir_sectors
for (nlines_ref, ncols_ref), sector in sectors_ref.items():
if np.fabs(ncols - ncols_ref) < margin and \
np.fabs(nlines - nlines_ref) < margin:
return sector
return UNKNOWN_SECTOR |
def bootstrap_files(self):
""" we need some files to bootstrap the FreeBSD installation.
Some...
- need to be provided by the user (i.e. authorized_keys)
- others have some (sensible) defaults (i.e. rc.conf)
- some can be downloaded via URL (i.e.) http://pkg.freebsd.org/freebsd:10:x86:64/latest/Latest/pkg.txz
For those which can be downloaded we check the downloads directory. if the file exists there
(and if the checksum matches TODO!) we will upload it to the host. If not, we will fetch the file
from the given URL from the host.
For files that cannot be downloaded (authorized_keys, rc.conf etc.) we allow the user to provide their
own version in a ``bootstrap-files`` folder. The location of this folder can either be explicitly provided
via the ``bootstrap-files`` key in the host definition of the config file or it defaults to ``deployment/bootstrap-files``.
User provided files can be rendered as Jinja2 templates, by providing ``use_jinja: True`` in the YAML file.
They will be rendered with the instance configuration dictionary as context.
If the file is not found there, we revert to the default
files that are part of bsdploy. If the file cannot be found there either we either error out or for authorized_keys
we look in ``~/.ssh/identity.pub``.
"""
bootstrap_file_yamls = [
abspath(join(self.default_template_path, self.bootstrap_files_yaml)),
abspath(join(self.custom_template_path, self.bootstrap_files_yaml))]
bootstrap_files = dict()
if self.upload_authorized_keys:
bootstrap_files['authorized_keys'] = BootstrapFile(self, 'authorized_keys', **{
'directory': '/mnt/root/.ssh',
'directory_mode': '0600',
'remote': '/mnt/root/.ssh/authorized_keys',
'fallback': [
'~/.ssh/identity.pub',
'~/.ssh/id_rsa.pub',
'~/.ssh/id_dsa.pub',
'~/.ssh/id_ecdsa.pub']})
for bootstrap_file_yaml in bootstrap_file_yamls:
if not exists(bootstrap_file_yaml):
continue
with open(bootstrap_file_yaml) as f:
info = yaml.load(f, Loader=SafeLoader)
if info is None:
continue
for k, v in info.items():
bootstrap_files[k] = BootstrapFile(self, k, **v)
for bf in bootstrap_files.values():
if not exists(bf.local) and bf.raw_fallback:
if not bf.existing_fallback:
print("Found no public key in %s, you have to create '%s' manually" % (expanduser('~/.ssh'), bf.local))
sys.exit(1)
print("The '%s' file is missing." % bf.local)
for path in bf.existing_fallback:
yes = env.instance.config.get('bootstrap-yes', False)
if yes or yesno("Should we generate it using the key in '%s'?" % path):
if not exists(bf.expected_path):
os.mkdir(bf.expected_path)
with open(bf.local, 'wb') as out:
with open(path, 'rb') as f:
out.write(f.read())
break
else:
# answered no to all options
sys.exit(1)
if not bf.check():
print('Cannot find %s' % bf.local)
sys.exit(1)
packages_path = join(self.download_path, 'packages')
if exists(packages_path):
for dirpath, dirnames, filenames in os.walk(packages_path):
path = dirpath.split(packages_path)[1][1:]
for filename in filenames:
if not filename.endswith('.txz'):
continue
bootstrap_files[join(path, filename)] = BootstrapFile(
self, join(path, filename), **dict(
local=join(packages_path, join(path, filename)),
remote=join('/mnt/var/cache/pkg/All', filename),
encrypted=False))
if self.ssh_keys is not None:
for ssh_key_name, ssh_key_options in list(self.ssh_keys):
ssh_key = join(self.custom_template_path, ssh_key_name)
if exists(ssh_key):
pub_key_name = '%s.pub' % ssh_key_name
pub_key = '%s.pub' % ssh_key
if not exists(pub_key):
print("Public key '%s' for '%s' missing." % (pub_key, ssh_key))
sys.exit(1)
bootstrap_files[ssh_key_name] = BootstrapFile(
self, ssh_key_name, **dict(
local=ssh_key,
remote='/mnt/etc/ssh/%s' % ssh_key_name,
mode=0600))
bootstrap_files[pub_key_name] = BootstrapFile(
self, pub_key_name, **dict(
local=pub_key,
remote='/mnt/etc/ssh/%s' % pub_key_name,
mode=0644))
if hasattr(env.instance, 'get_vault_lib'):
vaultlib = env.instance.get_vault_lib()
for bf in bootstrap_files.values():
if bf.encrypted is None and exists(bf.local):
with open(bf.local) as f:
data = f.read()
bf.info['encrypted'] = vaultlib.is_encrypted(data)
return bootstrap_files | we need some files to bootstrap the FreeBSD installation.
Some...
- need to be provided by the user (i.e. authorized_keys)
- others have some (sensible) defaults (i.e. rc.conf)
- some can be downloaded via URL (i.e.) http://pkg.freebsd.org/freebsd:10:x86:64/latest/Latest/pkg.txz
For those which can be downloaded we check the downloads directory. if the file exists there
(and if the checksum matches TODO!) we will upload it to the host. If not, we will fetch the file
from the given URL from the host.
For files that cannot be downloaded (authorized_keys, rc.conf etc.) we allow the user to provide their
own version in a ``bootstrap-files`` folder. The location of this folder can either be explicitly provided
via the ``bootstrap-files`` key in the host definition of the config file or it defaults to ``deployment/bootstrap-files``.
User provided files can be rendered as Jinja2 templates, by providing ``use_jinja: True`` in the YAML file.
They will be rendered with the instance configuration dictionary as context.
If the file is not found there, we revert to the default
files that are part of bsdploy. If the file cannot be found there either we either error out or for authorized_keys
we look in ``~/.ssh/identity.pub``. | Below is the the instruction that describes the task:
### Input:
we need some files to bootstrap the FreeBSD installation.
Some...
- need to be provided by the user (i.e. authorized_keys)
- others have some (sensible) defaults (i.e. rc.conf)
- some can be downloaded via URL (i.e.) http://pkg.freebsd.org/freebsd:10:x86:64/latest/Latest/pkg.txz
For those which can be downloaded we check the downloads directory. if the file exists there
(and if the checksum matches TODO!) we will upload it to the host. If not, we will fetch the file
from the given URL from the host.
For files that cannot be downloaded (authorized_keys, rc.conf etc.) we allow the user to provide their
own version in a ``bootstrap-files`` folder. The location of this folder can either be explicitly provided
via the ``bootstrap-files`` key in the host definition of the config file or it defaults to ``deployment/bootstrap-files``.
User provided files can be rendered as Jinja2 templates, by providing ``use_jinja: True`` in the YAML file.
They will be rendered with the instance configuration dictionary as context.
If the file is not found there, we revert to the default
files that are part of bsdploy. If the file cannot be found there either we either error out or for authorized_keys
we look in ``~/.ssh/identity.pub``.
### Response:
def bootstrap_files(self):
""" we need some files to bootstrap the FreeBSD installation.
Some...
- need to be provided by the user (i.e. authorized_keys)
- others have some (sensible) defaults (i.e. rc.conf)
- some can be downloaded via URL (i.e.) http://pkg.freebsd.org/freebsd:10:x86:64/latest/Latest/pkg.txz
For those which can be downloaded we check the downloads directory. if the file exists there
(and if the checksum matches TODO!) we will upload it to the host. If not, we will fetch the file
from the given URL from the host.
For files that cannot be downloaded (authorized_keys, rc.conf etc.) we allow the user to provide their
own version in a ``bootstrap-files`` folder. The location of this folder can either be explicitly provided
via the ``bootstrap-files`` key in the host definition of the config file or it defaults to ``deployment/bootstrap-files``.
User provided files can be rendered as Jinja2 templates, by providing ``use_jinja: True`` in the YAML file.
They will be rendered with the instance configuration dictionary as context.
If the file is not found there, we revert to the default
files that are part of bsdploy. If the file cannot be found there either we either error out or for authorized_keys
we look in ``~/.ssh/identity.pub``.
"""
bootstrap_file_yamls = [
abspath(join(self.default_template_path, self.bootstrap_files_yaml)),
abspath(join(self.custom_template_path, self.bootstrap_files_yaml))]
bootstrap_files = dict()
if self.upload_authorized_keys:
bootstrap_files['authorized_keys'] = BootstrapFile(self, 'authorized_keys', **{
'directory': '/mnt/root/.ssh',
'directory_mode': '0600',
'remote': '/mnt/root/.ssh/authorized_keys',
'fallback': [
'~/.ssh/identity.pub',
'~/.ssh/id_rsa.pub',
'~/.ssh/id_dsa.pub',
'~/.ssh/id_ecdsa.pub']})
for bootstrap_file_yaml in bootstrap_file_yamls:
if not exists(bootstrap_file_yaml):
continue
with open(bootstrap_file_yaml) as f:
info = yaml.load(f, Loader=SafeLoader)
if info is None:
continue
for k, v in info.items():
bootstrap_files[k] = BootstrapFile(self, k, **v)
for bf in bootstrap_files.values():
if not exists(bf.local) and bf.raw_fallback:
if not bf.existing_fallback:
print("Found no public key in %s, you have to create '%s' manually" % (expanduser('~/.ssh'), bf.local))
sys.exit(1)
print("The '%s' file is missing." % bf.local)
for path in bf.existing_fallback:
yes = env.instance.config.get('bootstrap-yes', False)
if yes or yesno("Should we generate it using the key in '%s'?" % path):
if not exists(bf.expected_path):
os.mkdir(bf.expected_path)
with open(bf.local, 'wb') as out:
with open(path, 'rb') as f:
out.write(f.read())
break
else:
# answered no to all options
sys.exit(1)
if not bf.check():
print('Cannot find %s' % bf.local)
sys.exit(1)
packages_path = join(self.download_path, 'packages')
if exists(packages_path):
for dirpath, dirnames, filenames in os.walk(packages_path):
path = dirpath.split(packages_path)[1][1:]
for filename in filenames:
if not filename.endswith('.txz'):
continue
bootstrap_files[join(path, filename)] = BootstrapFile(
self, join(path, filename), **dict(
local=join(packages_path, join(path, filename)),
remote=join('/mnt/var/cache/pkg/All', filename),
encrypted=False))
if self.ssh_keys is not None:
for ssh_key_name, ssh_key_options in list(self.ssh_keys):
ssh_key = join(self.custom_template_path, ssh_key_name)
if exists(ssh_key):
pub_key_name = '%s.pub' % ssh_key_name
pub_key = '%s.pub' % ssh_key
if not exists(pub_key):
print("Public key '%s' for '%s' missing." % (pub_key, ssh_key))
sys.exit(1)
bootstrap_files[ssh_key_name] = BootstrapFile(
self, ssh_key_name, **dict(
local=ssh_key,
remote='/mnt/etc/ssh/%s' % ssh_key_name,
mode=0600))
bootstrap_files[pub_key_name] = BootstrapFile(
self, pub_key_name, **dict(
local=pub_key,
remote='/mnt/etc/ssh/%s' % pub_key_name,
mode=0644))
if hasattr(env.instance, 'get_vault_lib'):
vaultlib = env.instance.get_vault_lib()
for bf in bootstrap_files.values():
if bf.encrypted is None and exists(bf.local):
with open(bf.local) as f:
data = f.read()
bf.info['encrypted'] = vaultlib.is_encrypted(data)
return bootstrap_files |
def get_rnn_cells(self) -> List[mx.rnn.BaseRNNCell]:
"""
Returns a list of RNNCells used by this encoder.
"""
return self.forward_rnn.get_rnn_cells() + self.reverse_rnn.get_rnn_cells() | Returns a list of RNNCells used by this encoder. | Below is the the instruction that describes the task:
### Input:
Returns a list of RNNCells used by this encoder.
### Response:
def get_rnn_cells(self) -> List[mx.rnn.BaseRNNCell]:
"""
Returns a list of RNNCells used by this encoder.
"""
return self.forward_rnn.get_rnn_cells() + self.reverse_rnn.get_rnn_cells() |
def _zero_based_index(self, onebased: Union[int, str]) -> int:
"""Convert a one-based index to a zero-based index."""
result = int(onebased)
if result > 0:
result -= 1
return result | Convert a one-based index to a zero-based index. | Below is the the instruction that describes the task:
### Input:
Convert a one-based index to a zero-based index.
### Response:
def _zero_based_index(self, onebased: Union[int, str]) -> int:
"""Convert a one-based index to a zero-based index."""
result = int(onebased)
if result > 0:
result -= 1
return result |
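The conversion is simple enough to exercise standalone; a sketch mirroring the method's behaviour:

def zero_based(onebased):
    """Standalone mirror of the method above."""
    result = int(onebased)
    return result - 1 if result > 0 else result

print(zero_based("3"), zero_based(1), zero_based(0))   # 2 0 0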
def build_result(data):
"""Create a dictionary with the contents of result.json"""
more = {}
for key, value in data.items():
if key != 'elements':
newnode = value
else:
newnode = {}
for el in value:
nkey, nvalue = process_node(el)
newnode[nkey] = nvalue
more[key] = newnode
return more | Create a dictionary with the contents of result.json | Below is the the instruction that describes the task:
### Input:
Create a dictionary with the contents of result.json
### Response:
def build_result(data):
"""Create a dictionary with the contents of result.json"""
more = {}
for key, value in data.items():
if key != 'elements':
newnode = value
else:
newnode = {}
for el in value:
nkey, nvalue = process_node(el)
newnode[nkey] = nvalue
more[key] = newnode
return more |
def _findSwipl():
"""
This function makes a big effort to find the path to the SWI-Prolog shared
library. Since this is both OS dependent and installation dependent, we may
    not always succeed. If we do, we return a name/path that can be used by
CDLL(). Otherwise we raise an exception.
    :return: Tuple. First element is the name or path to the library that can be
             used by CDLL. Second element is the path where SWI-Prolog resource
file may be found (this is needed in some Linuxes)
:rtype: Tuple of strings
:raises ImportError: If we cannot guess the name of the library
"""
# Now begins the guesswork
platform = sys.platform[:3]
if platform == "win": # In Windows, we have the default installer
# path and the registry to look
(path, swiHome) = _findSwiplWin()
elif platform in ("lin", "cyg"):
(path, swiHome) = _findSwiplLin()
elif platform == "dar": # Help with MacOS is welcome!!
(path, swiHome) = _findSwiplDar()
if path is None:
(path, swiHome) = _findSwiplMacOSHome()
else:
# This should work for other UNIX
(path, swiHome) = _findSwiplLin()
# This is a catch all raise
if path is None:
raise ImportError('Could not find the SWI-Prolog library in this '
'platform. If you are sure it is installed, please '
'open an issue.')
else:
return (path, swiHome) | This function makes a big effort to find the path to the SWI-Prolog shared
library. Since this is both OS dependent and installation dependent, we may
    not always succeed. If we do, we return a name/path that can be used by
CDLL(). Otherwise we raise an exception.
    :return: Tuple. First element is the name or path to the library that can be
             used by CDLL. Second element is the path where SWI-Prolog resource
file may be found (this is needed in some Linuxes)
:rtype: Tuple of strings
:raises ImportError: If we cannot guess the name of the library | Below is the the instruction that describes the task:
### Input:
This function makes a big effort to find the path to the SWI-Prolog shared
library. Since this is both OS dependent and installation dependent, we may
    not always succeed. If we do, we return a name/path that can be used by
CDLL(). Otherwise we raise an exception.
    :return: Tuple. First element is the name or path to the library that can be
             used by CDLL. Second element is the path where SWI-Prolog resource
file may be found (this is needed in some Linuxes)
:rtype: Tuple of strings
:raises ImportError: If we cannot guess the name of the library
### Response:
def _findSwipl():
"""
This function makes a big effort to find the path to the SWI-Prolog shared
library. Since this is both OS dependent and installation dependent, we may
    not always succeed. If we do, we return a name/path that can be used by
CDLL(). Otherwise we raise an exception.
    :return: Tuple. First element is the name or path to the library that can be
             used by CDLL. Second element is the path where SWI-Prolog resource
file may be found (this is needed in some Linuxes)
:rtype: Tuple of strings
:raises ImportError: If we cannot guess the name of the library
"""
# Now begins the guesswork
platform = sys.platform[:3]
if platform == "win": # In Windows, we have the default installer
# path and the registry to look
(path, swiHome) = _findSwiplWin()
elif platform in ("lin", "cyg"):
(path, swiHome) = _findSwiplLin()
elif platform == "dar": # Help with MacOS is welcome!!
(path, swiHome) = _findSwiplDar()
if path is None:
(path, swiHome) = _findSwiplMacOSHome()
else:
# This should work for other UNIX
(path, swiHome) = _findSwiplLin()
# This is a catch all raise
if path is None:
raise ImportError('Could not find the SWI-Prolog library in this '
'platform. If you are sure it is installed, please '
'open an issue.')
else:
return (path, swiHome) |
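A hedged sketch of how the discovered path is typically consumed (pyswip-style loading; assumes SWI-Prolog is actually installed):

from ctypes import CDLL

_path, _swi_home = _findSwipl()        # raises ImportError when no library is found
_lib = CDLL(_path)                     # load libswipl through ctypes
if _swi_home is not None:
    print("SWI-Prolog home:", _swi_home)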
def deploy(self, ids):
"""
        Method to deploy VIPs
        :param ids: List of VIP request ids to be deployed on equipment
:return: None
"""
url = build_uri_with_ids('api/v3/vip-request/deploy/%s/', ids)
        return super(ApiVipRequest, self).post(url) | Method to deploy VIPs
        :param ids: List of VIP request ids to be deployed on equipment
:return: None | Below is the the instruction that describes the task:
### Input:
Method to deploy VIPs
        :param ids: List of VIP request ids to be deployed on equipment
:return: None
### Response:
def deploy(self, ids):
"""
        Method to deploy VIPs
        :param ids: List of VIP request ids to be deployed on equipment
:return: None
"""
url = build_uri_with_ids('api/v3/vip-request/deploy/%s/', ids)
return super(ApiVipRequest, self).post(url) |
def add_layer(self, element, layer):
"""
add a layer to an existing node or edge
Parameters
----------
element : str, int, (str/int, str/int)
the ID of a node or edge (source node ID, target node ID)
layer : str
the layer that the element shall be added to
"""
assert isinstance(layer, str), "Layers must be strings!"
if isinstance(element, tuple): # edge repr. by (source, target)
assert len(element) == 2
assert all(isinstance(node, (str, int)) for node in element)
source_id, target_id = element
# this class is based on a multi-digraph, so we'll have to iterate
# over all edges between the two nodes (even if there's just one)
edges = self.edge[source_id][target_id]
for edge in edges:
existing_layers = edges[edge]['layers']
existing_layers.add(layer)
edges[edge]['layers'] = existing_layers
if isinstance(element, (str, int)): # node
existing_layers = self.node[element]['layers']
existing_layers.add(layer)
self.node[element]['layers'] = existing_layers | add a layer to an existing node or edge
Parameters
----------
element : str, int, (str/int, str/int)
the ID of a node or edge (source node ID, target node ID)
layer : str
the layer that the element shall be added to | Below is the the instruction that describes the task:
### Input:
add a layer to an existing node or edge
Parameters
----------
element : str, int, (str/int, str/int)
the ID of a node or edge (source node ID, target node ID)
layer : str
the layer that the element shall be added to
### Response:
def add_layer(self, element, layer):
"""
add a layer to an existing node or edge
Parameters
----------
element : str, int, (str/int, str/int)
the ID of a node or edge (source node ID, target node ID)
layer : str
the layer that the element shall be added to
"""
assert isinstance(layer, str), "Layers must be strings!"
if isinstance(element, tuple): # edge repr. by (source, target)
assert len(element) == 2
assert all(isinstance(node, (str, int)) for node in element)
source_id, target_id = element
# this class is based on a multi-digraph, so we'll have to iterate
# over all edges between the two nodes (even if there's just one)
edges = self.edge[source_id][target_id]
for edge in edges:
existing_layers = edges[edge]['layers']
existing_layers.add(layer)
edges[edge]['layers'] = existing_layers
if isinstance(element, (str, int)): # node
existing_layers = self.node[element]['layers']
existing_layers.add(layer)
self.node[element]['layers'] = existing_layers |
def set_headers(self, headers):
"""*Sets new request headers or updates the existing.*
``headers``: The headers to add or update as a JSON object or a
dictionary.
*Examples*
| `Set Headers` | { "authorization": "Basic QWxhZGRpbjpPcGVuU2VzYW1"} |
| `Set Headers` | { "Accept-Encoding": "identity"} |
| `Set Headers` | ${auth_dict} |
"""
self.request["headers"].update(self._input_object(headers))
return self.request["headers"] | *Sets new request headers or updates the existing.*
``headers``: The headers to add or update as a JSON object or a
dictionary.
*Examples*
| `Set Headers` | { "authorization": "Basic QWxhZGRpbjpPcGVuU2VzYW1"} |
| `Set Headers` | { "Accept-Encoding": "identity"} |
| `Set Headers` | ${auth_dict} | | Below is the the instruction that describes the task:
### Input:
*Sets new request headers or updates the existing.*
``headers``: The headers to add or update as a JSON object or a
dictionary.
*Examples*
| `Set Headers` | { "authorization": "Basic QWxhZGRpbjpPcGVuU2VzYW1"} |
| `Set Headers` | { "Accept-Encoding": "identity"} |
| `Set Headers` | ${auth_dict} |
### Response:
def set_headers(self, headers):
"""*Sets new request headers or updates the existing.*
``headers``: The headers to add or update as a JSON object or a
dictionary.
*Examples*
| `Set Headers` | { "authorization": "Basic QWxhZGRpbjpPcGVuU2VzYW1"} |
| `Set Headers` | { "Accept-Encoding": "identity"} |
| `Set Headers` | ${auth_dict} |
"""
self.request["headers"].update(self._input_object(headers))
return self.request["headers"] |
def _worst_case_generation(self, worst_case_scale_factors, modes):
"""
Define worst case generation time series for fluctuating and
dispatchable generators.
Parameters
----------
worst_case_scale_factors : dict
Scale factors defined in config file 'config_timeseries.cfg'.
            Scale factors describe actual power to nominal power ratio in
worst-case scenarios.
modes : list
List with worst-cases to generate time series for. Can be
'feedin_case', 'load_case' or both.
"""
self.timeseries.generation_fluctuating = pd.DataFrame(
{'solar': [worst_case_scale_factors[
'{}_feedin_pv'.format(mode)] for mode in modes],
'wind': [worst_case_scale_factors[
'{}_feedin_other'.format(mode)] for mode in modes]},
index=self.timeseries.timeindex)
self.timeseries.generation_dispatchable = pd.DataFrame(
{'other': [worst_case_scale_factors[
'{}_feedin_other'.format(mode)] for mode in modes]},
index=self.timeseries.timeindex) | Define worst case generation time series for fluctuating and
dispatchable generators.
Parameters
----------
worst_case_scale_factors : dict
Scale factors defined in config file 'config_timeseries.cfg'.
        Scale factors describe actual power to nominal power ratio in
worst-case scenarios.
modes : list
List with worst-cases to generate time series for. Can be
'feedin_case', 'load_case' or both. | Below is the the instruction that describes the task:
### Input:
Define worst case generation time series for fluctuating and
dispatchable generators.
Parameters
----------
worst_case_scale_factors : dict
Scale factors defined in config file 'config_timeseries.cfg'.
        Scale factors describe actual power to nominal power ratio in
worst-case scenarios.
modes : list
List with worst-cases to generate time series for. Can be
'feedin_case', 'load_case' or both.
### Response:
def _worst_case_generation(self, worst_case_scale_factors, modes):
"""
Define worst case generation time series for fluctuating and
dispatchable generators.
Parameters
----------
worst_case_scale_factors : dict
Scale factors defined in config file 'config_timeseries.cfg'.
            Scale factors describe actual power to nominal power ratio in
worst-case scenarios.
modes : list
List with worst-cases to generate time series for. Can be
'feedin_case', 'load_case' or both.
"""
self.timeseries.generation_fluctuating = pd.DataFrame(
{'solar': [worst_case_scale_factors[
'{}_feedin_pv'.format(mode)] for mode in modes],
'wind': [worst_case_scale_factors[
'{}_feedin_other'.format(mode)] for mode in modes]},
index=self.timeseries.timeindex)
self.timeseries.generation_dispatchable = pd.DataFrame(
{'other': [worst_case_scale_factors[
'{}_feedin_other'.format(mode)] for mode in modes]},
index=self.timeseries.timeindex) |
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._uuid is not None:
return False
if self._created is not None:
return False
if self._updated is not None:
return False
if self._attachment is not None:
return False
return True | :rtype: bool | Below is the the instruction that describes the task:
### Input:
:rtype: bool
### Response:
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._uuid is not None:
return False
if self._created is not None:
return False
if self._updated is not None:
return False
if self._attachment is not None:
return False
return True |
def get(self, key, default=None, remote=False):
"""
Overrides dictionary get behavior to retrieve database objects with
support for returning a default. If remote=True then a remote
request is made to retrieve the database from the remote server,
otherwise the client's locally cached database object is returned.
:param str key: Database name used to retrieve the database object.
:param str default: Default database name. Defaults to None.
:param bool remote: Dictates whether the locally cached
database is returned or a remote request is made to retrieve
the database from the server. Defaults to False.
:returns: Database object
"""
if not remote:
return super(CouchDB, self).get(key, default)
db = self._DATABASE_CLASS(self, key)
if db.exists():
super(CouchDB, self).__setitem__(key, db)
return db
return default | Overrides dictionary get behavior to retrieve database objects with
support for returning a default. If remote=True then a remote
request is made to retrieve the database from the remote server,
otherwise the client's locally cached database object is returned.
:param str key: Database name used to retrieve the database object.
:param str default: Default database name. Defaults to None.
:param bool remote: Dictates whether the locally cached
database is returned or a remote request is made to retrieve
the database from the server. Defaults to False.
:returns: Database object | Below is the the instruction that describes the task:
### Input:
Overrides dictionary get behavior to retrieve database objects with
support for returning a default. If remote=True then a remote
request is made to retrieve the database from the remote server,
otherwise the client's locally cached database object is returned.
:param str key: Database name used to retrieve the database object.
:param str default: Default database name. Defaults to None.
:param bool remote: Dictates whether the locally cached
database is returned or a remote request is made to retrieve
the database from the server. Defaults to False.
:returns: Database object
### Response:
def get(self, key, default=None, remote=False):
"""
Overrides dictionary get behavior to retrieve database objects with
support for returning a default. If remote=True then a remote
request is made to retrieve the database from the remote server,
otherwise the client's locally cached database object is returned.
:param str key: Database name used to retrieve the database object.
:param str default: Default database name. Defaults to None.
:param bool remote: Dictates whether the locally cached
database is returned or a remote request is made to retrieve
the database from the server. Defaults to False.
:returns: Database object
"""
if not remote:
return super(CouchDB, self).get(key, default)
db = self._DATABASE_CLASS(self, key)
if db.exists():
super(CouchDB, self).__setitem__(key, db)
return db
return default |
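A hedged sketch, assuming a python-cloudant style client where this method lives; the credentials and database name are placeholders:

from cloudant.client import CouchDB

client = CouchDB('admin', 'secret', url='http://127.0.0.1:5984', connect=True)
db = client.get('orders', remote=True)     # ask the server instead of the local cache
if db is None:
    print('database "orders" does not exist yet')
client.disconnect()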
def read_csv(filename, delimiter=",", skip=0, guess_type=True, has_header=True, use_types={}):
"""Read a CSV file
Usage
-----
>>> data = read_csv(filename, delimiter=delimiter, skip=skip,
guess_type=guess_type, has_header=True, use_types={})
# Use specific types
>>> types = {"sepal.length": int, "petal.width": float}
>>> data = read_csv(filename, guess_type=guess_type, use_types=types)
keywords
:has_header:
Determine whether the file has a header or not
"""
with open(filename, 'r') as f:
# Skip the n first lines
if has_header:
header = f.readline().strip().split(delimiter)
else:
header = None
for i in range(skip):
f.readline()
for line in csv.DictReader(f, delimiter=delimiter, fieldnames=header):
if use_types:
yield apply_types(use_types, guess_type, line)
elif guess_type:
yield dmap(determine_type, line)
else:
yield line | Read a CSV file
Usage
-----
>>> data = read_csv(filename, delimiter=delimiter, skip=skip,
guess_type=guess_type, has_header=True, use_types={})
# Use specific types
>>> types = {"sepal.length": int, "petal.width": float}
>>> data = read_csv(filename, guess_type=guess_type, use_types=types)
keywords
:has_header:
Determine whether the file has a header or not | Below is the the instruction that describes the task:
### Input:
Read a CSV file
Usage
-----
>>> data = read_csv(filename, delimiter=delimiter, skip=skip,
guess_type=guess_type, has_header=True, use_types={})
# Use specific types
>>> types = {"sepal.length": int, "petal.width": float}
>>> data = read_csv(filename, guess_type=guess_type, use_types=types)
keywords
:has_header:
Determine whether the file has a header or not
### Response:
def read_csv(filename, delimiter=",", skip=0, guess_type=True, has_header=True, use_types={}):
"""Read a CSV file
Usage
-----
>>> data = read_csv(filename, delimiter=delimiter, skip=skip,
guess_type=guess_type, has_header=True, use_types={})
# Use specific types
>>> types = {"sepal.length": int, "petal.width": float}
>>> data = read_csv(filename, guess_type=guess_type, use_types=types)
keywords
:has_header:
Determine whether the file has a header or not
"""
with open(filename, 'r') as f:
# Skip the n first lines
if has_header:
header = f.readline().strip().split(delimiter)
else:
header = None
for i in range(skip):
f.readline()
for line in csv.DictReader(f, delimiter=delimiter, fieldnames=header):
if use_types:
yield apply_types(use_types, guess_type, line)
elif guess_type:
yield dmap(determine_type, line)
else:
yield line |
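A self-contained sketch; the sample file and its column names are invented here, and the function itself needs the csv module imported:

import csv                                             # needed by read_csv above

with open("flowers.csv", "w") as handle:
    handle.write("sepal.length,petal.width,name\n5.1,0.2,setosa\n6.3,1.8,virginica\n")

for row in read_csv("flowers.csv", guess_type=False):  # skip type guessing to avoid the extra helper functions
    print(row["name"], float(row["sepal.length"]) + float(row["petal.width"]))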
def _gatherLookupIndexes(gpos):
"""
Gather a mapping of script to lookup indexes
referenced by the kern feature for each script.
Returns a dictionary of this structure:
{
"latn" : [0],
"DFLT" : [0]
}
"""
# gather the indexes of the kern features
kernFeatureIndexes = [index for index, featureRecord in enumerate(gpos.FeatureList.FeatureRecord) if featureRecord.FeatureTag == "kern"]
# find scripts and languages that have kern features
scriptKernFeatureIndexes = {}
for scriptRecord in gpos.ScriptList.ScriptRecord:
script = scriptRecord.ScriptTag
thisScriptKernFeatureIndexes = []
defaultLangSysRecord = scriptRecord.Script.DefaultLangSys
if defaultLangSysRecord is not None:
f = []
for featureIndex in defaultLangSysRecord.FeatureIndex:
if featureIndex not in kernFeatureIndexes:
continue
f.append(featureIndex)
if f:
thisScriptKernFeatureIndexes.append((None, f))
if scriptRecord.Script.LangSysRecord is not None:
for langSysRecord in scriptRecord.Script.LangSysRecord:
langSys = langSysRecord.LangSysTag
f = []
for featureIndex in langSysRecord.LangSys.FeatureIndex:
if featureIndex not in kernFeatureIndexes:
continue
f.append(featureIndex)
if f:
thisScriptKernFeatureIndexes.append((langSys, f))
scriptKernFeatureIndexes[script] = thisScriptKernFeatureIndexes
# convert the feature indexes to lookup indexes
scriptLookupIndexes = {}
for script, featureDefinitions in scriptKernFeatureIndexes.items():
lookupIndexes = scriptLookupIndexes[script] = []
for language, featureIndexes in featureDefinitions:
for featureIndex in featureIndexes:
featureRecord = gpos.FeatureList.FeatureRecord[featureIndex]
for lookupIndex in featureRecord.Feature.LookupListIndex:
if lookupIndex not in lookupIndexes:
lookupIndexes.append(lookupIndex)
# done
return scriptLookupIndexes | Gather a mapping of script to lookup indexes
referenced by the kern feature for each script.
Returns a dictionary of this structure:
{
"latn" : [0],
"DFLT" : [0]
} | Below is the the instruction that describes the task:
### Input:
Gather a mapping of script to lookup indexes
referenced by the kern feature for each script.
Returns a dictionary of this structure:
{
"latn" : [0],
"DFLT" : [0]
}
### Response:
def _gatherLookupIndexes(gpos):
"""
Gather a mapping of script to lookup indexes
referenced by the kern feature for each script.
Returns a dictionary of this structure:
{
"latn" : [0],
"DFLT" : [0]
}
"""
# gather the indexes of the kern features
kernFeatureIndexes = [index for index, featureRecord in enumerate(gpos.FeatureList.FeatureRecord) if featureRecord.FeatureTag == "kern"]
# find scripts and languages that have kern features
scriptKernFeatureIndexes = {}
for scriptRecord in gpos.ScriptList.ScriptRecord:
script = scriptRecord.ScriptTag
thisScriptKernFeatureIndexes = []
defaultLangSysRecord = scriptRecord.Script.DefaultLangSys
if defaultLangSysRecord is not None:
f = []
for featureIndex in defaultLangSysRecord.FeatureIndex:
if featureIndex not in kernFeatureIndexes:
continue
f.append(featureIndex)
if f:
thisScriptKernFeatureIndexes.append((None, f))
if scriptRecord.Script.LangSysRecord is not None:
for langSysRecord in scriptRecord.Script.LangSysRecord:
langSys = langSysRecord.LangSysTag
f = []
for featureIndex in langSysRecord.LangSys.FeatureIndex:
if featureIndex not in kernFeatureIndexes:
continue
f.append(featureIndex)
if f:
thisScriptKernFeatureIndexes.append((langSys, f))
scriptKernFeatureIndexes[script] = thisScriptKernFeatureIndexes
# convert the feature indexes to lookup indexes
scriptLookupIndexes = {}
for script, featureDefinitions in scriptKernFeatureIndexes.items():
lookupIndexes = scriptLookupIndexes[script] = []
for language, featureIndexes in featureDefinitions:
for featureIndex in featureIndexes:
featureRecord = gpos.FeatureList.FeatureRecord[featureIndex]
for lookupIndex in featureRecord.Feature.LookupListIndex:
if lookupIndex not in lookupIndexes:
lookupIndexes.append(lookupIndex)
# done
return scriptLookupIndexes |
def build(self, builder):
"""Build XML by appending to builder"""
params = dict(
Namespace=self.namespace,
Name=self.name,
Value=self.value,
TransactionType=self.transaction_type,
)
builder.start("mdsol:Attribute", params)
builder.end("mdsol:Attribute") | Build XML by appending to builder | Below is the the instruction that describes the task:
### Input:
Build XML by appending to builder
### Response:
def build(self, builder):
"""Build XML by appending to builder"""
params = dict(
Namespace=self.namespace,
Name=self.name,
Value=self.value,
TransactionType=self.transaction_type,
)
builder.start("mdsol:Attribute", params)
builder.end("mdsol:Attribute") |
def get_vnetwork_vswitches_input_last_rcvd_instance(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
config = get_vnetwork_vswitches
input = ET.SubElement(get_vnetwork_vswitches, "input")
last_rcvd_instance = ET.SubElement(input, "last-rcvd-instance")
last_rcvd_instance.text = kwargs.pop('last_rcvd_instance')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_vnetwork_vswitches_input_last_rcvd_instance(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
config = get_vnetwork_vswitches
input = ET.SubElement(get_vnetwork_vswitches, "input")
last_rcvd_instance = ET.SubElement(input, "last-rcvd-instance")
last_rcvd_instance.text = kwargs.pop('last_rcvd_instance')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def add_output_opt(self, opt, out):
""" Add an option that determines an output
"""
self.add_opt(opt, out._dax_repr())
self._add_output(out) | Add an option that determines an output | Below is the the instruction that describes the task:
### Input:
Add an option that determines an output
### Response:
def add_output_opt(self, opt, out):
""" Add an option that determines an output
"""
self.add_opt(opt, out._dax_repr())
self._add_output(out) |
def histogram_equalize(self, use_bands, **kwargs):
    ''' Equalize the histogram and normalize the value range
Equalization is on all three bands, not per-band'''
data = self._read(self[use_bands,...], **kwargs)
data = np.rollaxis(data.astype(np.float32), 0, 3)
flattened = data.flatten()
if 0 in data:
masked = np.ma.masked_values(data, 0).compressed()
image_histogram, bin_edges = np.histogram(masked, 256)
else:
image_histogram, bin_edges = np.histogram(flattened, 256)
bins = (bin_edges[:-1] + bin_edges[1:]) / 2.0
cdf = image_histogram.cumsum()
cdf = cdf / float(cdf[-1])
image_equalized = np.interp(flattened, bins, cdf).reshape(data.shape)
if 'stretch' in kwargs or 'gamma' in kwargs:
return self._histogram_stretch(image_equalized, **kwargs)
else:
            return image_equalized | Equalize the histogram and normalize the value range
Equalization is on all three bands, not per-band | Below is the the instruction that describes the task:
### Input:
Equalize the histogram and normalize the value range
Equalization is on all three bands, not per-band
### Response:
def histogram_equalize(self, use_bands, **kwargs):
    ''' Equalize the histogram and normalize the value range
Equalization is on all three bands, not per-band'''
data = self._read(self[use_bands,...], **kwargs)
data = np.rollaxis(data.astype(np.float32), 0, 3)
flattened = data.flatten()
if 0 in data:
masked = np.ma.masked_values(data, 0).compressed()
image_histogram, bin_edges = np.histogram(masked, 256)
else:
image_histogram, bin_edges = np.histogram(flattened, 256)
bins = (bin_edges[:-1] + bin_edges[1:]) / 2.0
cdf = image_histogram.cumsum()
cdf = cdf / float(cdf[-1])
image_equalized = np.interp(flattened, bins, cdf).reshape(data.shape)
if 'stretch' in kwargs or 'gamma' in kwargs:
return self._histogram_stretch(image_equalized, **kwargs)
else:
return image_equalized |
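The equalization core can be illustrated standalone with NumPy; a sketch that works on a plain array rather than the image object used above:

import numpy as np

def equalize_array(arr, bins=256):
    """Standalone sketch of the histogram-equalization step used above."""
    flat = arr.astype(np.float32).ravel()
    hist, edges = np.histogram(flat, bins)
    centers = (edges[:-1] + edges[1:]) / 2.0
    cdf = hist.cumsum() / float(hist.cumsum()[-1])
    return np.interp(flat, centers, cdf).reshape(arr.shape)

demo = np.random.randint(0, 2048, size=(4, 4))
print(equalize_array(demo).round(2))                   # values now spread across [0, 1]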
def require_axis(f):
""" Check if the object of the function has axis and sel_axis members """
@wraps(f)
def _wrapper(self, *args, **kwargs):
if None in (self.axis, self.sel_axis):
            raise ValueError('%(func_name)s requires the node %(node)s '
'to have an axis and a sel_axis function' %
dict(func_name=f.__name__, node=repr(self)))
return f(self, *args, **kwargs)
return _wrapper | Check if the object of the function has axis and sel_axis members | Below is the the instruction that describes the task:
### Input:
Check if the object of the function has axis and sel_axis members
### Response:
def require_axis(f):
""" Check if the object of the function has axis and sel_axis members """
@wraps(f)
def _wrapper(self, *args, **kwargs):
if None in (self.axis, self.sel_axis):
            raise ValueError('%(func_name)s requires the node %(node)s '
'to have an axis and a sel_axis function' %
dict(func_name=f.__name__, node=repr(self)))
return f(self, *args, **kwargs)
return _wrapper |
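A small sketch of the decorator guarding a kd-tree style node; the Node class here is a stand-in with just the attributes the check needs:

from functools import wraps            # required by the decorator above

class Node(object):
    def __init__(self, axis=None, sel_axis=None):
        self.axis = axis
        self.sel_axis = sel_axis

    @require_axis
    def split_axis(self):
        return self.axis

print(Node(axis=0, sel_axis=lambda prev: (prev + 1) % 3).split_axis())   # 0
Node().split_axis()                    # raises ValueError because axis/sel_axis are missing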
def set_feature_transform(self, mode='polynomial', degree=1):
'''
Transform data feature to high level
'''
if self.status != 'load_train_data':
print("Please load train data first.")
return self.train_X
self.feature_transform_mode = mode
self.feature_transform_degree = degree
self.train_X = self.train_X[:, 1:]
self.train_X = utility.DatasetLoader.feature_transform(
self.train_X,
self.feature_transform_mode,
self.feature_transform_degree
)
return self.train_X | Transform data feature to high level | Below is the the instruction that describes the task:
### Input:
Transform data feature to high level
### Response:
def set_feature_transform(self, mode='polynomial', degree=1):
'''
Transform data feature to high level
'''
if self.status != 'load_train_data':
print("Please load train data first.")
return self.train_X
self.feature_transform_mode = mode
self.feature_transform_degree = degree
self.train_X = self.train_X[:, 1:]
self.train_X = utility.DatasetLoader.feature_transform(
self.train_X,
self.feature_transform_mode,
self.feature_transform_degree
)
return self.train_X |
def _parse_config_file(self, cfg_files):
"""Parse config file (ini) and set properties
:return:
"""
cfg_handler = configparser.ConfigParser(interpolation=None)
if not cfg_handler.read(map(str, cfg_files)):
return
self._parse_global_section(cfg_handler)
self._parse_vcs_section(cfg_handler)
self._parse_file_section(cfg_handler) | Parse config file (ini) and set properties
:return: | Below is the the instruction that describes the task:
### Input:
Parse config file (ini) and set properties
:return:
### Response:
def _parse_config_file(self, cfg_files):
"""Parse config file (ini) and set properties
:return:
"""
cfg_handler = configparser.ConfigParser(interpolation=None)
if not cfg_handler.read(map(str, cfg_files)):
return
self._parse_global_section(cfg_handler)
self._parse_vcs_section(cfg_handler)
self._parse_file_section(cfg_handler) |
def query(self, expr, **kwargs):
"""Query columns of the DataManager with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
DataManager containing the rows where the boolean expression is satisfied.
"""
columns = self.columns
def query_builder(df, **kwargs):
# This is required because of an Arrow limitation
# TODO revisit for Arrow error
df = df.copy()
df.index = pandas.RangeIndex(len(df))
df.columns = columns
df.query(expr, inplace=True, **kwargs)
df.columns = pandas.RangeIndex(len(df.columns))
return df
func = self._prepare_method(query_builder, **kwargs)
new_data = self._map_across_full_axis(1, func)
# Query removes rows, so we need to update the index
new_index = self.compute_index(0, new_data, True)
return self.__constructor__(new_data, new_index, self.columns, self.dtypes) | Query columns of the DataManager with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
DataManager containing the rows where the boolean expression is satisfied. | Below is the the instruction that describes the task:
### Input:
Query columns of the DataManager with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
DataManager containing the rows where the boolean expression is satisfied.
### Response:
def query(self, expr, **kwargs):
"""Query columns of the DataManager with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
DataManager containing the rows where the boolean expression is satisfied.
"""
columns = self.columns
def query_builder(df, **kwargs):
# This is required because of an Arrow limitation
# TODO revisit for Arrow error
df = df.copy()
df.index = pandas.RangeIndex(len(df))
df.columns = columns
df.query(expr, inplace=True, **kwargs)
df.columns = pandas.RangeIndex(len(df.columns))
return df
func = self._prepare_method(query_builder, **kwargs)
new_data = self._map_across_full_axis(1, func)
# Query removes rows, so we need to update the index
new_index = self.compute_index(0, new_data, True)
return self.__constructor__(new_data, new_index, self.columns, self.dtypes) |
def newton_refine_curve(curve, point, s, new_s):
"""Image for :func:`._curve_helpers.newton_refine` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256)
ax.plot(point[0, :], point[1, :], marker="H")
wrong_points = curve.evaluate_multi(np.asfortranarray([s, new_s]))
ax.plot(
wrong_points[0, [0]],
wrong_points[1, [0]],
color="black",
linestyle="None",
marker="o",
)
ax.plot(
wrong_points[0, [1]],
wrong_points[1, [1]],
color="black",
linestyle="None",
marker="o",
markeredgewidth=1,
markerfacecolor="None",
)
# Set the axis bounds / scaling.
ax.axis("scaled")
ax.set_xlim(-0.125, 3.125)
ax.set_ylim(-0.125, 1.375)
save_image(ax.figure, "newton_refine_curve.png") | Image for :func:`._curve_helpers.newton_refine` docstring. | Below is the the instruction that describes the task:
### Input:
Image for :func:`._curve_helpers.newton_refine` docstring.
### Response:
def newton_refine_curve(curve, point, s, new_s):
"""Image for :func:`._curve_helpers.newton_refine` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256)
ax.plot(point[0, :], point[1, :], marker="H")
wrong_points = curve.evaluate_multi(np.asfortranarray([s, new_s]))
ax.plot(
wrong_points[0, [0]],
wrong_points[1, [0]],
color="black",
linestyle="None",
marker="o",
)
ax.plot(
wrong_points[0, [1]],
wrong_points[1, [1]],
color="black",
linestyle="None",
marker="o",
markeredgewidth=1,
markerfacecolor="None",
)
# Set the axis bounds / scaling.
ax.axis("scaled")
ax.set_xlim(-0.125, 3.125)
ax.set_ylim(-0.125, 1.375)
save_image(ax.figure, "newton_refine_curve.png") |
def arg_bool(name, default=False):
""" Fetch a query argument, as a boolean. """
v = request.args.get(name, '')
if not len(v):
return default
return v in BOOL_TRUISH | Fetch a query argument, as a boolean. | Below is the the instruction that describes the task:
### Input:
Fetch a query argument, as a boolean.
### Response:
def arg_bool(name, default=False):
""" Fetch a query argument, as a boolean. """
v = request.args.get(name, '')
if not len(v):
return default
return v in BOOL_TRUISH |
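A hedged Flask sketch; the BOOL_TRUISH values and the route are assumptions, since the module's constant is not shown here:

from flask import Flask, request

BOOL_TRUISH = ('true', '1', 'yes', 'on')   # assumed contents of the module-level constant
app = Flask(__name__)

@app.route('/search')
def search():
    hidden = arg_bool('hidden')            # ?hidden=true -> True, parameter absent -> False
    return {'hidden': hidden}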
def readmol(path, as_string=False):
"""Reads the given molecule file and returns the corresponding Pybel molecule as well as the input file type.
In contrast to the standard Pybel implementation, the file is closed properly."""
supported_formats = ['pdb']
# Fix for Windows-generated files: Remove carriage return characters
if "\r" in path and as_string:
path = path.replace('\r', '')
for sformat in supported_formats:
obc = pybel.ob.OBConversion()
obc.SetInFormat(sformat)
write_message("Detected {} as format. Trying to read file with OpenBabel...\n".format(sformat), mtype='debug')
# Read molecules with single bond information
if as_string:
try:
mymol = pybel.readstring(sformat, path)
except IOError:
sysexit(4, 'No valid file format provided.')
else:
read_file = pybel.readfile(format=sformat, filename=path, opt={"s": None})
try:
mymol = next(read_file)
except StopIteration:
sysexit(4, 'File contains no valid molecules.\n')
write_message("Molecule successfully read.\n", mtype='debug')
# Assign multiple bonds
mymol.OBMol.PerceiveBondOrders()
return mymol, sformat
sysexit(4, 'No valid file format provided.') | Reads the given molecule file and returns the corresponding Pybel molecule as well as the input file type.
In contrast to the standard Pybel implementation, the file is closed properly. | Below is the the instruction that describes the task:
### Input:
Reads the given molecule file and returns the corresponding Pybel molecule as well as the input file type.
In contrast to the standard Pybel implementation, the file is closed properly.
### Response:
def readmol(path, as_string=False):
"""Reads the given molecule file and returns the corresponding Pybel molecule as well as the input file type.
In contrast to the standard Pybel implementation, the file is closed properly."""
supported_formats = ['pdb']
# Fix for Windows-generated files: Remove carriage return characters
if "\r" in path and as_string:
path = path.replace('\r', '')
for sformat in supported_formats:
obc = pybel.ob.OBConversion()
obc.SetInFormat(sformat)
write_message("Detected {} as format. Trying to read file with OpenBabel...\n".format(sformat), mtype='debug')
# Read molecules with single bond information
if as_string:
try:
mymol = pybel.readstring(sformat, path)
except IOError:
sysexit(4, 'No valid file format provided.')
else:
read_file = pybel.readfile(format=sformat, filename=path, opt={"s": None})
try:
mymol = next(read_file)
except StopIteration:
sysexit(4, 'File contains no valid molecules.\n')
write_message("Molecule successfully read.\n", mtype='debug')
# Assign multiple bonds
mymol.OBMol.PerceiveBondOrders()
return mymol, sformat
sysexit(4, 'No valid file format provided.') |
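A hedged call example: it assumes openbabel/pybel is installed, that the module's own write_message and sysexit helpers are in scope, and that 'complex.pdb' is a placeholder file name.

# Read a PDB file from disk; the returned Pybel molecule has bond orders perceived.
mol, fmt = readmol('complex.pdb')
print(fmt, len(mol.atoms), mol.OBMol.NumBonds())

# The same content can be passed as an in-memory string instead:
with open('complex.pdb') as handle:
    mol_from_string, _ = readmol(handle.read(), as_string=True)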
def _sync_content_metadata(self, serialized_data, http_method):
"""
Synchronize content metadata using the Degreed course content API.
Args:
serialized_data: JSON-encoded object containing content metadata.
http_method: The HTTP method to use for the API request.
Raises:
ClientError: If Degreed API request fails.
"""
try:
status_code, response_body = getattr(self, '_' + http_method)(
urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.course_api_path),
serialized_data,
self.CONTENT_PROVIDER_SCOPE
)
except requests.exceptions.RequestException as exc:
raise ClientError(
'DegreedAPIClient request failed: {error} {message}'.format(
error=exc.__class__.__name__,
message=str(exc)
)
)
if status_code >= 400:
raise ClientError(
'DegreedAPIClient request failed with status {status_code}: {message}'.format(
status_code=status_code,
message=response_body
)
) | Synchronize content metadata using the Degreed course content API.
Args:
serialized_data: JSON-encoded object containing content metadata.
http_method: The HTTP method to use for the API request.
Raises:
ClientError: If Degreed API request fails. | Below is the the instruction that describes the task:
### Input:
Synchronize content metadata using the Degreed course content API.
Args:
serialized_data: JSON-encoded object containing content metadata.
http_method: The HTTP method to use for the API request.
Raises:
ClientError: If Degreed API request fails.
### Response:
def _sync_content_metadata(self, serialized_data, http_method):
"""
Synchronize content metadata using the Degreed course content API.
Args:
serialized_data: JSON-encoded object containing content metadata.
http_method: The HTTP method to use for the API request.
Raises:
ClientError: If Degreed API request fails.
"""
try:
status_code, response_body = getattr(self, '_' + http_method)(
urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.course_api_path),
serialized_data,
self.CONTENT_PROVIDER_SCOPE
)
except requests.exceptions.RequestException as exc:
raise ClientError(
'DegreedAPIClient request failed: {error} {message}'.format(
error=exc.__class__.__name__,
message=str(exc)
)
)
if status_code >= 400:
raise ClientError(
'DegreedAPIClient request failed with status {status_code}: {message}'.format(
status_code=status_code,
message=response_body
)
) |
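A sketch of how a caller on the (unshown) Degreed client class might use this helper; the payload shape is illustrative only and is not the real Degreed course schema.

import json

payload = json.dumps({
    'courses': [{'contentId': 'course-v1:Demo+X+2024', 'title': 'Demo Course'}]  # made-up fields
})
try:
    client._sync_content_metadata(payload, 'post')   # 'client' is an instance of the surrounding class
except ClientError as exc:
    print('Degreed sync failed:', exc)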
def delete_suffixes(self, word):
"""
Delete some very common suffixes.
"""
length = len(word)
suffixes = ['al', 'ance', 'ence', 'er', 'ic', 'able', 'ible',
'ant', 'ement', 'ment', 'ent', 'ism', 'ate',
'iti', 'ous', 'ive', 'ize']
for suffix in suffixes:
if word.endswith(suffix) and self.r2 <= (length - len(suffix)):
word = word[:-len(suffix)]
return word
if word.endswith('ion') and self.r2 <= (length - 3):
if word[length - 4] in 'st':
word = word[:-3]
return word | Delete some very common suffixes. | Below is the the instruction that describes the task:
### Input:
Delete some very common suffixes.
### Response:
def delete_suffixes(self, word):
"""
Delete some very common suffixes.
"""
length = len(word)
suffixes = ['al', 'ance', 'ence', 'er', 'ic', 'able', 'ible',
'ant', 'ement', 'ment', 'ent', 'ism', 'ate',
'iti', 'ous', 'ive', 'ize']
for suffix in suffixes:
if word.endswith(suffix) and self.r2 <= (length - len(suffix)):
word = word[:-len(suffix)]
return word
if word.endswith('ion') and self.r2 <= (length - 3):
if word[length - 4] in 'st':
word = word[:-3]
return word |
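delete_suffixes is written as a method that reads self.r2 (the start of the stemmer's R2 region), so a minimal sketch needs a stand-in object; the r2 value below is hand-picked rather than computed from the word.

class _StemmerStub:
    def __init__(self, r2):
        self.r2 = r2   # normally derived from the word during stemming

stub = _StemmerStub(r2=4)
print(delete_suffixes(stub, 'adjustment'))   # 'adjust'  ('ment' removed, R2 condition met)
print(delete_suffixes(stub, 'adoption'))     # 'adopt'   ('ion' removed because it follows 't')
print(delete_suffixes(stub, 'walking'))      # None - the method only returns when a rule fires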
def gen_checkbox_list(sig_dic):
'''
For generating List view HTML file for CHECKBOX.
for each item.
'''
view_zuoxiang = '''<span class="iga_pd_val">'''
dic_tmp = sig_dic['dic']
for key in dic_tmp.keys():
tmp_str = '''{{% if "{0}" in postinfo.extinfo["{1}"] %}} {2} {{% end %}}
'''.format(key, sig_dic['en'], dic_tmp[key])
view_zuoxiang += tmp_str
view_zuoxiang += '''</span>'''
return view_zuoxiang | For generating List view HTML file for CHECKBOX.
for each item. | Below is the the instruction that describes the task:
### Input:
For generating List view HTML file for CHECKBOX.
for each item.
### Response:
def gen_checkbox_list(sig_dic):
'''
For generating List view HTML file for CHECKBOX.
for each item.
'''
view_zuoxiang = '''<span class="iga_pd_val">'''
dic_tmp = sig_dic['dic']
for key in dic_tmp.keys():
tmp_str = '''{{% if "{0}" in postinfo.extinfo["{1}"] %}} {2} {{% end %}}
'''.format(key, sig_dic['en'], dic_tmp[key])
view_zuoxiang += tmp_str
view_zuoxiang += '''</span>'''
return view_zuoxiang |
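A quick illustration with a made-up signature dictionary (the real dictionaries come from the surrounding form-definition code, so the keys below are assumptions):

sig_dic = {
    'en': 'color',                        # field name
    'dic': {'r': 'Red', 'g': 'Green'},    # option key -> label
}
print(gen_checkbox_list(sig_dic))
# <span class="iga_pd_val">{% if "r" in postinfo.extinfo["color"] %} Red {% end %}
# {% if "g" in postinfo.extinfo["color"] %} Green {% end %}
# </span>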
def zeros(dur=None):
"""
Zeros/zeroes stream generator.
You may sum your endless stream by this to enforce an end to it.
Parameters
----------
dur :
Duration, in number of samples; endless if not given.
Returns
-------
Stream that repeats "0.0" during a given time duration (if any) or
endlessly.
"""
if dur is None or (isinf(dur) and dur > 0):
while True:
yield 0.0
for x in xrange(int(.5 + dur)):
yield 0.0 | Zeros/zeroes stream generator.
You may sum your endless stream by this to enforce an end to it.
Parameters
----------
dur :
Duration, in number of samples; endless if not given.
Returns
-------
Stream that repeats "0.0" during a given time duration (if any) or
endlessly. | Below is the the instruction that describes the task:
### Input:
Zeros/zeroes stream generator.
You may sum your endless stream by this to enforce an end to it.
Parameters
----------
dur :
Duration, in number of samples; endless if not given.
Returns
-------
Stream that repeats "0.0" during a given time duration (if any) or
endlessly.
### Response:
def zeros(dur=None):
"""
Zeros/zeroes stream generator.
You may sum your endless stream by this to enforce an end to it.
Parameters
----------
dur :
Duration, in number of samples; endless if not given.
Returns
-------
Stream that repeats "0.0" during a given time duration (if any) or
endlessly.
"""
if dur is None or (isinf(dur) and dur > 0):
while True:
yield 0.0
for x in xrange(int(.5 + dur)):
yield 0.0 |
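A small standalone run; the original module supplies isinf and a Python-2-style xrange, so both are stubbed in here so the sketch works on Python 3.

from math import isinf          # used inside zeros()
from itertools import islice

xrange = range                  # alias for the original module's xrange

print(list(zeros(3)))            # [0.0, 0.0, 0.0]
print(list(zeros(2.7)))          # duration is rounded: [0.0, 0.0, 0.0]
print(list(islice(zeros(), 5)))  # endless stream, truncated here to 5 samples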
def independentlinear60(display=False):
""" A simulated dataset with tight correlations among distinct groups of features.
"""
    # remember the current RNG state, then set a constant seed
    old_state = np.random.get_state()
    np.random.seed(0)
# generate dataset with known correlation
N = 1000
M = 60
    # set one coefficient from each group of 3 to 1
beta = np.zeros(M)
beta[0:30:3] = 1
f = lambda X: np.matmul(X, beta)
# Make sure the sample correlation is a perfect match
X_start = np.random.randn(N, M)
X = X_start - X_start.mean(0)
y = f(X) + np.random.randn(N) * 1e-2
    # restore the previous numpy random state
    np.random.set_state(old_state)
return pd.DataFrame(X), y | A simulated dataset with tight correlations among distinct groups of features. | Below is the the instruction that describes the task:
### Input:
A simulated dataset with tight correlations among distinct groups of features.
### Response:
def independentlinear60(display=False):
""" A simulated dataset with tight correlations among distinct groups of features.
"""
    # remember the current RNG state, then set a constant seed
    old_state = np.random.get_state()
    np.random.seed(0)
# generate dataset with known correlation
N = 1000
M = 60
    # set one coefficient from each group of 3 to 1
beta = np.zeros(M)
beta[0:30:3] = 1
f = lambda X: np.matmul(X, beta)
# Make sure the sample correlation is a perfect match
X_start = np.random.randn(N, M)
X = X_start - X_start.mean(0)
y = f(X) + np.random.randn(N) * 1e-2
    # restore the previous numpy random state
    np.random.set_state(old_state)
return pd.DataFrame(X), y |
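Usage is straightforward; a quick check of the shapes and of which coefficients carry signal:

X, y = independentlinear60()
print(X.shape, y.shape)   # (1000, 60) (1000,)
# Only columns 0, 3, 6, ..., 27 influence y (beta[0:30:3] == 1); the rest are noise-only features.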
def find_indices(lst, element):
""" Returns the indices for all occurrences of 'element' in 'lst'.
Args:
lst (list): List to search.
element: Element to find.
Returns:
list: List of indices or values
"""
result = []
offset = -1
while True:
try:
offset = lst.index(element, offset+1)
except ValueError:
return result
result.append(offset) | Returns the indices for all occurrences of 'element' in 'lst'.
Args:
lst (list): List to search.
element: Element to find.
Returns:
list: List of indices or values | Below is the the instruction that describes the task:
### Input:
Returns the indices for all occurrences of 'element' in 'lst'.
Args:
lst (list): List to search.
element: Element to find.
Returns:
list: List of indices or values
### Response:
def find_indices(lst, element):
""" Returns the indices for all occurrences of 'element' in 'lst'.
Args:
lst (list): List to search.
element: Element to find.
Returns:
list: List of indices or values
"""
result = []
offset = -1
while True:
try:
offset = lst.index(element, offset+1)
except ValueError:
return result
result.append(offset) |
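Two quick calls to show the behaviour:

print(find_indices([1, 2, 1, 3, 1], 1))    # [0, 2, 4]
print(find_indices(['a', 'b', 'c'], 'z'))  # [] when the element is absent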