code stringlengths 26 79.6k | docstring stringlengths 1 46.9k |
|---|---|
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
local_buffer = utils.BytearrayStream()
if self._object_type:
self._object_type.write(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidField(
"The Deriv... | Write the data encoding the DeriveKey request payload to a stream.
Args:
output_buffer (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining... |
def id_(reset=False):
s True.
CLI Example:
.. code-block:: bash
salt monit.id [reset=True]
Monit id (?P<id>[^ ]+)echo y|monit -rcmd.run_allstdoutidmonit -icmd.run:')[-1].strip()
return ret | .. versionadded:: 2016.3.0
Return monit unique id.
reset : False
Reset current id and generate a new id when it's True.
CLI Example:
.. code-block:: bash
salt '*' monit.id [reset=True] |
def db020(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
.format(value))
self._db020 = value | Corresponds to IDD Field `db020`
mean coincident wet-bulb temperature to
Dry-bulb temperature corresponding to 2.0% annual cumulative frequency of occurrence (warm conditions)
Args:
value (float): value for IDD Field `db020`
Unit: C
if `value` is None... |
def check_instance(
mzn, *dzn_files, data=None, include=None, stdlib_dir=None, globals_dir=None,
allow_multiple_assignments=False
):
args = []
args += _flattening_args(
mzn, *dzn_files, data=data, include=include, stdlib_dir=stdlib_dir,
globals_dir=globals_dir,
allow_multip... | Perform instance checking on a model + data.
This function calls the command ``minizinc --instance-check-only`` to check
for consistency of the given model + data.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of... |
def get_voltage(self, channel, unit=):
adc_ch = self._ch_map[channel][][]
address = self._ch_map[channel][][]
raw = self._get_adc_value(address=address)[adc_ch]
dac_offset = self._ch_cal[channel][][]
dac_gain = self._ch_cal[channel][][]
voltage = ((raw - dac_of... | Reading voltage |
def visit(self, func):
base_len = len(self.name)
return self.visitvalues(lambda o: func(o.name[base_len:].lstrip("/"))) | Run ``func`` on each object's path.
Note: If ``func`` returns ``None`` (or doesn't return),
iteration continues. However, if ``func`` returns
anything else, it ceases and returns that value.
Examples
--------
>>> import zarr
>>> g1 = zarr.group()
... |
def _process_elem_text(elem, dic, subdic, text="@text", **options):
elem.text = elem.text.strip()
if elem.text:
etext = _parse_text(elem.text, **options)
if len(elem) or elem.attrib:
subdic[text] = etext
else:
dic[elem.tag] = etext | :param elem: ET Element object which has elem.text
:param dic: <container> (dict[-like]) object converted from elem
:param subdic: Sub <container> object converted from elem
:param options:
Keyword options, see the description of :func:`elem_to_container` for
more details.
:return: None... |
def number_cwt_peaks(x, n):
return len(find_peaks_cwt(vector=x, widths=np.array(list(range(1, n + 1))), wavelet=ricker)) | This feature calculator searches for different peaks in x. To do so, x is smoothed by a ricker wavelet and for
widths ranging from 1 to n. This feature calculator returns the number of peaks that occur at enough width scales
and with sufficiently high Signal-to-Noise-Ratio (SNR)
:param x: the time series t... |
def QA_SU_save_stock_min(client=DATABASE, ui_log=None, ui_progress=None):
try:
import jqdatasdk
jqdatasdk.auth("JQUSERNAME", "JQUSERPASSWD")
except:
raise ModuleNotFoundError
code_list = list(
map(
lambda x: x + ".XSHG" if x[0] == "6" else... | 聚宽实现方式
save current day's stock_min data |
def horizontal_layout(self, draw, slide):
padding = self.padding
heading = slide[]
top = padding
left = padding
top += heading[] + padding
rows = slide[]
for row in rows:
images = row.get(, 0)
items = row[]
... | Augment slide with horizontal layout info |
def set_slug(apps, schema_editor):
Event = apps.get_model(, )
for e in Event.objects.all():
e.slug = generate_slug(e.pk)
e.save(update_fields=[]) | Create a slug for each Event already in the DB. |
def filesessionmaker(sessionmaker, file_manager, file_managers=None):
u
registry = WeakKeyDictionary()
if file_managers:
for k, v in six.iteritems(file_managers):
if isinstance(k, FileAttribute):
raise NotImplementedError()
registry[k] = v
def find_file... | u'''Wrapper of session maker adding link to a FileManager instance
to session.::
file_manager = FileManager(cfg.TRANSIENT_ROOT,
cfg.PERSISTENT_ROOT)
filesessionmaker(sessionmaker(...), file_manager) |
def setup(self):
if not self.networks():
super(FunctionLearning, self).setup()
for net in self.networks():
self.source(network=net) | Setup does stuff only if there are no networks.
This is so it only runs once at the start of the experiment. It first
calls the same function in the super (see experiments.py in wallace).
Then it adds a source to each network. |
def aggregate(self, pipeline, **kwargs):
result = self.collection.aggregate(pipeline, **kwargs)
if pymongo.version_tuple < (3, 0, 0):
result = result[]
return result | Perform an aggregation and make sure that result will be everytime
CommandCursor. Will take care for pymongo version differencies
:param pipeline: {list} of aggregation pipeline stages
:return: {pymongo.command_cursor.CommandCursor} |
def countByValue(self):
return self.transform(
lambda rdd: self._context._context.parallelize(
rdd.countByValue().items())) | Apply countByValue to every RDD.abs
:rtype: DStream
.. warning::
Implemented as a local operation.
Example:
>>> import pysparkling
>>> sc = pysparkling.Context()
>>> ssc = pysparkling.streaming.StreamingContext(sc, 0.1)
>>> (
... ssc
... |
def _get_address_family(table, instance):
address_family_mapping = {"inet": "ipv4", "inet6": "ipv6", "inetflow": "flow"}
if instance == "master":
family = table.rsplit(".", 1)[-2]
else:
family = table.split(".")[-2]
try:
address_family = addre... | Function to derive address family from a junos table name.
:params table: The name of the routing table
:returns: address family |
def generate_additional_context(self, matching_datasets):
dataset_ids = [upload.id for upload in matching_datasets]
tags = Tag.objects.filter(
dataset__in=dataset_ids
).distinct().annotate(
Count()
).order_by()[:5]
hubs = matching_datasets.values... | Return additional information about matching datasets.
Includes upload counts, related hubs, related tags. |
def plot_pnlmoney(self):
plt.scatter(x=self.pnl.sell_date.apply(str), y=self.pnl.pnl_money)
plt.gcf().autofmt_xdate()
return plt | 画出pnl盈亏额散点图 |
def to_perseus(graphs):
graph_attributes = []
networks = {}
for graph in graphs:
attributes = dict(graph.graph)
attributes.update({"Name" : attributes.get("Name", attributes.get("name", "networkx graph")),
"GUID": attributes.get("GUID", str(uuid.uuid4()))})
graph_att... | Create a network table and the network dictionary for export to Perseus.
:param graphs: Collection of networkx graphs
>>> from perseuspy import nx
>>> G = nx.random_graphs.barabasi_albert_graph(10, 3)
>>> network_table, networks = nx.to_perseus([G]) |
def calcDrawingProbs(self):
wmg = self.wmg
phi = self.phi
weights = []
for i in range(0, len(wmg.keys())):
weights.append(phi**i)
totalWeight = sum(weights)
for i in range(0, len(wmg.keys())):
weights[i] = weights[i]/t... | Returns a vector that contains the probabily of an item being from each position. We say
that every item in a order vector is drawn with weight phi^i where i is its position. |
def _from_p(self, mode):
self._check_modes(("P", "PA"))
if not self.palette:
raise RuntimeError("Cans alpha overrides data alpha
mode = "RGBA"
alpha = None
elif self.mode.endswith("A"):
alpha = self.data.sel(bands="A").data[.... | Convert the image from P or PA to RGB or RGBA. |
def register_on_clipboard_mode_changed(self, callback):
event_type = library.VBoxEventType.on_clipboard_mode_changed
return self.event_source.register_callback(callback, event_type) | Set the callback function to consume on clipboard mode changed
events.
Callback receives a IClipboardModeChangedEvent object.
Returns the callback_id |
def role_add(self, role=None, login=None, envs=[], query=):
data = {: self.args.login}
juicer.utils.Log.log_debug(
"Add Role to ", role, login)
for env in self.args.envs:
if not juicer.utils.role_exists_p(role, self.connectors[env]):
juicer.... | `login` - Login or username of user to add to `role`
`role` - Role to add user to
Add user to role |
def track_trace(self, name, properties=None, severity=None):
data = channel.contracts.MessageData()
data.message = name or NULL_CONSTANT_STRING
if properties:
data.properties = properties
if severity is not None:
data.severity_level = channel.contracts.Me... | Sends a single trace statement.
Args:
name (str). the trace statement.\n
properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
severity (str). the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, C... |
def append(self, other: ) -> :
if self.null():
return other
return (self.tail().append(other)).cons(self.head()) | Append other list to this list. |
def current_position(self):
token = self.tokenizer.peek(0)
if token:
return token.start, token.end
return self.tokenizer.position, self.tokenizer.position + 1 | Return a tuple of (start, end). |
def join_domain(domain,
username=None,
password=None,
account_ou=None,
account_exists=False,
restart=False):
minion-iddomain.tldjoinuserjoinpasswordou=clients,ou=org,dc=domain,dc=tld
if six.PY2:
domain = _to_unicode(domain)
... | Join a computer to an Active Directory domain. Requires a reboot.
Args:
domain (str):
The domain to which the computer should be joined, e.g.
``example.com``
username (str):
Username of an account which is authorized to join computers to the
specifi... |
def find_slave_widgets(self,tab):
if isinstance(tab, int):
tab = self.tab_widget.widget(tab)
km=tab.kernel_manager
widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())]
filtered_widget_list = ( widget for widge... | return all the frontends that do not own the kernel attached to the given widget/tab.
Only find frontends owned by the current application. Selection
based on connection file of the kernel.
This function does the conversion tabNumber/widget if needed. |
def _generate_examples(self, filepath):
image_array, values_array = _load_data(filepath)
labels_array = np.zeros_like(values_array, dtype=np.int64)
for i in range(values_array.shape[1]):
labels_array[:, i] = _discretize(values_array[:, i])
for image, labels, values in ... | Generate examples for the Shapes3d dataset.
Args:
filepath: path to the Shapes3d hdf5 file.
Yields:
Dictionaries with images and the different labels. |
def _lval_add_towards_polarity(x, polarity):
if x < 0:
if polarity < 0:
return Lval(, x)
return Lval(, x)
elif polarity > 0:
return Lval(, x)
return Lval(, x) | Compute the appropriate Lval "kind" for the limit of value `x` towards
`polarity`. Either 'toinf' or 'pastzero' depending on the sign of `x` and
the infinity direction of polarity. |
def pairs(a):
a = np.asarray(a)
return as_strided(a, shape=(a.size - 1, 2), strides=a.strides * 2) | Return array of pairs of adjacent elements in a.
>>> pairs([1, 2, 3, 4])
array([[1, 2],
[2, 3],
[3, 4]]) |
def getgroupcustominformationurl(idgroup, customfield="", *args, **kwargs):
groupidparam = "/" + idgroup
url = getmambuurl(*args, **kwargs) + "groups" + groupidparam + "/custominformation" + ( ("/"+customfield) if customfield else "" )
return url | Request Group Custom Information URL.
See mambugroup module and pydoc for further information.
See Mambu official developer documentation for further details, and
info on parameters that may be implemented here in the future. |
def setup(self, settings):
KafkaBaseMonitor.setup(self, settings)
self.zoo_client = KazooClient(hosts=settings[])
self.zoo_client.start()
self.path = settings[] + settings[]
if not self.zoo_client.exists(self.path):
self.zoo_client.ensure_path(self.path) | Setup kafka |
def render_field_errors(field):
if field.errors:
html = .format(
errors=.join(field.errors)
)
return HTMLString(html)
return None | Render field errors as html. |
def _additions_remove_use_cd(**kwargs):
with _additions_mounted() as mount_point:
kernel = __grains__.get(, )
if kernel == :
return _additions_remove_linux_use_cd(mount_point, **kwargs) | Remove VirtualBox Guest Additions.
It uses the CD, connected by VirtualBox. |
def on_status_update(self, channel, callback):
if channel not in self._callbacks:
self._callbacks[channel] = []
self._callbacks[channel].append(callback) | Callback to execute on status of update of channel |
def set_input_value(self, selector, value):
script =
script = script % (selector, value)
self.evaluate(script) | Set the value of the input matched by given selector. |
async def wait_stream(aiterable):
async with streamcontext(aiterable) as streamer:
async for item in streamer:
item
try:
return item
except NameError:
raise StreamEmpty() | Wait for an asynchronous iterable to finish and return the last item.
The iterable is executed within a safe stream context.
A StreamEmpty exception is raised if the sequence is empty. |
def graph_to_dimacs(g, f):
f.write()
f.write()
f.write()
f.write()
f.write(.format(g.get_node_count() + 2, len(g.get_edges())))
f.write()
f.write()
f.write()
f.write()
f.write()
for node, weight in list(g.get_tweights().items()):
... | Persists the supplied graph in valid dimacs format into the file.
Parameters
----------
g : `~medpy.graphcut.graph.Graph`
A graph object to persist.
f : file
A file-like object. |
def diet_expert(x, hidden_size, params):
@fn_with_diet_vars(params)
def diet_expert_internal(x):
dim = x.get_shape().as_list()[-1]
h = tf.layers.dense(x, hidden_size, activation=tf.nn.relu, use_bias=False)
y = tf.layers.dense(h, dim, use_bias=False)
y *= tf.rsqrt(tf.to_float(dim * hidden_size))
... | A two-layer feed-forward network with relu activation on hidden layer.
Uses diet variables.
Recomputes hidden layer on backprop to save activation memory.
Args:
x: a Tensor with shape [batch, io_size]
hidden_size: an integer
params: a diet variable HParams object.
Returns:
a Tensor with shape... |
def add(self, effect):
effect.instance = self.instance_index
self.instance_index += 1
self.connection.send(ProtocolParser.add(effect)) | Add an LV2 plugin encapsulated as a jack client
:param Lv2Effect effect: Effect that will be loaded as LV2 plugin encapsulated |
def rpc_get_blockstack_ops_hash_at( self, block_id, **con_info ):
if not check_block(block_id):
return {: , : 400}
db = get_db_state(self.working_dir)
ops_hash = db.get_block_ops_hash( block_id )
db.close()
return self.success_response( {: ops_hash} ) | Get the hash over the sequence of names and namespaces altered at the given block.
Used by SNV clients.
Returns {'status': True, 'ops_hash': ops_hash} on success
Returns {'error': ...} on error |
def value(self):
if self._value:
return self._value
if self.formatter:
return self.formatter(self.raw)
return self.raw | Get the value of the match, using formatter if defined.
:return:
:rtype: |
def get_banks(self):
catalogs = self._get_provider_session().get_banks()
cat_list = []
for cat in catalogs:
cat_list.append(Bank(self._provider_manager, cat, self._runtime, self._proxy))
return BankList(cat_list) | Pass through to provider BankLookupSession.get_banks |
def make_assess_status_func(*args, **kwargs):
def _assess_status_func():
state, message = _determine_os_workload_status(*args, **kwargs)
status_set(state, message)
if state not in [, ]:
return message
return None
return _assess_status_func | Creates an assess_status_func() suitable for handing to pause_unit()
and resume_unit().
This uses the _determine_os_workload_status(...) function to determine
what the workload_status should be for the unit. If the unit is
not in maintenance or active states, then the message is returned to
the ca... |
def check_generic_request(self, item_session: ItemSession) -> Tuple[bool, str]:
verdict, reason, test_info = self.consult_filters(
item_session.request.url_info,
item_session.url_record)
verdict, reason = self.consult_hook(item_session, verdict,
... | Check URL filters and scripting hook.
Returns:
tuple: (bool, str) |
def geodetic2geocentric(theta, alt):
ct = np.cos(theta)
st = np.sin(theta)
a2 = 40680631.6
b2 = 40408296.0
one = a2 * st * st
two = b2 * ct * ct
three = one + two
rho = np.sqrt(three)
r = np.sqrt(alt * (alt + 2.0 * rho) + (a2 * one + b2 * two) / three)
cd = (alt + rho) / r
... | Conversion from geodetic to geocentric coordinates by using the WGS84 spheroid.
:param theta: colatitude (float, rad)
:param alt: altitude (float, km)
:return gccolat: geocentric colatitude (float, rad)
d: gccolat minus theta (float, rad)
r: geocentric radius (float, km) |
def Prep(self, size, additionalBytes):
if size > self.minalign:
self.minalign = size
alignSize = (~(len(self.Bytes) - self.Head() + additionalBytes)) + 1
alignSize &= (size - 1)
while self.Head() < alignSize+size+additionalBytes... | Prep prepares to write an element of `size` after `additional_bytes`
have been written, e.g. if you write a string, you need to align
such the int length field is aligned to SizeInt32, and the string
data follows it directly.
If all you need to do is align, `additionalBytes` will be 0. |
def find_mod_objs(modname, onlylocals=False):
__import__(modname)
mod = sys.modules[modname]
if hasattr(mod, ):
pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__]
else:
pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != ]
ismodule = inspect.ismodule
lo... | Returns all the public attributes of a module referenced by name.
.. note::
The returned list *not* include subpackages or modules of
`modname`,nor does it include private attributes (those that
beginwith '_' or are not in `__all__`).
Parameters
----------
modname : str
... |
def load_wc(cls, stream):
wc = wcxf.WC.load(stream)
return cls.from_wc(wc) | Return a `Wilson` instance initialized by a WCxf file-like object |
def add_rule(name, localport, protocol=, action=, dir=,
remoteip=):
*test8080tcp*test1icmpv4*test_remote_ip8000tcpallowin192.168.0.1
cmd = [, , , , ,
.format(name),
.format(protocol),
.format(dir),
.format(action),
.format(remoteip)]
if pr... | .. versionadded:: 2015.5.0
Add a new inbound or outbound rule to the firewall policy
Args:
name (str): The name of the rule. Must be unique and cannot be "all".
Required.
localport (int): The port the rule applies to. Must be a number between
0 and 65535. Can be a ran... |
def _handle_chat_event(self, event: events.ChatMessageWasReceived) -> None:
for subscriber in self._chat_subscribers:
try:
subscriber(event)
except Exception:
LOG.exception(self._prefix_log_message(
f"failed to send chat event ... | Not thread-safe. |
def requires_role(self, roles):
def wrap(f):
@wraps(f)
def wrapped_f(*args, **kwargs):
if isinstance(roles, list):
if any(current_user.has_role(role) for role in roles):
return f(*args, **kwargs)
elif i... | Require specific configured roles for access to a :mod:`flask` route.
:param roles: Role or list of roles to test for access
(only one role is required to pass).
:type roles: str OR list(str)
:raises: FlaskKeystoneForbidden
This method will gate a particular endpo... |
async def _send_frame(self, message: bytes, opcode: int,
compress: Optional[int]=None) -> None:
if self._closing:
ws_logger.warning()
rsv = 0
if (compress or self.compress) and opcode < 8:
if compress:
... | Send a frame over the websocket with message as its payload. |
def override_colors(self, colors):
if not isinstance(colors, dict):
return
for key in self._color[True]:
if key in colors:
self._color[True][key] = colors[key] | Override default color of elements.
:param colors: New color value for given elements
:type colors: dict |
def _text_position(size, text, font):
width, height = font.getsize(text)
left = (size - width) / 2.0
top = (size - height) / 3.0
return left, top | Returns the left-top point where the text should be positioned. |
def delete_note(self, note_id):
note, status = self.trash_note(note_id)
if (status == -1):
return note, status
params = % (str(note_id))
request = Request(url=DATA_URL+params, method=)
request.add_header(self.header, self.get_token())
try:
... | Method to permanently delete a note
Arguments:
- note_id (string): key of the note to trash
Returns:
A tuple `(note, status)`
- note (dict): an empty dict or an error message
- status (int): 0 on success and -1 otherwise |
def registry_comparison(registry0, registry1):
comparison = {: {},
: [],
: {},
: {},
: {}}
for key, info in registry1.items():
if key in registry0:
if info[1] != registry0[key][1]:
created, deleted,... | Compares two dictionaries of registry keys returning their difference. |
def hexdump(src, length=8, colorize=False):
if not src:
return str(src)
if type(src) is not bytes:
raise yubico_exception.InputError(src\ % type(src))
offset = 0
result =
for this in group(src, length):
if colorize:
last, this = this[-1], this[:-1]
... | Produce a string hexdump of src, for debug output.
Input: bytestring; output: text string |
def purge_object(self, pid, log_message=None):
kwargs = {: pid}
if log_message:
kwargs[] = log_message
response = self.api.purgeObject(**kwargs)
return response.status_code == requests.codes.ok | Purge an object from Fedora. Calls :meth:`ApiFacade.purgeObject`.
:param pid: pid of the object to be purged
:param log_message: optional log message
:rtype: boolean |
def dist(self, point, exponent=2.0):
point = np.atleast_1d(point)
if len(point) != self.ndim:
raise ValueError(
.format(self.ndim, len(point)))
if np.any(np.isnan(point)):
return float()
i_larger = np.where(point > self.max_... | Return the distance of ``point`` to this set.
Parameters
----------
point : `array-like` or float
Point whose distance to calculate. Its length must be equal
to the set's dimension. Can be a float in the 1d case.
exponent : non-zero float or ``float('inf')``, opt... |
def _open_terminal():
for x in :
for y in :
pty_name = + x + y
try:
fd = os.open(pty_name, os.O_RDWR)
except OSError:
continue
return (fd, + x + y)
raise OSError() | Open pty master and return (master_fd, tty_name). |
def get_num_nodes(properties=None, hadoop_conf_dir=None, offline=False):
return len(get_task_trackers(properties, hadoop_conf_dir, offline)) | Get the number of task trackers in the Hadoop cluster.
All arguments are passed to :func:`get_task_trackers`. |
def _landsat_parse_scene_id(sceneid):
pre_collection = r"(L[COTEM]8\d{6}\d{7}[A-Z]{3}\d{2})"
collection_1 = r"(L[COTEM]08_L\d{1}[A-Z]{2}_\d{6}_\d{8}_\d{8}_\d{2}_(T1|T2|RT))"
if not re.match("^{}|{}$".format(pre_collection, collection_1), sceneid):
raise InvalidLandsatSceneId("Could not match {}... | Parse Landsat-8 scene id.
Author @perrygeo - http://www.perrygeo.com |
def key2elements(key):
namephrase = namephrase[1:-1]
return first2words + [namephrase] + [lastword] | split key to elements |
def with_context(cls, setup_phases, teardown_phases):
setup = flatten_phases_and_groups(setup_phases)
teardown = flatten_phases_and_groups(teardown_phases)
def _context_wrapper(*phases):
return cls(setup=setup,
main=flatten_phases_and_groups(phases),
teardown=te... | Create PhaseGroup creator function with setup and teardown phases.
Args:
setup_phases: list of phase_descriptor.PhaseDescriptors/PhaseGroups/
callables/iterables, phases to run during the setup for the PhaseGroup
returned from the created function.
teardown_phases: list of phase_des... |
def uncontract_general(basis, use_copy=True):
if use_copy:
basis = copy.deepcopy(basis)
for k, el in basis[].items():
if not in el:
continue
newshells = []
for sh in el[]:
return prune_basis(basis, False) | Removes the general contractions from a basis set
The input basis set is not modified. The returned basis
may have functions with coefficients of zero and may have duplicate
shells.
If use_copy is True, the input basis set is not modified. |
def shape_rb_data(raw_rb):
rb_data = []
rb_data.append(np.mean(raw_rb, 0))
rb_data.append(np.std(raw_rb, 0))
return rb_data | Take the raw rb data and convert it into averages and std dev
Args:
raw_rb (numpy.array): m x n x l list where m is the number of seeds, n
is the number of Clifford sequences and l is the number of qubits
Return:
numpy_array: 2 x n x l list where index 0 is the mean over seeds, 1 i... |
def load(self, path):
DB = joblib.load(path)
self.catalog = DB.catalog
self.n_sources = DB.n_sources
self.name = DB.name
self.history = DB.history
del DB | Load the catalog from file
Parameters
----------
path: str
The path to the file |
def create_token(self, *, holder_name, card_number, credit_card_cvv, expiration_date, token_type=,
identity_document=None, billing_address=None, additional_details=None):
headers = self.client._get_public_headers()
payload = {
"token_type": token_type,
... | When creating a Token, remember to use the public-key header instead of the private-key header,
and do not include the app-id header.
Args:
holder_name: Name of the credit card holder.
card_number: Credit card number.
credit_card_cvv: The CVV number on the card (3 or... |
def Beta(alpha: vertex_constructor_param_types, beta: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Double(context.jvm_view().BetaVertex, label, cast_to_double_vertex(alpha), cast_to_double_vertex(beta)) | One to one constructor for mapping some tensorShape of alpha and beta to
a matching tensorShaped Beta.
:param alpha: the alpha of the Beta with either the same tensorShape as specified for this vertex or a scalar
:param beta: the beta of the Beta with either the same tensorShape as specified for this v... |
def close_all_files(self):
while len(self.open_file_infos) > 0:
file_info = self.open_file_infos.pop(0)
file_info.file_handle.close()
file_info.file_handle = None
self.closed_file_infos.append(file_info)
self.can_open_more_files = True | Close all open files (so that we can open more). |
def leaveWhitespace( self ):
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self | Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on
all contained expressions. |
def kind(self):
if "kind" not in self.attrs.keys():
self.attrs["kind"] = "None"
value = self.attrs["kind"]
return value if not value == "None" else None | Kind. |
def price_in_btc(self, minimum: float = 0, maximum: float = 2) -> str:
return .format(
self.random.uniform(
minimum,
maximum,
precision=7,
),
) | Generate random price in BTC.
:param minimum: Minimum value of price.
:param maximum: Maximum value of price.
:return: Price in BTC. |
def _sorted_copy(self, comparison, reversed=False):
sorted = self.copy()
_list.sort(sorted, comparison)
if reversed:
_list.reverse(sorted)
return sorted | Returns a sorted copy with the colors arranged according to the given comparison. |
def include_theme_files(self, fragment):
theme = self.get_theme()
if not theme or not in theme:
return
theme_package, theme_files = theme.get(, None), theme.get(, [])
resource_loader = ResourceLoader(theme_package)
for theme_file in theme_files:
... | Gets theme configuration and renders theme css into fragment |
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level=):
time_counter = 0
start_time = time.time()
log.trace(, task, task.__class__.__name__)
try:
task_info = task.info
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.... | Waits for a task to be completed.
task
The task to wait for.
instance_name
The name of the ESXi host, vCenter Server, or Virtual Machine that
the task is being run on.
task_type
The type of task being performed. Useful information for debugging purposes.
sleep_seconds... |
def print_summary(self, w=0, objs=(), **print3opts):
self._printf(, w, self._total, _SI(self._total), self._incl, **print3opts)
if self._mask:
self._printf(, w, self._mask + 1, **print3opts)
self._printf(, w, _sizeof_Cvoidp, **print3opts)
n = len(objs or ())
... | Print the summary statistics.
*w=0* -- indentation for each line
*objs=()* -- optional, list of objects
*print3options* -- print options, as in Python 3.0 |
def build_image(self, image_name: str, image_tag: str,
repo_path: Path,
requirements_option: RequirementsOptions,
dependencies: Optional[List[str]]):
if self.inherit_image is not None:
return self.build_image_from_inherited_image(i... | Builds an image for specific requirements and dependencies, based on the settings.
:param image_name: How the image should be named
:param image_tag: And what tag it should have.
:param repo_path: Path to the cloned repository.
:param requirements_option: How requirements are set in the... |
def writeConfig(self):
with self.writeSharedFileStream(, isProtected=False) as fileHandle:
pickle.dump(self.__config, fileHandle, pickle.HIGHEST_PROTOCOL) | Persists the value of the :attr:`AbstractJobStore.config` attribute to the
job store, so that it can be retrieved later by other instances of this class. |
def send_email(template_name, context=None, *args, **kwargs):
context = context or {}
try:
html = render_to_string(
context=context,
template_name=.format(template_name),
)
except TemplateDoesNotExist:
html =
try:
text = render_to_string(
... | Send a templated email.
To generate the message used for the email, the method first
searches for an HTML template with the given name
(eg: <template>.html), and renders it with the provided context. The
process is repeated for the plain text message except a 'txt'
extension is used. All other opti... |
def aggregate_daily_with_joins(image_coll, start_date, end_date,
agg_type=):
if start_date and end_date:
date_list = ee.List.sequence(
ee.Date(start_date).millis(), ee.Date(end_date).millis(),
24 * 3600 * 1000)
... | Aggregate images by day (using joins)
The primary purpose of this function is to join separate Landsat images
from the same path into a single daily image.
Parameters
----------
image_coll : ee.ImageCollection
Input image collection.
start_date : date, number, string
Start dat... |
def merge_dict(dict1, dict2):
if not isinstance(dict1, dict) or not isinstance(dict2, dict):
raise ValueError()
result = copy.deepcopy(dict1)
for k, v in dict2.items():
if k in result and isinstance(result[k], dict):
result[k] = merge_dict(result[k], v)
else:
... | Recursively merge dictionaries: dict2 on to dict1. This differs
from dict.update() in that values that are dicts are recursively merged.
Note that only dict value types are merged, not lists, etc.
:param dict dict1: dictionary to merge to
:param dict dict2: dictionary to merge with
:rtype: dict
... |
def __threshold(self, ymx_i):
return ymx_i - (self.S * np.diff(self.xsn).mean()) | Calculates the difference threshold for a
given difference local maximum.
Parameters
-----------
ymx_i : float
The normalized y value of a local maximum. |
def find_runner(program):
if os.path.isfile(program) and not os.access(program, os.X_OK):
try:
opened = open(program)
except PermissionError:
return None
first_line = opened.readline().strip()
if first_line.startswith():
return shlex.... | Return a command that will run program.
Args:
program: The string name of the program to try to run.
Returns:
commandline list of strings to run the program (eg. with subprocess.call()) or None |
def add_path_argument(cls, group, argname, dest=None, help_=None):
prefixed = % (cls.argument_prefix, argname)
if dest is None:
dest = prefixed.replace(, )
final_dest = dest[len(cls.argument_prefix) + 1:]
else:
final_dest = dest
dest = %... | Subclasses may call this to expose a path argument.
Args:
group: arparse.ArgumentGroup, the extension argument group
argname: str, the name of the argument, will be namespaced.
dest: str, similar to the `dest` argument of
`argparse.ArgumentParser.add_argument... |
def encrypt_message(data_to_encrypt, enc_alg, encryption_cert):
enc_alg_list = enc_alg.split()
cipher, key_length, mode = enc_alg_list[0], enc_alg_list[1], enc_alg_list[2]
enc_alg_asn1, key, encrypted_content = None, None, None
if cipher == :
key = util.rand_bytes(int(key_length)//8)... | Function encrypts data and returns the generated ASN.1
:param data_to_encrypt: A byte string of the data to be encrypted
:param enc_alg: The algorithm to be used for encrypting the data
:param encryption_cert: The certificate to be used for encrypting the data
:return: A CMS ASN.1 byte strin... |
def _get_answer_spans(answer_list, answer_start_list):
    """Find all answer spans from the context.

    :param list[str] answer_list: List of all answers.
    :param list[int] answer_start_list: List of all answers' start indices.

    Returns
    -------
    list[tuple]
        One ``(answer_start_index, answer_end_index)`` pair per answer,
        where the end index is start + len(answer).
    """
    spans = []
    for idx, answer in enumerate(answer_list):
        begin = answer_start_list[idx]
        spans.append((begin, begin + len(answer)))
    return spans
for i, answer in enumerate(answer_list)] | Find all answer spans from the context, returning start_index and end_index
:param list[str] answer_list: List of all answers
:param list[int] answer_start_list: List of all answers' start indices
Returns
-------
List[Tuple]
list of Tuple(answer_start_index answer_e... |
def setup_and_load_epoch(hparams, data_dir, which_epoch_data=None):
t2t_env = rl_utils.setup_env(
hparams, batch_size=hparams.real_batch_size,
max_num_noops=hparams.max_num_noops
)
if which_epoch_data is not None:
if which_epoch_data == "last":
which_epoch_data = infer_last_epoch_num(d... | Load T2TGymEnv with data from one epoch.
Args:
hparams: hparams.
data_dir: data directory.
which_epoch_data: data from which epoch to load.
Returns:
env. |
def set_checks(self, checks, position=None):
    """Set the checks at the given position.

    :param checks: the checks value stored at slot 0 of the checkdef.
    :param position: index into ``self.checkdefs``; defaults to
        ``self.position`` when omitted.
    """
    # Fall back to the instance's current position when none is given.
    target = self.position if position is None else position
    self.checkdefs[target][0] = checks
def show_type(cls, result):
    """Format a parse result for display.

    :param TryHaskell.Result result: Parse result of JSON data.
    :rtype: str|unicode

    Returns ``"expr :: type"`` (GHCi style) on success, otherwise the
    raw error value carried by the result.
    """
    if result.ok:
        # Fix: the original separator string literal was lost in the
        # source ("return .join(...)" is a syntax error). GHCi renders
        # a typed expression as "expr :: type", so ' :: ' is restored.
        # NOTE(review): confirm the exact separator against upstream.
        return " :: ".join([result.expr, result.type])
    return result.value
:rtype: str|unicode |
def reset_mode(self):
    """Send a Reset command to set the operation mode to 0.

    The statement order is part of the device protocol: command, then
    ACK, then a settle delay — do not reorder.
    """
    # 0x18 is the Reset command code; payload b"\x01" presumably selects
    # mode 0 — TODO confirm against the chipset datasheet.
    self.command(0x18, b"\x01", timeout=0.1)
    # Acknowledge so the chipset commits the reset.
    self.transport.write(Chipset.ACK)
    # Give the hardware ~10 ms to finish resetting before further traffic.
    time.sleep(0.010)
def save_admin_log(build, **kwargs):
message = kwargs.pop(, None)
release = kwargs.pop(, None)
run = kwargs.pop(, None)
if not len(kwargs) == 1:
raise TypeError()
log_enum = kwargs.keys()[0]
log_type = getattr(models.AdminLog, log_enum.upper(), None)
if not log_type:
r... | Saves an action to the admin log. |
def _print_MatMul(self, expr):
from sympy import MatrixExpr
links = []
for i, j in zip(expr.args[1:], expr.args[:-1]):
if isinstance(i, MatrixExpr) and isinstance(j, MatrixExpr):
links.append()
else:
links.append()
printout... | Matrix multiplication printer. The sympy one turns everything into a
dot product without type-checking. |
def extract_root_meta(cls, serializer, resource):
many = False
if hasattr(serializer, ):
many = True
serializer = serializer.child
data = {}
if getattr(serializer, , None):
json_api_meta = serializer.get_root_meta(resource, many)
... | Calls a `get_root_meta` function on a serializer, if it exists. |
def check_config(data):
    """Check that the required metadata is present.

    TODO(crow): check more keys.

    :param data: mapping of configuration keys parsed from ``_config.yml``.
    :return: True if all required keys are present, False otherwise.
    """
    is_right = True
    if "title" not in data:
        # Fix: the quoted key name was missing from the error message
        # ("No in _config.yml"), making the log useless.
        logging.error("No 'title' in _config.yml")
        is_right = False
    return is_right
TODO(crow): check more |
def _get_date(day=None, month=None, year=None):
now = datetime.date.today()
if day is None:
return now
try:
return datetime.date(
day=int(day),
month=int(month or now.month),
year=int(year or now.year),
)
except ValueError as error:
... | Returns a datetime object with optional params or today. |
def to_dict(self):
input_dict = super(Standardize, self)._save_to_input_dict()
input_dict["class"] = "GPy.util.normalizer.Standardize"
if self.mean is not None:
input_dict["mean"] = self.mean.tolist()
input_dict["std"] = self.std.tolist()
return input_di... | Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object |
def set_tag(self, project, repository, tag_name, commit_revision, description=None):
url = .format(project=project,
repository=repository)
body = {}
if tag_name is not None:
body[] = tag_name
... | Creates a tag using the information provided in the {@link RestCreateTagRequest request}
The authenticated user must have REPO_WRITE permission for the context repository to call this resource.
:param project:
:param repository:
:param tag_name:
:param commit_revision: commit has... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.