code stringlengths 26 79.6k | docstring stringlengths 1 46.9k |
|---|---|
def calc_parent(self, i, j, h):
N = self.repo.array_size
c_i = i
c_j = j
c_h = h
c_n = c_i // (N ** c_h)
p_n = c_n // N
p_p = c_n % N
p_h = c_h + 1
span = N ** p_h
p_i = p_n * span
p_j = p_i + span
assert p_i <= c_i, .format(p_i, p_j)
assert p_j >= c_j, .format(p_i, p_j)
return p_i, p_j, p_h, p_p | Returns get_big_array and end of span of parent sequence that contains given child. |
def setns(fd, nstype):
res = lib.setns(fd, nstype)
if res != 0:
_check_error(ffi.errno) | Reassociate thread with a namespace
:param fd int: The file descriptor referreing to one of the namespace
entries in a :directory::`/proc/<pid>/ns/` directory.
:param nstype int: The type of namespace the calling thread should be
reasscoiated with. |
def autobuild_trub_script(file_name, slot_assignments=None, os_info=None, sensor_graph=None,
app_info=None, use_safeupdate=False):
build_update_script(file_name, slot_assignments, os_info, sensor_graph, app_info, use_safeupdate) | Build a trub script that loads given firmware into the given slots.
slot_assignments should be a list of tuples in the following form:
("slot X" or "controller", firmware_image_name)
The output of this autobuild action will be a trub script in
build/output/<file_name> that assigns the given firmware to each slot in
the order specified in the slot_assignments list.
Args:
file_name (str): The name of the output file that we should create.
This file name should end in .trub
slot_assignments (list of (str, str)): A list of tuples containing
the slot name and the firmware image that we should use to build
our update script. Optional
os_info (tuple(int, str)): A tuple of OS version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
sensor_graph (str): Name of sgf file. Optional.
app_info (tuple(int, str)): A tuple of App version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
use_safeupdate (bool): If True, Enables safemode before the firmware update records, then
disables them after the firmware update records. |
def _handle_get(self, transaction):
path = str("/" + transaction.request.uri_path)
transaction.response = Response()
transaction.response.destination = transaction.request.source
transaction.response.token = transaction.request.token
if path == defines.DISCOVERY_URL:
transaction = self._server.resourceLayer.discover(transaction)
else:
try:
resource = self._server.root[path]
except KeyError:
resource = None
if resource is None or path == :
transaction.response.code = defines.Codes.NOT_FOUND.number
else:
transaction.resource = resource
transaction = self._server.resourceLayer.get_resource(transaction)
return transaction | Handle GET requests
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the edited transaction with the response to the request |
def zsh_complete(self, path, cmd, *cmds, sourceable=False):
grouping = internal.zsh_version() >= (5, 4)
path = pathlib.Path(path)
firstline = [, cmd]
firstline.extend(cmds)
subcmds = list(self.subcmds.keys())
with path.open() as zcf:
print(*firstline, end=, file=zcf)
print(.format(cmd), file=zcf)
print(, file=zcf)
print(, end=BLK, file=zcf)
if subcmds:
substrs = ["{}\\:".format(sub, self.subcmds[sub].help)
for sub in subcmds]
print(.format(.join(substrs)),
end=BLK, file=zcf)
self._zsh_comp_command(zcf, None, grouping)
if subcmds:
print("", file=zcf)
print(, file=zcf)
for sub in subcmds:
print(.format(sub=sub, cmd=cmd),
file=zcf)
print(, file=zcf)
print(, file=zcf)
for sub in subcmds:
print(.format(cmd, sub), file=zcf)
print(, end=BLK, file=zcf)
self._zsh_comp_command(zcf, sub, grouping)
print(, file=zcf)
if sourceable:
print(.format(cmd), *cmds, file=zcf) | Write zsh compdef script.
Args:
path (path-like): desired path of the compdef script.
cmd (str): command name that should be completed.
cmds (str): extra command names that should be completed.
sourceable (bool): if True, the generated file will contain an
explicit call to ``compdef``, which means it can be sourced
to activate CLI completion. |
def add(self, text, checked=False, sort=None):
if self.parent is None:
raise exception.InvalidException()
node = self.parent.add(text, checked, sort)
self.indent(node)
return node | Add a new sub item to the list. This item must already be attached to a list.
Args:
text (str): The text.
checked (bool): Whether this item is checked.
sort (int): Item id for sorting. |
def find_xor_mask(data, alphabet=None, max_depth=3, min_depth=0, iv=None):
if alphabet is None:
alphabet = set(i for i in range(256) if i not in (0, 10, 13))
else:
alphabet = set(six.iterbytes(alphabet))
if iv is None:
iv = b * len(data)
if len(data) != len(iv):
raise ValueError()
if not min_depth and data == iv:
return []
data = xor(data, iv)
mask = 0
for ch in alphabet:
mask |= ch
mask = ~mask
data_map_tmpl = {}
for i, ch in enumerate(six.iterbytes(data)):
if ch & mask:
raise ValueError()
data_map_tmpl.setdefault(ch, []).append(i)
return [
b.join(six.int2byte(b) for b in r)
for r in results
]
raise ValueError() | Produce a series of bytestrings that when XORed together end up being
equal to ``data`` and only contain characters from the giving
``alphabet``. The initial state (or previous state) can be given as
``iv``.
Arguments:
data (bytes): The data to recreate as a series of XOR operations.
alphabet (bytes): The bytestring containing the allowed characters
for the XOR values. If ``None``, all characters except NUL bytes,
carriage returns and newlines will be allowed.
max_depth (int): The maximum depth to look for a solution.
min_depth (int): The minimum depth to look for a solution.
iv (bytes): Initialization vector. If ``None``, it will be assumed the
operation starts at an all zero string.
Returns:
A list of bytestrings that, when XOR'ed with ``iv`` (or just eachother
if ``iv` is not providede) will be the same as ``data``.
Examples:
Produce a series of strings that when XORed together will result in
the string 'pwnypack' using only ASCII characters in the range 65 to
96:
>>> from pwny import *
>>> find_xor_mask('pwnypack', alphabet=''.join(chr(c) for c in range(65, 97)))
[b'````````', b'AAAAABAA', b'QVOXQCBJ']
>>> xor(xor(b'````````', b'AAAAABAA'), b'QVOXQCBJ')
'pwnypack' |
def _get_dir(toml_config_setting, sawtooth_home_dir, windows_dir, default_dir):
conf_file = os.path.join(_get_config_dir(), )
if os.path.exists(conf_file):
with open(conf_file) as fd:
raw_config = fd.read()
toml_config = toml.loads(raw_config)
if toml_config_setting in toml_config:
return toml_config[toml_config_setting]
if in os.environ:
return os.path.join(os.environ[], sawtooth_home_dir)
if os.name == :
base_dir = \
os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
return os.path.join(base_dir, windows_dir)
return default_dir | Determines the directory path based on configuration.
Arguments:
toml_config_setting (str): The name of the config setting related
to the directory which will appear in path.toml.
sawtooth_home_dir (str): The directory under the SAWTOOTH_HOME
environment variable. For example, for 'data' if the data
directory is $SAWTOOTH_HOME/data.
windows_dir (str): The windows path relative to the computed base
directory.
default_dir (str): The default path on Linux.
Returns:
directory (str): The path. |
def sort_idx(m, reverse=False):
return sorted(range(len(m)), key=lambda k: m[k], reverse=reverse) | Return the indices of m in sorted order (default: ascending order) |
def meta(self):
mount_points = []
for overlay in self.overlays:
mount_points.append(overlay.mount_point)
return [self.end_dir, self.start_dir, mount_points] | Data for loading later |
def _WriteFileChunk(self, chunk):
if chunk.chunk_index == 0:
st = os.stat_result((0o644, 0, 0, 0, 0, 0, chunk.total_size, 0, 0, 0))
target_path = _ClientPathToString(chunk.client_path, prefix=self.prefix)
yield self.archive_generator.WriteFileHeader(target_path, st=st)
yield self.archive_generator.WriteFileChunk(chunk.data)
if chunk.chunk_index == chunk.total_chunks - 1:
yield self.archive_generator.WriteFileFooter()
self.archived_files.add(chunk.client_path) | Yields binary chunks, respecting archive file headers and footers.
Args:
chunk: the StreamedFileChunk to be written |
def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt:
attr_type = orb(_pkt[0])
return cls.registered_attributes.get(attr_type, cls)
return cls | Returns the right RadiusAttribute class for the given data. |
def count_cores_in_state(self, state, app_id):
if (isinstance(state, collections.Iterable) and
not isinstance(state, str)):
return sum(self.count_cores_in_state(s, app_id) for s in state)
if isinstance(state, str):
try:
state = getattr(consts.AppState, state)
except AttributeError:
pass
if state not in consts.AppState:
raise ValueError(
"count_cores_in_state: Unknown state {}".format(
repr(state)))
region = 0x0000ffff
level = (region >> 16) & 0x3
mask = region & 0x0000ffff
arg1 = consts.diagnostic_signal_types[consts.AppDiagnosticSignal.count]
arg2 = ((level << 26) | (1 << 22) |
(consts.AppDiagnosticSignal.count << 20) | (state << 16) |
(0xff << 8) | app_id)
arg3 = mask
return self._send_scp(
255, 255, 0, SCPCommands.signal, arg1, arg2, arg3).arg1 | Count the number of cores in a given state.
.. warning::
In current implementations of SARK, signals (which are used to
determine the state of cores) are highly likely to arrive but this
is not guaranteed (especially when the system's network is heavily
utilised). Users should treat this mechanism with caution. Future
versions of SARK may resolve this issue.
Parameters
----------
state : string or :py:class:`~rig.machine_control.consts.AppState` or
iterable
Count the number of cores currently in this state. This may be
either an entry of the
:py:class:`~rig.machine_control.consts.AppState` enum or, for
convenience, the name of a state (defined in
:py:class:`~rig.machine_control.consts.AppState`) as a string or
an iterable of these, in which case the total count will be
returned. |
def program(self, prog, offset = 0):
for addr, word in enumerate(prog):
self.program_word(offset + addr, word) | .. _program:
Write the content of the iterable ``prog`` starting with the optional offset ``offset``
to the device.
Invokes program_word_. |
def configure_app(app):
app.config_from_object()
if environ.get():
app.config_from_envvar()
return
config_map = {
: ,
: ,
: ,
: ,
}
rio_env = environ.get(, )
config = config_map.get(rio_env, config_map[])
app.config_from_object(config) | Configure Flask/Celery application.
* Rio will find environment variable `RIO_SETTINGS` first::
$ export RIO_SETTINGS=/path/to/settings.cfg
$ rio worker
* If `RIO_SETTINGS` is missing, Rio will try to load configuration
module in `rio.settings` according to another environment
variable `RIO_ENV`. Default load `rio.settings.dev`.
$ export RIO_ENV=prod
$ rio worker |
def get_daemon_stats(self, details=False):
logger.debug("Get daemon statistics for %s, %s %s", self.name, self.alive, self.reachable)
return self.con.get( % ( if details else )) | Send a HTTP request to the satellite (GET /get_daemon_stats)
:return: Daemon statistics
:rtype: dict |
def get_logger(name):
logger = logging.getLogger(name)
logger.setLevel(getenv(, ))
return logger | Get a logger with the specified name. |
def add_func(self, transmute_func, transmute_context):
swagger_path = transmute_func.get_swagger_path(transmute_context)
for p in transmute_func.paths:
self.add_path(p, swagger_path) | add a transmute function's swagger definition to the spec |
def create_container_instance_group(access_token, subscription_id, resource_group,
container_group_name, container_list, location,
ostype=, port=80, iptype=):
endpoint = .join([get_rm_endpoint(),
, subscription_id,
, resource_group,
,
container_group_name,
, CONTAINER_API])
container_group_body = {: location}
properties = {: ostype}
properties[] = container_list
ipport = {: }
ipport[] = port
ipaddress = {: [ipport]}
ipaddress[] = iptype
properties[] = ipaddress
container_group_body[] = properties
body = json.dumps(container_group_body)
return do_put(endpoint, body, access_token) | Create a new container group with a list of containers specifified by container_list.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
container_list (list): A list of container properties. Use create_container_definition to
create each container property set.
location (str): Azure data center location. E.g. westus.
ostype (str): Container operating system type. Linux or Windows.
port (int): TCP port number. E.g. 8080.
iptype (str): Type of IP address. E.g. public.
Returns:
HTTP response with JSON body of container group. |
def to_glyphs_family_user_data_from_ufo(self, ufo):
target_user_data = self.font.userData
try:
for key, value in ufo.lib[FONT_USER_DATA_KEY].items():
if key not in target_user_data.keys():
target_user_data[key] = value
except KeyError:
pass | Set the GSFont userData from the UFO family-wide lib data. |
def _safe_copy_proto_list_values(dst_proto_list, src_proto_list, get_key):
def _assert_proto_container_unique_keys(proto_list, get_key):
keys = set()
for item in proto_list:
key = get_key(item)
if key in keys:
raise _ProtoListDuplicateKeyError(key)
keys.add(key)
_assert_proto_container_unique_keys(dst_proto_list, get_key)
_assert_proto_container_unique_keys(src_proto_list, get_key)
key_to_proto = {}
for proto in dst_proto_list:
key = get_key(proto)
key_to_proto[key] = proto
for proto in src_proto_list:
key = get_key(proto)
if key in key_to_proto:
if proto != key_to_proto.get(key):
raise _SameKeyDiffContentError(key)
else:
dst_proto_list.add().CopyFrom(proto) | Safely merge values from `src_proto_list` into `dst_proto_list`.
Each element in `dst_proto_list` must be mapped by `get_key` to a key
value that is unique within that list; likewise for `src_proto_list`.
If an element of `src_proto_list` has the same key as an existing
element in `dst_proto_list`, then the elements must also be equal.
Args:
dst_proto_list: A `RepeatedCompositeContainer` or
`RepeatedScalarContainer` into which values should be copied.
src_proto_list: A container holding the same kind of values as in
`dst_proto_list` from which values should be copied.
get_key: A function that takes an element of `dst_proto_list` or
`src_proto_list` and returns a key, such that if two elements have
the same key then it is required that they be deep-equal. For
instance, if `dst_proto_list` is a list of nodes, then `get_key`
might be `lambda node: node.name` to indicate that if two nodes
have the same name then they must be the same node. All keys must
be hashable.
Raises:
_ProtoListDuplicateKeyError: A proto_list contains items with duplicate
keys.
_SameKeyDiffContentError: An item with the same key has different contents. |
def modify_attached_policies(self, role_name, new_policies):
parts = role_name.split(, 1)
if len(parts) == 2:
prefix, name = parts
prefix = "/{0}/".format(prefix)
else:
prefix = "/"
name = parts[0]
current_attached_policies = []
with self.ignore_missing():
current_attached_policies = self.client.list_attached_role_policies(RoleName=name)
current_attached_policies = [p[] for p in current_attached_policies["AttachedPolicies"]]
new_attached_policies = ["arn:aws:iam::aws:policy/{0}".format(p) for p in new_policies]
changes = list(Differ.compare_two_documents(current_attached_policies, new_attached_policies))
if changes:
with self.catch_boto_400("Couldn't modify attached policies", role=role_name):
for policy in new_attached_policies:
if policy not in current_attached_policies:
for _ in self.change("+", "attached_policy", role=role_name, policy=policy):
self.client.attach_role_policy(RoleName=name, PolicyArn=policy)
for policy in current_attached_policies:
if policy not in new_attached_policies:
for _ in self.change("-", "attached_policy", role=role_name, changes=changes, policy=policy):
self.client.detach_role_policy(RoleName=name, PolicyArn=policy) | Make sure this role has just the new policies |
def on_touch_down(self, touch):
if self.parent is None:
return
if self.collide_point(*touch.pos):
self.parent.bar_touched(self, touch) | Tell my parent if I've been touched |
def calc_av_uv_v1(self):
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
for i in range(2):
if flu.h <= con.hm:
flu.av[i] = 0.
flu.uv[i] = 0.
elif flu.h <= (con.hm+der.hv[i]):
flu.av[i] = (flu.h-con.hm)*(con.bv[i]+(flu.h-con.hm)*con.bnv[i]/2.)
flu.uv[i] = con.bv[i]+(flu.h-con.hm)*(1.+con.bnv[i]**2)**.5
else:
flu.av[i] = (der.hv[i]*(con.bv[i]+der.hv[i]*con.bnv[i]/2.) +
((flu.h-(con.hm+der.hv[i])) *
(con.bv[i]+der.hv[i]*con.bnv[i])))
flu.uv[i] = ((con.bv[i])+(der.hv[i]*(1.+con.bnv[i]**2)**.5) +
(flu.h-(con.hm+der.hv[i]))) | Calculate the flown through area and the wetted perimeter of both
forelands.
Note that the each foreland lies between the main channel and one
outer embankment and that water flowing exactly above the a foreland
is contributing to |AV|. The theoretical surface seperating water
above the main channel from water above the foreland is not
contributing to |UV|, but the surface seperating water above the
foreland from water above its outer embankment is contributing to |UV|.
Required control parameters:
|HM|
|BV|
|BNV|
Required derived parameter:
|HV|
Required flux sequence:
|H|
Calculated flux sequence:
|AV|
|UV|
Examples:
Generally, right trapezoids are assumed. Here, for simplicity, both
forelands are assumed to be symmetrical. Their smaller bases (bottoms)
hava a length of 2 meters, their non-vertical legs show an inclination
of 1 meter per 4 meters, and their height (depths) is 1 meter. Both
forelands lie 1 meter above the main channels bottom.
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> hm(1.0)
>>> bv(2.0)
>>> bnv(4.0)
>>> derived.hv(1.0)
The first example deals with normal flow conditions, where water flows
within the main channel completely (|H| < |HM|):
>>> fluxes.h = 0.5
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(0.0, 0.0)
>>> fluxes.uv
uv(0.0, 0.0)
The second example deals with moderate high flow conditions, where
water flows over both forelands, but not over their embankments
(|HM| < |H| < (|HM| + |HV|)):
>>> fluxes.h = 1.5
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(1.5, 1.5)
>>> fluxes.uv
uv(4.061553, 4.061553)
The third example deals with extreme high flow conditions, where
water flows over the both foreland and their outer embankments
((|HM| + |HV|) < |H|):
>>> fluxes.h = 2.5
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(7.0, 7.0)
>>> fluxes.uv
uv(6.623106, 6.623106)
The forth example assures that zero widths or hights of the forelands
are handled properly:
>>> bv.left = 0.0
>>> derived.hv.right = 0.0
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(4.0, 3.0)
>>> fluxes.uv
uv(4.623106, 3.5) |
def gopro_set_response_send(self, cmd_id, status, force_mavlink1=False):
return self.send(self.gopro_set_response_encode(cmd_id, status), force_mavlink1=force_mavlink1) | Response from a GOPRO_COMMAND set request
cmd_id : Command ID (uint8_t)
status : Status (uint8_t) |
def avg_gate_fidelity(self, reference_unitary):
process_fidelity = self.process_fidelity(reference_unitary)
dimension = self.pauli_basis.ops[0].shape[0]
return (dimension * process_fidelity + 1.0) / (dimension + 1.0) | Compute the average gate fidelity of the estimated process with respect to a unitary
process. See `Chow et al., 2012, <https://doi.org/10.1103/PhysRevLett.109.060501>`_
:param (qutip.Qobj|matrix-like) reference_unitary: A unitary operator that induces a process
as `rho -> other*rho*other.dag()`, alternatively a superoperator or Pauli-transfer matrix.
:return: The average gate fidelity, a real number between 1/(d+1) and 1, where d is the
Hilbert space dimension.
:rtype: float |
def simple_prot(x, start):
for i in range(start,len(x)-1):
a,b,c = x[i-1], x[i], x[i+1]
if b - a > 0 and b -c >= 0:
return i
else:
return None | Find the first peak to the right of start |
def _qnwcheb1(n, a, b):
nodes = (b+a)/2 - (b-a)/2 * np.cos(np.pi/n * np.linspace(0.5, n-0.5, n))
t1 = np.arange(1, n+1) - 0.5
t2 = np.arange(0.0, n, 2)
t3 = np.concatenate((np.array([1.0]),
-2.0/(np.arange(1.0, n-1, 2)*np.arange(3.0, n+1, 2))))
weights = ((b-a)/n)*np.cos(np.pi/n*np.outer(t1, t2)) @ t3
return nodes, weights | Compute univariate Guass-Checbychev quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
nodes : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
Based of original function ``qnwcheb1`` in CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002. |
def check_field_multiplicity(tag, previous_tags):
fail = False
if not tag.field.multiple:
if not tag.id:
fail = previous_tags.filter(field=tag.field)
else:
fail = previous_tags.filter(field=tag.field).count() > 1
return fail | Check the multiplicity of a 'field' for an object. |
def records(self):
return [self._records[i] for i in range(len(self._records))] | Returns a list of records in SORT_KEY order |
def _get_options_group(group=None):
hex_options = frozenset([,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
,
])
unchecked_options = frozenset([,
,
,
,
,
])
])
hex_or_none_options = hex_options.intersection(none_options)
allowed = hex_options.union(unchecked_options, other_options, dir_options,
keyring_options, file_or_none_options,
pref_options, none_options)
if group and group in locals().keys():
return locals()[group] | Get a specific group of options which are allowed. |
def to_json(self):
result = super(Webhook, self).to_json()
result.update({
: self.name,
: self.url,
: self.topics,
: self.http_basic_username,
: self.headers
})
if self.filters:
result.update({: self.filters})
if self.transformation:
result.update({: self.transformation})
return result | Returns the JSON representation of the webhook. |
def set_data(self, data, invsigma=None):
self.data = np.array(data, dtype=np.float, ndmin=1)
if invsigma is None:
self.invsigma = np.ones(self.data.shape)
else:
i = np.array(invsigma, dtype=np.float)
self.invsigma = np.broadcast_arrays(self.data, i)[1]
if self.invsigma.shape != self.data.shape:
raise ValueError()
return self | Set the data to be modeled.
Returns *self*. |
def get(interface, method, version=1,
apihost=DEFAULT_PARAMS[], https=DEFAULT_PARAMS[],
caller=None, session=None, params=None):
url = u"%s://%s/%s/%s/v%s/" % (
if https else , apihost, interface, method, version)
return webapi_request(url, , caller=caller, session=session, params=params) | Send GET request to an API endpoint
.. versionadded:: 0.8.3
:param interface: interface name
:type interface: str
:param method: method name
:type method: str
:param version: method version
:type version: int
:param apihost: API hostname
:type apihost: str
:param https: whether to use HTTPS
:type https: bool
:param params: parameters for endpoint
:type params: dict
:return: endpoint response
:rtype: :class:`dict`, :class:`lxml.etree.Element`, :class:`str` |
def move_user_data(primary, secondary):
submissions = Submission.objects.filter(authors__id=secondary.pk)
for subm in submissions:
if subm.submitter == secondary:
subm.submitter = primary
subm.authors.remove(secondary)
subm.authors.add(primary)
subm.save()
try:
for course in secondary.profile.courses.all():
primary.profile.courses.add(course)
primary.profile.save()
except UserProfile.DoesNotExist:
pass | Moves all submissions and other data linked to the secondary user into the primary user.
Nothing is deleted here, we just modify foreign user keys. |
def build_chvatal_graph():
graph = build_cycle_graph(12)
edge_tpls = [
(1,7), (1,9), (2,5), (2,11),
(3,7), (3,9), (4,10), (4,12),
(5,8), (6,10), (6,12), (8,11),
]
for i, j in edge_tpls:
graph.new_edge(i, j)
return graph | Makes a new Chvatal graph.
Ref: http://mathworld.wolfram.com/ChvatalGraph.html |
def get_container_list(self) -> list:
containers = []
containers_list = self._client.containers.list()
for c_list in containers_list:
containers.append(c_list.short_id)
return containers | Get list of containers.
Returns:
list, all the ids of containers |
def gen_signature(self, privkey, pubkey, sig_path):
return salt.crypt.gen_signature(privkey,
pubkey,
sig_path,
self.passphrase) | Generate master public-key-signature |
def _compile(self, parselet_node, level=0):
if self.DEBUG:
debug_offset = "".join([" " for x in range(level)])
if self.DEBUG:
print(debug_offset, "%s::compile(%s)" % (
self.__class__.__name__, parselet_node))
if isinstance(parselet_node, dict):
parselet_tree = ParsleyNode()
for k, v in list(parselet_node.items()):
try:
m = self.REGEX_PARSELET_KEY.match(k)
if not m:
if self.DEBUG:
print(debug_offset, "could not parse key", k)
raise InvalidKeySyntax(k)
except:
raise InvalidKeySyntax("Key %s is not valid" % k)
key = m.group()
key_required = True
operator = m.group()
if operator == :
key_required = False
scope = m.group()
if isinstance(v, (list, tuple)):
v = v[0]
iterate = True
else:
iterate = False
try:
parsley_context = ParsleyContext(
key,
operator=operator,
required=key_required,
scope=self.selector_handler.make(scope) if scope else None,
iterate=iterate)
except SyntaxError:
if self.DEBUG:
print("Invalid scope:", k, scope)
raise
if self.DEBUG:
print(debug_offset, "current context:", parsley_context)
try:
child_tree = self._compile(v, level=level+1)
except SyntaxError:
if self.DEBUG:
print("Invalid value: ", v)
raise
except:
raise
if self.DEBUG:
print(debug_offset, "child tree:", child_tree)
parselet_tree[parsley_context] = child_tree
return parselet_tree
elif isstr(parselet_node):
return self.selector_handler.make(parselet_node)
else:
raise ValueError(
"Unsupported type(%s) for Parselet node <%s>" % (
type(parselet_node), parselet_node)) | Build part of the abstract Parsley extraction tree
Arguments:
parselet_node (dict) -- part of the Parsley tree to compile
(can be the root dict/node)
level (int) -- current recursion depth (used for debug) |
def overlap_add(blk_sig, size=None, hop=None, wnd=None, normalize=True):
import numpy as np
if size is None:
blk_sig = Stream(blk_sig)
size = len(blk_sig.peek())
if hop is None:
hop = size
if wnd is None:
wnd = np.ones(size)
elif callable(wnd) and not isinstance(wnd, Stream):
wnd = wnd(size)
if isinstance(wnd, Sequence):
wnd = np.array(wnd)
elif isinstance(wnd, Iterable):
wnd = np.hstack(wnd)
else:
raise TypeError("Window should be an iterable or a callable")
if normalize:
steps = Stream(wnd).blocks(hop).map(np.array)
gain = np.sum(np.abs(np.vstack(steps)), 0).max()
if gain:
old = np.zeros(size)
for blk in (wnd * blk for blk in blk_sig):
blk[:-hop] += old[hop:]
for el in blk[:hop]:
yield el
old = blk
for el in old[hop:]:
yield el | Overlap-add algorithm using Numpy arrays.
Parameters
----------
blk_sig :
An iterable of blocks (sequences), such as the ``Stream.blocks`` result.
size :
Block size for each ``blk_sig`` element, in samples.
hop :
Number of samples for two adjacent blocks (defaults to the size).
wnd :
Windowing function to be applied to each block or any iterable with
exactly ``size`` elements. If ``None`` (default), applies a rectangular
window.
normalize :
Flag whether the window should be normalized so that the process could
happen in the [-1; 1] range, dividing the window by its hop gain.
Default is ``True``.
Returns
-------
A Stream instance with the blocks overlapped and added.
See Also
--------
Stream.blocks :
Splits the Stream instance into blocks with given size and hop.
blocks :
Same to Stream.blocks but for without using the Stream class.
chain :
Lazily joins all iterables given as parameters.
chain.from_iterable :
Same to ``chain(*data)``, but the ``data`` evaluation is lazy.
window :
Window/apodization/tapering functions for a given size as a StrategyDict.
Note
----
Each block has the window function applied to it and the result is the
sum of the blocks without any edge-case special treatment for the first
and last few blocks. |
def cancellation_fee(self, percentage):
from .invoice import InvoiceController
assert(percentage >= 0 and percentage <= 100)
cancellation_fee = self.credit_note.value * percentage / 100
due = datetime.timedelta(days=1)
item = [("Cancellation fee", cancellation_fee)]
invoice = InvoiceController.manual_invoice(
self.credit_note.invoice.user, due, item
)
if not invoice.is_paid:
self.apply_to_invoice(invoice)
return InvoiceController(invoice) | Generates an invoice with a cancellation fee, and applies
credit to the invoice.
percentage (Decimal): The percentage of the credit note to turn into
a cancellation fee. Must be 0 <= percentage <= 100. |
def delete_container_service(access_token, subscription_id, resource_group, service_name):
endpoint = .join([get_rm_endpoint(),
, subscription_id,
, resource_group,
, service_name,
, ACS_API])
return do_delete(endpoint, access_token) | Delete a named container.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
service_name (str): Name of container service.
Returns:
HTTP response. |
def tag_image(self, repository=None, tag=None):
if not (repository or tag):
raise ValueError("You need to specify either repository or tag.")
r = repository or self.name
t = "latest" if not tag else tag
self.d.tag(image=self.get_full_name(), repository=r, tag=t)
return DockerImage(r, tag=t) | Apply additional tags to the image or even add a new name
:param repository: str, see constructor
:param tag: str, see constructor
:return: instance of DockerImage |
def run_somaticsniper_full(job, tumor_bam, normal_bam, univ_options, somaticsniper_options):
work_dir = os.getcwd()
input_files = {
: tumor_bam[],
: tumor_bam[],
: normal_bam[],
: normal_bam[],
: somaticsniper_options[],
: somaticsniper_options[]}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
for key in (, ):
input_files[key] = untargz(input_files[key + ], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
output_file = os.path.join(work_dir, )
parameters = [, input_files[],
, ,
,
,
, ,
, ,
input_files[],
input_files[],
docker_path(output_file)]
docker_call(tool=, tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options[], tool_version=somaticsniper_options[])
outfile = job.fileStore.writeGlobalFile(output_file)
job.fileStore.logToMaster( % univ_options[])
return outfile | Run SomaticSniper on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the genome-level vcf
:rtype: toil.fileStore.FileID |
def _xml_namespace_strip(root):
if not in root.tag:
return
for element in root.iter():
if in element.tag:
element.tag = element.tag.split()[1]
else:
pass | Strip the XML namespace prefix from all element tags under the given root Element. |
def parse(self, rrstr):
if self._initialized:
raise pycdlibexception.PyCdlibInternalError()
(su_len, su_entry_version_unused, len_id, len_des, len_src,
self.ext_ver) = struct.unpack_from(, rrstr[:8], 2)
if su_len > len(rrstr):
raise pycdlibexception.PyCdlibInvalidISO()
total_length = len_id + len_des + len_src
if total_length > su_len:
raise pycdlibexception.PyCdlibInvalidISO()
fmtstr = % (len_id, len_des, len_src)
(self.ext_id, self.ext_des, self.ext_src) = struct.unpack_from(fmtstr, rrstr, 8)
self._initialized = True | Parse a Rock Ridge Extensions Reference record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
Nothing. |
def get_exp_dir_num(parent_dir: str) -> int:
    """Gets the number of the current experiment directory.

    Scans *parent_dir* for entries whose name up to the first ``'.'`` is a
    decimal number and returns the largest such number, or -1 when no
    numbered entry exists.
    """
    best = -1
    for entry in os.listdir(parent_dir):
        prefix = entry.split(".")[0]
        if prefix.isdigit():
            best = max(best, int(prefix))
    return best
def double_click(self, on_element=None):
    """Double-clicks an element.

    :Args:
     - on_element: The element to double-click.
       If None, clicks on current mouse position.
    """
    if on_element:
        self.move_to_element(on_element)
    if not self._driver.w3c:
        # Legacy (JSON wire protocol) path: queue the raw double-click command.
        self._actions.append(lambda: self._driver.execute(
            Command.DOUBLE_CLICK, {}))
    else:
        # W3C actions path: emit the double click on the pointer device and
        # keep the key device in sync with matching pauses.
        self.w3c_actions.pointer_action.double_click()
        for _ in range(4):
            self.w3c_actions.key_action.pause()
    return self
def listener(self, event, *args, **kwargs):
    """Create a listener from a decorated function.

    :param event: Event to listen to.
    :type event: str
    :param args: captures all of the positional arguments passed in
    :type args: tuple(Any)
    :param kwargs: captures the keyword arguments passed in
    :type kwargs: dict(Any)
    :return: The function to use as the listener
    :rtype: fn
    """
    # A bare ``@listener`` usage passes the function itself as the sole
    # positional argument; that form is not supported.
    if len(args) == 1 and callable(args[0]):
        raise RuntimeError("Cannot use the @listener decorator without "
                           "arguments")

    def register(handler):
        # When keyword options were given, the handler is stored (and
        # returned) together with them as a (handler, kwargs) tuple.
        entry = (handler, kwargs) if kwargs else handler
        self._listeners[event].append(entry)
        return entry

    return register
def FUNCTION_DECL(self, cursor):
    """Handles function declaration.

    Returns the cached ``typedesc.Function`` for *cursor* if one is already
    registered under its unique name; otherwise builds one from the cursor's
    result type and arguments, registers it, records its location and
    comment, and returns it.
    """
    func_name = self.get_unique_name(cursor)
    if self.is_registered(func_name):
        return self.get_registered(func_name)
    result_type = self.parse_cursor_type(cursor.type.get_result())
    # Attributes and extern linkage are not extracted here; use defaults.
    func = typedesc.Function(func_name, result_type, [], False)
    for argument in cursor.get_arguments():
        func.add_argument(self.parse_cursor(argument))
    self.register(func_name, func)
    self.set_location(func, cursor)
    self.set_comment(func, cursor)
    return func
def _strip_object(key):
if hasattr(key, ) and hasattr(key, ):
return key.for_branch(None).version_agnostic()
else:
return key | Strips branch and version info if the given key supports those attributes. |
def list_policy_versions(policy_name,
                         region=None, key=None, keyid=None, profile=None):
    """List versions of a policy.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iam.list_policy_versions mypolicy
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
    try:
        ret = conn.list_policy_versions(policy_arn)
        # Unwrap the nested boto response envelope down to the version list.
        return ret.get('list_policy_versions_response', {}).get(
            'list_policy_versions_result', {}).get('versions')
    except boto.exception.BotoServerError as e:
        log.debug(e)
        log.error('Failed to list versions for IAM policy %s.', policy_name)
        return []
def process_tokens(
    tokens: Sequence[Token], *, end: str = "\n", sep: str = " "
) -> Tuple[str, str]:
    """Returns two strings from a list of tokens.

    One containing ASCII escape codes, the other
    only the 'normal' characters.
    """
    # Expand any UnicodeSequence tokens into their component tokens first.
    flat = []
    for tok in tokens:
        if isinstance(tok, UnicodeSequence):
            flat.extend(tok.tuple())
        else:
            flat.append(tok)
    colored = _process_tokens(flat, end=end, sep=sep, color=True)
    plain = _process_tokens(flat, end=end, sep=sep, color=False)
    return (colored, plain)
def execute(self):
    """Resolves the specified confs for the configured targets and returns an iterator over
    tuples of (conf, jar path).
    """
    # NOTE(review): several string literals in this method were lost in
    # extraction (the resolver name compared below and the product-data key);
    # restore them from upstream before use.
    if JvmResolveSubsystem.global_instance().get_options().resolver != :
        return
    compile_classpath = self.context.products.get_data(,
        init_func=ClasspathProducts.init_func(self.get_options().pants_workdir))
    targets = self.context.targets()
    # Nothing to resolve unless at least one JarLibrary target is present.
    if all(not isinstance(target, JarLibrary) for target in targets):
        if self._report:
            self.context.log.info("Not generating a report. No resolution performed.")
        return
    executor = self.create_java_executor()
    results = self.resolve(executor=executor,
                           targets=targets,
                           classpath_products=compile_classpath,
                           confs=self.get_options().confs,
                           extra_args=self._args)
    if self._report:
        # Only results that actually resolved artifacts are worth reporting.
        results_with_resolved_artifacts = [r for r in results if r.has_resolved_artifacts]
        if not results_with_resolved_artifacts:
            self.context.log.info("Not generating a report. No resolution performed.")
        else:
            for result in results_with_resolved_artifacts:
                self._generate_ivy_report(result)
def _start_local_queue_process(self):
    """Start the interchange helper process and wait for its worker port.

    Spawns ``interchange.starter`` in a separate process, handing it the
    client-facing ports, then blocks (up to 120s) until the interchange
    reports back the port that workers should connect to.

    Raises:
        Exception: if the interchange does not report its worker port
            within 120 seconds.
    """
    # Channel over which the child reports the chosen worker port.
    comm_q = Queue(maxsize=10)
    self.queue_proc = Process(target=interchange.starter,
                              args=(comm_q,),
                              kwargs={"client_ports": (self.outgoing_q.port,
                                                       self.incoming_q.port),
                                      "worker_port": self.worker_port,
                                      "worker_port_range": self.worker_port_range
                                      })
    self.queue_proc.start()
    try:
        worker_port = comm_q.get(block=True, timeout=120)
        logger.debug(
            "Got worker port {} from interchange".format(worker_port))
    except queue.Empty:
        logger.error(
            "Interchange has not completed initialization in 120s. Aborting")
        raise Exception("Interchange failed to start")

    self.worker_task_url = "tcp://{}:{}".format(
        self.address, worker_port)
def build_path(levels):
    """Make a linear directory structure from a list of path level names.

    levels = ["chefdir", "trees", "test"]
    builds ./chefdir/trees/test/ and returns the joined path.

    :param levels: list of directory names to join and create.
    :return: the joined path (now guaranteed to exist as a directory).
    """
    path = os.path.join(*levels)
    # exist_ok avoids the check-then-create race of the original
    # dir_exists()/makedirs() pair; still raises if `path` exists as a file.
    os.makedirs(path, exist_ok=True)
    return path
def displayable_path(path, separator=u'; '):
    """Attempts to decode a bytestring path to a unicode object for the
    purpose of displaying it to the user. If the `path` argument is a
    list or a tuple, the elements are joined with `separator`.
    """
    if isinstance(path, (list, tuple)):
        return separator.join(displayable_path(p) for p in path)
    elif isinstance(path, six.text_type):
        return path
    elif not isinstance(path, bytes):
        # A non-string object: fall back to its text representation.
        return six.text_type(path)
    try:
        # Decode with the filesystem encoding, dropping undecodable bytes.
        return path.decode(_fsencoding(), 'ignore')
    except (UnicodeError, LookupError):
        return path.decode('utf8', 'ignore')
def remove_item(self, *args, **kwargs):
    """Pass through to provider methods."""
    # Try the primary provider session first; if it rejects the arguments,
    # fall back to the sub-package provider session.
    # NOTE(review): the sub-package/session name string literals below were
    # lost in extraction; restore them before use.
    try:
        self._get_provider_session().remove_item(*args, **kwargs)
    except InvalidArgument:
        self._get_sub_package_provider_session(
            , ).remove_item(*args, **kwargs)
def find_eigen(hint=None):
    r"""Try to find the Eigen library. If successful the include directory is returned.

    :param hint: optional list of directories to search before the standard
        system locations.
    :return: include directory containing ``Eigen/Dense``, or None.
    """
    # Prefer pkg-config metadata when the optional 'pkgconfig' package is
    # available; silently fall back to a path search otherwise.
    try:
        import pkgconfig
        if pkgconfig.installed('eigen3', '>3.0.0'):
            return pkgconfig.parse('eigen3')['include_dirs'][0]
    except Exception:
        pass

    search_dirs = [] if hint is None else hint
    search_dirs += [
        "/usr/local/include/eigen3",
        "/usr/local/homebrew/include/eigen3",
        "/opt/local/var/macports/software/eigen3",
        "/opt/local/include/eigen3",
        "/usr/include/eigen3",
        "/usr/include/local",
        "/usr/include",
    ]

    for d in search_dirs:
        path = os.path.join(d, "Eigen", "Dense")
        if os.path.exists(path):
            # Extract the version from Eigen's Macros.h header.
            vf = os.path.join(d, "Eigen", "src", "Core", "util", "Macros.h")
            if not os.path.exists(vf):
                continue
            src = open(vf, "r").read()
            v1 = re.findall("#define EIGEN_WORLD_VERSION (.+)", src)
            v2 = re.findall("#define EIGEN_MAJOR_VERSION (.+)", src)
            v3 = re.findall("#define EIGEN_MINOR_VERSION (.+)", src)
            if not len(v1) or not len(v2) or not len(v3):
                continue
            v = "{0}.{1}.{2}".format(v1[0], v2[0], v3[0])
            print("Found Eigen version {0} in: {1}".format(v, d))
            return d
    return None
def _url_lookup_builder(id=None, artist_amg_id=None, upc=None, country=, media=, entity=None, attribute=None,
                        limit=50):
    """Builds the URL to perform the lookup based on the provided data.

    :param id: String. iTunes ID of the artist, album, track, ebook or software
    :param artist_amg_id: String. All Music Guide ID of the artist
    :param upc: String. UPCs/EANs
    :param country: String. The two-letter country code for the store to search.
    :param media: String. The media type to search for. Example: music
    :param entity: String. The type of results wanted, relative to the media type. Example: musicArtist.
    :param attribute: String. The attribute to search for in the stores, relative to the media type.
    :param limit: Integer. The number of search results the iTunes Store should return.
    :return: The built URL as a string

    NOTE(review): the default string literals for `country` and `media` were
    lost in extraction (the signature above is syntactically invalid);
    restore them from upstream before use.
    """
    built_url = base_lookup_url
    # Only the first query parameter omits the leading ampersand.
    has_one_argument = False
    if id is not None:
        built_url += parameters[6] + str(id)
        has_one_argument = True
    if artist_amg_id is not None:
        if has_one_argument:
            built_url += ampersand + parameters[7] + artist_amg_id
        else:
            built_url += parameters[7] + str(artist_amg_id)
        has_one_argument = True
    if upc is not None:
        if has_one_argument:
            built_url += ampersand + parameters[8] + upc
        else:
            built_url += parameters[8] + str(upc)
    built_url += ampersand + parameters[1] + country
    built_url += ampersand + parameters[2] + media
    if entity is not None:
        built_url += ampersand + parameters[3] + entity
    if attribute is not None:
        built_url += ampersand + parameters[4] + attribute
    built_url += ampersand + parameters[5] + str(limit)
    return built_url
def options_help():
    """Help message for options dialog.

    .. versionadded:: 3.2.1

    :returns: A message object containing helpful information.
    :rtype: messaging.message.Message
    """
    help_message = m.Message()
    # Assemble the standard help layout: brand banner, heading, then body.
    for part in (m.Brand(), heading(), content()):
        help_message.add(part)
    return help_message
def get_previous_scheduled_dagrun(self, session=None):
    """The previous, SCHEDULED DagRun, if there is one"""
    # NOTE(review): `session` is used unconditionally below, so it is
    # presumably injected by a @provide_session decorator upstream — confirm
    # before calling this with session=None.
    dag = self.get_dag()
    return session.query(DagRun).filter(
        DagRun.dag_id == self.dag_id,
        # Match the execution date the schedule says came right before ours.
        DagRun.execution_date == dag.previous_schedule(self.execution_date)
    ).first()
def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
    """Make new_fn have old_fn's doc string. This is particularly useful
    for the do_... commands that hook into the help system.
    Adapted from a comp.lang.python posting by Duncan Booth.
    """
    def wrapper(*args, **kwargs):
        return new_fn(*args, **kwargs)

    doc = old_fn.__doc__
    if doc:
        # Only attach a docstring when the source function actually has one.
        wrapper.__doc__ = doc + additional_text
    return wrapper
def _register_token_network_without_limits(
    self,
    token_registry_abi: Dict,
    token_registry_address: str,
    token_address: str,
    channel_participant_deposit_limit: Optional[int],
    token_network_deposit_limit: Optional[int],
):
    """Register token with a TokenNetworkRegistry contract
    with a contracts-version that doesn't require deposit limits in the TokenNetwork
    constructor.

    NOTE(review): the error/message string literals in this method (the
    ValueError messages and the f-strings below) were lost in extraction;
    restore them from upstream before use.
    """
    # This contracts-version takes no deposit limits, so passing either one
    # is a caller error.
    if channel_participant_deposit_limit:
        raise ValueError(
            ,
        )
    if token_network_deposit_limit:
        raise ValueError(
            ,
        )
    token_network_registry = self.web3.eth.contract(
        abi=token_registry_abi,
        address=token_registry_address,
    )

    # Refuse to talk to a registry deployed from a different contracts version.
    version_from_onchain = token_network_registry.functions.contract_version().call()
    if version_from_onchain != self.contract_manager.version_string:
        raise RuntimeError(
            f
            f,
        )

    command = token_network_registry.functions.createERC20TokenNetwork(
        token_address,
    )
    self.transact(command)

    # Read back the address of the TokenNetwork the registry just created.
    token_network_address = token_network_registry.functions.token_to_token_networks(
        token_address,
    ).call()
    token_network_address = to_checksum_address(token_network_address)

    LOG.debug(f)
    return token_network_address
def _send(key, value, metric_type):
    """Send the specified value to the statsd daemon via UDP without a
    direct socket connection.

    :param str key: The metric key (prefixed with STATSD_PREFIX when set)
    :param str value: The properly formatted statsd counter value
    :param str metric_type: statsd metric type code (e.g. 'c', 'ms', 'g')
    """
    if STATSD_PREFIX:
        key = '.'.join([STATSD_PREFIX, key])
    try:
        # statsd wire format: "<key>:<value>|<type>"
        STATSD_SOCKET.sendto('{0}:{1}|{2}'.format(key,
                                                  value,
                                                  metric_type).encode(),
                             STATSD_ADDR)
    except socket.error:
        # Best-effort metric emission: log and carry on.
        LOGGER.exception(SOCKET_ERROR)
def sample_surface(mesh, count):
    """Sample the surface of a mesh, returning the specified
    number of points.

    For individual triangle sampling uses this method:
    http://mathworld.wolfram.com/TrianglePointPicking.html

    Parameters
    ---------
    mesh: Trimesh object
    count: number of points to return

    Returns
    ---------
    samples: (count, 3) points in space on the surface of mesh
    face_index: (count,) indices of faces for each sampled point
    """
    # Pick faces with probability proportional to area by sampling the
    # inverse of the cumulative area distribution.
    face_areas = mesh.area_faces
    total_area = np.sum(face_areas)
    cumulative_area = np.cumsum(face_areas)
    picks = np.random.random(count) * total_area
    face_index = np.searchsorted(cumulative_area, picks)

    # Express each triangle as an origin vertex plus two edge vectors.
    origins = mesh.triangles[:, 0]
    edges = mesh.triangles[:, 1:].copy()
    edges -= np.tile(origins, (1, 2)).reshape((-1, 2, 3))

    origins = origins[face_index]
    edges = edges[face_index]

    # Random edge weights; points whose weights sum past 1 land outside the
    # triangle, so reflect them back inside.
    weights = np.random.random((len(edges), 2, 1))
    outside = weights.sum(axis=1).reshape(-1) > 1.0
    weights[outside] -= 1.0
    weights = np.abs(weights)

    samples = (edges * weights).sum(axis=1) + origins
    return samples, face_index
def _init_metadata(self):
    """stub

    Initialize metadata from both mixed-in answer record types before
    deferring to the superclass chain.
    """
    # Call each record type's initializer explicitly (not via super) so both
    # sets of metadata get registered, then continue up the MRO.
    TextAnswerFormRecord._init_metadata(self)
    FilesAnswerFormRecord._init_metadata(self)
    super(AnswerTextAndFilesMixin, self)._init_metadata()
def rzz(self, theta, qubit1, qubit2):
    """Apply RZZ to circuit.

    :param theta: rotation angle.
    :param qubit1: first qubit the two-qubit gate acts on.
    :param qubit2: second qubit the two-qubit gate acts on.
    :return: result of appending the gate (allows call chaining).
    """
    return self.append(RZZGate(theta), [qubit1, qubit2], [])
def visitArrayExpr(self, ctx: jsgParser.ArrayExprContext):
    """arrayExpr: OBRACKET valueType (BAR valueType)* ebnfSuffix? CBRACKET;"""
    # Imported locally, presumably to avoid circular imports at module load
    # time — confirm before hoisting to module level.
    from pyjsg.parser_impl.jsg_ebnf_parser import JSGEbnf
    from pyjsg.parser_impl.jsg_valuetype_parser import JSGValueType
    # One JSGValueType per BAR-separated alternative inside the brackets.
    self._types = [JSGValueType(self._context, vt) for vt in ctx.valueType()]
    if ctx.ebnfSuffix():
        self._ebnf = JSGEbnf(self._context, ctx.ebnfSuffix())
def first_address(self, skip_network_address=True):
    """Return the first IP address of this network

    :param skip_network_address: this flag specifies whether this function returns address of the \
    network or returns address that follows address of the network (address, that a host could have)
    :return: WIPV4Address
    """
    bin_address = self.__address.bin_address()
    bin_address_length = len(bin_address)
    # For masks with fewer than 2 host bits there is no separate network
    # address to skip, so the flag is forced off.
    if self.__mask > (bin_address_length - 2):
        skip_network_address = False
    # Zero out all host bits to get the network address.
    for i in range(bin_address_length - self.__mask):
        bin_address[self.__mask + i] = 0
    if skip_network_address:
        # Set the lowest bit to yield the first usable host address.
        bin_address[bin_address_length - 1] = 1
    return WIPV4Address(bin_address)
def onNicknameChange(
    self,
    mid=None,
    author_id=None,
    changed_for=None,
    new_nickname=None,
    thread_id=None,
    thread_type=ThreadType.USER,
    ts=None,
    metadata=None,
    msg=None,
):
    """Called when the client is listening, and somebody changes the nickname of a person

    :param mid: The action ID
    :param author_id: The ID of the person who changed the nickname
    :param changed_for: The ID of the person whom got their nickname changed
    :param new_nickname: The new nickname
    :param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
    :param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
    :param ts: A timestamp of the action
    :param metadata: Extra metadata about the action
    :param msg: A full set of the data recieved
    :type thread_type: models.ThreadType
    """
    # Default handler just logs the event; subclasses override for behavior.
    message = "Nickname change from {} in {} ({}) for {}: {}".format(
        author_id, thread_id, thread_type.name, changed_for, new_nickname
    )
    log.info(message)
def wrap_exception(func: Callable) -> Callable:
    """Decorator to wrap pygatt exceptions into BluetoothBackendException.

    When pygatt is not installed there is nothing to translate, so the
    function is returned unmodified.
    """
    try:
        from pygatt.backends.bgapi.exceptions import BGAPIError
        from pygatt.exceptions import NotConnectedError
    except ImportError:
        return func

    def _func_wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (BGAPIError, NotConnectedError) as exception:
            # Translate backend-specific errors into our own exception type.
            raise BluetoothBackendException() from exception

    return _func_wrapper
def create_boxcar(aryCnd, aryOns, aryDrt, varTr, varNumVol,
                  aryExclCnd=None, varTmpOvsmpl=1000.):
    """Creation of condition time courses in temporally upsampled space.

    Parameters
    ----------
    aryCnd : np.array
        1D array with condition identifiers (every condition has its own int)
    aryOns : np.array, same len as aryCnd
        1D array with condition onset times in seconds.
    aryDrt : np.array, same len as aryCnd
        1D array with condition durations of different conditions in seconds.
    varTr : float, positive
        Time to repeat (TR) of the (fMRI) experiment.
    varNumVol : float, positive
        Number of volumes of the (fMRI) data.
    aryExclCnd : array
        1D array containing condition identifiers for conditions to be excluded
    varTmpOvsmpl : float, positive
        Factor by which the time courses should be temporally upsampled.

    Returns
    -------
    aryBxCrOut : np.array, float16
        Condition time courses in temporally upsampled space.

    References
    -----
    [1] https://github.com/fabianp/hrf_estimation
    """
    if aryExclCnd is not None:
        # Drop onsets/durations belonging to excluded conditions.
        for cond in aryExclCnd:
            aryOns = aryOns[aryCnd != cond]
            aryDrt = aryDrt[aryCnd != cond]
            aryCnd = aryCnd[aryCnd != cond]

    # Duration of one sample on the upsampled time grid, in seconds.
    resolution = varTr / float(varTmpOvsmpl)
    aryCnd = np.asarray(aryCnd)
    # np.float / np.int aliases were removed in numpy >= 1.24; use builtins.
    aryOns = np.asarray(aryOns, dtype=float)

    unique_conditions = np.sort(np.unique(aryCnd))
    boxcar = []
    for c in unique_conditions:
        tmp = np.zeros(int(varNumVol * varTr / resolution))
        onset_c = aryOns[aryCnd == c]
        duration_c = aryDrt[aryCnd == c]
        onset_idx = np.round(onset_c / resolution).astype(int)
        duration_idx = np.round(duration_c / resolution).astype(int)
        aux = np.arange(int(varNumVol * varTr / resolution))
        for start, dur in zip(onset_idx, duration_idx):
            lgc = np.logical_and(aux >= start, aux < start + dur)
            tmp = tmp + lgc
        # Overlapping events within a single condition are not allowed.
        assert np.all(np.less(tmp, 2))
        boxcar.append(tmp)

    aryBxCrOut = np.array(boxcar).T
    # Squeeze out the condition axis when there is only one condition.
    if aryBxCrOut.shape[1] == 1:
        aryBxCrOut = np.squeeze(aryBxCrOut)
    return aryBxCrOut.astype('float16')
def _loadData(self, data):
    """Load attribute values from Plex XML response.

    The XML attribute names mirror the Python attribute names being
    assigned, per the plexapi convention.
    """
    self._data = data
    self.id = utils.cast(int, data.attrib.get('id'))
    self.serverId = utils.cast(int, data.attrib.get('serverId'))
    self.machineIdentifier = data.attrib.get('machineIdentifier')
    self.name = data.attrib.get('name')
    self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))
    self.numLibraries = utils.cast(int, data.attrib.get('numLibraries'))
    self.allLibraries = utils.cast(bool, data.attrib.get('allLibraries'))
    self.owned = utils.cast(bool, data.attrib.get('owned'))
    self.pending = utils.cast(bool, data.attrib.get('pending'))
def ensure_started(self):
    """Start a server and waits (blocking wait) until it is fully started."""
    # NOTE(review): the state-name string literals in this method were lost
    # in extraction (the lists/comparisons below are empty); restore the
    # concrete state names before use.
    if self.state in [, ]:
        self._wait_for_state_change([, ])
    if self.state == :
        self.start()
        self._wait_for_state_change([])
    if self.state == :
        return True
    else:
        raise Exception( + self.state)
def kdeconf(kde, conf=0.683, xmin=None, xmax=None, npts=500,
            shortest=True, conftol=0.001, return_max=False):
    """Returns desired confidence interval for provided KDE object.

    The KDE is evaluated on a regular grid spanning [xmin, xmax] (defaulting
    to the extent of the KDE's dataset), and the interval is computed from
    that sampled density.
    """
    lo = kde.dataset.min() if xmin is None else xmin
    hi = kde.dataset.max() if xmax is None else xmax
    grid = np.linspace(lo, hi, npts)
    return conf_interval(grid, kde(grid), shortest=shortest, conf=conf,
                         conftol=conftol, return_max=return_max)
def from_row(row):
    """Create an advice from a CSV row"""
    # NOTE(review): the literal arguments below (the strptime format and the
    # split/replace separators) were lost in extraction; restore before use.
    # Capitalize the first letter of the subject, if present.
    subject = (row[5][0].upper() + row[5][1:]) if row[5] else row[5]
    return Advice.objects.create(
        id=row[0],
        administration=cleanup(row[1]),
        type=row[2],
        session=datetime.strptime(row[4], ),
        subject=cleanup(subject),
        topics=[t.title() for t in cleanup(row[6]).split()],
        tags=[tag.strip() for tag in row[7].split() if tag.strip()],
        meanings=cleanup(row[8]).replace(, ).split(),
        part=_part(row[9]),
        content=cleanup(row[10]),
    )
def get_class_students(self, xqdm, kcdm, jxbh):
    """Teaching-class query: look up all students of the specified teaching class.

    @structure {'学期': str, '班级名称': str, '学生': [{'姓名': str, '学号': int}]}

    :param xqdm: semester code
    :param kcdm: course code
    :param jxbh: teaching class number
    """
    return self.query(GetClassStudents(xqdm, kcdm, jxbh))
def increase_indent(func):
    """Decorator for making debug output nested: bumps the module-level
    ``_debug_indent`` for the duration of the wrapped call.

    The indent level is restored even when *func* raises (the original
    implementation leaked one level per exception).
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        global _debug_indent
        _debug_indent += 1
        try:
            return func(*args, **kwargs)
        finally:
            # Always restore the previous level, exception or not.
            _debug_indent -= 1
    return wrapper
def move(self, target, home_flagged_axes=False):
    """Move to the `target` Smoothieware coordinate, along any of the size
    axes, XYZABC.

    target: dict
        dict setting the coordinate that Smoothieware will be at when
        `move()` returns. `target` keys are the axis in upper-case, and the
        values are the coordinate in millimeters (float)

    home_flagged_axes: boolean (default=False)
        If set to `True`, each axis included within the target coordinate
        may be homed before moving, determined by Smoothieware's internal
        homing-status flags (`True` means it has already homed). All axes'
        flags are set to `False` by Smoothieware under three conditions:
        1) Smoothieware boots or resets, 2) if a HALT gcode or signal
        is sent, or 3) a homing/limitswitch error occured.
    """
    # NOTE(review): this block was damaged in extraction — the GCODE command
    # assembly (the `command = ...` assignment and its enclosing `try:`) is
    # missing, several string literals are empty, and the `finally:` below
    # is orphaned. Restore from upstream before use.
    from numpy import isclose
    self.run_flag.wait()

    def valid_movement(coords, axis):
        # Skip disabled axes, unspecified coordinates, and no-op moves.
        return not (
            (axis in DISABLE_AXES) or
            (coords is None) or
            isclose(coords, self.position[axis])
        )

    def create_coords_list(coords_dict):
        # Render e.g. {'X': 1.0} as ['X1.0'], rounded and sorted by axis.
        return [
            axis + str(round(coords, GCODE_ROUNDING_PRECISION))
            for axis, coords in sorted(coords_dict.items())
            if valid_movement(coords, axis)
        ]

    # Over-shoot rising plunger moves by the backlash distance so the final
    # approach is always made from the same direction.
    backlash_target = target.copy()
    backlash_target.update({
        axis: value + PLUNGER_BACKLASH_MM
        for axis, value in sorted(target.items())
        if axis in and self.position[axis] < value
    })

    target_coords = create_coords_list(target)
    backlash_coords = create_coords_list(backlash_target)

    if target_coords:
        non_moving_axes = .join([
            ax
            for ax in AXES
            if ax not in target.keys()
        ])
        self.dwell_axes(non_moving_axes)
        self.activate_axes(target.keys())

        self._send_command(command, timeout=DEFAULT_MOVEMENT_TIMEOUT)
    finally:
        plunger_axis_moved = .join(set() & set(target.keys()))
        if plunger_axis_moved:
            self.dwell_axes(plunger_axis_moved)
            self._set_saved_current()

    self._update_position(target)
def find_last(fileobj, serial):
    """Find the last page of the stream 'serial'.

    If the file is not multiplexed this function is fast. If it is,
    it must read the whole the stream.

    This finds the last page in the actual file object, or the last
    page in the stream (with eos set), whichever comes first.
    """
    # For non-muxed files, look at the last few pages instead of all of them
    try:
        fileobj.seek(-256*256, 2)
    except IOError:
        # The file is less than 64k in length.
        fileobj.seek(0)
    data = fileobj.read()
    try:
        # Locate the last 'OggS' capture pattern in the tail window.
        index = data.rindex(b"OggS")
    except ValueError:
        raise error("unable to find final Ogg header")
    bytesobj = cBytesIO(data[index:])
    best_page = None
    try:
        page = OggPage(bytesobj)
    except error:
        pass
    else:
        if page.serial == serial:
            if page.last:
                # Fast path: the file's final page ends our stream.
                return page
            else:
                best_page = page
        else:
            # Final page belongs to a different stream: file is multiplexed.
            best_page = None

    # The stream is muxed, so use the slow way: scan every page, tracking
    # the most recent one with our serial.
    fileobj.seek(0)
    try:
        page = OggPage(fileobj)
        while not page.last:
            page = OggPage(fileobj)
            while page.serial != serial:
                page = OggPage(fileobj)
            best_page = page
        return page
    except error:
        return best_page
    except EOFError:
        return best_page
def setMood(self, mood):
    """Update the activity message for the current user.

    Args:
        mood (str): new mood message
    """
    self.conn("POST", "{0}/users/{1}/profile/partial".format(SkypeConnection.API_USER, self.userId),
              auth=SkypeConnection.Auth.SkypeToken, json={"payload": {"mood": mood or ""}})
    # Mirror the change locally; an empty/None value clears the mood.
    self.user.mood = SkypeUser.Mood(plain=mood) if mood else None
def configs_for_writer(writer=None, ppp_config_dir=None):
    """Generator of writer configuration files for one or more writers

    Args:
        writer (Optional[str]): Yield configs only for this writer
        ppp_config_dir (Optional[str]): Additional configuration directory
            to search for writer configuration files.

    Returns: Generator of lists of configuration files

    NOTE(review): the config-extension and glob-pattern string literals below
    were lost in extraction; restore them from upstream before use.
    """
    search_paths = (ppp_config_dir,) if ppp_config_dir else tuple()
    if writer is not None:
        if not isinstance(writer, (list, tuple)):
            writer = [writer]
        # Normalize writer names to config filenames.
        config_files = [w if w.endswith() else w + for w in writer]
    else:
        # No writer specified: discover every writer config available.
        writer_configs = glob_config(os.path.join(, ),
                                     *search_paths)
        config_files = set(writer_configs)

    for config_file in config_files:
        config_basename = os.path.basename(config_file)
        writer_configs = config_search_paths(
            os.path.join("writers", config_basename), *search_paths)

        if not writer_configs:
            LOG.warning("No writer configs found for ", writer)
            continue

        yield writer_configs
def update_expenditure_entry(database, entry):
    """Update a record of an expenditure report in the provided database.

    @param database: The MongoDB database to operate on. The expenditures
        collection will be used from this database.
    @type database: pymongo.database.Database
    @param entry: The entry to insert into the database, updating the entry
        with the same recordID if one exists.
    @type entry: dict
    """
    entry = clean_entry(entry)
    database.expenditures.update(
        {'recordID': entry['recordID']},  # match on the stable record ID
        {'$set': entry},
        upsert=True
    )
def absl_flags():
    """Extracts absl-py flags that the user has specified and outputs their
    key-value mapping.

    By default, extracts only those flags in the current __package__
    and mainfile.

    Useful to put into a trial's param_map.
    """
    flags_by_module = flags.FLAGS.flags_by_module_dict()

    def _relevant_module(module_name):
        # Flags defined inside this package, or by the executed script itself.
        if __package__ and __package__ in module_name:
            return True
        return module_name == sys.argv[0]

    return {
        flag.name: flag.value
        for module, module_flags in flags_by_module.items()
        for flag in module_flags
        if _relevant_module(module)
    }
def initialize(self):
    """Start pydoc server"""
    # Show a busy cursor while the server spins up; processEvents keeps the
    # UI responsive so the cursor change is actually painted.
    QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
    QApplication.processEvents()
    self.start_server()
def _decrypt_data_key(
    self, encrypted_data_key: EncryptedDataKey, algorithm: AlgorithmSuite, encryption_context: Dict[Text, Text]
) -> DataKey:
    """Decrypt an encrypted data key and return the plaintext.

    :param encrypted_data_key: Encrypted data key
    :type encrypted_data_key: aws_encryption_sdk.structures.EncryptedDataKey
    :param algorithm: Algorithm object which directs how this Master Key will encrypt the data key
    :type algorithm: aws_encryption_sdk.identifiers.Algorithm
    :param dict encryption_context: Encryption context to use in decryption
    :returns: Data key containing decrypted data key
    :rtype: aws_encryption_sdk.structures.DataKey
    :raises DecryptKeyError: if Master Key is unable to decrypt data key

    NOTE(review): the error-message string literal passed to DecryptKeyError
    below was lost in extraction; restore before use.
    """
    # This raw master key can only decrypt the one EDK it holds.
    if encrypted_data_key.encrypted_data_key != self._encrypted_data_key:
        raise DecryptKeyError(
            .format(provider=self.key_provider)
        )
    return self._generate_data_key(algorithm, encryption_context)
def json_normalize(data, record_path=None, meta=None,
                   meta_prefix=None,
                   record_prefix=None,
                   errors='raise',
                   sep='.'):
    """Normalize semi-structured JSON data into a flat table.

    Parameters
    ----------
    data : dict or list of dicts
        Unserialized JSON objects
    record_path : string or list of strings, default None
        Path in each object to list of records. If not passed, data will be
        assumed to be an array of records
    meta : list of paths (string or list of strings), default None
        Fields to use as metadata for each record in resulting table
    meta_prefix : string, default None
    record_prefix : string, default None
        If True, prefix records with dotted (?) path, e.g. foo.bar.field if
        path to records is ['foo', 'bar']
    errors : {'raise', 'ignore'}, default 'raise'
        * 'ignore' : will ignore KeyError if keys listed in meta are not
          always present
        * 'raise' : will raise KeyError if keys listed in meta are not
          always present
    sep : string, default '.'
        Nested records will generate names separated by sep,
        e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar

    Returns
    -------
    frame : DataFrame

    Examples
    --------
    >>> data = [{'state': 'Florida', 'shortname': 'FL',
    ...          'counties': [{'name': 'Dade', 'population': 12345},
    ...                       {'name': 'Broward', 'population': 40000}]}]
    >>> json_normalize(data, 'counties', ['state', 'shortname'])
          name  population    state shortname
    0     Dade       12345  Florida        FL
    1  Broward       40000  Florida        FL
    """
    def _pull_field(js, spec):
        # Dig `spec` (a single key or a list-of-keys path) out of dict `js`.
        result = js
        if isinstance(spec, list):
            for field in spec:
                result = result[field]
        else:
            result = result[spec]
        return result

    if isinstance(data, list) and not data:
        return DataFrame()

    # A single record is treated as a length-one list of records.
    if isinstance(data, dict):
        data = [data]

    if record_path is None:
        if any([isinstance(x, dict) for x in y.values()] for y in data):
            # Flatten any nested dicts into dotted column names.
            data = nested_to_record(data, sep=sep)
        return DataFrame(data)
    elif not isinstance(record_path, list):
        record_path = [record_path]

    if meta is None:
        meta = []
    elif not isinstance(meta, list):
        meta = [meta]

    # Normalize each meta entry to a list-of-keys path.
    meta = [m if isinstance(m, list) else [m] for m in meta]

    records = []
    lengths = []

    meta_vals = defaultdict(list)
    if not isinstance(sep, str):
        sep = str(sep)
    meta_keys = [sep.join(val) for val in meta]

    def _recursive_extract(data, path, seen_meta, level=0):
        if isinstance(data, dict):
            data = [data]
        if len(path) > 1:
            # Walk one level deeper, collecting meta values defined here.
            for obj in data:
                for val, key in zip(meta, meta_keys):
                    if level + 1 == len(val):
                        seen_meta[key] = _pull_field(obj, val[-1])
                _recursive_extract(obj[path[0]], path[1:],
                                   seen_meta, level=level + 1)
        else:
            for obj in data:
                recs = _pull_field(obj, path[0])
                # Record the group size so meta values can be repeated later.
                lengths.append(len(recs))
                for val, key in zip(meta, meta_keys):
                    if level + 1 > len(val):
                        meta_val = seen_meta[key]
                    else:
                        try:
                            meta_val = _pull_field(obj, val[level:])
                        except KeyError as e:
                            if errors == 'ignore':
                                meta_val = np.nan
                            else:
                                raise KeyError("Try running with "
                                               "errors='ignore' as key "
                                               "{err} is not always present"
                                               .format(err=e))
                    meta_vals[key].append(meta_val)
                records.extend(recs)

    _recursive_extract(data, record_path, {}, level=0)

    result = DataFrame(records)

    if record_prefix is not None:
        result = result.rename(
            columns=lambda x: "{p}{c}".format(p=record_prefix, c=x))

    # Attach the collected metadata, repeated to match each record group.
    for k, v in meta_vals.items():
        if meta_prefix is not None:
            k = meta_prefix + k
        if k in result:
            raise ValueError('Conflicting metadata name {name}, '
                             'need distinguishing prefix '.format(name=k))
        result[k] = np.array(v, dtype=object).repeat(lengths)
    return result
def start(self):
    """Make an HTTP request with a specific method"""
    # Imported locally, presumably to avoid a circular import at module load.
    from .nurest_session import NURESTSession
    session = NURESTSession.get_current_session()

    # NOTE(review): `self.async` is invalid syntax on Python >= 3.7 where
    # `async` became a reserved keyword; upstream renamed the attribute.
    # Also, the kwargs key literal below was lost in extraction.
    if self.async:
        thread = threading.Thread(target=self._make_request, kwargs={: session})
        thread.is_daemon = False
        thread.start()
        return self.transaction_id

    return self._make_request(session=session)
def _ls_sites(path):
    """List only sites in the domain_sites() to ensure we co-exist with other projects"""
    # NOTE(review): the shell-command string for run() and the split/replace
    # separator literals below were lost in extraction; restore before use.
    with cd(path):
        sites = run().split()
        doms = [d.name for d in domain_sites()]
        dom_sites = []
        for s in sites:
            # Reduce each entry to its bare domain for comparison.
            ds = s.split()[0]
            ds = ds.replace(,)
            if ds in doms and s not in dom_sites:
                dom_sites.append(s)
    return dom_sites
def daemonize():
    """Daemonize the program, ie. make it run in the "background", detach
    it from its controlling terminal and from its controlling process
    group session.

    NOTES:
    - This function also umask(0) and chdir("/")
    - stdin, stdout, and stderr are redirected from/to /dev/null

    SEE ALSO:
    http://www.unixguide.net/unix/programming/1.7.shtml
    """
    # First fork: the parent exits so the child is adopted by init and is
    # guaranteed not to be a process-group leader (required for setsid).
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)
    except OSError as e:  # py3 syntax; the original used py2 "except X, e"
        log.exception("first fork() failed: %d (%s)", e.errno, e.strerror)
        sys.exit(1)

    os.setsid()
    os.umask(0)
    os.chdir("/")

    # Second fork: the daemon is no longer a session leader, so it can never
    # reacquire a controlling terminal.
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)
    except OSError as e:
        log.exception("second fork() failed: %d (%s)", e.errno, e.strerror)
        sys.exit(1)

    # Redirect the standard streams to /dev/null.
    try:
        devnull_fd = os.open(os.devnull, os.O_RDWR)
        for stdf in (sys.__stdout__, sys.__stderr__):
            try:
                stdf.flush()
            except Exception:
                pass
        for stdf in (sys.__stdin__, sys.__stdout__, sys.__stderr__):
            try:
                os.dup2(devnull_fd, stdf.fileno())
            except OSError:
                pass
    except Exception:
        log.exception("error during file descriptor redirection")
def get_instance(self, payload):
    """Build an instance of WorkflowRealTimeStatisticsInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsInstance
    """
    # Solution keys follow the standard twilio-python generated pattern of
    # carrying the parent resource identifiers.
    return WorkflowRealTimeStatisticsInstance(
        self._version,
        payload,
        workspace_sid=self._solution['workspace_sid'],
        workflow_sid=self._solution['workflow_sid'],
    )
def is_transaction_invalidated(transaction, state_change):
    """True if the `transaction` is made invalid by `state_change`.

    Some transactions will fail due to race conditions. The races are:

    - Another transaction which has the same side effect is executed before.
    - Another transaction which *invalidates* the state of the smart contract
      required by the local transaction is executed before it.

    The first case is handled by the predicate `is_transaction_effect_satisfied`,
    where a transaction from a different source which does the same thing is
    considered. This predicate handles the second scenario.

    A transaction can **only** invalidate another iff both share a valid
    initial state but a different end state.

    Valid example:
        A close can invalidate a deposit, because both a close and a deposit
        can be executed from an opened state (same initial state), but a close
        transaction will transition the channel to a closed state which doesn't
        allow for deposits (different end state).

    Invalid example:
        A settle transaction cannot invalidate a deposit because a settle is
        only allowed for the closed state and deposits are only allowed for
        the open state. In such a case a deposit should never have been sent.
        The deposit transaction for an invalid state is a bug and not a
        transaction which was invalidated.
    """
    # Only a settle racing against our channel-update transfer (on the same
    # token network and channel) invalidates the transaction.
    if not isinstance(state_change, ContractReceiveChannelSettled):
        return False
    if not isinstance(transaction, ContractSendChannelUpdateTransfer):
        return False
    same_network = (
        state_change.token_network_identifier == transaction.token_network_identifier
    )
    same_channel = state_change.channel_identifier == transaction.channel_identifier
    return same_network and same_channel
def _construct_role(self, managed_policy_map):
    """Constructs a Lambda execution role based on this SAM function's Policies property.

    :returns: the generated IAM Role
    :rtype: model.iam.IAMRole

    NOTE(review): the string literals in this method were stripped in the
    source dump; they are reconstructed here from the upstream AWS SAM
    translator (samtranslator.model.sam_resources) and should be verified.
    """
    execution_role = IAMRole(self.logical_id + 'Role',
                             attributes=self.get_passthrough_resource_attributes())
    execution_role.AssumeRolePolicyDocument = IAMRolePolicies.lambda_assume_role_policy()

    # Every function gets the basic execution (CloudWatch Logs) permissions.
    managed_policy_arns = [
        ArnGenerator.generate_aws_managed_policy_arn('service-role/AWSLambdaBasicExecutionRole')]
    if self.Tracing:
        # X-Ray tracing requires permission to upload trace segments.
        managed_policy_arns.append(
            ArnGenerator.generate_aws_managed_policy_arn('AWSXrayWriteOnlyAccess'))

    function_policies = FunctionPolicies({"Policies": self.Policies},
                                         policy_template_processor=None)
    policy_documents = []

    if self.DeadLetterQueue:
        # Grant permission to publish failed invocations to the DLQ target.
        policy_documents.append(IAMRolePolicies.dead_letter_queue_policy(
            self.dead_letter_queue_policy_actions[self.DeadLetterQueue['Type']],
            self.DeadLetterQueue['TargetArn']))

    for index, policy_entry in enumerate(function_policies.get()):
        if policy_entry.type is PolicyTypes.POLICY_STATEMENT:
            # Inline policy statements get a synthetic, unique policy name.
            policy_documents.append({
                'PolicyName': execution_role.logical_id + 'Policy' + str(index),
                'PolicyDocument': policy_entry.data
            })
        elif policy_entry.type is PolicyTypes.MANAGED_POLICY:
            # Friendly managed-policy names are resolved to full ARNs via the map.
            policy_arn = policy_entry.data
            if isinstance(policy_entry.data, string_types) and policy_entry.data in managed_policy_map:
                policy_arn = managed_policy_map[policy_entry.data]
            if policy_arn not in managed_policy_arns:  # de-duplicate
                managed_policy_arns.append(policy_arn)
        else:
            raise InvalidResourceException(
                self.logical_id,
                "Policy at index {} in the 'Policies' property is not valid".format(index))

    execution_role.ManagedPolicyArns = list(managed_policy_arns)
    execution_role.Policies = policy_documents or None
    execution_role.PermissionsBoundary = self.PermissionsBoundary
    return execution_role
def _tosubs(self, ixlist):
n = len(ixlist)
N = self._n
ss = []
ms = []
if n == 0:
return ss, ms
j = 0
ix = ixlist[j]
if ix >= N or ix < -N:
raise IndexError(
% (ix, N))
if ix < 0:
ix += N
while j < n:
for s in range(0, self._n):
low = self._si[s]
high = self._si[s + 1]
if ix >= low and ix < high:
ss.append(s)
msj = [ix - low]
j += 1
while j < n:
ix = ixlist[j]
if ix >= N or ix < -N:
raise IndexError(
% (
ix, N))
if ix < 0:
ix += N
if ix < low or ix >= high:
break
msj.append(ix - low)
j += 1
ms.append(msj)
if ix < low:
break
return ss, ms | Maps a list of integer indices to sub-indices.
ixlist can contain repeated indices and does not need to be sorted.
Returns pair (ss, ms) where ss is a list of subsim numbers and ms is a
list of lists of subindices m (one list for each subsim in ss). |
def map_fit(interface, state, label, inp):
    """Count occurrences of feature values for every row in the given data chunk.

    For continuous features it returns the number of values and calculates the
    mean and variance for every feature.  For discrete features it counts
    occurrences of (label, feature index, feature value) triples.  Label
    totals are emitted as well.
    """
    import numpy as np
    combiner = {}
    out = interface.output(0)
    for row in inp:
        row = row.strip().split(state["delimiter"])
        if len(row) > 1:
            for i, j in enumerate(state["X_indices"]):
                if row[j] not in state["missing_vals"]:
                    pair = row[state["y_index"]] + state["delimiter"] + str(j)
                    if state["X_meta"][i] == "c":
                        # Continuous feature: collect raw values for mean/var.
                        if pair in combiner:
                            combiner[pair].append(np.float32(row[j]))
                        else:
                            combiner[pair] = [np.float32(row[j])]
                    else:
                        # Discrete feature: count (label, index, value) triples.
                        pair += state["delimiter"] + row[j]
                        combiner[pair] = combiner.get(pair, 0) + 1
            combiner[row[state["y_index"]]] = combiner.get(row[state["y_index"]], 0) + 1
    # BUGFIX: dict.iteritems() is Python-2-only; items() works on 2 and 3.
    for k, v in combiner.items():
        if len(k.split(state["delimiter"])) == 2:
            # Continuous feature summary: (count, mean, variance).
            out.add(k, (np.size(v), np.mean(v, dtype=np.float32), np.var(v, dtype=np.float32)))
        else:
            out.add(k, v)
def simplify_other(major, minor, dist):
    """Simplify a point FeatureCollection of POIs against another, by distance.

    Attention: point featurecollection only.

    Keyword arguments:
    major -- major geojson
    minor -- minor geojson
    dist  -- distance

    Returns a geojson FeatureCollection combining the major features with
    those minor features farther than `dist` from every major feature.

    NOTE(review): the GeoJSON key/type literals below were stripped in the
    source dump and reconstructed from the GeoJSON spec — confirm upstream.
    """
    result = deepcopy(major)
    if major['type'] == 'FeatureCollection' and minor['type'] == 'FeatureCollection':
        # Convert the metric distance to a degree span (Earth radius 6371 km),
        # doubled to get a generous bounding-box pre-filter.
        arc = dist / 6371000 * 180 / math.pi * 2
        for minorfeature in minor['features']:
            minorgeom = minorfeature['geometry']
            minorlng = minorgeom['coordinates'][0]
            minorlat = minorgeom['coordinates'][1]
            is_accept = True
            for mainfeature in major['features']:
                maingeom = mainfeature['geometry']
                mainlng = maingeom['coordinates'][0]
                mainlat = maingeom['coordinates'][1]
                # Cheap bounding-box test before the exact distance check.
                if abs(minorlat - mainlat) <= arc and abs(minorlng - mainlng) <= arc:
                    distance = point_distance(maingeom, minorgeom)
                    if distance < dist:
                        is_accept = False
                        break
            if is_accept:
                result["features"].append(minorfeature)
    return result
def _get_id(owner, date, content):
h = hashlib.sha256()
h.update("github.com/spacetelescope/asv".encode())
for x in content:
if x is None:
h.update(",".encode())
else:
h.update(x.encode())
h.update(",".encode())
if date is None:
date = datetime.datetime(1970, 1, 1)
return "tag:{0},{1}:/{2}".format(owner, date.strftime(), h.hexdigest()) | Generate an unique Atom id for the given content |
def lsfiles(root=".", **kwargs):
    """Return only files from a directory listing.

    Arguments:
        root (str): Path to directory. Can be relative or absolute.
        **kwargs: Any additional arguments to be passed to ls().

    Returns:
        list of str: A list of file paths.

    Raises:
        OSError: If root directory does not exist.
    """
    listing = ls(root=root, **kwargs)
    # A file root lists only itself, so the listing is already all files.
    if isfile(root):
        return listing
    return list(filter(lambda entry: isfile(path(root, entry)), listing))
def run(self):
    """Run the schedule by executing the main task's thread to completion."""
    worker = self.main_task.thread
    worker.start()
    worker.join()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.