| code | docstring |
|---|---|
def _drop_axis(self, labels, axis, level=None, errors='raise'):
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
                raise AssertionError('axis must be a MultiIndex')
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
                raise AssertionError('axis must be a MultiIndex')
indexer = ~axis.get_level_values(level).isin(labels)
            # raise if none of the requested labels are present
            if errors == 'raise' and indexer.all():
                raise KeyError('{} not found in axis'.format(labels))
else:
indexer = ~axis.isin(labels)
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result | Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
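A minimal usage sketch (``_drop_axis`` is internal; it is normally reached
through the public ``drop`` method, and the frame below is assumed):
>>> df = pd.DataFrame({'x': [1, 2]}, index=['a', 'b'])
>>> df.drop('a')  # dispatches to _drop_axis(['a'], axis=0)
   x
b  2 |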
def read_file(filename):
here = os.path.abspath(os.path.dirname(__file__))
    # the stripped arguments are assumed to be a plain filename join plus an
    # explicit encoding
    with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:
return f.read() | Read package file as text to get name and version |
def validate(cls, mapper_spec):
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = _get_params(mapper_spec)
if cls.BLOB_KEYS_PARAM not in params:
raise BadReaderParamsError("Must specify for mapper input")
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
blob_keys = blob_keys.split(",")
if len(blob_keys) > cls._MAX_BLOB_KEYS_COUNT:
raise BadReaderParamsError("Too many for mapper input")
if not blob_keys:
raise BadReaderParamsError("No specified for mapper input")
for blob_key in blob_keys:
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
if not blob_info:
raise BadReaderParamsError("Could not find blobinfo for key %s" %
blob_key) | Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid. |
def _add_zone(self, zone, name='', status=Zone.CLEAR, expander=False):
if not zone in self._zones:
self._zones[zone] = Zone(zone=zone, name=name, status=None, expander=expander)
self._update_zone(zone, status=status) | Adds a zone to the internal zone list.
:param zone: zone number
:type zone: int
:param name: human readable zone name
:type name: string
:param status: zone status
:type status: int |
def group_by_match(self, variant):
locus = to_locus(variant)
if len(variant.ref) != len(locus.positions):
logging.warning(
"Ref is length %d but locus has %d bases in variant: %s" %
(len(variant.ref), len(locus.positions), str(variant)))
alleles_dict = self.group_by_allele(locus)
single_base_loci = [
Locus.from_interbase_coordinates(locus.contig, position)
for position in locus.positions
]
empty_pileups = dict(
(locus, Pileup(locus=locus, elements=[]))
for locus in single_base_loci)
empty_collection = PileupCollection(pileups=empty_pileups, parent=self)
ref = {variant.ref: alleles_dict.pop(variant.ref, empty_collection)}
alt = {variant.alt: alleles_dict.pop(variant.alt, empty_collection)}
other = alleles_dict
    return MatchingEvidence(ref, alt, other) | Given a variant, split the PileupCollection based on whether the
data supports the reference allele, the alternate allele, or neither.
Parameters
----------
variant : Variant
The variant. Must have fields 'locus', 'ref', and 'alt'.
Returns
----------
A MatchingEvidence named tuple with fields (ref, alt, other),
each of which is a string -> PileupCollection dict mapping alleles
to the PileupCollection of evidence supporting them. |
def dec_decimal_to_sexegesimal(
self,
dec,
delimiter=":"):
    # log messages and format literals below were lost in extraction and are
    # reconstructed; the original messages said "RA", corrected here to "DEC"
    self.log.debug('starting the ``dec_decimal_to_sexegesimal`` method')
    import math
    try:
        self.log.debug("attempting to convert DEC to float")
        dec = float(dec)
    except Exception as e:
        self.log.error(
            "could not convert DEC to float - failed with this error: %s " % (str(e),))
        return -1
    if dec > -90. and dec < 90.:
        pass
    else:
        self.log.error(
            "DEC must be between -90 - 90 degrees")
        return -1
    if (dec >= 0):
        hemisphere = '+'
    else:
        hemisphere = '-'
        dec *= -1
    decimalLen = len(repr(dec).split(".")[-1])
    precision = decimalLen - 4
    dec_deg = int(dec)
    dec_mm = int((dec - dec_deg) * 60)
    dec_ss = int(((dec - dec_deg) * 60 - dec_mm) * 60)
    dec_f = (((dec - dec_deg) * 60 - dec_mm) * 60) - dec_ss
    dec_f = repr(dec_f)[2:]
    dec_f = dec_f[:precision]
    if len(dec_f):
        dec_f = "." + dec_f
    if precision < 0:
        dec_f = ""
    sexegesimal = hemisphere + "%02d" % dec_deg + delimiter + \
        "%02d" % dec_mm + delimiter + "%02d" % dec_ss + dec_f
    self.log.debug('completed the ``dec_decimal_to_sexegesimal`` method')
return sexegesimal | *Convert a declination between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``dec`` -- DEC in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace dec_to_sex in dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_decimal_to_sexegesimal(
dec="-3.454676456",
delimiter=":"
)
print dec
# OUT: -03:27:16.8 |
def penalize_boundary_complexity(shp, w=20, mask=None, C=0.5):
def inner(T):
arr = T("input")
if mask is None:
mask_ = np.ones(shp)
mask_[:, w:-w, w:-w] = 0
else:
mask_ = mask
blur = _tf_blur(arr, w=5)
diffs = (blur-arr)**2
diffs += 0.8*(arr-C)**2
return -tf.reduce_sum(diffs*mask_)
    return inner | Encourage the boundaries of an image to have less variation and to be close to the color C.
Args:
shp: shape of T("input") because this may not be known.
w: width of boundary to penalize. Ignored if mask is set.
mask: mask describing what area should be penalized.
Returns:
Objective. |
def _calc_sizes(self, cnv_file, items):
bp_per_bin = 100000
range_map = {"target": (100, 250), "antitarget": (10000, 1000000)}
target_bps = []
anti_bps = []
checked_beds = set([])
for data in items:
region_bed = tz.get_in(["depth", "variant_regions", "regions"], data)
if region_bed and region_bed not in checked_beds:
with utils.open_gzipsafe(region_bed) as in_handle:
for r in pybedtools.BedTool(in_handle).intersect(cnv_file):
if r.stop - r.start > range_map["target"][0]:
target_bps.append(float(r.name))
with utils.open_gzipsafe(region_bed) as in_handle:
for r in pybedtools.BedTool(in_handle).intersect(cnv_file, v=True):
if r.stop - r.start > range_map["target"][1]:
anti_bps.append(float(r.name))
checked_beds.add(region_bed)
def scale_in_boundary(raw, round_interval, range_targets):
min_val, max_val = range_targets
out = int(math.ceil(raw / float(round_interval)) * round_interval)
if out > max_val:
return max_val
elif out < min_val:
return min_val
else:
return out
if target_bps and np.median(target_bps) > 0:
raw_target_bin = bp_per_bin / float(np.median(target_bps))
target_bin = scale_in_boundary(raw_target_bin, 50, range_map["target"])
else:
target_bin = range_map["target"][1]
if anti_bps and np.median(anti_bps) > 0:
raw_anti_bin = bp_per_bin / float(np.median(anti_bps))
anti_bin = scale_in_boundary(raw_anti_bin, 10000, range_map["antitarget"])
else:
anti_bin = range_map["antitarget"][1]
return target_bin, anti_bin | Retrieve target and antitarget bin sizes based on depth.
Similar to CNVkit's do_autobin but tries to have a standard set of
ranges (50bp intervals for target and 10kb intervals for antitarget). |
def to_timestamp(val):
if isinstance(val, numbers.Number):
return val
elif isinstance(val, six.string_types):
dt = _parse_datetime_string(val)
else:
dt = val
return time.mktime(dt.timetuple()) | Takes a value that is either a Python date, datetime, or a string
representation of a date/datetime value. Returns a standard Unix timestamp
corresponding to that value.
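A minimal usage sketch:
>>> to_timestamp(1514764800.0)  # numbers pass through unchanged
1514764800.0
>>> import datetime
>>> ts = to_timestamp(datetime.date(2018, 1, 1))  # local-timezone dependent |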
def setValues(nxG, nyG, iBeg, iEnd, jBeg, jEnd, data):
nxGHalf = nxG/2.
nyGHalf = nyG/2.
nxGQuart = nxGHalf/2.
nyGQuart = nyGHalf/2.
for i in range(data.shape[0]):
iG = iBeg + i
di = iG - nxG
for j in range(data.shape[1]):
jG = jBeg + j
dj = jG - 0.8*nyG
            data[i, j] = numpy.floor(1.9*numpy.exp(-di**2/nxGHalf**2 - dj**2/nyGHalf**2)) | Set the values of the local data array
@param nxG number of global cells in x
@param nyG number of global cells in y
@param iBeg global starting index in x
@param iEnd global ending index in x
@param jBeg global starting index in y
@param jEnd global ending index in y
@param data local array |
def deconstruct(self, including_private: bool=False) -> bytes:
data = self._deconstruct_v1(including_private=including_private)
return compress_datablob(DATA_BLOB_MAGIC, 1, data) | Return state of this FinTSClient instance as an opaque datablob. You should not
use this object after calling this method.
Information about the connection is implicitly retrieved from the bank and
cached in the FinTSClient. This includes: system identifier, bank parameter
data, user parameter data. It's not strictly required to retain this information
across sessions, but beneficial. If possible, an API user SHOULD use this method
to serialize the client instance before destroying it, and provide the serialized
data next time an instance is constructed.
Parameter `including_private` should be set to True, if the storage is sufficiently
secure (with regards to confidentiality) to include private data, specifically,
account numbers and names. Most often this is the case.
Note: No connection information is stored in the datablob, neither is the PIN. |
def metric_delete(self, project, metric_name):
path = "projects/%s/metrics/%s" % (project, metric_name)
self._gapic_api.delete_log_metric(path) | API call: delete a metric resource.
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric |
def export_default_probes(path, module_name='', raise_errors=False):
    raise NotImplementedError
    import b26_toolkit.b26_toolkit.instruments as instruments
    from pylabcontrol.core import Probe
    for name, obj in inspect.getmembers(instruments):
        if inspect.isclass(obj):
            try:
                instrument = obj()
                # the print/format literals below were lost in extraction;
                # plausible placeholders are used
                print(('created instrument', obj.__name__, 'now saving probes'))
                for probe_name, probe_info in instrument._PROBES.items():
                    probe = Probe(instrument, probe_name, info=probe_info)
                    filename = os.path.join(path, '{:s}.b26'.format(instrument.name))
                    probe.save(filename)
            except Exception:
                print(('could not create instrument {:s}'.format(obj.__name__)))
                print(('skipping probes for {:s}'.format(obj.__name__))) | NOT IMPLEMENTED YET
tries to instantiate all the instruments that are imported in /instruments/__init__.py
and the probes of each instrument that could be instantiated into a .b26 file in the folder path
Args:
path: target path for .b26 files |
def init_parsecmdline(argv=[]):
    parser = argparse.ArgumentParser(prog=PKG_NAME)
    parser.add_argument('--version', action='version', version=version)
    parser.add_argument("-c", "--config",
                        action="store",
                        dest="config_file", default=config.CONF_DEFAULT_FILE,
                        help="specify configuration file to use")
    parser.add_argument("-d", "--dry-run",
                        action="store_true", dest="dry_run", default=False,
                        help="don't actually do anything, just simulate")
    # the extract fused the dry-run help text with a later error message;
    # the configuration-file existence check below is a reconstruction
    args = parser.parse_args(argv)
    config_file = args.config_file
    if not os.path.isfile(config_file):
        raise IOError("'{config_file}' not found!"
                      .format(config_file=config_file)) | Parse arguments from the command line
:param argv: list of arguments |
def comment_magic(source, language='python', global_escape_flag=True):
parser = StringParser(language)
next_is_magic = False
for pos, line in enumerate(source):
if not parser.is_quoted() and (next_is_magic or is_magic(line, language, global_escape_flag)):
            source[pos] = _COMMENT[language] + ' ' + line
            next_is_magic = language == 'python' and _LINE_CONTINUATION_RE.match(line)
parser.read_line(line)
    return source | Escape Jupyter magics with '# ' |
def put(self, items, indexes=True):
actions = []
for cid, fc in items:
        # the per-item action dict was lost in extraction; a minimal
        # reconstruction indexing the FC under its content id
        actions.append({
            '_index': self.index,
            '_type': self.type,
            '_id': cid,
            '_source': {'fc': fc},
        })
bulk(self.conn, actions, timeout=60, request_timeout=60) | Adds feature collections to the store.
This efficiently adds multiple FCs to the store. The iterable
of ``items`` given should yield tuples of ``(content_id, FC)``.
:param items: Iterable of ``(content_id, FC)``.
:param bool indexes:
    When true (the default), derived index entries for the stored FCs
    are created as well. |
def _build_connection_args(self, klass):
bases = [base for base in klass.__bases__ if base is not object]
all_args = []
for cls in [klass] + bases:
try:
args = inspect.getfullargspec(cls.__init__).args
except AttributeError:
args = inspect.getargspec(cls.__init__).args
for arg in args:
if arg in all_args:
continue
all_args.append(arg)
    all_args.remove('self')
    return all_args | Read connection args spec, exclude ``self`` from the list of possible arguments.
:param klass: Redis connection class. |
def set_target_temperature(self, temperature, mode=config.SCHEDULE_HOLD):
if temperature < self.min_temperature:
temperature = self.min_temperature
if temperature > self.max_temperature:
temperature = self.max_temperature
modes = [config.SCHEDULE_TEMPORARY_HOLD, config.SCHEDULE_HOLD]
if mode not in modes:
raise Exception("Invalid mode. Please use one of: {}".format(modes))
self.set_data({
"SetPointTemp": temperature,
"ScheduleMode": mode
}) | Updates the target temperature on the NuHeat API
:param temperature: The desired temperature in NuHeat format
:param mode: The hold mode. With a temporary hold, the schedule will
resume at the next programmed event |
def surface_evaluate_cartesian_multi(surface, points):
if NO_IMAGES:
return
ax = surface.plot(256)
ax.plot(
points[0, :], points[1, :], color="black", linestyle="None", marker="o"
)
delta = 1.0 / 32.0
font_size = 18
ax.text(
points[0, 0],
points[1, 0],
r"$w_0$",
fontsize=font_size,
verticalalignment="top",
horizontalalignment="right",
)
ax.text(
points[0, 1] + 2 * delta,
points[1, 1],
r"$w_1$",
fontsize=font_size,
verticalalignment="center",
horizontalalignment="left",
)
ax.text(
points[0, 2],
points[1, 2] + delta,
r"$w_2$",
fontsize=font_size,
verticalalignment="bottom",
horizontalalignment="left",
)
ax.axis("scaled")
ax.set_xlim(-3.125, 2.375)
ax.set_ylim(-0.25, 2.125)
    save_image(ax.figure, "surface_evaluate_cartesian_multi.png") | Image for :meth:`.Surface.evaluate_cartesian_multi` docstring. |
def _row_should_be_placed(self, row, position):
placed_row = self._rows_in_grid.get(row)
return placed_row is None or placed_row.y < position.y | :return: whether to place this instruction |
def correlation(P, obs1, obs2=None, times=[1], k=None):
M = P.shape[0]
T = np.asarray(times).max()
if T < M:
return correlation_matvec(P, obs1, obs2=obs2, times=times)
else:
        return correlation_decomp(P, obs1, obs2=obs2, times=times, k=k) | Time-correlation for equilibrium experiment.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : list of int (optional)
List of times (in tau) at which to compute correlation
k : int (optional)
Number of eigenvectors and eigenvalues to use for computation
Returns
-------
correlations : ndarray
Correlation values at given times |
def query(self):
output = Query()
for column, op, plugin, editor in self._plugins:
query = Query(column)
if plugin.setupQuery(query, op, editor):
output &= query
return output | Builds the query for this quick filter.
:return <orb.Query> |
def discover():
if CFG["plugins"]["autoload"]:
report_plugins = CFG["plugins"]["reports"].value
for plugin in report_plugins:
try:
importlib.import_module(plugin)
LOG.debug("Found report: %s", plugin)
except ImportError:
LOG.error("Could not find ", plugin) | Import all experiments listed in *_PLUGINS_REPORTS.
Tests:
>>> from benchbuild.settings import CFG
>>> from benchbuild.reports import discover
>>> import logging as lg
>>> import sys
>>> l = lg.getLogger('benchbuild')
>>> l.setLevel(lg.DEBUG)
>>> l.handlers = [lg.StreamHandler(stream=sys.stdout)]
>>> CFG["plugins"]["reports"] = ["benchbuild.non.existing", "benchbuild.reports.raw"]
>>> discover()
Could not find 'benchbuild.non.existing'
Found report: benchbuild.reports.raw |
def remove_known_hosts(overcloud_ip):
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
        command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command) | For a given IP address remove SSH keys from the known_hosts file |
def haslayer(self, cls):
if cls == "NTP":
if isinstance(self, NTP):
return True
elif issubtype(cls, NTP):
if isinstance(self, cls):
return True
return super(NTP, self).haslayer(cls) | Specific: NTPHeader().haslayer(NTP) should return True. |
def request_permission(cls, permissions):
app = AndroidApplication.instance()
f = app.create_future()
def on_result(perms):
allowed = True
for p in permissions:
allowed = allowed and perms.get(p, False)
f.set_result(allowed)
app.request_permissions(permissions).then(on_result)
    return f | Requests permission and returns a future result that returns a
boolean indicating if all the given permission were granted or denied. |
def find(self, name):
    if name.__class__ is Pro or name.__class__ is Smart:  # class names inferred from the docstring
pattern = name.vm_name
else:
pattern = name
self.last_search_result = [vm for vm in self if pattern in vm.vm_name]
return self.last_search_result | Return a list of subset of VM that match the pattern name
@param name (str): the vm name of the virtual machine
@param name (Obj): the vm object that represent the virtual
machine (can be Pro or Smart)
    @return (list): the subset containing the search result. |
def body_lines(self):
if not self.message.is_multipart():
body = self.message.get_payload(None, decode=True)
else:
_, _, body = self.message.as_string().partition("\n\n")
if isinstance(body, bytes):
        for enc in ['ascii', 'utf-8']:  # candidate encodings assumed
try:
body = body.decode(enc)
break
except UnicodeDecodeError:
continue
else:
body = self.message.get_payload(None, decode=False)
return body.splitlines(True) | Return a normalized list of lines from message's body. |
def setup_http_session():
if args.http_proxy:
streamlink.set_option("http-proxy", args.http_proxy)
if args.https_proxy:
streamlink.set_option("https-proxy", args.https_proxy)
if args.http_cookie:
streamlink.set_option("http-cookies", dict(args.http_cookie))
if args.http_header:
streamlink.set_option("http-headers", dict(args.http_header))
if args.http_query_param:
streamlink.set_option("http-query-params", dict(args.http_query_param))
if args.http_ignore_env:
streamlink.set_option("http-trust-env", False)
if args.http_no_ssl_verify:
streamlink.set_option("http-ssl-verify", False)
if args.http_disable_dh:
streamlink.set_option("http-disable-dh", True)
if args.http_ssl_cert:
streamlink.set_option("http-ssl-cert", args.http_ssl_cert)
if args.http_ssl_cert_crt_key:
streamlink.set_option("http-ssl-cert", tuple(args.http_ssl_cert_crt_key))
if args.http_timeout:
streamlink.set_option("http-timeout", args.http_timeout)
if args.http_cookies:
streamlink.set_option("http-cookies", args.http_cookies)
if args.http_headers:
streamlink.set_option("http-headers", args.http_headers)
if args.http_query_params:
streamlink.set_option("http-query-params", args.http_query_params) | Sets the global HTTP settings, such as proxy and headers. |
def send_email(sender, receivers, subject, text=None, html=None, charset='utf-8', config=Injected):
    # config key names ('smtp', 'sender', 'server') are assumed; the original
    # literals were lost in extraction
    smtp_config = config['smtp']
    if not isinstance(receivers, list) and not isinstance(receivers, tuple):
        receivers = [receivers]
    msgs = []
    if text is not None:
        msgs.append(MIMEText(text, 'plain', charset))
    if html is not None:
        msgs.append(MIMEText(html, 'html', charset))
    if len(msgs) == 0:
        raise Exception("No message is given.")
    if len(msgs) == 1:
        msg = msgs[0]
    else:
        msg = MIMEMultipart()
        for m in msgs:
            msg.attach(m)
    if sender is None:
        sender = smtp_config['sender']
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = ", ".join(receivers)
    smtp_server = smtplib.SMTP(**(smtp_config['server']))
smtp_server.sendmail(sender, receivers, msg.as_string())
smtp_server.quit() | Sends an email.
:param sender: Sender as string or None for default got from config.
:param receivers: String or array of recipients.
:param subject: Subject.
:param text: Plain text message.
:param html: Html message.
:param charset: Charset.
:param config: Current configuration |
def fromISO8601TimeAndDate(klass, iso8601string, tzinfo=None):
    # NOTE: the regex group-name literals in this method were lost in
    # extraction; the names below follow epsilon.extime's ISO 8601 groups
    def calculateTimezone():
        if groups['zulu'] == 'Z':
            return FixedOffset(0, 0)
        else:
            tzhour = groups.pop('tzhour')
            tzmin = groups.pop('tzmin')
            if tzhour is not None:
                return FixedOffset(int(tzhour), int(tzmin or 0))
        return tzinfo or FixedOffset(0, 0)
    def coerceGroups():
        groups['month'] = groups['month1'] or groups['month2']
        groups['day'] = groups['day1'] or groups['day2']
        defaultTo0 = ['hour', 'minute', 'second']
        defaultTo1 = ['month', 'day', 'week', 'weekday', 'dayofyear']
        if groups['fractionalsec'] is None:
            groups['fractionalsec'] = '0'
        for key in defaultTo0:
            if groups[key] is None:
                groups[key] = 0
        for key in defaultTo1:
            if groups[key] is None:
                groups[key] = 1
        groups['fractionalsec'] = float('.' + groups['fractionalsec'])
        for key in defaultTo0 + defaultTo1 + ['year']:
            groups[key] = int(groups[key])
        for group, min, max in [
                ('week', 1, 53),
                ('weekday', 1, 7),
                ('month', 1, 12),
                ('day', 1, 31),
                ('hour', 0, 24),
                ('minute', 0, 59),
                ('second', 0, 61)]:
            if not min <= groups[group] <= max:
                raise ValueError('%s must be in %i..%i' % (group, min, max))
    # the regex match and the nested helpers calculateDtime() and
    # determineResolution() were elided in the extract; the match step is
    # sketched here (attribute name assumed) so the flow reads coherently
    match = klass._iso8601Regex.match(iso8601string)
    if match is None:
        raise ValueError('%r could not be parsed as ISO 8601' % (iso8601string,))
    groups = match.groupdict()
    coerceGroups()
    if groups.get('tzhour') is not None or groups.get('zulu') is not None:
        timezone = calculateTimezone()
    else:
        timezone = None
    self = klass.fromDatetime(calculateDtime(timezone))
    self.resolution = determineResolution()
    return self | Return a new Time instance from a string formatted as in ISO 8601.
If the given string contains no timezone, it is assumed to be in the
timezone specified by the parameter `tzinfo`, or UTC if tzinfo is None.
An input string with an explicit timezone will always override tzinfo.
If the given iso8601string does not contain all parts of the time, they
will default to 0 in the timezone given by `tzinfo`.
WARNING: this function is incomplete. ISO is dumb and their standards
are not free. Only a subset of all valid ISO 8601 dates are parsed,
because I can't find a formal description of the format. However,
common ones should work. |
def _read_tags_for_revset(self, spec):
    # string literals reconstructed from hgtools' Mercurial repo manager
    cmd = ['log', '--style', 'default', '--config', 'defaults.log=',
           '-r', spec]
    res = self._invoke(*cmd)
    header_pattern = re.compile(r'(?P<header>\w+?):\s+(?P<value>.*)')
    match_res = map(header_pattern.match, res.splitlines())
    matched_lines = filter(None, match_res)
    matches = (match.groupdict() for match in matched_lines)
    for match in matches:
        if match['header'] == 'changeset':
            id, sep, rev = match['value'].partition(':')
        if match['header'] == 'tag':
            tag = match['value']
            yield TaggedRevision(tag, rev) | Return TaggedRevision for each tag/rev combination in the revset spec
def _plaintext_data_key():
    # attribute and response-key names follow salt's aws_kms renderer
    response = getattr(_plaintext_data_key, 'response', None)
    cache_hit = response is not None
    if not cache_hit:
        response = _api_decrypt()
        setattr(_plaintext_data_key, 'response', response)
    key_id = response['KeyId']
    plaintext = response['Plaintext']
    if hasattr(plaintext, 'encode'):
        plaintext = plaintext.encode(__salt_system_encoding__)
    log.debug('Using key %s from %s', key_id, 'cache' if cache_hit else 'KMS API')
return plaintext | Return the configured KMS data key decrypted and encoded in urlsafe base64.
Cache the result to minimize API calls to AWS. |
def apply_cut(self, cut):
return Subsystem(self.network, self.state, self.node_indices,
cut=cut, mice_cache=self._mice_cache) | Return a cut version of this |Subsystem|.
Args:
cut (Cut): The cut to apply to this |Subsystem|.
Returns:
Subsystem: The cut subsystem. |
def new_event(event):
op_name = event.EventType.DESCRIPTOR.values_by_number[event.type].name
    if op_name == 'PUT':
        cls = PutEvent
    elif op_name == 'DELETE':
        cls = DeleteEvent
    else:
        raise Exception('Invalid op_name')
return cls(event) | Wrap a raw gRPC event in a friendlier containing class.
This picks the appropriate class from one of PutEvent or DeleteEvent and
returns a new instance. |
def _codes_to_ints(self, codes):
codes <<= self.offsets
if codes.ndim == 1:
return np.bitwise_or.reduce(codes)
return np.bitwise_or.reduce(codes, axis=1) | Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
int_keys : scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each). |
def copy_meta_from(self, ido):
self._active_scalar_info = ido.active_scalar_info
self._active_vectors_info = ido.active_vectors_info
    if hasattr(ido, '_textures'):
self._textures = ido._textures | Copies vtki meta data onto this object from another object |
def GetResourceIdOrFullNameFromLink(resource_link):
if IsNameBased(resource_link):
return TrimBeginningAndEndingSlashes(resource_link)
    if resource_link[-1] != '/':
        resource_link = resource_link + '/'
    if resource_link[0] != '/':
        resource_link = '/' + resource_link
path_parts = resource_link.split("/")
if len(path_parts) % 2 == 0:
return str(path_parts[-2])
return None | Gets resource id or full name from resource link.
:param str resource_link:
:return:
The resource id or full name from the resource link.
:rtype: str
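A minimal usage sketch (an id-based link is assumed, so ``IsNameBased``
returns False):
>>> GetResourceIdOrFullNameFromLink('dbs/xjwmAA==/colls/xjwmAPqkeAA=')
'xjwmAPqkeAA=' |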
def set_end(self,time,pass_to_command_line=True):
if pass_to_command_line:
        self.add_var_opt('gps-end-time', time)
self.__end = time
self.__data_end = time | Set the GPS end time of the analysis node by setting a --gps-end-time
option to the node when it is executed.
@param time: GPS end time of job.
@param pass_to_command_line: add gps-end-time as a variable option. |
def create_input(option, template_name, template_location="template"):
jinja2_input = {}
for item in option:
try:
jinja2_input.update(item)
except ValueError:
raise RuntimeError(
("inputs.py, create_input : format of item is not "
"supported. Expecting a dictionary.".format(str(item))))
import jinja2
try:
template_loader = jinja2.FileSystemLoader(searchpath=template_location)
template_env = jinja2.Environment(loader=template_loader)
template = template_env.get_template(template_name)
output_text = template.render(jinja2_input)
except jinja2.TemplateNotFound:
raise RuntimeError("template not found".format(template_name))
return output_text | create an input file using jinja2 by filling a template
with the values from the option variable passed in. |
def _cutout_expnum(observation, sky_coord, radius):
uri = observation.get_image_uri()
cutout_filehandle = tempfile.NamedTemporaryFile()
    disposition_filename = client.copy(uri + "({},{},{})".format(sky_coord.ra.to('degree').value,
                                                                 sky_coord.dec.to('degree').value,
                                                                 radius.to('degree').value),
cutout_filehandle.name,
disposition=True)
cutouts = decompose_content_decomposition(disposition_filename)
cutout_filehandle.seek(0)
hdulist = fits.open(cutout_filehandle)
hdulist.verify()
logger.debug("Initial Length of HDUList: {}".format(len(hdulist)))
if len(hdulist) == 1:
phdu = fits.PrimaryHDU()
phdu.header[] = "OSSOS"
hdulist.insert(0, phdu)
logger.debug("Final Length of HDUList: {}".format(len(hdulist)))
if len(cutouts) != len(hdulist) - 1:
raise ValueError("Wrong number of cutout structures found in Content-Disposition response.")
for hdu in hdulist[1:]:
cutout = cutouts.pop(0)
        if 'ASTLEVEL' not in hdu.header:
            print("WARNING: ******* NO ASTLEVEL KEYWORD ********** for {0} ********".format(observation.get_image_uri))
            hdu.header['ASTLEVEL'] = 0
        hdu.header['EXTNO'] = cutout[0]  # keyword name assumed
        naxis1 = hdu.header['NAXIS1']
        naxis2 = hdu.header['NAXIS2']
        default_datasec = "[{}:{},{}:{}]".format(1, naxis1, 1, naxis2)
        datasec = hdu.header.get('DATASEC', default_datasec)
datasec = datasec_to_list(datasec)
corners = datasec
for idx in range(len(corners)):
try:
corners[idx] = int(cutout[idx+1])
except Exception:
pass
        # keyword names (DATASEC, XOFFSET, YOFFSET) are assumed; the literals
        # were lost in extraction
        hdu.header['DATASEC'] = reset_datasec("[{}:{},{}:{}]".format(corners[0],
                                                                     corners[1],
                                                                     corners[2],
                                                                     corners[3]),
                                              hdu.header.get('DATASEC', default_datasec),
                                              hdu.header['NAXIS1'],
                                              hdu.header['NAXIS2'])
        hdu.header['XOFFSET'] = int(corners[0]) - 1
        hdu.header['YOFFSET'] = int(corners[2]) - 1
        hdu.converter = CoordinateConverter(hdu.header['XOFFSET'], hdu.header['YOFFSET'])
try:
hdu.wcs = WCS(hdu.header)
except Exception as ex:
logger.error("Failed trying to initialize the WCS for {}".format(uri))
raise ex
logger.debug("Sending back {}".format(hdulist))
return hdulist | Get a cutout from an exposure based on the RA/DEC location.
@param observation: The Observation object that contains the exposure number information.
@type observation: Observation
@param sky_coord: which RA/DEC is needed,
@type sky_coord: SkyCoord
@param radius:
@type radius: Quantity
@return: HDUList containing the cutout image.
@rtype: list(HDUList) |
def __get_keys(self, name='master', passphrase=None):
    # opts key names follow salt's crypt module
    path = os.path.join(self.opts['pki_dir'],
                        name + '.pem')
    if not os.path.exists(path):
        log.info('Generating %s keys: %s', name, self.opts['pki_dir'])
        gen_keys(self.opts['pki_dir'],
                 name,
                 self.opts['keysize'],
                 self.opts.get('user'),
                 passphrase)
    if HAS_M2:
        key_error = RSA.RSAError
    else:
        key_error = ValueError
    try:
        key = get_rsa_key(path, passphrase)
    except key_error as e:
        message = 'Unable to read key: {0}; passphrase may be incorrect'.format(path)
        log.error(message)
        raise MasterExit(message)
    log.debug('Loaded %s key: %s', name, path)
return key | Returns a key object for a key in the pki-dir |
def almost_hermitian(gate: Gate) -> bool:
return np.allclose(asarray(gate.asoperator()),
asarray(gate.H.asoperator())) | Return true if gate tensor is (almost) Hermitian |
def cupy_wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
args = list(args)
for n, a in enumerate(args):
if isinstance(a, np.ndarray):
args[n] = cp.asarray(a)
for k, v in kwargs.items():
if isinstance(v, np.ndarray):
kwargs[k] = cp.asarray(v)
rtn = func(*args, **kwargs)
        if isinstance(rtn, (list, tuple)):
            # rebuild the sequence with the same type instead of assigning
            # into a possibly-immutable tuple
            rtn = type(rtn)(cp.asnumpy(a) if isinstance(a, cp.core.core.ndarray)
                            else a for a in rtn)
else:
if isinstance(rtn, cp.core.core.ndarray):
rtn = cp.asnumpy(rtn)
return rtn
return wrapped | A wrapper function that converts numpy ndarray arguments to cupy
arrays, and converts any cupy arrays returned by the wrapped
function into numpy ndarrays. |
def setall(self, key, values):
key = self._conform_key(key)
values = [self._conform_value(x) for x in values]
ids = self._key_ids[key][:]
while ids and values:
id = ids.pop(0)
value = values.pop(0)
self._pairs[id] = (key, value)
if ids:
self._key_ids[key] = self._key_ids[key][:-len(ids)]
self._remove_pairs(ids)
for value in values:
self._key_ids[key].append(len(self._pairs))
self._pairs.append((key, value)) | Set more than one value for a given key.
Replaces all the existing values for the given key with new values,
removes extra values that are already set if we don't supply enough,
and appends values to the end if there are not enough existing spots.
>>> m = MutableMultiMap(a=1, b=2, c=3)
>>> m.sort()
>>> m.keys()
['a', 'b', 'c']
>>> m.append(('b', 4))
>>> m.setall('b', [5, 6, 7])
>>> m.allitems()
[('a', 1), ('b', 5), ('c', 3), ('b', 6), ('b', 7)] |
def _next(self):
self.summaries.rotate(-1)
current_summary = self.summaries[0]
self._update_summary(current_summary) | Get the next summary and present it. |
def addContentLen(self, content, len):
libxml2mod.xmlNodeAddContentLen(self._o, content, len) | Append the extra substring to the node content. NOTE: In
contrast to xmlNodeSetContentLen(), @content is supposed to
be raw text, so unescaped XML special chars are allowed,
entity references are not supported. |
def _cli():
    # NOTE: all option strings, help texts and defaults in this parser were
    # lost in extraction; the flag names below are hypothetical
    # reconstructions based on the attributes used later in this function
    # (args.option, args.stdout, args.version)
    parser = _argparse.ArgumentParser(
        description='command-line interface (version {})'.format(_VERSION),
        epilog=''
    )
    parser.add_argument(
        'option',
        help='option(s) to process',
        nargs='*'
    )
    parser.add_argument(
        '-s', '--stdout',
        help='write messages to stdout instead of stderr',
        action='store_true'
    )
    parser.add_argument(
        '-c', '--config',
        help='path to a configuration file',
        default='',
        metavar='FILE'
    )
    parser.add_argument(
        '-o', '--output',
        help='output destination',
        default=_NO_ARG,
        metavar='PATH'
    )
    parser.add_argument(
        '-n', '--count',
        help='numeric option',
        type=int,
        metavar='N'
    )
    parser.add_argument(
        '-q', '--quiet',
        help='suppress informational output',
        action='store_true'
    )
    parser.add_argument(
        '-f', '--force',
        help=(
            'force the operation even when '
            'it would otherwise be skipped'
        ),
        action='store_true'
    )
    parser.add_argument(
        '-V', '--version',
        help='print the version and exit',
        action='store_true'
    )
    parser.add_argument(
        '--debug',
        help='enable debug output',
        action='store_true'
    )
    args = parser.parse_args()
    options = args.option
    stream = _sys.stdout if args.stdout else _sys.stderr
    try:
        if args.version:
            stream.write('{}\n'.format(_VERSION))
            exit(0)
        pass  # the main body of the command was elided in the extract
except KeyboardInterrupt:
_sys.stderr.write("\nCTRL-C detected. Exiting.\n")
_sys.stderr.flush()
except Exception as e:
_sys.stdout.write("ERROR: {}\n".format(e))
exit(1) | CLI interface |
def _select(self, event):
def event_axes_data(event, ax):
point = event.x, event.y
x, y = ax.transData.inverted().transform_point(point)
event = copy.copy(event)
event.xdata, event.ydata = x, y
return event
def contains(artist, event):
if event.canvas is artist.figure.canvas:
return artist.contains(event)
else:
return False, {}
for anno in list(self.annotations.values()):
fixed_event = event_axes_data(event, anno.axes)
if contains(anno, fixed_event)[0]:
if event.button == self.hide_button:
self._hide_box(anno)
elif self.draggable:
return
for artist in self.artists:
fixed_event = event_axes_data(event, artist.axes)
inside, info = contains(artist, fixed_event)
if inside:
fig = artist.figure
            new_event = PickEvent('pick_event', fig.canvas, fixed_event,
                                  artist, **info)
self(new_event)
break
if self.hover:
artists = itertools.chain(self.artists, self.annotations.values())
over_something = [contains(artist, event)[0] for artist in artists]
if not any(over_something):
self.hide() | This is basically a proxy to trigger a pick event. This function is
connected to either a mouse motion or mouse button event (see
"self.enable") depending on "self.hover". If we're over a point, it
fires a pick event.
This probably seems bizarre, but it's required for hover mode (no mouse
click) and otherwise it's a workaround for picking artists in twinned
or overlapping axes.
Even if we're not in hover mode, pick events won't work properly for
twinned axes. Therefore, we manually go through all artists managed by
this datacursor and fire a pick event if the mouse is over a managed
artist. |
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
logger.debug("Yielding from append iterator")
if not isinstance(variables, list):
raise ValueError(
f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
)
yield list(
chain.from_iterable(
variable_matrix(item, parent, "product") for item in variables
)
) | This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused |
def run_scalpel(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
if region is None:
message = ("A region must be provided for Scalpel")
raise ValueError(message)
if is_paired_analysis(align_bams, items):
call_file = _run_scalpel_paired(align_bams, items, ref_file,
assoc_files, region, out_file)
else:
call_file = _run_scalpel_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return call_file | Run Scalpel indel calling, either paired tumor/normal or germline calling. |
def call_fset(self, obj, value) -> None:
vars(obj)[self.name] = self.fset(obj, value) | Store the given custom value and call the setter function. |
def delete(self, cascade=False):
if self.id:
        # endpoint name assumed; the original literal was lost in extraction
        self.connection.post('playlist/delete', playlist_id=self.id,
                             cascade=cascade)
self.id = None | Deletes this playlist. |
def mount_volume(volume, device='/dev/xvdf', mountpoint='/mnt/data', fstype='ext4'):
    # defaults taken from the docstring; the host-node key name is assumed
    _ec2().attach_volume(volume, _host_node()['id'], device)
    time.sleep(1)
    sudo('mkdir -p %s' % mountpoint)
    sudo('mount -t %s %s %s' % (fstype, device, mountpoint)) | Mount an EBS volume
Args:
volume (str): EBS volume ID
device (str): default /dev/xvdf
mountpoint (str): default /mnt/data
fstype (str): default ext4 |
def get_or_create_by_title(self, title):
try:
obj = self.get_by_title(title)
created = False
except DoesNotExistError:
obj = self.create(title=title)
created = True
return obj, created | Fetch a title, if it exists. Create it if it doesn't.
Returns a tuple with the object first, and then a boolean that
indicates whether or not the object was created fresh. True means it's
brand new.
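A minimal usage sketch (the manager instance and an initially empty store
are assumed):
>>> obj, created = manager.get_or_create_by_title('My title')
>>> created
True |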
def find_hist2d_offset(filename, reference, refwcs=None, refnames=['ra', 'dec'],
match_tolerance=5., chip_catalog=True, search_radius=15.0,
min_match=10, classify=True):
if isinstance(filename, str):
image = pf.open(filename)
rootname = filename.split("_")[0]
else:
image = filename
        rootname = image[0].header['rootname']
if not os.path.exists(reference):
log.info("Could not find input reference catalog: {}".format(reference))
raise FileNotFoundError
if refwcs is None:
refwcs = build_self_reference(image, clean_wcs=True)
log.info("Computing offset for field-of-view defined by:")
log.info(refwcs)
if isinstance(reference, str):
refcat = ascii.read(reference)
else:
refcat = reference
log.info("\nRead in reference catalog with {} sources.".format(len(refcat)))
ref_ra = refcat[refnames[0]]
ref_dec = refcat[refnames[1]]
img_cat = generate_source_catalog(image, refwcs, output=chip_catalog, classify=classify)
    img_cat.write(filename.replace(".fits", "_xy.cat"), format='ascii.commented_header',
                  overwrite=True)  # output format assumed
    seg_xy = np.column_stack((img_cat['xcentroid'], img_cat['ycentroid']))
seg_xy = seg_xy[~np.isnan(seg_xy[:, 0])]
xref, yref = refwcs.all_world2pix(ref_ra, ref_dec, 1)
xref, yref = within_footprint(image, refwcs, xref, yref)
ref_xy = np.column_stack((xref, yref))
log.info("\nWorking with {} astrometric sources for this field".format(len(ref_xy)))
ref_ra_img, ref_dec_img = refwcs.all_pix2world(xref, yref, 1)
    ref_tab = Table([ref_ra_img, ref_dec_img, xref, yref], names=['ra', 'dec', 'x', 'y'])
    ref_tab.write(reference.replace('.cat', '_{}.cat'.format(rootname)),
                  format='ascii.commented_header', overwrite=True)  # output format assumed
searchrad = search_radius / refwcs.pscale
xp, yp, nmatches, zpqual = build_xy_zeropoint(seg_xy, ref_xy,
searchrad=searchrad,
histplot=False, figure_id=1,
plotname=None, interactive=False)
hist2d_offset = (xp, yp)
    log.info('Found offset {} with {} matches'.format(hist2d_offset, nmatches))
return hist2d_offset, seg_xy, ref_xy | Iteratively look for the best cross-match between the catalog and ref.
Parameters
----------
filename : `~astropy.io.fits.HDUList` or str
Single image to extract sources for matching to
the external astrometric catalog.
reference : str or `~astropy.table.Table`
Reference catalog, either as a filename or ``astropy.Table``
containing astrometrically accurate sky coordinates for astrometric
standard sources.
refwcs : `~stwcs.wcsutil.HSTWCS`
This WCS will define the coordinate frame which will
be used to determine the offset. If None is specified, use the
WCS from the input image `filename` to build this WCS using
`build_self_reference()`.
refnames : list
List of table column names for sky coordinates of astrometric
standard sources from reference catalog.
match_tolerance : float
Tolerance (in pixels) for recognizing that a source position matches
an astrometric catalog position. Larger values allow for lower
accuracy source positions to be compared to astrometric catalog
chip_catalog : bool
Specify whether or not to write out individual source catalog for
each chip in the image.
search_radius : float
Maximum separation (in arcseconds) from source positions to look
for valid cross-matches with reference source positions.
min_match : int
Minimum number of cross-matches for an acceptable determination of
the offset.
classify : bool
Specify whether or not to use central_moments classification to
ignore likely cosmic-rays/bad-pixels when generating the source
catalog.
Returns
-------
best_offset : tuple
Offset in input image pixels between image source positions and
astrometric catalog positions that results in largest number of
matches of astrometric sources with image sources
seg_xy, ref_xy : astropy.Table
Source catalog and reference catalog, respectively, used for
determining the offset. Each catalog includes sources for the entire
field-of-view, not just a single chip. |
def add(self, match, handler):
self.routes.append((match, (
Route(handler) if not isinstance(handler, Route)
else handler
))) | Register a handler with the Router.
:param match: The first argument passed to the :meth:`match` method
when checking against this handler.
:param handler: A callable or :class:`Route` instance that will handle
matching calls. If not a Route instance, will be wrapped in one.
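A minimal usage sketch (the match and handler semantics are assumed):
>>> router = Router()
>>> router.add('/ping', lambda request: 'pong')  # wrapped in a Route |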
def derived_from_all(self, identities: List[QualName]) -> MutableSet[QualName]:
if not identities:
return set()
res = self.derived_from(identities[0])
for id in identities[1:]:
res &= self.derived_from(id)
    return res | Return the set of identities transitively derived from all the given identities. |
def grasstruth(args):
p = OptionParser(grasstruth.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
james, = args
fp = open(james)
pairs = set()
for row in fp:
atoms = row.split()
genes = []
idx = {}
for i, a in enumerate(atoms):
aa = a.split("||")
for ma in aa:
idx[ma] = i
genes.extend(aa)
genes = [x for x in genes if ":" not in x]
Os = [x for x in genes if x.startswith("Os")]
for o in Os:
for g in genes:
if idx[o] == idx[g]:
continue
pairs.add(tuple(sorted((o, g))))
for a, b in sorted(pairs):
print("\t".join((a, b))) | %prog grasstruth james-pan-grass.txt
Prepare truth pairs for 4 grasses. |
def content_list(self, key, model):
    path = PROVISION_MANAGE_CONTENT + model + '/'
    return self._request(path, key, '', 'GET', self._manage_by_cik) | Returns the list of content IDs for a given model.
This method maps to
https://github.com/exosite/docs/tree/master/provision#get---list-content-ids
Args:
key: The CIK or Token for the device
    model: The content model identifier. |
def exerciseOptions(self, id, contract, exerciseAction, exerciseQuantity, account, override):
return _swigibpy.EClient_exerciseOptions(self, id, contract, exerciseAction, exerciseQuantity, account, override) | exerciseOptions(EClient self, TickerId id, Contract contract, int exerciseAction, int exerciseQuantity, IBString const & account, int override) |
def execute(self, fragment, pretty_format=True):
self.fragments = (self.fragments + "\n" + fragment).lstrip()
try:
line_parser.parseString(self.fragments)
except ParseException:
pass
else:
self.last_query = self.fragments.strip()
self.fragments = ""
return super(FragmentEngine, self).execute(self.last_query, pretty_format)
return None | Run or aggregate a query fragment
Concat the fragment to any stored fragments. If they form a complete
query, run it and return the result. If not, store them and return
None. |
def startup_walk_for_missed_files(self):
for site in self.config["backup_sites"]:
compressed_xlog_path, _ = self.create_backup_site_paths(site)
uncompressed_xlog_path = compressed_xlog_path + "_incoming"
for filename in os.listdir(uncompressed_xlog_path):
full_path = os.path.join(uncompressed_xlog_path, filename)
if not wal.WAL_RE.match(filename) and not wal.TIMELINE_RE.match(filename):
self.log.warning("Found invalid file %r from incoming xlog directory", full_path)
continue
compression_event = {
"delete_file_after_compression": True,
"full_path": full_path,
"site": site,
"src_path": "{}.partial",
"type": "MOVE",
}
self.log.debug("Found: %r when starting up, adding to compression queue", compression_event)
self.compression_queue.put(compression_event)
        # second pass over already-compressed files (the loop header was lost
        # in the extract)
        for filename in os.listdir(compressed_xlog_path):
            if filename.endswith(".metadata"):
                continue
            full_path = os.path.join(compressed_xlog_path, filename)
metadata_path = full_path + ".metadata"
is_xlog = wal.WAL_RE.match(filename)
is_timeline = wal.TIMELINE_RE.match(filename)
if not ((is_xlog or is_timeline) and os.path.exists(metadata_path)):
self.log.warning("Found invalid file %r from compressed xlog directory", full_path)
continue
with open(metadata_path, "r") as fp:
metadata = json.load(fp)
transfer_event = {
"file_size": os.path.getsize(full_path),
"filetype": "xlog" if is_xlog else "timeline",
"local_path": full_path,
"metadata": metadata,
"site": site,
"type": "UPLOAD",
}
self.log.debug("Found: %r when starting up, adding to transfer queue", transfer_event)
self.transfer_queue.put(transfer_event) | Check xlog and xlog_incoming directories for files that receivexlog has received but not yet
compressed as well as the files we have compressed but not yet uploaded and process them. |
def check_subprocess(cmd, source, outname):
logger = logging.getLogger(__name__)
try:
res = subprocess.run(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
    except KeyboardInterrupt:
        logger.debug('Process terminated, removing file %s', outname)
        if os.path.isfile(outname):
            os.remove(outname)
        raise
    if res.returncode:
        logger.debug('STDOUT:\n %s', res.stdout.decode())
        logger.debug('STDERR:\n %s', res.stderr.decode())
        if os.path.isfile(outname):
            logger.debug('Removing file %s', outname)
            os.remove(outname)
        raise SubprocessException('Failed to process ' + source) | Run the command to resize the video and remove the output file if the
processing fails. |
def load_labware(
self, labware_obj: Labware,
location: types.DeckLocation) -> Labware:
self._deck_layout[location] = labware_obj
return labware_obj | Specify the presence of a piece of labware on the OT2 deck.
This function loads the labware specified by `labware`
(previously loaded from a configuration file) to the location
specified by `location`.
:param Labware labware: The labware object to load
:param location: The slot into which to load the labware such as
1 or '1'
:type location: int or str |
def data(offset, bytes):
assert 0 <= offset < 65536
assert 0 < len(bytes) < 256
b = [len(bytes), (offset>>8)&0x0FF, offset&0x0FF, 0x00] + bytes
return Record._from_bytes(b) | Return Data record. This constructs the full record, including
the length information, the record type (0x00), the
checksum, and the offset.
@param offset load offset of first byte.
@param bytes list of byte values to pack into record.
@return String representation of one HEX record
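A minimal usage sketch (``Record._from_bytes`` is assumed to render
standard Intel HEX notation; the trailing 0x94 is the checksum):
>>> str(data(0x0100, [0x21, 0x46, 0x01]))
':0301000021460194' |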
def show_menu(self, status_icon, button, activate_time):
menu = self.get_widget()
menu.popup(None, None, None, Gtk.StatusIcon.position_menu, button, activate_time) | Show the tray icon menu. |
def draw_hydrogen_bonds(self,color="black"):
self.draw_hbonds=""
if self.hbonds!=None:
for bond in self.hbonds.hbonds_for_drawing:
x = str((self.molecule.x_dim-self.molecule.molsize1)/2)
y = str((self.molecule.y_dim-self.molecule.molsize2)/2)
self.draw_hbonds ="<g id= class= transform= x= y=>"+str(bond[0])+"HBonds"+str(int(self.molecule.nearest_points_coords[residue][0]))+""+str(int(self.molecule.nearest_points_coords[residue][1]))+""+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+""+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"stroke:white;stroke-width:15"+str(bond[0])+"HBonds"+str(int(self.molecule.nearest_points_coords[residue][0]))+""+str(int(self.molecule.nearest_points_coords[residue][1]))+""+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+""+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"stroke:"+color+";stroke-width:4"+str(bond[0])+"HBonds"+str(int(self.molecule.nearest_points_coords[residue][0]))+""+str(int(self.molecule.nearest_points_coords[residue][1]))+""+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+""+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"stroke:white;stroke-width:15"+str(bond[0])+"HBonds5,5"+str(int(self.molecule.nearest_points_coords[residue][0]))+""+str(int(self.molecule.nearest_points_coords[residue][1]))+""+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+""+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"stroke:"+color+";stroke-width:4' />"
self.draw_hbonds+="</g>" | For each bond that has been determined to be important, a line gets drawn. |
def vrrp_config(app, interface, config):
config_request = vrrp_event.EventVRRPConfigRequest(interface, config)
config_request.sync = True
return app.send_request(config_request) | create an instance.
returns EventVRRPConfigReply(instance.name, interface, config)
on success.
returns EventVRRPConfigReply(None, interface, config)
on failure. |
def field_type_schema(
field: Field,
*,
by_alias: bool,
    model_name_map: Dict[Type['main.BaseModel'], str],
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
definitions = {}
ref_prefix = ref_prefix or default_prefix
if field.shape is Shape.LIST:
f_schema, f_definitions = field_singleton_schema(
field, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(f_definitions)
        return {'type': 'array', 'items': f_schema}, definitions
elif field.shape is Shape.SET:
f_schema, f_definitions = field_singleton_schema(
field, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(f_definitions)
        return {'type': 'array', 'uniqueItems': True, 'items': f_schema}, definitions
elif field.shape is Shape.MAPPING:
        dict_schema: Dict[str, Any] = {'type': 'object'}
key_field = cast(Field, field.key_field)
        regex = getattr(key_field.type_, 'regex', None)
f_schema, f_definitions = field_singleton_schema(
field, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(f_definitions)
if regex:
            dict_schema['patternProperties'] = {regex.pattern: f_schema}
elif f_schema:
            dict_schema['additionalProperties'] = f_schema
return dict_schema, definitions
elif field.shape is Shape.TUPLE:
sub_schema = []
sub_fields = cast(List[Field], field.sub_fields)
for sf in sub_fields:
sf_schema, sf_definitions = field_type_schema(
sf, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(sf_definitions)
sub_schema.append(sf_schema)
if len(sub_schema) == 1:
sub_schema = sub_schema[0]
        return {'type': 'array', 'items': sub_schema}, definitions
else:
assert field.shape is Shape.SINGLETON, field.shape
f_schema, f_definitions = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
)
definitions.update(f_definitions)
return f_schema, definitions | Used by ``field_schema()``, you probably should be using that function.
Take a single ``field`` and generate the schema for its type only, not including additional
information as title, etc. Also return additional schema definitions, from sub-models. |
def _delete(self, url, data, scope):
self._create_session(scope)
response = self.session.delete(url, data=data)
return response.status_code, response.text | Make a DELETE request using the session object to a Degreed endpoint.
Args:
url (str): The url to send a DELETE request to.
data (str): The json encoded payload to DELETE.
scope (str): Must be one of the scopes Degreed expects:
- `CONTENT_PROVIDER_SCOPE`
- `COMPLETION_PROVIDER_SCOPE` |
def init_app(self, app):
self.init_config(app)
app.cli.add_command(cmd)
    # registry key and jinja extension name assumed; the literals were lost
    app.extensions['invenio-base'] = self
    app.jinja_env.add_extension('jinja2.ext.do')
self.register_signals(app) | Flask application initialization. |
def _step_decorator_args(self, decorator):
args = decorator.children[3:-2]
step = None
if len(args) == 1:
try:
step = ast.literal_eval(args[0].get_code())
except (ValueError, SyntaxError):
pass
if isinstance(step, six.string_types+(list,)):
return step
logging.error("Decorator step accepts either a string or a list of strings - %s:%d",
self.file_path, decorator.start_pos[0])
else:
logging.error("Decorator step accepts only one argument - %s:%d",
self.file_path, decorator.start_pos[0]) | Get the arguments passed to step decorators
converted to python objects. |
def hex_from(val):
if isinstance(val, integer_types):
        hex_str = '%x' % val
if len(hex_str) % 2:
            hex_str = '0' + hex_str
return hex_str
return hexlify(val) | Returns hex string representation for a given value.
:param bytes|str|unicode|int|long val:
:rtype: bytes|str
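A minimal usage sketch:
>>> hex_from(4095)  # padded to an even number of digits
'0fff'
>>> hex_from(b'\x01\x02')
b'0102' |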
def instantiate(self, params, auth=None):
try:
endpoint = (self.Meta.collection_endpoint
+ "{template_id}"). \
format(template_id=self.template_id)
if (auth is not None):
endpoint = (endpoint + "?auth_token={auth_token}"). \
format(auth_token=auth)
self.send(endpoint, "POST", json=params)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e) | Allows you to fetch the map tiles of a created map
:param params: The json with the styling info for the named map
:param auth: The auth client
:type params: dict
:type auth: :class:`carto.auth.APIKeyAuthClient`
:return:
:raise: CartoException |
def require(self, key: str) -> str:
v = self.get(key)
if v is None:
raise ConfigMissingError(self.full_key(key))
return v | Returns a configuration value by its given key. If it doesn't exist, an error is thrown.
:param str key: The requested configuration key.
:return: The configuration key's value.
:rtype: str
:raises ConfigMissingError: The configuration value did not exist.
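A minimal usage sketch (the config instance and its contents are assumed):
>>> config.require('database_url')  # returns the value when present
'postgres://localhost/app'
>>> config.require('missing_key')  # raises ConfigMissingError |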
def _handle_no_candidates(self):
if self.dom is not None and len(self.dom):
dom = prep_article(self.dom)
dom = build_base_document(dom, self._return_fragment)
return self._remove_orphans(
dom.get_element_by_id("readabilityBody"))
else:
logger.info("No document to use.")
return build_error_document(self._return_fragment) | If we fail to find a good candidate we need to find something else. |
def run(self):
device = self.model_config.torch_device()
learner = Learner(device, self.model_factory.instantiate())
callbacks = self.gather_callbacks()
metrics = learner.metrics()
training_info, hidden_state = self.resume_training(learner, callbacks, metrics)
current_phase_idx = self._select_phase_left_bound(training_info.start_epoch_idx)
current_phase = self.phases[current_phase_idx]
local_idx = training_info.start_epoch_idx - self.ladder[current_phase_idx]
current_phase.set_up_phase(training_info, learner.model, self.source)
print(current_phase.banner())
if training_info.start_epoch_idx > 0:
current_phase.restore(training_info, local_idx, learner.model, hidden_state)
training_info.on_train_begin()
for global_epoch_idx in range(training_info.start_epoch_idx + 1, self.full_number_of_epochs + 1):
iteration_phase_idx = self._select_phase_right_bound(global_epoch_idx-1)
local_idx = global_epoch_idx - self.ladder[iteration_phase_idx]
while current_phase_idx != iteration_phase_idx:
current_phase.tear_down_phase(training_info, learner.model)
current_phase_idx += 1
current_phase = self.phases[current_phase_idx]
current_phase.set_up_phase(training_info, learner.model, self.source)
print(current_phase.banner())
epoch_info = current_phase.epoch_info(training_info, global_epoch_idx, local_idx)
current_phase.execute_epoch(epoch_info, learner)
self.storage.checkpoint(epoch_info, learner.model)
if current_phase is not None:
current_phase.tear_down_phase(training_info, learner.model)
training_info.on_train_end()
return training_info | Run the command with supplied configuration |
def cmap(rgbin, N=256):
if not isinstance(rgbin[0], _string_types):
if rgbin.max() > 1:
rgbin = rgbin/256.
    cmap = mpl.colors.LinearSegmentedColormap.from_list('cmap', rgbin, N=N)  # list name assumed
return cmap | Input an array of rgb values to generate a colormap.
:param rgbin: An [mx3] array, where m is the number of input color triplets which
are interpolated between to make the colormap that is returned. hex values
can be input instead, as [mx1] in single quotes with a #.
:param N=256: The number of levels to be interpolated to.
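A minimal usage sketch:
>>> import numpy as np
>>> rgb = np.array([[1., 0., 0.], [0., 0., 1.]])  # red to blue
>>> red_blue = cmap(rgb, N=64) |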
def add_io_hook(self, hook):
def proxy(*args):
hook(*args)
self._io_hooks.append(proxy)
return self.HookRemover(lambda: self._io_hooks.remove(proxy)) | Args:
hook: This hook will be invoked for every incoming and outgoing CAN frame.
Hook arguments: (direction, frame)
See FRAME_DIRECTION_*, CANFrame. |
def to_dataset(self, dim=None, name=None):
if dim is not None and dim not in self.dims:
warnings.warn(
            'the order of the arguments on DataArray.to_dataset has changed; '
            'you now need to supply ``name`` as a keyword argument',
FutureWarning, stacklevel=2)
name = dim
dim = None
if dim is not None:
if name is not None:
            raise TypeError('cannot supply both dim and name arguments')
return self._to_dataset_split(dim)
else:
return self._to_dataset_whole(name) | Convert a DataArray to a Dataset.
Parameters
----------
dim : str, optional
Name of the dimension on this array along which to split this array
into separate variables. If not provided, this array is converted
into a Dataset of one variable.
name : str, optional
Name to substitute for this array's name. Only valid if ``dim`` is
not provided.
Returns
-------
dataset : Dataset |
def is_broker_action_done(action, rid=None, unit=None):
rdata = relation_get(rid, unit) or {}
broker_rsp = rdata.get(get_broker_rsp_key())
if not broker_rsp:
return False
rsp = CephBrokerRsp(broker_rsp)
    unit_name = local_unit().partition('/')[2]
key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
kvstore = kv()
val = kvstore.get(key=key)
if val and val == rsp.request_id:
return True
return False | Check whether broker action has completed yet.
@param action: name of action to be performed
@returns True if action complete otherwise False |
def from_base62(s):
result = 0
for c in s:
if c not in BASE62_MAP:
            raise Exception('%s is not a valid base62 string' % s)
result = result * 62 + BASE62_MAP.index(c)
return result | Convert a base62 String back into a number
:param s: The base62 encoded String
:return: The number encoded in the String (integer)
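A minimal usage sketch (``BASE62_MAP`` is assumed to order digits before
letters, i.e. '0'-'9', 'A'-'Z', 'a'-'z'):
>>> from_base62('10')
62 |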
def simxAuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode):
return c_AuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode) | Please have a look at the function description/documentation in the V-REP user manual |
def trunk_angles(nrn, neurite_type=NeuriteType.all):
vectors = trunk_vectors(nrn, neurite_type=neurite_type)
if not vectors.size:
return []
def _sort_angle(p1, p2):
ang1 = np.arctan2(*p1[::-1])
ang2 = np.arctan2(*p2[::-1])
return (ang1 - ang2)
order = np.argsort(np.array([_sort_angle(i / np.linalg.norm(i), [0, 1])
for i in vectors[:, 0:2]]))
ordered_vectors = vectors[order][:, [COLS.X, COLS.Y]]
return [morphmath.angle_between_vectors(ordered_vectors[i], ordered_vectors[i - 1])
for i, _ in enumerate(ordered_vectors)] | Calculates the angles between all the trunks of the neuron.
The angles are defined on the x-y plane and the trees
are sorted starting from the y axis, anticlockwise. |
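The ordering step can be exercised in isolation with plain numpy: each trunk direction is keyed by its angle measured from the +y axis, then sorted.

import numpy as np

def angle_from_y(v):
    # Same keying as _sort_angle above: angle of v relative to the +y axis.
    v = v / np.linalg.norm(v)
    return np.arctan2(v[1], v[0]) - np.arctan2(1.0, 0.0)

trunks = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 1.0]])
order = np.argsort([angle_from_y(v) for v in trunks])
print(trunks[order])  # trunk directions in anticlockwise angular order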
def operations(*operations):
def decorator(method):
def wrapper(cls, request, start_response, **kwargs):
result_cache = []
try:
yield from method(cls, request, **kwargs)
except Respond as e:
status = e.status
msg = utils.parse_return_annotation(method)[status]['description']
if status // 100 == 2:
e.description = msg
raise e
else:
raise CODES_TO_EXCEPTIONS[status](msg)
method.swagger_ops = operations
method.signature = inspect.signature(method)
method.source = inspect.getsource(method)
method.path_vars = utils.extract_pathvars(method)
wrapper.__name__ = method.__name__
wrapper.__doc__ = method.__doc__
wrapper.__annotations__ = method.__annotations__
wrapper.swagger_ops = method.swagger_ops
wrapper.signature = method.signature
wrapper.source = method.source
wrapper.path_vars = method.path_vars
return classmethod(wrapper)
return decorator | Decorator for marking Resource methods as HTTP operations.
This decorator does a number of different things:
- It transfers onto itself the docstring and annotations of the decorated
method, so as to be "transparent" with regard to introspection.
- It transforms the method into a classmethod.
- It invokes the method within a try-except condition, so as to
intercept and populate the Fail(<code>) conditions. |
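A heavily hedged usage sketch: Resource, Respond, the store lookup, and the annotation scheme are all assumptions about the surrounding (unnamed) framework.

class Items(Resource):  # hypothetical base class
    @operations('GET')
    def show(cls, request, item_id) -> {200: 'Item found.', 404: 'No such item.'}:
        item = yield from cls.store.get(item_id)  # assumed coroutine lookup
        if item is None:
            raise Respond(404)  # mapped to the framework's 404 exception
        raise Respond(200)      # description filled in from the annotation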
def permission_required(perm, fn=None, login_url=None, raise_exception=False, redirect_field_name=REDIRECT_FIELD_NAME):
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if isinstance(perm, six.string_types):
perms = (perm,)
else:
perms = perm
if callable(fn):
obj = fn(request, *args, **kwargs)
else:
obj = fn
user = request.user
if not user.has_perms(perms, obj):
if raise_exception:
raise PermissionDenied()
else:
return _redirect_to_login(request, view_func.__name__,
login_url, redirect_field_name)
else:
return view_func(request, *args, **kwargs)
return _wrapped_view
return decorator | View decorator that checks for the given permissions before allowing the
view to execute. Use it like this::
from django.shortcuts import get_object_or_404
from rules.contrib.views import permission_required
from posts.models import Post
def get_post_by_pk(request, post_id):
return get_object_or_404(Post, pk=post_id)
@permission_required('posts.change_post', fn=get_post_by_pk)
def post_update(request, post_id):
# ...
``perm`` is either a permission name as a string, or a list of permission
names.
``fn`` is an optional callback that receives the same arguments as those
passed to the decorated view and must return the object to check
permissions against. If omitted, the decorator behaves just like Django's
``permission_required`` decorator, i.e. checks for model-level permissions.
``raise_exception`` is a boolean specifying whether to raise a
``django.core.exceptions.PermissionDenied`` exception if the check fails.
You will most likely want to set this argument to ``True`` if you have
specified a custom 403 response handler in your urlconf. If ``False``,
the user will be redirected to the URL specified by ``login_url``.
``login_url`` is an optional custom URL to redirect the user to if
permissions check fails. If omitted or empty, ``settings.LOGIN_URL`` is
used. |
def get_id_head(self):
id_head = None
for target_node in self:
if target_node.is_head():
id_head = target_node.get_id()
break
return id_head | Returns the id of the target that is set as "head"
@rtype: string
@return: the target id (or None) of the head target |
def parse_parameters(self, parameters):
self.parameters = []
for param_name, param_value in parameters.items():
p = Parameter(param_name, param_value)
if p:
self.parameters.append(p) | Parses and sets parameters in the model. |
def cmode(self, channel, modes=):
with self.lock:
self.is_in_channel(channel)
if not modes:
self.send('MODE %s' % channel)
modes = ''
mode_set_time = None
while self.readable():
msg = self._recv(rm_colon=True, \
    expected_replies=('324', '329'))
if msg[0] == '324':
    modes = msg[2].split()[1].replace('+', '', 1)
elif msg[0] == '329':
    mode_set_time = self._m_time.localtime( \
        int(msg[2].split()[1]))
return modes, mode_set_time
else:
self.send('MODE %s %s' % (channel, modes))
if self.readable():
    msg = self._recv(expected_replies=('MODE',), \
        ignore_unexpected_replies=True)
if msg[0]:
mode = msg[2]
self.parse_cmode_string(mode, msg[1])
if not self.hide_called_events:
self.stepback() | Sets or gets the channel mode.
Required arguments:
* channel - Channel to set/get modes of.
Optional arguments:
* modes='' - Modes to set.
If not specified, the channel's current modes are returned. |
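A brief usage sketch, assuming a connected client object `irc`:

modes, created = irc.cmode('#example')   # query: e.g. ('ntk', time.struct_time(...))
irc.cmode('#example', modes='+m')        # set: mark the channel moderated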
def kwargs_only(func):
if hasattr(inspect, 'signature'):
signature = inspect.signature(func)
first_arg_name = list(signature.parameters.keys())[0]
else:
signature = inspect.getargspec(func)
first_arg_name = signature.args[0]
if first_arg_name in ('self', 'cls'):
allowable_args = 1
else:
allowable_args = 0
@wraps(func)
def wrapper(*args, **kwargs):
if len(args) > allowable_args:
raise TypeError("{} should only be called with keyword args".format(func.__name__))
return func(*args, **kwargs)
return wrapper | Make a function only accept keyword arguments.
This can be dropped in Python 3 in lieu of:
def foo(*, bar=default): |
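For example:

@kwargs_only
def connect(host='localhost', port=5432):
    return host, port

connect(port=5433)    # OK
connect('localhost')  # TypeError: connect should only be called with keyword args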
def _make_module_refnode(self, builder, fromdocname, name, contnode):
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
if synopsis:
    title += ': ' + synopsis
if deprecated:
    title += _(' (deprecated)')
if platform:
    title += ' (' + platform + ')'
return make_refnode(builder, fromdocname, docname,
                    'module-' + name, contnode, title) | Helper function to generate new xref node based on
current environment. |
def _build_pyramid(self, image, levels):
pyramid = [image]
for l in range(levels-1):
if any(x < 20 for x in pyramid[-1].shape[:2]):
break
pyramid.append(cv2.pyrDown(pyramid[-1]))
return list(reversed(pyramid)) | Returns a list of reduced-size images, from smallest to original size |
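Level shapes halve at each step, smallest first; e.g. on a hypothetical `tracker` instance exposing this method:

import numpy as np
import cv2  # required by _build_pyramid

img = np.zeros((240, 320, 3), dtype=np.uint8)
for level in tracker._build_pyramid(img, levels=4):
    print(level.shape[:2])  # (30, 40), (60, 80), (120, 160), (240, 320)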
def make_plot(self):
plot = Plot(figsize=self.figsize, dpi=self.dpi)
ax = plot.gca(xscale='auto-gps')
if self.args.legend:
nlegargs = len(self.args.legend[0])
else:
nlegargs = 0
if nlegargs > 0 and nlegargs != self.n_datasets:
warnings.warn(
    'The number of legend labels ({1}) does not match the number of '
    'time series ({0}); legend labels will be ignored'.format(
        len(self.timeseries), len(self.args.legend)))
nlegargs = 0
for i in range(0, self.n_datasets):
series = self.timeseries[i]
if nlegargs:
label = self.args.legend[0][i]
else:
label = series.channel.name
if self.usetex:
label = label_to_latex(label)
ax.plot(series, label=label)
return plot | Generate the plot from time series and arguments |
def update_hosting_device_status(self, context, host, status_info):
for status, hd_ids in six.iteritems(status_info):
hd_spec = {'hosting_device': {'status': status}}
for hd_id in hd_ids:
self._dmplugin.update_hosting_device(context, hd_id, hd_spec)
if status == const.HD_DEAD or status == const.HD_ERROR:
self._dmplugin.handle_non_responding_hosting_devices(
context, host, hd_ids) | Report status changes for hosting devices.
:param context: contains user information
:param host: originator of callback
:param status_info: Dictionary with list of hosting device ids for
each type of hosting device status to be updated i.e.::
{
HD_ACTIVE: list_of_ids_of_active_hds,
HD_NOT_RESPONDING: list_of_ids_of_not_responding_hds,
HD_DEAD: list_of_ids_of_dead_hds,
...
} |
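An illustrative payload matching the shape documented above (`callbacks` and the ids are hypothetical):

status_info = {
    const.HD_ACTIVE: ['hd-uuid-1', 'hd-uuid-2'],
    const.HD_DEAD: ['hd-uuid-3'],  # also triggers handle_non_responding_hosting_devices
}
callbacks.update_hosting_device_status(context, 'network-node-1', status_info)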
def _send_command(self, command, raw_text=False):
return self.device.show(command, raw_text=raw_text) | Wrapper for NX-API show method.
Allows more code sharing between NX-API and SSH. |
def reset(self):
with tf.name_scope(self._name + '/reset'):
return tf.group(
self._count.assign(0),
self._mean.assign(tf.zeros_like(self._mean)),
self._var_sum.assign(tf.zeros_like(self._var_sum))) | Reset the estimates of mean and variance.
Resets the full state of this class.
Returns:
Operation. |
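A graph-mode usage sketch (the tf.group/assign style implies TF1; `estimator` is a hypothetical owner of the three variables):

import tensorflow as tf

reset_op = estimator.reset()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(reset_op)  # count, mean and var_sum are zeroed in one grouped op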
def resolves_for(self, session):
if self.url:
self.actual_path = session.current_url
else:
result = urlparse(session.current_url)
if self.only_path:
self.actual_path = result.path
else:
request_uri = result.path
if result.query:
request_uri += "?{0}".format(result.query)
self.actual_path = request_uri
if isregex(self.expected_path):
return self.expected_path.search(self.actual_path)
else:
return normalize_url(self.actual_path) == normalize_url(self.expected_path) | Returns whether this query resolves for the given session.
Args:
session (Session): The session for which this query should be executed.
Returns:
bool: Whether this query resolves. |
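Behaviour at a glance (the URL is illustrative):

# Given session.current_url == 'http://example.com/users?page=2':
#   only_path=True  -> self.actual_path == '/users'
#   only_path=False -> self.actual_path == '/users?page=2'  (path plus query)
# expected_path may be a compiled regex, in which case search() is used instead
# of normalized string equality.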
def _move_agent(self, agent, direction, wrap_allowed=True):
x, y = agent.coords['x'], agent.coords['y']
print('moving agent', agent.name, 'in direction', direction, 'wrap_allowed =', wrap_allowed)
agent.coords['x'] = x + direction[0]
agent.coords['y'] = y + direction[1] | moves agent 'agent' in 'direction' |
def peer_store(key, value, relation_name=):
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
relation_set(relation_id=cluster_rid,
relation_settings={key: value})
else:
raise ValueError('Unable to detect '
                 'peer relation {}'.format(relation_name)) | Store the key/value pair on the named peer relation `relation_name`. |
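Typical charm-hook usage (the key and value are illustrative):

peer_store('leader-ip', '10.0.0.12', relation_name='cluster')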