| code | docstring |
|---|---|
def alpha(self, x, y, kwargs, k=None):
return self.lens_model.alpha(x, y, kwargs, k=k) | deflection angles
:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param k: only evaluate the k-th lens model
:return: deflection angles in units of arcsec |
def microsoft(self, key, x86=False):
    node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node'
    return os.path.join('Software', node64, 'Microsoft', key) | Return key in Microsoft software registry.
Parameters
----------
key: str
    Registry key path where to look.
x86: bool
    Force x86 software registry.
Return
------
str: value |
def get(self, wheel=False):
try:
url = get_url(self.client, self.name, self.version,
wheel, hashed_format=True)[0]
except exceptions.MissingUrlException as e:
raise SystemExit(e)
if wheel:
self.temp_dir = tempfile.mkdtemp()
save_dir = self.temp_dir
else:
save_dir = self.save_dir
        save_file = '{0}/{1}'.format(save_dir, url.split('/')[-1])
        request.urlretrieve(url, save_file)
        logger.info('Downloaded package from PyPI: {0}'.format(save_file))
return save_file | Downloads the package from PyPI.
Returns:
Full path of the downloaded file.
Raises:
PermissionError if the save_dir is not writable. |
def check_engine(handle):
    if handle == 'help':
        dump_engines()
        sys.exit(0)
    if handle not in engines.engines:
        print('Unknown template engine: %s' % (handle,), file=sys.stderr)
        sys.exit(1)
sys.exit(1) | Check availability of requested template engine. |
def get_model(client, model_id):
from google.cloud import bigquery
model = client.get_model(model_id)
full_model_id = "{}.{}.{}".format(model.project, model.dataset_id, model.model_id)
friendly_name = model.friendly_name
    print(
        "Got model '{}' with friendly_name '{}'.".format(full_model_id, friendly_name)
    ) | Sample ID: go/samples-tracker/1510 |
def _download_sdss_image(
self):
        self.log.info('starting the ``_download_sdss_image`` method')
opt = ""
if self.grid:
opt += "G"
if self.label:
opt += "L"
if self.photocat:
opt += "P"
if self.speccat:
opt += "S"
if self.invertColors:
opt += "I"
if len(opt):
opt = "opt=%(opt)s&" % locals()
width = self.pixelWidth
scale = (self.arcminWidth * 60.) / width
converter = unit_conversion(
log=self.log
)
ra = converter.ra_sexegesimal_to_decimal(
ra=self.ra
)
dec = converter.dec_sexegesimal_to_decimal(
dec=self.dec
)
        # NOTE: the SDSS cutout URL template was stripped in extraction; this
        # is a plausible reconstruction.
        url = "http://skyservice.pha.jhu.edu/DR12/ImgCutout/getjpeg.aspx?ra=%(ra)s&dec=%(dec)s&scale=%(scale)s&width=%(width)s&height=%(width)s&%(opt)s" % locals(
        )
from fundamentals.download import multiobject_download
localUrls = multiobject_download(
urlList=[url],
downloadDirectory=self.downloadDirectory,
log=self.log,
timeStamp=False,
timeout=180,
concurrentDownloads=10,
resetFilename=[self.filename],
credentials=False,
longTime=True,
indexFilenames=False
)
        print(url)
        self.log.info('completed the ``_download_sdss_image`` method')
return None | *download sdss image* |
def get(self, requirement):
filename = self.generate_filename(requirement)
for backend in list(self.backends):
try:
pathname = backend.get(filename)
if pathname is not None:
return pathname
except CacheBackendDisabledError as e:
logger.debug("Disabling %s because it requires configuration: %s", backend, e)
self.backends.remove(backend)
except Exception as e:
logger.exception("Disabling %s because it failed: %s", backend, e)
self.backends.remove(backend) | Get a distribution archive from any of the available caches.
:param requirement: A :class:`.Requirement` object.
:returns: The absolute pathname of a local file or :data:`None` when the
distribution archive is missing from all available caches. |
def _find_files(self):
files = []
for ext in self.extensions:
ext_files = util.find_files(self.root, "*" + ext)
log.debug("found {} files in ".format(
len(ext_files), ext, self.root)
)
files.extend(ext_files)
return files | Find files recursively in the root path
using provided extensions.
:return: list of absolute file paths |
def get_enumerations_from_bit_mask(enumeration, mask):
return [x for x in enumeration if (x.value & mask) == x.value] | A utility function that creates a list of enumeration values from a bit
mask for a specific mask enumeration class.
Args:
enumeration (class): The enumeration class from which to draw
enumeration values.
mask (int): The bit mask from which to identify enumeration values.
Returns:
list: A list of enumeration values corresponding to the bit mask. |
def create(self, fullname, shortname, category_id, **kwargs):
    allowed_options = ['idnumber', 'summaryformat',
                       'format', 'showgrades',
                       'newsitems', 'startdate',
                       'maxbytes', 'showreports',
                       'visible', 'groupmode',
                       'groupmodeforce', 'defaultgroupingid',
                       'enablecompletion', 'completionstartonenrol',
                       'completionnotify', 'lang',
                       'forcetheme']
if valid_options(kwargs, allowed_options):
option_params = {}
for index, key in enumerate(kwargs):
val = kwargs.get(key)
if isinstance(val, bool):
val = int(val)
            option_params.update({'courses[0][' + key + ']': val})
        params = {'wsfunction': 'core_course_create_courses',
                  'courses[0][fullname]': fullname,
                  'courses[0][shortname]': shortname,
                  'courses[0][categoryid]': category_id}
params.update(option_params)
params.update(self.request_params)
return requests.post(self.api_url, params=params, verify=False) | Create a new course
:param string fullname: The course's fullname
:param string shortname: The course's shortname
:param int category_id: The course's category
:keyword string idnumber: (optional) Course ID number. \
Yes, it's a string, blame Moodle.
:keyword int summaryformat: (optional) Defaults to 1 (HTML). \
Summary format options: (1 = HTML, 0 = Moodle, 2 = Plain, \
or 4 = Markdown)
:keyword string format: (optional) Defaults to "topics"
Topic options: (weeks, topics, social, site)
:keyword bool showgrades: (optional) Defaults to True. \
Determines if grades are shown
:keyword int newsitems: (optional) Defaults to 5. \
Number of recent items appearing on the course page
:keyword bool startdate: (optional) Timestamp when the course starts
:keyword int maxbytes: (optional) Defaults to 83886080. \
Largest size of file that can be uploaded into the course
:keyword bool showreports: Defaults to True. Are activity reports shown?
:keyword bool visible: (optional) Determines if course is \
visible to students
:keyword int groupmode: (optional) Defaults to 2.
options: (0 = no group, 1 = separate, 2 = visible)
:keyword bool groupmodeforce: (optional) Defaults to False. \
Force group mode
:keyword int defaultgroupingid: (optional) Defaults to 0. \
Default grouping id
:keyword bool enablecompletion: (optional) Enable control via \
completion in activity settings.
:keyword bool completionstartonenrol: (optional) \
Begin tracking a student's progress in course completion after
:keyword bool completionnotify: (optional) Default? Dunno. \
Presumably notifies course completion
:keyword string lang: (optional) Force course language.
:keyword string forcetheme: (optional) Name of the force theme
Example Usage::
>>> import muddle
>>> muddle.course().create('a new course', 'new-course', 20) |
def _extract_packages(self):
if not hasattr(self, "retrieved_packages_unpacked"):
self.retrieved_packages_unpacked = [self.package_name]
for path in self.retrieved_packages_unpacked:
package_name = basename(path)
            self.path_unpacked = join(CFG_UNPACKED_FILES,
                                      package_name.split('.')[0])
self.logger.debug("Extracting package: %s"
% (path.split("/")[-1],))
try:
if "_archival_pdf" in self.path_unpacked:
self.path_unpacked = (self.path_unpacked
.rstrip("_archival_pdf"))
ZipFile(path).extractall(join(self.path_unpacked,
"archival_pdfs"))
else:
ZipFile(path).extractall(self.path_unpacked)
except Exception:
register_exception(alert_admin=True,
prefix="OUP error extracting package.")
self.logger.error("Error extraction package file: %s"
% (path,))
if hasattr(self, "path_unpacked"):
return self.path_unpacked | Extract a package in a new directory. |
def do_commits(self):
    git_clone_command = "git clone " + str(self.git_repo_url)
    subprocess.call(git_clone_command, shell=True)
    # NOTE: the string literals in the subprocess calls below were lost in
    # extraction; the git arguments shown are a plausible reconstruction.
    subprocess.check_call(
        ['touch', 'gitHeart.txt'], cwd=self.repository_name)
    self.append_onto_file(self.repository_name+"/gitHeart.txt", HEADER)
    subprocess.check_call(
        ['git', 'add', 'gitHeart.txt'], cwd=self.repository_name)
    subprocess.check_call(
        ['git', 'commit', '-m', 'Initial commit'], cwd=self.repository_name)
    for commit_number in range(1, len(MARKED_DAYS)*self.max_commits+1):
        heart_msg = HEART.format(commit_number=str(commit_number))
        self.append_onto_file(
            self.repository_name+"/gitHeart.txt", heart_msg)
        subprocess.check_call(
            ['git', 'add', 'gitHeart.txt'], cwd=self.repository_name)
        subprocess.check_call(['git', 'commit', '-m', 'Commit number {commit_number}'.format(
            commit_number=commit_number)], cwd=self.repository_name)
    subprocess.check_call(
        ['git', 'push', 'origin', 'master'], cwd=self.repository_name) | Perform len(MARKED_DAYS)*self.max_commits commits and push to the repository |
def get_protein_coding_genes(
path_or_buffer,
include_polymorphic_pseudogenes=True,
remove_duplicates=True,
**kwargs):
    valid_biotypes = set(['protein_coding'])
    if include_polymorphic_pseudogenes:
        valid_biotypes.add('polymorphic_pseudogene')
df = get_genes(path_or_buffer, valid_biotypes,
remove_duplicates=remove_duplicates, **kwargs)
return df | r"""Get list of all protein-coding genes based on Ensembl GTF file.
Parameters
----------
See :func:`get_genes` function.
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to protein-coding genes. |
def get_all_host_templates(resource_root, cluster_name="default"):
return call(resource_root.get,
HOST_TEMPLATES_PATH % (cluster_name,),
ApiHostTemplate, True, api_version=3) | Get all host templates in a cluster.
@param cluster_name: Cluster name.
@return: ApiList of ApiHostTemplate objects for all host templates in a cluster.
@since: API v3 |
def _init_metadata(self):
super(EulerRotationAnswerFormRecord, self)._init_metadata()
self._euler_rotation_metadata = {
: Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
),
: ,
: ,
: True,
: False,
: True,
: False,
: [{}],
: ,
: []
} | stub |
def append(self, child, *args, **kwargs):
    if isinstance(child, (Word, Morpheme, Phoneme)) and WordReference in self.ACCEPTED_DATA:
        # Words/morphemes/phonemes are accepted directly; a reference will be
        # used upon serialisation.
        self.data.append(child)
        return child
    elif isinstance(child, AbstractSpanAnnotation):
        insertionpoint = len(self.data)
        try:
            firstword = child.wrefs(0)
        except IndexError:
            return super(AbstractSpanAnnotation, self).append(child, *args, **kwargs)
        for i, sibling in enumerate(self.data):
            if isinstance(sibling, (Word, Morpheme, Phoneme)):
                try:
                    if not sibling.precedes(firstword):
                        insertionpoint = i
                except:
                    pass
        return super(AbstractSpanAnnotation, self).insert(insertionpoint, child, *args, **kwargs)
    else:
        return super(AbstractSpanAnnotation, self).append(child, *args, **kwargs) | See :meth:`AbstractElement.append` |
def convert_objects(self, convert_dates=True, convert_numeric=False,
convert_timedeltas=True, copy=True):
msg = ("convert_objects is deprecated. To re-infer data dtypes for "
"object columns, use {klass}.infer_objects()\nFor all "
"other conversions use the data-type specific converters "
"pd.to_datetime, pd.to_timedelta and pd.to_numeric."
).format(klass=self.__class__.__name__)
warnings.warn(msg, FutureWarning, stacklevel=2)
return self._constructor(
self._data.convert(convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy)).__finalize__(self) | Attempt to infer better dtype for object columns.
.. deprecated:: 0.21.0
Parameters
----------
convert_dates : boolean, default True
If True, convert to date where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
convert_numeric : boolean, default False
If True, attempt to coerce to numbers (including strings), with
unconvertible values becoming NaN.
convert_timedeltas : boolean, default True
If True, convert to timedelta where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type. |
def conv(self,
num_out_channels,
k_height,
k_width,
d_height=1,
d_width=1,
mode="SAME",
input_layer=None,
num_channels_in=None,
use_batch_norm=None,
stddev=None,
activation="relu",
bias=0.0):
if input_layer is None:
input_layer = self.top_layer
if num_channels_in is None:
num_channels_in = self.top_size
kernel_initializer = None
if stddev is not None:
kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
name = "conv" + str(self.counts["conv"])
self.counts["conv"] += 1
with tf.variable_scope(name):
strides = [1, d_height, d_width, 1]
if self.data_format == "NCHW":
strides = [strides[0], strides[3], strides[1], strides[2]]
if mode != "SAME_RESNET":
conv = self._conv2d_impl(
input_layer,
num_channels_in,
num_out_channels,
kernel_size=[k_height, k_width],
strides=[d_height, d_width],
padding=mode,
kernel_initializer=kernel_initializer)
else:
if d_height == 1 and d_width == 1:
conv = self._conv2d_impl(
input_layer,
num_channels_in,
num_out_channels,
kernel_size=[k_height, k_width],
strides=[d_height, d_width],
padding="SAME",
kernel_initializer=kernel_initializer)
else:
rate = 1
kernel_height_effective = k_height + (k_height - 1) * (
rate - 1)
pad_h_beg = (kernel_height_effective - 1) // 2
pad_h_end = kernel_height_effective - 1 - pad_h_beg
kernel_width_effective = k_width + (k_width - 1) * (
rate - 1)
pad_w_beg = (kernel_width_effective - 1) // 2
pad_w_end = kernel_width_effective - 1 - pad_w_beg
padding = [[0, 0], [pad_h_beg, pad_h_end],
[pad_w_beg, pad_w_end], [0, 0]]
if self.data_format == "NCHW":
padding = [
padding[0], padding[3], padding[1], padding[2]
]
input_layer = tf.pad(input_layer, padding)
conv = self._conv2d_impl(
input_layer,
num_channels_in,
num_out_channels,
kernel_size=[k_height, k_width],
strides=[d_height, d_width],
padding="VALID",
kernel_initializer=kernel_initializer)
if use_batch_norm is None:
use_batch_norm = self.use_batch_norm
if not use_batch_norm:
if bias is not None:
biases = self.get_variable(
"biases", [num_out_channels],
self.variable_dtype,
self.dtype,
initializer=tf.constant_initializer(bias))
biased = tf.reshape(
tf.nn.bias_add(
conv, biases, data_format=self.data_format),
conv.get_shape())
else:
biased = conv
else:
self.top_layer = conv
self.top_size = num_out_channels
biased = self.batch_norm(**self.batch_norm_config)
if activation == "relu":
conv1 = tf.nn.relu(biased)
elif activation == "linear" or activation is None:
conv1 = biased
elif activation == "tanh":
conv1 = tf.nn.tanh(biased)
else:
raise KeyError("Invalid activation type \"%s\"" % activation)
self.top_layer = conv1
self.top_size = num_out_channels
return conv1 | Construct a conv2d layer on top of cnn. |
def any_shared(enum_one, enum_two):
if not is_collection(enum_one) or not is_collection(enum_two):
return False
enum_one = enum_one if isinstance(enum_one, (set, dict)) else set(enum_one)
enum_two = enum_two if isinstance(enum_two, (set, dict)) else set(enum_two)
return any(e in enum_two for e in enum_one) | Truthy if any element in enum_one is present in enum_two |
def string(self) -> bytes:
if self._raw is not None:
return self._raw
        self._raw = raw = BytesFormat(b' ').join(
            [b'CAPABILITY', b'IMAP4rev1'] + self.capabilities)
return raw | The capabilities string without the enclosing square brackets. |
def register_hook(self, hook, priority='NORMAL'):
    assert isinstance(hook, Hook)
    if hasattr(hook, 'priority'):
        raise ValueError('"priority" is a reserved attribute for hooks')
priority = get_priority(priority)
hook.priority = priority
inserted = False
for i in range(len(self._hooks) - 1, -1, -1):
if priority >= self._hooks[i].priority:
self._hooks.insert(i + 1, hook)
inserted = True
break
if not inserted:
self._hooks.insert(0, hook) | Register a hook into the hook list.
Args:
hook (:obj:`Hook`): The hook to be registered.
priority (int or str or :obj:`Priority`): Hook priority.
Lower value means higher priority. |
def parse_duration(duration):
duration = str(duration).upper().strip()
elements = ELEMENTS.copy()
for pattern in (SIMPLE_DURATION, COMBINED_DURATION):
if pattern.match(duration):
found = pattern.match(duration).groupdict()
            del found['time']
elements.update(dict((k, int(v or 0))
for k, v
in found.items()))
            return datetime.timedelta(days=(elements['days'] +
                                            _months_to_days(elements['months']) +
                                            _years_to_days(elements['years'])),
                                      hours=elements['hours'],
                                      minutes=elements['minutes'],
                                      seconds=elements['seconds'])
    return ParseError() | Attempts to parse an ISO8601 formatted ``duration``.
Returns a ``datetime.timedelta`` object. |
def run_radia_with_merge(job, rna_bam, tumor_bam, normal_bam, univ_options, radia_options):
    # NOTE: the dict key and disk/memory literals below were lost in
    # extraction and are plausible reconstructions.
    spawn = job.wrapJobFn(run_radia, rna_bam['rna_genome'], tumor_bam,
                          normal_bam, univ_options, radia_options, disk='100M',
                          memory='100M').encapsulate()
    merge = job.wrapJobFn(merge_perchrom_vcfs, spawn.rv(), univ_options, disk='100M', memory='100M')
    job.addChild(spawn)
    spawn.addChild(merge)
    return merge.rv() | A wrapper for the entire RADIA sub-graph.
:param dict rna_bam: Dict dicts of bam and bai for tumor RNA-Seq obtained by running STAR within
ProTECT.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict radia_options: Options specific to RADIA
:return: fsID to the merged RADIA calls
:rtype: toil.fileStore.FileID |
def uncomment(path,
              regex,
              char='#',
              backup='.bak'):
return comment_line(path=path,
regex=regex,
char=char,
cmnt=False,
backup=backup) | .. deprecated:: 0.17.0
Use :py:func:`~salt.modules.file.replace` instead.
Uncomment specified commented lines in a file
path
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be uncommented.
This regex should not include the comment character. A leading ``^``
character will be stripped for convenience (for easily switching
between comment() and uncomment()).
char : ``#``
The character to remove in order to uncomment a line
backup : ``.bak``
The file will be backed up before edit with this file extension;
**WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
overwrite this backup
CLI Example:
.. code-block:: bash
salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID' |
def _initialized_adjustments(self, prstGeom):
if prstGeom is None:
return []
davs = AutoShapeType.default_adjustment_values(prstGeom.prst)
adjustments = [Adjustment(name, def_val) for name, def_val in davs]
self._update_adjustments_with_actuals(adjustments, prstGeom.gd_lst)
return adjustments | Return an initialized list of adjustment values based on the contents
of *prstGeom* |
def _structure_attr_from_tuple(self, a, name, value):
type_ = a.type
if type_ is None:
return value
return self._structure_func.dispatch(type_)(value, type_) | Handle an individual attrs attribute. |
def ssl_required(allow_non_ssl=False):
def wrapper(view_func):
def _checkssl(request, *args, **kwargs):
            if hasattr(settings, 'SSL_ENABLED') and settings.SSL_ENABLED \
                    and not request.is_secure() and not allow_non_ssl:
                return HttpResponseRedirect(
                    request.build_absolute_uri().replace('http://', 'https://'))
return view_func(request, *args, **kwargs)
return _checkssl
return wrapper | Views decorated with this will always get redirected to https
except when allow_non_ssl is set to true. |
def _caveat_v1_to_dict(c):
serialized = {}
    if len(c.caveat_id) > 0:
        serialized['cid'] = c.caveat_id
    if c.verification_key_id:
        serialized['vid'] = utils.raw_urlsafe_b64encode(
            c.verification_key_id).decode()
    if c.location:
        serialized['cl'] = c.location
return serialized | Return a caveat as a dictionary for export as the JSON
macaroon v1 format. |
def sweep(self, mode, speed=None):
    sweep_modes = ['UP', 'DOWN', 'PAUSE', 'ZERO', 'LIMIT']
    sweep_speed = ['FAST', 'SLOW', None]
    if mode not in sweep_modes:
        raise ValueError('Invalid sweep mode.')
    if speed not in sweep_speed:
        raise ValueError('Invalid sweep speed.')
    if speed is None:
        self._write('SWEEP {}'.format(mode))
    else:
        self._write('SWEEP {} {}'.format(mode, speed))
self._write(.format(mode, speed)) | Starts the output current sweep.
:param mode: The sweep mode. Valid entries are `'UP'`, `'DOWN'`,
`'PAUSE'`or `'ZERO'`. If in shim mode, `'LIMIT'` is valid as well.
:param speed: The sweeping speed. Valid entries are `'FAST'`, `'SLOW'`
or `None`. |
def cli(env, identifier, allocation, port, routing_type, routing_method):
mgr = SoftLayer.LoadBalancerManager(env.client)
loadbal_id, group_id = loadbal.parse_id(identifier)
if not any([allocation, port, routing_type, routing_method]):
        raise exceptions.CLIAbort(
            'At least one of the options should be specified.')
mgr.edit_service_group(loadbal_id,
group_id,
allocation=allocation,
port=port,
routing_type=routing_type,
routing_method=routing_method)
    env.fout('Load balancer service group %s is being updated!' % identifier) | Edit an existing load balancer service group. |
def canonical_extension(fmt_ext):
if MimeType.has_value(fmt_ext):
return fmt_ext
try:
        return {
            'tif': MimeType.TIFF.value,
            'jpeg': MimeType.JPG.value,
            'hdf5': MimeType.HDF.value,
            'h5': MimeType.HDF.value
        }[fmt_ext]
    except KeyError:
        raise ValueError('Data format .{} is not supported'.format(fmt_ext))
Converts the format extension fmt_ext into the canonical extension for that format. For example,
``canonical_extension('tif') == 'tiff'``. Here we agree that the canonical extension for format F is F.value
:param fmt_ext: A string representing an extension (e.g. ``'txt'``, ``'png'``, etc.)
:type fmt_ext: str
:return: The canonical form of the extension (e.g. if ``fmt_ext='tif'`` then we return ``'tiff'``)
:rtype: str |
def process_request(
self, path: str, request_headers: Headers
) -> Union[Optional[HTTPResponse], Awaitable[Optional[HTTPResponse]]]:
if self._process_request is not None:
return self._process_request(path, request_headers)
return None | Intercept the HTTP request and return an HTTP response if needed.
``request_headers`` is a :class:`~websockets.http.Headers` instance.
If this method returns ``None``, the WebSocket handshake continues.
If it returns a status code, headers and a response body, that HTTP
response is sent and the connection is closed.
The HTTP status must be a :class:`~http.HTTPStatus`.
HTTP headers must be a :class:`~websockets.http.Headers` instance, a
:class:`~collections.abc.Mapping`, or an iterable of ``(name, value)``
pairs.
The HTTP response body must be :class:`bytes`. It may be empty.
This method may be overridden to check the request headers and set a
different status, for example to authenticate the request and return
``HTTPStatus.UNAUTHORIZED`` or ``HTTPStatus.FORBIDDEN``.
It can be declared as a function or as a coroutine because such
authentication checks are likely to require network requests.
It may also be overridden by passing a ``process_request`` argument to
the :class:`WebSocketServerProtocol` constructor or the :func:`serve`
function. |
def order_id(self, order_id):
if order_id is None:
raise ValueError("Invalid value for `order_id`, must not be `None`")
if len(order_id) > 192:
raise ValueError("Invalid value for `order_id`, length must be less than `192`")
self._order_id = order_id | Sets the order_id of this ChargeRequest.
The ID of the order to associate with this transaction. If you provide this value, the `amount_money` value of your request must __exactly match__ the value of the order's `total_money` field.
:param order_id: The order_id of this ChargeRequest.
:type: str |
def get_total_DOS(self):
warnings.warn("Phonopy.get_total_DOS is deprecated. "
"Use Phonopy.get_total_dos_dict.", DeprecationWarning)
dos = self.get_total_dos_dict()
    return dos['frequency_points'], dos['total_dos'] | Return frequency points and total DOS as a tuple.
Returns
-------
A tuple with (frequency_points, total_dos).
frequency_points: ndarray
shape=(frequency_sampling_points, ), dtype='double'
total_dos:
shape=(frequency_sampling_points, ), dtype='double' |
def load_nameserver_credentials(self, working_directory, num_tries=60, interval=1):
    fn = os.path.join(working_directory, 'HPB_run_%s_pyro.pkl' % self.run_id)
    for i in range(num_tries):
        try:
            with open(fn, 'rb') as fh:
                self.nameserver, self.nameserver_port = pickle.load(fh)
            return
        except FileNotFoundError:
            self.logger.warning('config file %s not found (trial %i/%i)' % (fn, i+1, num_tries))
            time.sleep(interval)
except:
raise
raise RuntimeError("Could not find the nameserver information, aborting!") | loads the nameserver credentials in cases where master and workers share a filesystem
Parameters
----------
working_directory: str
the working directory for the HPB run (see master)
num_tries: int
number of attempts to find the file (default 60)
interval: float
waiting period between the attempts |
def _postQueuedEvents(self, interval=0.01):
while len(self.eventList) > 0:
(nextEvent, args) = self.eventList.popleft()
nextEvent(*args)
time.sleep(interval) | Private method to post queued events (e.g. Quartz events).
Each event in queue is a tuple (event call, args to event call). |
def get_paginator(self, operation_name):
if not self.can_paginate(operation_name):
raise OperationNotPageableError(operation_name=operation_name)
else:
Paginator.PAGE_ITERATOR_CLS = AioPageIterator
actual_operation_name = self._PY_TO_OP_NAME[operation_name]
def paginate(self, **kwargs):
return Paginator.paginate(self, **kwargs)
        paginator_config = self._cache['page_config'][
            actual_operation_name]
        paginator_class_name = str('%s.Paginator.%s' % (
            get_service_module_name(self.meta.service_model),
            actual_operation_name))
        documented_paginator_cls = type(
            paginator_class_name, (Paginator,), {'paginate': paginate})
operation_model = self._service_model.\
operation_model(actual_operation_name)
paginator = documented_paginator_cls(
getattr(self, operation_name),
paginator_config,
operation_model)
return paginator | Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you'd normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator("create_foo")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object. |
def iterate_storyline(ctx):
    logger.debug('# iterate_storyline')
compiled_story = ctx.compiled_story()
if not compiled_story:
return
for step in range(ctx.current_step(),
len(compiled_story.story_line)):
ctx = ctx.clone()
tail = ctx.stack_tail()
        ctx.message = modify_stack_in_message(ctx.message,
                                              lambda stack: stack[:-1] + [{
                                                  'data': tail['data'],
                                                  'step': step,
                                                  'topic': tail['topic'],
                                              }])
        logger.debug('# on step {}'.format(step))
        logger.debug(ctx)
ctx = yield ctx | iterate the last storyline from the last visited story part
:param ctx:
:return: |
def frac(x, context=None):
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_frac,
(BigFloat._implicit_convert(x),),
context,
) | Return the fractional part of ``x``.
The result has the same sign as ``x``. |
def error(self, error):
        if self.direction not in ['x', 'y', 'z'] and error is not None:
raise ValueError("error only accepted for x, y, z dimensions")
if isinstance(error, u.Quantity):
error = error.to(self.unit).value
self._error = error | set the error |
def create_server_and_run_forever(self, loop=None, **server_config):
if loop is None:
import asyncio
loop = asyncio.get_event_loop()
self.create_server(loop=loop, **server_config)
try:
loop.run_forever()
except KeyboardInterrupt:
pass | Helper function which constructs an HTTP server and listens the
loop forever.
This function exists only to remove boilerplate code for starting
up a growler app.
Args:
**server_config: These keyword arguments are forwarded
directly to the BaseEventLoop.create_server function.
Consult their documentation for details.
Parameters:
loop (asyncio.BaseEventLoop): Optional parameter for specifying
an event loop which will handle socket setup.
**server_config: These keyword arguments are forwarded directly to
the create_server function. |
def fromutc(self, dt):
"datetime in UTC -> datetime in local time."
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc() requires a non-None dst() result")
delta = dtoff - dtdst
if delta:
dt += delta
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
return dt + dtdst | datetime in UTC -> datetime in local time. |
def l2_regularizer(weight=1.0, scope=None):
def regularizer(tensor):
        with tf.name_scope(scope, 'L2Regularizer', [tensor]):
            l2_weight = tf.convert_to_tensor(weight,
                                             dtype=tensor.dtype.base_dtype,
                                             name='weight')
            return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')
return regularizer | Define a L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function. |
def _get_columns(self, blueprint):
columns = []
for column in blueprint.get_added_columns():
            sql = self.wrap(column) + ' ' + self._get_type(column)
columns.append(self._add_modifiers(sql, blueprint, column))
return columns | Get the blueprint's columns definitions.
:param blueprint: The blueprint
:type blueprint: Blueprint
:rtype: list |
def reset(self):
self.start = 1
self.count = None
self.end = None
self.stride = 1
self.format = "%d" | set sensible defaults |
def load_from_config(cp, model, **kwargs):
    name = cp.get('sampler', 'name')
return samplers[name].from_config(cp, model, **kwargs) | Loads a sampler from the given config file.
This looks for a name in the section ``[sampler]`` to determine which
sampler class to load. That sampler's ``from_config`` is then called.
Parameters
----------
cp : WorkflowConfigParser
Config parser to read from.
model : pycbc.inference.model
Which model to pass to the sampler.
\**kwargs :
All other keyword arguments are passed directly to the sampler's
``from_config`` file.
Returns
-------
sampler :
The initialized sampler. |
def to_bytes(value, encoding='utf-8'):
result = (value.encode(encoding)
if isinstance(value, six.text_type) else value)
if isinstance(result, six.binary_type):
return result
else:
        raise ValueError('{0!r} could not be converted to bytes'.format(value)) | Converts a string value to bytes, if necessary.
Unfortunately, ``six.b`` is insufficient for this task since in
Python 2 because it does not modify ``unicode`` objects.
Args:
value (Union[str, bytes]): The value to be converted.
encoding (str): The encoding to use to convert unicode to bytes.
Defaults to "utf-8".
Returns:
bytes: The original value converted to bytes (if unicode) or as
passed in if it started out as bytes.
Raises:
ValueError: If the value could not be converted to bytes. |
def GetFileEntryByPath(self, path):
if path is None:
return None
file_entry_type, _ = self._paths.get(path, (None, None))
if not file_entry_type:
return None
path_spec = fake_path_spec.FakePathSpec(location=path)
return fake_file_entry.FakeFileEntry(
self._resolver_context, self, path_spec,
file_entry_type=file_entry_type) | Retrieves a file entry for a path.
Args:
path (str): path of the file entry.
Returns:
FakeFileEntry: a file entry or None if not available. |
def find_default_container(builder,
default_container=None,
use_biocontainers=None,
):
if not default_container and use_biocontainers:
default_container = get_container_from_software_requirements(
use_biocontainers, builder)
return default_container | Default finder for default containers. |
def observableFractionCMDX(self, mask, distance_modulus, mass_min=0.1):
mass_init_array,mass_pdf_array,mass_act_array,mag_1_array,mag_2_array = self.sample(mass_min=mass_min,full_data_range=False)
mag = mag_1_array if self.band_1_detection else mag_2_array
color = mag_1_array - mag_2_array
pixels = mask.roi.pixels_interior
mag_1_mask = mask.mask_1.mask_roi_sparse[mask.roi.pixel_interior_cut]
mag_2_mask = mask.mask_2.mask_roi_sparse[mask.roi.pixel_interior_cut]
cmd_cut = ugali.utils.binning.take2D(mask.solid_angle_cmd,color,mag+distance_modulus,
mask.roi.bins_color, mask.roi.bins_mag) > 0
mass_pdf_cut = mass_pdf_array*cmd_cut
mask_1_cut = (mag_1_array+distance_modulus)[:,np.newaxis] < mag_1_mask
mask_2_cut = (mag_2_array+distance_modulus)[:,np.newaxis] < mag_2_mask
mask_cut_repeat = mask_1_cut & mask_2_cut
observable_fraction = (mass_pdf_cut[:,np.newaxis]*mask_cut_repeat).sum(axis=0)
return observable_fraction | Compute observable fraction of stars with masses greater than mass_min in each
pixel in the interior region of the mask.
ADW: Careful, this function is fragile! The selection here should
be the same as mask.restrictCatalogToObservable space. However,
for technical reasons it is faster to do the calculation with
broadcasting here.
ADW: Could this function be even faster / more readable?
ADW: Should this include magnitude error leakage? |
def risearch(self):
"instance of :class:`eulfedora.api.ResourceIndex`, with the same root url and credentials"
if self._risearch is None:
self._risearch = ResourceIndex(self.fedora_root, self.username, self.password)
return self._risearch | instance of :class:`eulfedora.api.ResourceIndex`, with the same root url and credentials |
def set_log_level(logger_name: str, log_level: str, propagate: bool = False):
log = logging.getLogger(logger_name)
log.propagate = propagate
log.setLevel(log_level) | Set the log level of the specified logger. |
def _gen_last_current_relation(self, post_id):
    # NOTE: the cookie name was stripped in extraction; 'last_post_uid' is a
    # reconstruction.
    last_post_id = self.get_secure_cookie('last_post_uid')
    if last_post_id:
        last_post_id = last_post_id.decode()
    self.set_secure_cookie('last_post_uid', post_id)
if last_post_id and MPost.get_by_uid(last_post_id):
self._add_relation(last_post_id, post_id) | Generate the relation for the post and last post viewed. |
def allLayers(self):
url = self._url + "/layers"
params = {
"f" : "json"
}
res = self._get(url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return_dict = {
"layers" : [],
"tables" : []
}
    for k, v in res.items():
        if k == "layers":
            for val in v:
                return_dict['layers'].append(
                    FeatureLayer(url=self._url + "/%s" % val['id'],
                                 securityHandler=self._securityHandler,
                                 proxy_url=self._proxy_url,
                                 proxy_port=self._proxy_port)
                )
        elif k == "tables":
            for val in v:
                return_dict['tables'].append(
                    TableLayer(url=self._url + "/%s" % val['id'],
                               securityHandler=self._securityHandler,
                               proxy_url=self._proxy_url,
                               proxy_port=self._proxy_port)
                )
del k,v
return return_dict | returns all layers for the service |
def torrents(self, **filters):
params = {}
for name, value in filters.items():
        name = 'filter' if name == 'status' else name
        params[name] = value
    return self._get('query/torrents', params=params) | Returns a list of torrents matching the supplied filters.
:param filter: Current status of the torrents.
:param category: Fetch all torrents with the supplied label.
:param sort: Sort torrents by.
:param reverse: Enable reverse sorting.
:param limit: Limit the number of torrents returned.
:param offset: Set offset (if less than 0, offset from end).
:return: list() of torrent with matching filter. |
def get_nice_alert(self, value):
    value = str(value)
    try:
        if value in self.get_limit('nice_critical'):
            return 'CRITICAL'
    except KeyError:
        pass
    try:
        if value in self.get_limit('nice_warning'):
            return 'WARNING'
    except KeyError:
        pass
    try:
        if value in self.get_limit('nice_careful'):
            return 'CAREFUL'
    except KeyError:
        pass
    return 'DEFAULT' | Return the alert relative to the Nice configuration list |
def tag(self, tokens):
tags = []
for token in tokens:
normalized = self.lexicon[token].normalized
for regex, tag in self.regexes:
if regex.match(normalized):
tags.append((token, tag))
break
else:
tags.append((token, None))
return tags | Return a list of (token, tag) tuples for a given list of tokens. |
def _station_load(network, station, crit_stations):
    if isinstance(station, LVStation):
        grid_level = 'lv'
    else:
        grid_level = 'mv'
s_station = sum([_.type.S_nom for _ in station.transformers])
    s_station_allowed_per_case = {}
    s_station_allowed_per_case['feedin_case'] = s_station * network.config[
        'grid_expansion_load_factors']['{}_feedin_case_transformer'.format(
            grid_level)]
    s_station_allowed_per_case['load_case'] = s_station * network.config[
        'grid_expansion_load_factors']['{}_load_case_transformer'.format(
            grid_level)]
s_station_allowed = \
network.timeseries.timesteps_load_feedin_case.case.apply(
lambda _: s_station_allowed_per_case[_])
try:
if isinstance(station, LVStation):
s_station_pfa = network.results.s_res(
station.transformers).sum(axis=1)
else:
s_station_pfa = network.results.s_res([station]).iloc[:, 0]
s_res = s_station_allowed - s_station_pfa
s_res = s_res[s_res < 0]
if not s_res.empty:
            load_factor = \
                network.timeseries.timesteps_load_feedin_case.case.apply(
                    lambda _: network.config[
                        'grid_expansion_load_factors'][
                        '{}_{}_transformer'.format(grid_level, _)])
relative_s_res = load_factor * s_res
            crit_stations = crit_stations.append(pd.DataFrame(
                {'s_pfa': s_station_pfa.loc[relative_s_res.idxmin()],
                 'time_index': relative_s_res.idxmin()},
                index=[station]))
    except KeyError:
        logger.debug('No results for {} station to check for over-loading.'.format(
            grid_level.upper()))
return crit_stations | Checks for over-loading of stations.
Parameters
----------
network : :class:`~.grid.network.Network`
station : :class:`~.grid.components.LVStation` or :class:`~.grid.components.MVStation`
crit_stations : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occured in as :pandas:`pandas.Timestamp<timestamp>`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occured in as :pandas:`pandas.Timestamp<timestamp>`. |
def executeMetricsQuery(self, tmaster, queryString, start_time, end_time, callback=None):
query = Query(self.tracker)
metrics = yield query.execute_query(tmaster, queryString, start_time, end_time)
ret = {}
ret["starttime"] = start_time
ret["endtime"] = end_time
ret["timeline"] = []
for metric in metrics:
tl = {
"data": metric.timeline
}
if metric.instance:
tl["instance"] = metric.instance
ret["timeline"].append(tl)
raise tornado.gen.Return(ret) | Get the specified metrics for the given query in this topology.
Returns the following dict on success:
{
"timeline": [{
"instance": <instance>,
"data": {
<start_time> : <numeric value>,
<start_time> : <numeric value>,
...
}
}, {
...
}, ...
"starttime": <numeric value>,
"endtime": <numeric value>,
},
Returns the following dict on failure:
{
"message": "..."
} |
def prepare(self):
request_time = 1000.0 * self.request.request_time()
access_log.info(
"%d %s %.2fms", self.get_status(),
self._request_summary(), request_time) | Log access. |
def p_ind8_I(p):
expr = p[4]
    if p[3] == '-':
        expr = Expr.makenode(Container('-', p.lineno(3)), expr)
    p[0] = ('(%s+N)' % p[2], expr) | reg8_I : LP IX PLUS expr RP
| LP IX MINUS expr RP
| LP IY PLUS expr RP
| LP IY MINUS expr RP
| LP IX PLUS pexpr RP
| LP IX MINUS pexpr RP
| LP IY PLUS pexpr RP
| LP IY MINUS pexpr RP |
def get_size(vm_):
    # NOTE: reconstructed from a garbled extraction; the fused fragments were
    # "...'s size. Used by create_node()", "No size specified for this VM.",
    # "InstanceTypeId" and "The specified size, '...', could not be found.";
    # avail_sizes/config are hypothetical helper names.
    sizes = avail_sizes()
    vm_size = config.get_cloud_config_value('size', vm_, __opts__, search_global=False)
    if not vm_size:
        raise SaltCloudNotFound('No size specified for this VM.')
    if vm_size and str(vm_size) in sizes:
        return sizes[vm_size]['InstanceTypeId']
    raise SaltCloudNotFound(
        'The specified size, \'{0}\', could not be found.'.format(vm_size)
    ) | Return the VM's size. Used by create_node(). |
def build(values):
idxstore = IndexStore()
idxstore._i2val = list(values)
idxstore._val2i = {term:i for i,term in enumerate(values)}
idxstore._next_i = len(values)
return idxstore | Parameters
----------
values: [term, ...]
Returns
-------
IndexStore |
def query_relative(self, query, event_time=None, relative_duration_before=None, relative_duration_after=None):
assert event_time is None or isinstance(event_time, datetime.datetime)
assert relative_duration_before is None or isinstance(relative_duration_before, str)
assert relative_duration_after is None or isinstance(relative_duration_after, str)
if event_time is None:
event_time = datetime.datetime.now()
if relative_duration_before is None:
relative_duration_before = self.relative_duration_before
if relative_duration_after is None:
relative_duration_after = self.relative_duration_after
time_start = event_time - create_timedelta(relative_duration_before)
time_end = event_time + create_timedelta(relative_duration_after)
return self.query_with_time(query, time_start, time_end) | Perform the query and calculate the time range based on the relative values. |
def delete_date(self, date):
self.lines = [
line for line in self.lines
if not isinstance(line, DateLine) or line.date != date
]
self.lines = trim(self.lines) | Remove the date line from the textual representation. This doesn't
remove any entry line. |
def update_from_sam(self, sam, sam_reader):
if sam.is_unmapped \
or sam.mate_is_unmapped \
or (sam.reference_id == sam.next_reference_id):
return
new_link = link.Link(sam, sam_reader, self.ref_lengths)
read_name = sam.query_name
if read_name in self.partial_links:
new_link.merge(self.partial_links[read_name])
del self.partial_links[read_name]
key = tuple(sorted((new_link.refnames[0], new_link.refnames[1])))
if key not in self.links:
self.links[key] = []
new_link.sort()
self.links[key].append(new_link)
else:
self.partial_links[read_name] = new_link | Updates graph info from a pysam.AlignedSegment object |
def build_kcorrection_array(
log,
redshiftArray,
snTypesArray,
snLightCurves,
pathToOutputDirectory,
plot=True):
import yaml
import numpy as np
dataDir = pathToOutputDirectory + "/k_corrections/"
    filters = ['g', 'r', 'i', 'z']
    fileName = pathToOutputDirectory + "/transient_light_curves.yaml"
    stream = file(fileName, 'r')
    generatedLCs = yaml.load(stream)
models = generatedLCs.keys()
kCorList = []
for i in range(len(redshiftArray)):
redshift = redshiftArray[i]
kCorDict = {}
for model in models:
for ffilter in filters:
filterDir = dataDir + model + "/" + ffilter
strRed = "%0.3f" % (redshift,)
fileName = filterDir + "/z" + \
str(strRed).replace(".", "pt") + "_poly.yaml"
try:
                    stream = file(fileName, 'r')
                    yamlContent = yaml.load(stream)
                    stream.close()
                    flatPoly = np.poly1d(yamlContent['polyCoefficients'])
except:
flatPoly = None
kCorDict[ffilter] = flatPoly
kCorList.append(kCorDict)
kCorArray = np.array(kCorList)
return kCorArray | *Given the random redshiftArray and snTypeArray, generate a dictionary of k-correction polynomials (one for each filter) for every object.*
**Key Arguments:**
- ``log`` -- logger
- ``redshiftArray`` -- the pre-generated redshift array
- ``snTypesArray`` -- the pre-generated array of random sn types
- ``snLightCurves`` -- yaml style dictionary of SN lightcurve info
- ``pathToOutputDirectory`` -- path to the output directory (provided by the user)
- ``plot`` -- generate plot?
**Return:**
- None |
def list_users(app, appbuilder):
_appbuilder = import_application(app, appbuilder)
echo_header("List of users")
for user in _appbuilder.sm.get_all_users():
click.echo(
"username:{0} | email:{1} | role:{2}".format(
user.username, user.email, user.roles
)
) | List all users on the database |
def get_user_roles(user):
if user:
groups = user.groups.all()
roles = (RolesManager.retrieve_role(group.name) for group in groups if group.name in RolesManager.get_roles_names())
return sorted(roles, key=lambda r: r.get_name() )
else:
return [] | Get a list of a users's roles. |
def env():
if cij.ssh.env():
cij.err("cij.nvme.env: invalid SSH environment")
return 1
nvme = cij.env_to_dict(PREFIX, REQUIRED)
nvme["DEV_PATH"] = os.path.join("/dev", nvme["DEV_NAME"])
try:
sysfs = os.path.join("/sys/class/block", nvme["DEV_NAME"], "lightnvm")
nvme["LNVM_VERSION"] = cat_file(os.path.join(sysfs, "version"))
if nvme["LNVM_VERSION"] == "2.0":
luns = "punits"
chs = "groups"
elif nvme["LNVM_VERSION"] == "1.2":
luns = "num_luns"
chs = "num_channels"
else:
raise RuntimeError("cij.nvme.env: invalid lnvm version: %s" % nvme["LNVM_VERSION"])
nvme["LNVM_NUM_CHUNKS"] = cat_file(os.path.join(sysfs, "chunks"))
nvme["LNVM_NUM_LUNS"] = cat_file(os.path.join(sysfs, luns))
nvme["LNVM_NUM_CHS"] = cat_file(os.path.join(sysfs, chs))
nvme["LNVM_TOTAL_LUNS"] = str(int(nvme["LNVM_NUM_LUNS"]) * int(nvme["LNVM_NUM_CHS"]))
nvme["LNVM_TOTAL_CHUNKS"] = str(int(nvme["LNVM_TOTAL_LUNS"]) * int(nvme["LNVM_NUM_CHUNKS"]))
if nvme["LNVM_VERSION"] == "2.0":
cmd = ["nvme", "id-ctrl", nvme["DEV_PATH"], "--raw-binary"]
status, stdout, _ = cij.ssh.command(cmd, shell=True)
if status:
raise RuntimeError("cij.nvme.env: nvme id-ctrl fail")
buff = cij.bin.Buffer(types=IdentifyCDS, length=1)
buff.memcopy(stdout)
if buff[0].VS[1023] == 0x5a:
nvme["SPEC_VERSION"] = "Denali"
else:
nvme["SPEC_VERSION"] = "Spec20"
else:
nvme["SPEC_VERSION"] = "Spec12"
nvme["LNVM_CHUNK_META_LENGTH"] = str(get_sizeof_descriptor_table(nvme["SPEC_VERSION"]))
nvme["LNVM_CHUNK_META_SIZE"] = str(int(nvme["LNVM_CHUNK_META_LENGTH"]) *
int(nvme["LNVM_TOTAL_CHUNKS"]))
except StandardError:
traceback.print_exc()
return 1
cij.env_export(PREFIX, EXPORTED, nvme)
return 0 | Verify NVME variables and construct exported variables |
def get(self, timeout=None):
self.wait(timeout)
if isinstance(self._result, Exception):
raise self._result
return self._result | Return the result or raise the error the function has produced |
def has_edge_evidence(self, u: BaseEntity, v: BaseEntity, key: str) -> bool:
return self._has_edge_attr(u, v, key, EVIDENCE) | Check if the given edge has an evidence. |
def process_request(self, req, resp):
        if goldman.config.TLS_REQUIRED and req.protocol != 'https':
abort(TLSRequired) | Process the request before routing it.
We always enforce the use of SSL. |
def inner_product(vec0: QubitVector, vec1: QubitVector) -> bk.BKTensor:
if vec0.rank != vec1.rank or vec0.qubit_nb != vec1.qubit_nb:
        raise ValueError('Incompatible qubit vectors: rank and qubits must match')
vec1 = vec1.permute(vec0.qubits)
return bk.inner(vec0.tensor, vec1.tensor) | Hilbert-Schmidt inner product between qubit vectors
The tensor rank and qubits must match. |
def block_matrix(A, B, C, D):
return vstackm((hstackm((A, B)), hstackm((C, D)))) | r"""Generate the operator matrix with quadrants
.. math::
\begin{pmatrix} A B \\ C D \end{pmatrix}
Args:
A (Matrix): Matrix of shape ``(n, m)``
B (Matrix): Matrix of shape ``(n, k)``
C (Matrix): Matrix of shape ``(l, m)``
D (Matrix): Matrix of shape ``(l, k)``
Returns:
Matrix: The combined block matrix ``[[A, B], [C, D]]``. |
def exam_reliability_by_datetime(
datetime_axis, datetime_new_axis, reliable_distance):
numeric_datetime_axis = [
totimestamp(a_datetime) for a_datetime in datetime_axis
]
numeric_datetime_new_axis = [
totimestamp(a_datetime) for a_datetime in datetime_new_axis
]
return exam_reliability(numeric_datetime_axis, numeric_datetime_new_axis,
reliable_distance, precision=0) | A datetime-version that takes datetime object list as x_axis
reliable_distance equals to the time difference in seconds. |
def openBiocamFile(filename, verbose=False):
    # NOTE: the HDF5 group/field names below were stripped in extraction and
    # are reconstructed from the 3Brain Biocam file layout.
    rf = h5py.File(filename, 'r')
    recVars = rf.require_group('3BRecInfo/3BRecVars')
    nFrames = recVars['NRecFrames'][0]
    samplingRate = recVars['SamplingRate'][0]
    signalInv = recVars['SignalInversion'][0]
    chipVars = rf.require_group('3BRecInfo/3BMeaChip')
    nCols = chipVars['NCols'][0]
    file_format = rf['3BData'].attrs.get('Version')
    if file_format == 100:
        nRecCh = len(rf['3BData/Raw'][0])
    elif file_format == 101:
        nRecCh = int(1. * rf['3BData/Raw'].shape[0] / nFrames)
    else:
        raise Exception('Unknown data file format.')
    if verbose:
        print('# file format:', file_format, ', signal inversion:', signalInv)
        print('# signal range:', recVars['MinVolt'][0], '-', recVars['MaxVolt'][0])
        print('# channels:', nRecCh)
        print('# frames:', nFrames)
        print('# sampling rate:', samplingRate)
    r = rf['3BRecInfo/3BMeaStreams/Raw/Chs'][()]['Row']
    c = rf['3BRecInfo/3BMeaStreams/Raw/Chs'][()]['Col']
    rawIndices = np.vstack((r, c)).T
    chIndices = np.array([(x - 1) + (y - 1) * nCols for (y, x) in rawIndices])
    if verbose:
        print("# raw chip indices loaded for %d channels" % nRecCh)
        print("# channel indices remapped to row-major order")
if file_format == 100:
if signalInv == -1:
read_function = readHDF5t_100
else:
read_function = readHDF5t_100_i
else:
if signalInv == -1:
read_function = readHDF5t_101_i
else:
read_function = readHDF5t_101
return (rf, nFrames, samplingRate, nRecCh, chIndices, file_format, signalInv, rawIndices, read_function) | Open a Biocam hdf5 file, read and return the recording info, pick te correct method to access raw data, and return this to the caller. |
def compile_dependencies(self, sourcepath, include_self=False):
items = self.inspector.parents(sourcepath)
if include_self:
items.add(sourcepath)
return filter(None, [self.compile_source(item) for item in items]) | Apply compile on all dependencies
Args:
sourcepath (string): Sass source path to compile to its
destination using project settings.
Keyword Arguments:
include_self (bool): If ``True`` the given sourcepath is add to
items to compile, else only its dependencies are compiled. |
def pop_object(self, element):
redacted_text = "Redacted. Object contained TLP value higher than allowed."
element[] =
element[] =
element[] =
element[] = []
element[] = None
element[] = redacted_text
element[] = element[]
element[] =
element[] =
element[] = redacted_text
element[] = []
element[][] =
element[][] =
element[][] = redacted_text
element[][] = redacted_text
return element | Pop the object element if the object contains an higher TLP then allowed. |
def run(self, subdirectory=None):
with tmp_chdir(self.gssha_directory):
if self.hotstart_minimal_mode:
for gssha_optional_output_card in self.GSSHA_OPTIONAL_OUTPUT_CARDS:
self._delete_card(gssha_optional_output_card)
self._update_card(, )
if subdirectory is None:
subdirectory = "minimal_hotstart_run_{0}to{1}" \
.format(self.event_manager.simulation_start.strftime("%Y%m%d%H%M"),
self.event_manager.simulation_end.strftime("%Y%m%d%H%M"))
else:
subdirectory = "run_{0}to{1}".format(self.event_manager.simulation_start.strftime("%Y%m%d%H%M"),
self.event_manager.simulation_end.strftime("%Y%m%d%H%M"))
prj_evt_manager = self.project_manager.projectFileEventManager
prj_event = prj_evt_manager.add_event(name=subdirectory,
subfolder=subdirectory,
session=self.db_session)
            # NOTE: a stretch of this method (event yaml path handling and
            # output-path card rewriting) was garbled in extraction; the loop
            # below is a partial reconstruction, and the executable check uses
            # hypothetical names (find_executable, self.gssha_executable).
            map_table_object.write(session=self.db_session,
                                   directory=working_directory,
                                   name=map_table_filename,
                                   writeIndexMaps=False)
            for gssha_card in self.project_manager.projectCards:
                if gssha_card.name not in self.GSSHA_REQUIRED_OUTPUT_PATH_CARDS + \
                        self.GSSHA_OPTIONAL_OUTPUT_PATH_CARDS + \
                        tuple(self.simulation_modified_input_cards):
                    if gssha_card.value:
                        updated_path = gssha_card.value.strip().strip('"')
                        if os.path.exists(updated_path):
                            new_path = os.path.join("..", os.path.basename(updated_path))
                            gssha_card.value = '"{0}"'.format(new_path)
            if find_executable(self.gssha_executable) is not None:
                try:
                    subprocess.check_output([self.gssha_executable], cwd=working_directory)
                except subprocess.CalledProcessError as ex:
                    log.error("{0}: {1}".format(ex.returncode, ex.output))
            else:
                missing_exe_error = ("GSSHA executable not found. "
                                     "Skipping GSSHA simulation run ...")
                log.error(missing_exe_error)
                raise ValueError(missing_exe_error)
        return working_directory | Write out project file and run GSSHA simulation |
def add_tileset(self, tileset):
assert (isinstance(tileset, TiledTileset))
self.tilesets.append(tileset) | Add a tileset to the map
:param tileset: TiledTileset |
def audio_visual_key(name=None):
    # NOTE: the field-name string literals below were stripped in extraction
    # and are plausible reconstructions.
    if name is None:
        name = 'Audio Visual Key Field'
    society_code = basic.numeric(3)
    society_code = society_code.setName('Society Code') \
        .setResultsName('society_code')
    av_number = basic.alphanum(15, extended=True, isLast=True)
    field_empty = pp.Regex('[ ]{15}')
    field_empty.setParseAction(pp.replaceWith(''))
    av_number = av_number | field_empty
    av_number = av_number.setName('Audio Visual Number') \
        .setResultsName('av_number')
    field = pp.Group(society_code + pp.Optional(av_number))
    field.setParseAction(lambda v: _to_avi(v[0]))
    field = field.setName(name)
    return field.setResultsName('audio_visual_key') | Creates the grammar for an Audio Visual Key code.
This is a variation on the ISAN (International Standard Audiovisual Number)
:param name: name for the field
:return: grammar for an ISRC field |
def report_by_year(self, summary_fct=None, years=None, ltd=1, prior_n_yrs=None, first_n_yrs=None, ranges=None,
bm_rets=None):
if years and np.isscalar(years):
years = [years]
if summary_fct is None:
def summary_fct(pl):
monthly = pl.monthly_details
dly = pl.dly_details
            data = OrderedDict()
            # NOTE: the summary key names below were lost in extraction and
            # are descriptive reconstructions.
            data['avg'] = monthly.mean
            data['std ann'] = monthly.std_ann
            data['maxdd'] = dly.maxdd
            data['maxdd dt'] = dly.maxdd_dt
            data['avg dd'] = dly.dd_avg
            data['best month'] = monthly.max
            data['worst month'] = monthly.min
            data['best day'] = dly.max
            data['worst day'] = dly.min
            data['nmonths'] = monthly.cnt
            return data
results = OrderedDict()
if years is not False:
for yr, pandl in self.iter_by_year():
if years is None or yr in years:
results[yr] = summary_fct(pandl)
        if first_n_yrs:
            first_n_yrs = first_n_yrs if not np.isscalar(first_n_yrs) else [first_n_yrs]
            for first in first_n_yrs:
                after = '12/31/%s' % (self.dly.index[0].year + first)
                firstN = self.truncate(after=after)
                results['first {0}yrs'.format(first)] = summary_fct(firstN)
        if ranges:
            for range in ranges:
                yr_start, yr_end = range
                rng_rets = self.truncate('1/1/%s' % yr_start, '12/31/%s' % yr_end)
                results['{0}-{1}'.format(yr_start, yr_end)] = summary_fct(rng_rets)
        if prior_n_yrs:
            prior_n_yrs = prior_n_yrs if not np.isscalar(prior_n_yrs) else [prior_n_yrs]
            for prior in prior_n_yrs:
                before = '1/1/%s' % (self.dly.index[-1].year - prior)
                priorN = self.truncate(before)
                results['past {0}yrs'.format(prior)] = summary_fct(priorN)
        if ltd:
            results['ltd'] = summary_fct(self)
return pd.DataFrame(results, index=results.values()[0].keys()).T | Summarize the profit and loss by year
:param summary_fct: function(ProfitAndLoss) and returns a dict or Series
:param years: int, array, boolean or None. If boolean and False, then show no years. If int or array
show only those years, else show all years if None
:param ltd: include live to date summary
:param prior_n_yrs: integer or list. Include summary for N years of return data prior to end date
:param first_n_yrs: integer or list. Include summary for N years of return data after start date
:param ranges: list of ranges. The range consists of a year start and year end
:param bm_rets: daily return series for the benchmark for beta/alpha calcs
:return: DataFrame |
def down(self, migration_id):
if not self.check_directory():
return
for migration in self.get_migrations_to_down(migration_id):
        logger.info('Rollback migration %s' % migration.filename)
        migration_module = self.load_migration_file(migration.filename)
        if hasattr(migration_module, 'down'):
            migration_module.down(self.db)
        else:
            logger.info('No down method on %s' % migration.filename)
        self.collection.remove({'filename': migration.filename}) | Rollback to migration. |
def _get_timestamp(dirname_full, remove):
record_filename = os.path.join(dirname_full, RECORD_FILENAME)
if not os.path.exists(record_filename):
return None
mtime = os.stat(record_filename).st_mtime
mtime_str = datetime.fromtimestamp(mtime)
    print('{0}: {1}'.format(dirname_full, mtime_str))
if Settings.record_timestamp and remove:
OLD_TIMESTAMPS.add(record_filename)
return mtime | Get the timestamp from the timestamp file.
Optionally mark it for removal if we're going to write another one. |
def humanize_filesize(bytes_size):
    if not bytes_size:
        return '0 B'
    names = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
    name_idx = int(math.floor(math.log(bytes_size, 1024)))
    size = round(bytes_size / math.pow(1024, name_idx), 2)
    return '%s %s' % (size, names[name_idx]) | Returns human readable filesize.
:param int bytes_size:
:rtype: str |
def _onNavigate(self, index):
if index > 0:
self.setLocation(
self._locationWidget.itemData(index), interactive=True
) | Handle selection of path segment. |
def evalsha(self, sha, numkeys, *keys_and_args):
if not self.script_exists(sha)[0]:
raise RedisError("Sha not registered")
script_callable = Script(self, self.shas[sha], self.load_lua_dependencies)
numkeys = max(numkeys, 0)
keys = keys_and_args[:numkeys]
args = keys_and_args[numkeys:]
return script_callable(keys, args) | Emulates evalsha |
def handle(self, request_headers={}, signature_header=None):
        if self.client.webhook_secret is None:
            raise ValueError('Webhook secret is not configured.')
        encoded_header = self._get_signature_header(signature_header, request_headers)
        decoded_request = self._decode_request(encoded_header)
        if 'type' not in decoded_request:
            raise ValueError("Error invalid request: no type field found.")
        handler = self._getHandlerForEvent(decoded_request['type'])
        if handler is None:
            return
        if (self._get_fct_number_of_arg(handler) == 1):
            handler(decoded_request)
            return
        handler(decoded_request, decoded_request['data']) | Handle request. |
def _compute_magnitude_scaling_term(self, C, mag):
        c1 = self.CONSTS['c1']
        # NOTE: the coefficient names were stripped in extraction; 'a1'..'a4'
        # are placeholders for the original coefficient keys.
        if mag <= c1:
            return C['a1'] + C['a2'] * (mag - c1) + C['a3'] * (8.5 - mag) ** 2
        else:
            return C['a1'] + C['a4'] * (mag - c1) + C['a3'] * (8.5 - mag) ** 2
return C[] + C[] * (mag - c1) + C[] * (8.5 - mag) ** 2 | Compute and return magnitude scaling term in equation 2,
page 970. |
def random_sn_types_array(
log,
sampleNumber,
relativeSNRates,
pathToOutputPlotDirectory,
plot=False):
import numpy as np
import matplotlib.pyplot as plt
randomSNTypeList = []
counters = {}
for k, v in relativeSNRates.iteritems():
counters[k] = 0
for i in range(sampleNumber):
randNum = np.random.rand()
cumulative = 0.
for k, v in relativeSNRates.iteritems():
cumulative = cumulative + v
if (randNum <= cumulative):
randType = k
counters[k] += 1
break
randomSNTypeList.append(randType)
snTypeArray = np.array(randomSNTypeList)
if plot:
numTypes = len(relativeSNRates)
x = np.arange(1, numTypes + 1, 1)
heights = []
xticks = []
for k, v in relativeSNRates.iteritems():
xticks.append(k)
heights.append(counters[k])
fig = plt.figure(
num=None,
figsize=(8, 8),
dpi=None,
facecolor=None,
edgecolor=None,
frameon=True)
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8])
ax.bar(
x,
heights,
width=0.8,
bottom=0)
plt.xticks(x + 0.5, xticks)
        ax.set_xlabel('SN Type')
        ax.set_ylabel('Number of SNe')
ax.grid(True)
title = "Weighted SN Distribution"
plt.title(title)
fileName = pathToOutputPlotDirectory + title.replace(" ", "_") + ".png"
plt.savefig(fileName)
plt.clf()
return snTypeArray | *Generate random supernova types from the weighted distributions set in the simulation settings file*
**Key Arguments:**
- ``log`` -- logger
- ``sampleNumber`` -- the sample number, i.e. array size
- ``relativeSNRates`` -- dictionary of the rates
- ``pathToOutputPlotDirectory`` -- path to the output directory (provided by the user)
- ``plot`` -- generate plot?
**Return:**
- ``snTypesArray`` -- numpy array of the random SN types |
def generate_or_fail(self):
t1 = self.generate_random_table()
t2 = self.generate_random_table()
f1 = self.generate_func(t1)
f2 = self.generate_func(t2)
edges = [(f1(word), f2(word)) for word in self.words]
graph = forest.ForestGraph(edges=edges)
associations = {}
for num in range(len(self.words)):
edge = edges[num]
word = self.words[num]
associations[graph.canonical_order(edge)] = (num, word)
        for name in ('t1', 't2', 'f1', 'f2', 'graph', 'associations'):
            self.__dict__[name] = locals()[name]
InvariantError if unable to. |
def get(cls, uni_char):
uni_char = unicod(uni_char)
code_point = ord(uni_char)
if Block._RANGE_KEYS is None:
Block._RANGE_KEYS = sorted(Block._RANGES.keys())
idx = bisect.bisect_left(Block._RANGE_KEYS, code_point)
if (idx > 0 and
code_point >= Block._RANGES[Block._RANGE_KEYS[idx - 1]].start and
code_point <= Block._RANGES[Block._RANGE_KEYS[idx - 1]].end):
return Block._RANGES[Block._RANGE_KEYS[idx - 1]]
elif (idx < len(Block._RANGES) and
code_point >= Block._RANGES[Block._RANGE_KEYS[idx]].start and
code_point <= Block._RANGES[Block._RANGE_KEYS[idx]].end):
return Block._RANGES[Block._RANGE_KEYS[idx]]
else:
return Block.UNKNOWN | Return the Unicode block of the given Unicode character |
def is_interface_up(interface):
if sys.platform.startswith("linux"):
if interface not in psutil.net_if_addrs():
return False
import fcntl
SIOCGIFFLAGS = 0x8913
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
                result = fcntl.ioctl(s.fileno(), SIOCGIFFLAGS, interface + '\0' * 256)
                flags, = struct.unpack('H', result[16:18])
if flags & 1:
return True
return False
except OSError as e:
raise aiohttp.web.HTTPInternalServerError(text="Exception when checking if {} is up: {}".format(interface, e))
else:
return True | Checks if an interface is up.
:param interface: interface name
:returns: boolean |
def delete(self, storagemodel:object, modeldefinition = None) -> bool:
deleted = False
        blobservice = modeldefinition['blobservice']
        container_name = modeldefinition['container']
blob_name = storagemodel.name
try:
if blobservice.exists(container_name, blob_name):
blob = blobservice.delete_blob(container_name, blob_name)
deleted = True
except Exception as e:
            msg = 'can not delete blob {} ({})'.format(blob_name, e)
raise AzureStorageWrapException(storagemodel, msg=msg)
return deleted | delete the blob from storage |
def x(self, position=None):
p = self.GetPosition()
if position is None:
return p[0]
self.SetPosition(position, p[1], p[2])
if self.trail:
self.updateTrail()
return self | Set/Get actor position along x axis. |
def players(self, postgame, game_type):
for i, attributes in self._players():
yield self._parse_player(i, attributes, postgame, game_type) | Return parsed players. |
def _hash_outputs(self, index, sighash_type):
if sighash_type == shared.SIGHASH_ALL:
outputs = ByteData()
for tx_out in self.tx_outs:
outputs += tx_out.to_bytes()
return utils.hash256(outputs.to_bytes())
elif (sighash_type == shared.SIGHASH_SINGLE
and index < len(self.tx_outs)):
return utils.hash256(self.tx_outs[index].to_bytes())
else:
            raise NotImplementedError(
                'I refuse to implement the SIGHASH_SINGLE bug.') | BIP143 hashOutputs implementation
Args:
index (int): index of input being signed
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
Returns:
(bytes): the hashOutputs, a 32 byte hash |
def constantrotating_to_static(frame_r, frame_i, w, t=None):
return _constantrotating_static_helper(frame_r=frame_r, frame_i=frame_i,
w=w, t=t, sign=-1.) | Transform from a constantly rotating frame to a static, inertial frame.
Parameters
----------
frame_i : `~gala.potential.StaticFrame`
frame_r : `~gala.potential.ConstantRotatingFrame`
w : `~gala.dynamics.PhaseSpacePosition`, `~gala.dynamics.Orbit`
t : quantity_like (optional)
Required if input coordinates are just a phase-space position.
Returns
-------
pos : `~astropy.units.Quantity`
Position in static, inertial frame.
vel : `~astropy.units.Quantity`
Velocity in static, inertial frame. |
def get_google_songs(self, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):
logger.info("Loading Google Music songs...")
google_songs = self.api.get_all_songs()
matched_songs, filtered_songs = filter_google_songs(
google_songs, include_filters=include_filters, exclude_filters=exclude_filters,
all_includes=all_includes, all_excludes=all_excludes
)
logger.info("Filtered {0} Google Music songs".format(len(filtered_songs)))
logger.info("Loaded {0} Google Music songs".format(len(matched_songs)))
return matched_songs, filtered_songs | Create song list from user's Google Music library.
Parameters:
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Mobileclient client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Mobileclient client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
Returns:
A list of Google Music song dicts matching criteria and
a list of Google Music song dicts filtered out using filter criteria. |