code stringlengths 70 11.9k | docstring stringlengths 4 7.08k | text stringlengths 128 15k |
|---|---|---|
def mask_roi_unique(self):
A = np.vstack([self.mask_1.mask_roi_sparse,self.mask_2.mask_roi_sparse]).T
B = A[np.lexsort(A.T[::-1])]
return B[np.concatenate(([True],np.any(B[1:]!=B[:-1],axis=1)))] | Assemble a set of unique magnitude tuples for the ROI | ### Input:
Assemble a set of unique magnitude tuples for the ROI
### Response:
def mask_roi_unique(self):
A = np.vstack([self.mask_1.mask_roi_sparse,self.mask_2.mask_roi_sparse]).T
B = A[np.lexsort(A.T[::-1])]
return B[np.concatenate(([True],np.any(B[1:]!=B[:-1],axis=1)))] |
def add_context(request):
try:
PRESIDENT = Manager.objects.filter(
incumbent__user=request.user,
president=True,
).count() > 0
except TypeError:
PRESIDENT = False
if request.user.username == ANONYMOUS_USERNAME:
request.session["ANONYMOUS_SESSION"] = True
ANONYMOUS_SESSION = request.session.get("ANONYMOUS_SESSION", False)
request_types = list()
if request.user.is_authenticated():
for request_type in RequestType.objects.filter(enabled=True):
requests = Request.objects.filter(
request_type=request_type,
status=Request.OPEN,
)
if not request_type.managers.filter(incumbent__user=request.user):
requests = requests.exclude(
~Q(owner__user=request.user),
private=True,
)
request_types.append((request_type, requests.count()))
profile_requests_count = ProfileRequest.objects.all().count()
admin_unread_count = profile_requests_count
return {
"REQUEST_TYPES": request_types,
"HOUSE": settings.HOUSE_NAME,
"ANONYMOUS_USERNAME": ANONYMOUS_USERNAME,
"SHORT_HOUSE": settings.SHORT_HOUSE_NAME,
"ADMIN": settings.ADMINS[0],
"NUM_OF_PROFILE_REQUESTS": profile_requests_count,
"ADMIN_UNREAD_COUNT": admin_unread_count,
"ANONYMOUS_SESSION": ANONYMOUS_SESSION,
"PRESIDENT": PRESIDENT,
} | Add variables to all dictionaries passed to templates. | ### Input:
Add variables to all dictionaries passed to templates.
### Response:
def add_context(request):
try:
PRESIDENT = Manager.objects.filter(
incumbent__user=request.user,
president=True,
).count() > 0
except TypeError:
PRESIDENT = False
if request.user.username == ANONYMOUS_USERNAME:
request.session["ANONYMOUS_SESSION"] = True
ANONYMOUS_SESSION = request.session.get("ANONYMOUS_SESSION", False)
request_types = list()
if request.user.is_authenticated():
for request_type in RequestType.objects.filter(enabled=True):
requests = Request.objects.filter(
request_type=request_type,
status=Request.OPEN,
)
if not request_type.managers.filter(incumbent__user=request.user):
requests = requests.exclude(
~Q(owner__user=request.user),
private=True,
)
request_types.append((request_type, requests.count()))
profile_requests_count = ProfileRequest.objects.all().count()
admin_unread_count = profile_requests_count
return {
"REQUEST_TYPES": request_types,
"HOUSE": settings.HOUSE_NAME,
"ANONYMOUS_USERNAME": ANONYMOUS_USERNAME,
"SHORT_HOUSE": settings.SHORT_HOUSE_NAME,
"ADMIN": settings.ADMINS[0],
"NUM_OF_PROFILE_REQUESTS": profile_requests_count,
"ADMIN_UNREAD_COUNT": admin_unread_count,
"ANONYMOUS_SESSION": ANONYMOUS_SESSION,
"PRESIDENT": PRESIDENT,
} |
def sanity_check(self):
from .wire import Input, Const, Output
from .helperfuncs import get_stack, get_stacks
for net in self.logic:
self.sanity_check_net(net)
for w in self.wirevector_subset():
if w.bitwidth is None:
raise PyrtlError(
% (w.name, get_stack(w)))
wirevector_names_set = set(x.name for x in self.wirevector_set)
if len(self.wirevector_set) != len(wirevector_names_set):
wirevector_names_list = [x.name for x in self.wirevector_set]
for w in wirevector_names_set:
wirevector_names_list.remove(w)
raise PyrtlError(
% repr(wirevector_names_list))
all_input_and_consts = self.wirevector_subset((Input, Const))
wire_src_dict, wire_dst_dict = self.net_connections()
dest_set = set(wire_src_dict.keys())
arg_set = set(wire_dst_dict.keys())
full_set = dest_set | arg_set
connected_minus_allwires = full_set.difference(self.wirevector_set)
if len(connected_minus_allwires) > 0:
bad_wire_names = .join(str(x) for x in connected_minus_allwires)
raise PyrtlError( % (bad_wire_names,
get_stacks(*connected_minus_allwires)))
allwires_minus_connected = self.wirevector_set.difference(full_set)
allwires_minus_connected = allwires_minus_connected.difference(all_input_and_consts)
if len(allwires_minus_connected) > 0:
bad_wire_names = .join(str(x) for x in allwires_minus_connected)
raise PyrtlError( % (bad_wire_names,
get_stacks(*allwires_minus_connected)))
ins = arg_set.difference(dest_set)
undriven = ins.difference(all_input_and_consts)
if len(undriven) > 0:
raise PyrtlError( %
([w.name for w in undriven], get_stacks(*undriven)))
self.sanity_check_memory_sync(wire_src_dict)
if debug_mode:
outs = dest_set.difference(arg_set)
unused = outs.difference(self.wirevector_subset(Output))
if len(unused) > 0:
names = [w.name for w in unused]
print( % names)
print(get_stacks(*unused)) | Check block and throw PyrtlError or PyrtlInternalError if there is an issue.
Should not modify anything, only check data structures to make sure they have been
built according to the assumptions stated in the Block comments. | ### Input:
Check block and throw PyrtlError or PyrtlInternalError if there is an issue.
Should not modify anything, only check data structures to make sure they have been
built according to the assumptions stated in the Block comments.
### Response:
def sanity_check(self):
from .wire import Input, Const, Output
from .helperfuncs import get_stack, get_stacks
for net in self.logic:
self.sanity_check_net(net)
for w in self.wirevector_subset():
if w.bitwidth is None:
raise PyrtlError(
% (w.name, get_stack(w)))
wirevector_names_set = set(x.name for x in self.wirevector_set)
if len(self.wirevector_set) != len(wirevector_names_set):
wirevector_names_list = [x.name for x in self.wirevector_set]
for w in wirevector_names_set:
wirevector_names_list.remove(w)
raise PyrtlError(
% repr(wirevector_names_list))
all_input_and_consts = self.wirevector_subset((Input, Const))
wire_src_dict, wire_dst_dict = self.net_connections()
dest_set = set(wire_src_dict.keys())
arg_set = set(wire_dst_dict.keys())
full_set = dest_set | arg_set
connected_minus_allwires = full_set.difference(self.wirevector_set)
if len(connected_minus_allwires) > 0:
bad_wire_names = .join(str(x) for x in connected_minus_allwires)
raise PyrtlError( % (bad_wire_names,
get_stacks(*connected_minus_allwires)))
allwires_minus_connected = self.wirevector_set.difference(full_set)
allwires_minus_connected = allwires_minus_connected.difference(all_input_and_consts)
if len(allwires_minus_connected) > 0:
bad_wire_names = .join(str(x) for x in allwires_minus_connected)
raise PyrtlError( % (bad_wire_names,
get_stacks(*allwires_minus_connected)))
ins = arg_set.difference(dest_set)
undriven = ins.difference(all_input_and_consts)
if len(undriven) > 0:
raise PyrtlError( %
([w.name for w in undriven], get_stacks(*undriven)))
self.sanity_check_memory_sync(wire_src_dict)
if debug_mode:
outs = dest_set.difference(arg_set)
unused = outs.difference(self.wirevector_subset(Output))
if len(unused) > 0:
names = [w.name for w in unused]
print( % names)
print(get_stacks(*unused)) |
def _get_ip():
cmd_netstat = [, ]
p1 = subprocess.Popen(cmd_netstat, stdout=subprocess.PIPE)
cmd_grep = [, ]
p2 = subprocess.Popen(cmd_grep, stdin=p1.stdout, stdout=subprocess.PIPE)
cmd_awk = [, ]
p3 = subprocess.Popen(cmd_awk, stdin=p2.stdout, stdout=subprocess.PIPE)
galaxy_ip = p3.stdout.read()
log.debug(, galaxy_ip)
return galaxy_ip | Get IP address for the docker host | ### Input:
Get IP address for the docker host
### Response:
def _get_ip():
cmd_netstat = [, ]
p1 = subprocess.Popen(cmd_netstat, stdout=subprocess.PIPE)
cmd_grep = [, ]
p2 = subprocess.Popen(cmd_grep, stdin=p1.stdout, stdout=subprocess.PIPE)
cmd_awk = [, ]
p3 = subprocess.Popen(cmd_awk, stdin=p2.stdout, stdout=subprocess.PIPE)
galaxy_ip = p3.stdout.read()
log.debug(, galaxy_ip)
return galaxy_ip |
def collect_members(module_to_name):
members = {}
for module, module_name in module_to_name.items():
all_names = getattr(module, "__all__", None)
for name, member in inspect.getmembers(module):
if ((inspect.isfunction(member) or inspect.isclass(member)) and
not _always_drop_symbol_re.match(name) and
(all_names is None or name in all_names)):
fullname = % (module_name, name)
if name in members:
other_fullname, other_member = members[name]
if member is not other_member:
raise RuntimeError("Short name collision between %s and %s" %
(fullname, other_fullname))
if len(fullname) == len(other_fullname):
raise RuntimeError("Can't decide whether to use %s or %s for %s: "
"both full names have length %d" %
(fullname, other_fullname, name, len(fullname)))
if len(fullname) > len(other_fullname):
continue
members[name] = fullname, member
return members | Collect all symbols from a list of modules.
Args:
module_to_name: Dictionary mapping modules to short names.
Returns:
Dictionary mapping name to (fullname, member) pairs. | ### Input:
Collect all symbols from a list of modules.
Args:
module_to_name: Dictionary mapping modules to short names.
Returns:
Dictionary mapping name to (fullname, member) pairs.
### Response:
def collect_members(module_to_name):
members = {}
for module, module_name in module_to_name.items():
all_names = getattr(module, "__all__", None)
for name, member in inspect.getmembers(module):
if ((inspect.isfunction(member) or inspect.isclass(member)) and
not _always_drop_symbol_re.match(name) and
(all_names is None or name in all_names)):
fullname = % (module_name, name)
if name in members:
other_fullname, other_member = members[name]
if member is not other_member:
raise RuntimeError("Short name collision between %s and %s" %
(fullname, other_fullname))
if len(fullname) == len(other_fullname):
raise RuntimeError("Can't decide whether to use %s or %s for %s: "
"both full names have length %d" %
(fullname, other_fullname, name, len(fullname)))
if len(fullname) > len(other_fullname):
continue
members[name] = fullname, member
return members |
def execute(self, fn, *args, **kwargs):
if not self.asynchronous:
return fn(*args, **kwargs)
raise NotImplementedError | Execute an operation and return the result. | ### Input:
Execute an operation and return the result.
### Response:
def execute(self, fn, *args, **kwargs):
if not self.asynchronous:
return fn(*args, **kwargs)
raise NotImplementedError |
def tilt_axes(self):
tilt_x = self._libinput.libinput_event_tablet_tool_get_tilt_x(
self._handle)
tilt_y = self._libinput.libinput_event_tablet_tool_get_tilt_y(
self._handle)
x_changed = self._libinput. \
libinput_event_tablet_tool_tilt_x_has_changed(self._handle)
y_changed = self._libinput. \
libinput_event_tablet_tool_tilt_y_has_changed(self._handle)
return (tilt_x, tilt_y), x_changed or y_changed | The current tilt along the (X, Y) axes of the tablet's
current logical orientation, in degrees off the tablet's Z axis
and whether they have changed in this event.
That is, if the tool is perfectly orthogonal to the tablet,
the tilt angle is 0. When the top tilts towards the logical top/left
of the tablet, the x/y tilt angles are negative, if the top tilts
towards the logical bottom/right of the tablet, the x/y tilt angles
are positive.
If these axes do not exist on the current tool, this property returns
((0, 0), :obj:`False`).
Returns:
((float, float), bool): The current value of the axes in degrees
and whether it has changed. | ### Input:
The current tilt along the (X, Y) axes of the tablet's
current logical orientation, in degrees off the tablet's Z axis
and whether they have changed in this event.
That is, if the tool is perfectly orthogonal to the tablet,
the tilt angle is 0. When the top tilts towards the logical top/left
of the tablet, the x/y tilt angles are negative, if the top tilts
towards the logical bottom/right of the tablet, the x/y tilt angles
are positive.
If these axes do not exist on the current tool, this property returns
((0, 0), :obj:`False`).
Returns:
((float, float), bool): The current value of the axes in degrees
and whether it has changed.
### Response:
def tilt_axes(self):
tilt_x = self._libinput.libinput_event_tablet_tool_get_tilt_x(
self._handle)
tilt_y = self._libinput.libinput_event_tablet_tool_get_tilt_y(
self._handle)
x_changed = self._libinput. \
libinput_event_tablet_tool_tilt_x_has_changed(self._handle)
y_changed = self._libinput. \
libinput_event_tablet_tool_tilt_y_has_changed(self._handle)
return (tilt_x, tilt_y), x_changed or y_changed |
def _entry_offset(self, index, entries, description):
description = description.lower()
for (offset, infos) in entries[index:]:
for info in infos:
if info[].lower().startswith(description):
return offset
return -1 | Gets the offset of the first entry that matches the description.
@index - Index into the entries list to begin searching.
@entries - Dictionary of result entries.
@description - Case insensitive description.
Returns the offset, if a matching description is found.
Returns -1 if a matching description is not found. | ### Input:
Gets the offset of the first entry that matches the description.
@index - Index into the entries list to begin searching.
@entries - Dictionary of result entries.
@description - Case insensitive description.
Returns the offset, if a matching description is found.
Returns -1 if a matching description is not found.
### Response:
def _entry_offset(self, index, entries, description):
description = description.lower()
for (offset, infos) in entries[index:]:
for info in infos:
if info[].lower().startswith(description):
return offset
return -1 |
def store_media(self, filename, mediafile):
temp_filename = os.path.join(
,
filename
)
fileops.create_dirs(
os.path.dirname(temp_filename)
)
mediafile.fetch_to_file(temp_filename)
s3_key = Key(self.bucket)
s3_key.key = os.path.join(
,
"%s.%s" % (filename, mediafile.fileext)
if mediafile.fileext is not None
else filename
)
s3_key.set_contents_from_filename(
mediafile.filename
)
os.remove(mediafile.filename)
mediafile.filename = s3_key.generate_url(
expires_in=0, query_auth=False
) | Store media files. | ### Input:
Store media files.
### Response:
def store_media(self, filename, mediafile):
temp_filename = os.path.join(
,
filename
)
fileops.create_dirs(
os.path.dirname(temp_filename)
)
mediafile.fetch_to_file(temp_filename)
s3_key = Key(self.bucket)
s3_key.key = os.path.join(
,
"%s.%s" % (filename, mediafile.fileext)
if mediafile.fileext is not None
else filename
)
s3_key.set_contents_from_filename(
mediafile.filename
)
os.remove(mediafile.filename)
mediafile.filename = s3_key.generate_url(
expires_in=0, query_auth=False
) |
def has_ncols(
state,
incorrect_msg="Your query returned a table with {{n_stu}} column{{ if n_stu > 1 else }} while it should return a table with {{n_sol}} column{{ if n_sol > 1 else }}.",
):
has_result(state)
n_stu = len(state.student_result)
n_sol = len(state.solution_result)
if n_stu != n_sol:
_msg = state.build_message(
incorrect_msg, fmt_kwargs={"n_stu": n_stu, "n_sol": n_sol}
)
state.do_test(_msg)
return state | Test whether the student and solution query results have equal numbers of columns.
Args:
incorrect_msg: If specified, this overrides the automatically generated feedback message
in case the number of columns in the student and solution query don't match.
:Example:
Consider the following solution and SCT: ::
# solution
SELECT artist_id as id, name FROM artists
# sct
Ex().has_ncols()
# passing submission
SELECT artist_id as id, name FROM artists
# failing submission (too little columns)
SELECT artist_id as id FROM artists
# passing submission (two columns, even though not correct ones)
SELECT artist_id, label FROM artists | ### Input:
Test whether the student and solution query results have equal numbers of columns.
Args:
incorrect_msg: If specified, this overrides the automatically generated feedback message
in case the number of columns in the student and solution query don't match.
:Example:
Consider the following solution and SCT: ::
# solution
SELECT artist_id as id, name FROM artists
# sct
Ex().has_ncols()
# passing submission
SELECT artist_id as id, name FROM artists
# failing submission (too little columns)
SELECT artist_id as id FROM artists
# passing submission (two columns, even though not correct ones)
SELECT artist_id, label FROM artists
### Response:
def has_ncols(
state,
incorrect_msg="Your query returned a table with {{n_stu}} column{{ if n_stu > 1 else }} while it should return a table with {{n_sol}} column{{ if n_sol > 1 else }}.",
):
has_result(state)
n_stu = len(state.student_result)
n_sol = len(state.solution_result)
if n_stu != n_sol:
_msg = state.build_message(
incorrect_msg, fmt_kwargs={"n_stu": n_stu, "n_sol": n_sol}
)
state.do_test(_msg)
return state |
def addresses_from_address_families(address_mapper, specs):
snapshot = yield Get(Snapshot, PathGlobs, _spec_to_globs(address_mapper, specs))
dirnames = {dirname(f) for f in snapshot.files}
address_families = yield [Get(AddressFamily, Dir(d)) for d in dirnames]
address_family_by_directory = {af.namespace: af for af in address_families}
matched_addresses = OrderedSet()
for spec in specs:
try:
addr_families_for_spec = spec.matching_address_families(address_family_by_directory)
except Spec.AddressFamilyResolutionError as e:
raise raise_from(ResolveError(e), e)
try:
all_addr_tgt_pairs = spec.address_target_pairs_from_address_families(addr_families_for_spec)
except Spec.AddressResolutionError as e:
raise raise_from(AddressLookupError(e), e)
except SingleAddress._SingleAddressResolutionError as e:
_raise_did_you_mean(e.single_address_family, e.name, source=e)
matched_addresses.update(
addr for (addr, tgt) in all_addr_tgt_pairs
if specs.matcher.matches_target_address_pair(addr, tgt)
)
yield BuildFileAddresses(tuple(matched_addresses)) | Given an AddressMapper and list of Specs, return matching BuildFileAddresses.
:raises: :class:`ResolveError` if:
- there were no matching AddressFamilies, or
- the Spec matches no addresses for SingleAddresses.
:raises: :class:`AddressLookupError` if no targets are matched for non-SingleAddress specs. | ### Input:
Given an AddressMapper and list of Specs, return matching BuildFileAddresses.
:raises: :class:`ResolveError` if:
- there were no matching AddressFamilies, or
- the Spec matches no addresses for SingleAddresses.
:raises: :class:`AddressLookupError` if no targets are matched for non-SingleAddress specs.
### Response:
def addresses_from_address_families(address_mapper, specs):
snapshot = yield Get(Snapshot, PathGlobs, _spec_to_globs(address_mapper, specs))
dirnames = {dirname(f) for f in snapshot.files}
address_families = yield [Get(AddressFamily, Dir(d)) for d in dirnames]
address_family_by_directory = {af.namespace: af for af in address_families}
matched_addresses = OrderedSet()
for spec in specs:
try:
addr_families_for_spec = spec.matching_address_families(address_family_by_directory)
except Spec.AddressFamilyResolutionError as e:
raise raise_from(ResolveError(e), e)
try:
all_addr_tgt_pairs = spec.address_target_pairs_from_address_families(addr_families_for_spec)
except Spec.AddressResolutionError as e:
raise raise_from(AddressLookupError(e), e)
except SingleAddress._SingleAddressResolutionError as e:
_raise_did_you_mean(e.single_address_family, e.name, source=e)
matched_addresses.update(
addr for (addr, tgt) in all_addr_tgt_pairs
if specs.matcher.matches_target_address_pair(addr, tgt)
)
yield BuildFileAddresses(tuple(matched_addresses)) |
def coerce_value(type, value):
if isinstance(type, GraphQLNonNull):
return coerce_value(type.of_type, value)
if value is None:
return None
if isinstance(type, GraphQLList):
item_type = type.of_type
if not isinstance(value, string_types) and isinstance(value, Iterable):
return [coerce_value(item_type, item) for item in value]
else:
return [coerce_value(item_type, value)]
if isinstance(type, GraphQLInputObjectType):
fields = type.fields
obj = {}
for field_name, field in fields.items():
if field_name not in value:
if field.default_value is not None:
field_value = field.default_value
obj[field.out_name or field_name] = field_value
else:
field_value = coerce_value(field.type, value.get(field_name))
obj[field.out_name or field_name] = field_value
return type.create_container(obj)
assert isinstance(type, (GraphQLScalarType, GraphQLEnumType)), "Must be input type"
return type.parse_value(value) | Given a type and any value, return a runtime value coerced to match the type. | ### Input:
Given a type and any value, return a runtime value coerced to match the type.
### Response:
def coerce_value(type, value):
if isinstance(type, GraphQLNonNull):
return coerce_value(type.of_type, value)
if value is None:
return None
if isinstance(type, GraphQLList):
item_type = type.of_type
if not isinstance(value, string_types) and isinstance(value, Iterable):
return [coerce_value(item_type, item) for item in value]
else:
return [coerce_value(item_type, value)]
if isinstance(type, GraphQLInputObjectType):
fields = type.fields
obj = {}
for field_name, field in fields.items():
if field_name not in value:
if field.default_value is not None:
field_value = field.default_value
obj[field.out_name or field_name] = field_value
else:
field_value = coerce_value(field.type, value.get(field_name))
obj[field.out_name or field_name] = field_value
return type.create_container(obj)
assert isinstance(type, (GraphQLScalarType, GraphQLEnumType)), "Must be input type"
return type.parse_value(value) |
def get_unweighted_upstream_leaves(graph: BELGraph, key: Optional[str] = None) -> Iterable[BaseEntity]:
if key is None:
key = WEIGHT
return filter_nodes(graph, [node_is_upstream_leaf, data_missing_key_builder(key)]) | Get nodes with no incoming edges, one outgoing edge, and without the given key in its data dictionary.
.. seealso :: :func:`data_does_not_contain_key_builder`
:param graph: A BEL graph
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
:return: An iterable over leaves (nodes with an in-degree of 0) that don't have the given annotation | ### Input:
Get nodes with no incoming edges, one outgoing edge, and without the given key in its data dictionary.
.. seealso :: :func:`data_does_not_contain_key_builder`
:param graph: A BEL graph
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
:return: An iterable over leaves (nodes with an in-degree of 0) that don't have the given annotation
### Response:
def get_unweighted_upstream_leaves(graph: BELGraph, key: Optional[str] = None) -> Iterable[BaseEntity]:
if key is None:
key = WEIGHT
return filter_nodes(graph, [node_is_upstream_leaf, data_missing_key_builder(key)]) |
def array(self, dimensions=None):
if dimensions is None:
dims = [d for d in self.kdims + self.vdims]
else:
dims = [self.get_dimension(d, strict=True) for d in dimensions]
columns, types = [], []
for dim in dims:
column = self.dimension_values(dim)
columns.append(column)
types.append(column.dtype.kind)
if len(set(types)) > 1:
columns = [c.astype() for c in columns]
return np.column_stack(columns) | Convert dimension values to columnar array.
Args:
dimensions: List of dimensions to return
Returns:
Array of columns corresponding to each dimension | ### Input:
Convert dimension values to columnar array.
Args:
dimensions: List of dimensions to return
Returns:
Array of columns corresponding to each dimension
### Response:
def array(self, dimensions=None):
if dimensions is None:
dims = [d for d in self.kdims + self.vdims]
else:
dims = [self.get_dimension(d, strict=True) for d in dimensions]
columns, types = [], []
for dim in dims:
column = self.dimension_values(dim)
columns.append(column)
types.append(column.dtype.kind)
if len(set(types)) > 1:
columns = [c.astype() for c in columns]
return np.column_stack(columns) |
def _all_queue_names(self):
queues = set()
endpoints = self.config.get()
for e in endpoints:
for q in endpoints[e][]:
queues.add(q)
return sorted(queues) | Return a list of all unique queue names in our config.
:return: list of all queue names (str)
:rtype: :std:term:`list` | ### Input:
Return a list of all unique queue names in our config.
:return: list of all queue names (str)
:rtype: :std:term:`list`
### Response:
def _all_queue_names(self):
queues = set()
endpoints = self.config.get()
for e in endpoints:
for q in endpoints[e][]:
queues.add(q)
return sorted(queues) |
def delta_e_cie2000(lab_color_vector, lab_color_matrix, Kl=1, Kc=1, Kh=1):
L, a, b = lab_color_vector
avg_Lp = (L + lab_color_matrix[:, 0]) / 2.0
C1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))
C2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))
avg_C1_C2 = (C1 + C2) / 2.0
G = 0.5 * (1 - numpy.sqrt(numpy.power(avg_C1_C2, 7.0) / (numpy.power(avg_C1_C2, 7.0) + numpy.power(25.0, 7.0))))
a1p = (1.0 + G) * a
a2p = (1.0 + G) * lab_color_matrix[:, 1]
C1p = numpy.sqrt(numpy.power(a1p, 2) + numpy.power(b, 2))
C2p = numpy.sqrt(numpy.power(a2p, 2) + numpy.power(lab_color_matrix[:, 2], 2))
avg_C1p_C2p = (C1p + C2p) / 2.0
h1p = numpy.degrees(numpy.arctan2(b, a1p))
h1p += (h1p < 0) * 360
h2p = numpy.degrees(numpy.arctan2(lab_color_matrix[:, 2], a2p))
h2p += (h2p < 0) * 360
avg_Hp = (((numpy.fabs(h1p - h2p) > 180) * 360) + h1p + h2p) / 2.0
T = 1 - 0.17 * numpy.cos(numpy.radians(avg_Hp - 30)) + \
0.24 * numpy.cos(numpy.radians(2 * avg_Hp)) + \
0.32 * numpy.cos(numpy.radians(3 * avg_Hp + 6)) - \
0.2 * numpy.cos(numpy.radians(4 * avg_Hp - 63))
diff_h2p_h1p = h2p - h1p
delta_hp = diff_h2p_h1p + (numpy.fabs(diff_h2p_h1p) > 180) * 360
delta_hp -= (h2p > h1p) * 720
delta_Lp = lab_color_matrix[:, 0] - L
delta_Cp = C2p - C1p
delta_Hp = 2 * numpy.sqrt(C2p * C1p) * numpy.sin(numpy.radians(delta_hp) / 2.0)
S_L = 1 + ((0.015 * numpy.power(avg_Lp - 50, 2)) / numpy.sqrt(20 + numpy.power(avg_Lp - 50, 2.0)))
S_C = 1 + 0.045 * avg_C1p_C2p
S_H = 1 + 0.015 * avg_C1p_C2p * T
delta_ro = 30 * numpy.exp(-(numpy.power(((avg_Hp - 275) / 25), 2.0)))
R_C = numpy.sqrt((numpy.power(avg_C1p_C2p, 7.0)) / (numpy.power(avg_C1p_C2p, 7.0) + numpy.power(25.0, 7.0)))
R_T = -2 * R_C * numpy.sin(2 * numpy.radians(delta_ro))
return numpy.sqrt(
numpy.power(delta_Lp / (S_L * Kl), 2) +
numpy.power(delta_Cp / (S_C * Kc), 2) +
numpy.power(delta_Hp / (S_H * Kh), 2) +
R_T * (delta_Cp / (S_C * Kc)) * (delta_Hp / (S_H * Kh))) | Calculates the Delta E (CIE2000) of two colors. | ### Input:
Calculates the Delta E (CIE2000) of two colors.
### Response:
def delta_e_cie2000(lab_color_vector, lab_color_matrix, Kl=1, Kc=1, Kh=1):
L, a, b = lab_color_vector
avg_Lp = (L + lab_color_matrix[:, 0]) / 2.0
C1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))
C2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))
avg_C1_C2 = (C1 + C2) / 2.0
G = 0.5 * (1 - numpy.sqrt(numpy.power(avg_C1_C2, 7.0) / (numpy.power(avg_C1_C2, 7.0) + numpy.power(25.0, 7.0))))
a1p = (1.0 + G) * a
a2p = (1.0 + G) * lab_color_matrix[:, 1]
C1p = numpy.sqrt(numpy.power(a1p, 2) + numpy.power(b, 2))
C2p = numpy.sqrt(numpy.power(a2p, 2) + numpy.power(lab_color_matrix[:, 2], 2))
avg_C1p_C2p = (C1p + C2p) / 2.0
h1p = numpy.degrees(numpy.arctan2(b, a1p))
h1p += (h1p < 0) * 360
h2p = numpy.degrees(numpy.arctan2(lab_color_matrix[:, 2], a2p))
h2p += (h2p < 0) * 360
avg_Hp = (((numpy.fabs(h1p - h2p) > 180) * 360) + h1p + h2p) / 2.0
T = 1 - 0.17 * numpy.cos(numpy.radians(avg_Hp - 30)) + \
0.24 * numpy.cos(numpy.radians(2 * avg_Hp)) + \
0.32 * numpy.cos(numpy.radians(3 * avg_Hp + 6)) - \
0.2 * numpy.cos(numpy.radians(4 * avg_Hp - 63))
diff_h2p_h1p = h2p - h1p
delta_hp = diff_h2p_h1p + (numpy.fabs(diff_h2p_h1p) > 180) * 360
delta_hp -= (h2p > h1p) * 720
delta_Lp = lab_color_matrix[:, 0] - L
delta_Cp = C2p - C1p
delta_Hp = 2 * numpy.sqrt(C2p * C1p) * numpy.sin(numpy.radians(delta_hp) / 2.0)
S_L = 1 + ((0.015 * numpy.power(avg_Lp - 50, 2)) / numpy.sqrt(20 + numpy.power(avg_Lp - 50, 2.0)))
S_C = 1 + 0.045 * avg_C1p_C2p
S_H = 1 + 0.015 * avg_C1p_C2p * T
delta_ro = 30 * numpy.exp(-(numpy.power(((avg_Hp - 275) / 25), 2.0)))
R_C = numpy.sqrt((numpy.power(avg_C1p_C2p, 7.0)) / (numpy.power(avg_C1p_C2p, 7.0) + numpy.power(25.0, 7.0)))
R_T = -2 * R_C * numpy.sin(2 * numpy.radians(delta_ro))
return numpy.sqrt(
numpy.power(delta_Lp / (S_L * Kl), 2) +
numpy.power(delta_Cp / (S_C * Kc), 2) +
numpy.power(delta_Hp / (S_H * Kh), 2) +
R_T * (delta_Cp / (S_C * Kc)) * (delta_Hp / (S_H * Kh))) |
def detectTierRichCss(self):
if not self.detectMobileQuick():
return False
if self.detectTierIphone() \
or self.detectKindle():
return False
return self.detectWebkit() \
or self.detectS60OssBrowser() \
or self.detectBlackBerryHigh() \
or self.detectWindowsMobile() \
or UAgentInfo.engineTelecaQ in self.__userAgent | Return detection of any device in the 'Rich CSS' Tier
The quick way to detect for a tier of devices.
This method detects for devices which are likely to be capable
of viewing CSS content optimized for the iPhone,
but may not necessarily support JavaScript.
Excludes all iPhone Tier devices. | ### Input:
Return detection of any device in the 'Rich CSS' Tier
The quick way to detect for a tier of devices.
This method detects for devices which are likely to be capable
of viewing CSS content optimized for the iPhone,
but may not necessarily support JavaScript.
Excludes all iPhone Tier devices.
### Response:
def detectTierRichCss(self):
if not self.detectMobileQuick():
return False
if self.detectTierIphone() \
or self.detectKindle():
return False
return self.detectWebkit() \
or self.detectS60OssBrowser() \
or self.detectBlackBerryHigh() \
or self.detectWindowsMobile() \
or UAgentInfo.engineTelecaQ in self.__userAgent |
def encode(self, encoding=None, errors=):
return ColorBytes(super(ColorStr, self).encode(encoding, errors), original_class=self.__class__) | Encode using the codec registered for encoding. encoding defaults to the default encoding.
errors may be given to set a different error handling scheme. Default is 'strict' meaning that encoding errors
raise a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well as any
other name registered with codecs.register_error that is able to handle UnicodeEncodeErrors.
:param str encoding: Codec.
:param str errors: Error handling scheme. | ### Input:
Encode using the codec registered for encoding. encoding defaults to the default encoding.
errors may be given to set a different error handling scheme. Default is 'strict' meaning that encoding errors
raise a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well as any
other name registered with codecs.register_error that is able to handle UnicodeEncodeErrors.
:param str encoding: Codec.
:param str errors: Error handling scheme.
### Response:
def encode(self, encoding=None, errors=):
return ColorBytes(super(ColorStr, self).encode(encoding, errors), original_class=self.__class__) |
def order_by(self, field_path, direction=ASCENDING):
field_path_module.split_field_path(field_path)
order_pb = self._make_order(field_path, direction)
new_orders = self._orders + (order_pb,)
return self.__class__(
self._parent,
projection=self._projection,
field_filters=self._field_filters,
orders=new_orders,
limit=self._limit,
offset=self._offset,
start_at=self._start_at,
end_at=self._end_at,
) | Modify the query to add an order clause on a specific field.
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
Successive :meth:`~.firestore_v1beta1.query.Query.order_by` calls
will further refine the ordering of results returned by the query
(i.e. the new "order by" fields will be added to existing ones).
Args:
field_path (str): A field path (``.``-delimited list of
field names) on which to order the query results.
direction (Optional[str]): The direction to order by. Must be one
of :attr:`ASCENDING` or :attr:`DESCENDING`, defaults to
:attr:`ASCENDING`.
Returns:
~.firestore_v1beta1.query.Query: An ordered query. Acts as a
copy of the current query, modified with the newly added
"order by" constraint.
Raises:
ValueError: If ``field_path`` is invalid.
ValueError: If ``direction`` is not one of :attr:`ASCENDING` or
:attr:`DESCENDING`. | ### Input:
Modify the query to add an order clause on a specific field.
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
Successive :meth:`~.firestore_v1beta1.query.Query.order_by` calls
will further refine the ordering of results returned by the query
(i.e. the new "order by" fields will be added to existing ones).
Args:
field_path (str): A field path (``.``-delimited list of
field names) on which to order the query results.
direction (Optional[str]): The direction to order by. Must be one
of :attr:`ASCENDING` or :attr:`DESCENDING`, defaults to
:attr:`ASCENDING`.
Returns:
~.firestore_v1beta1.query.Query: An ordered query. Acts as a
copy of the current query, modified with the newly added
"order by" constraint.
Raises:
ValueError: If ``field_path`` is invalid.
ValueError: If ``direction`` is not one of :attr:`ASCENDING` or
:attr:`DESCENDING`.
### Response:
def order_by(self, field_path, direction=ASCENDING):
field_path_module.split_field_path(field_path)
order_pb = self._make_order(field_path, direction)
new_orders = self._orders + (order_pb,)
return self.__class__(
self._parent,
projection=self._projection,
field_filters=self._field_filters,
orders=new_orders,
limit=self._limit,
offset=self._offset,
start_at=self._start_at,
end_at=self._end_at,
) |
def convert_accession_to_taxid(self, accessionid):
server = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?" + self.options + "&db=nuccore&id={0}&retmode=xml".format(
accessionid)
r = requests.get(server, headers={"Content-Type": "text/xml"})
if not r.ok:
r.raise_for_status()
sys.exit()
response = r.text
records = xmltodict.parse(response)
try:
for i in records[][][][][][]:
for key, value in i.items():
if value == :
taxid = i[]
taxid = taxid.split()[1]
return taxid
except:
for i in records[][][][][0][][]:
for key, value in i.items():
if value == :
taxid = i[]
taxid = taxid.split()[1]
return taxid
return | Convert Accession Id to Tax Id | ### Input:
Convert Accession Id to Tax Id
### Response:
def convert_accession_to_taxid(self, accessionid):
server = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?" + self.options + "&db=nuccore&id={0}&retmode=xml".format(
accessionid)
r = requests.get(server, headers={"Content-Type": "text/xml"})
if not r.ok:
r.raise_for_status()
sys.exit()
response = r.text
records = xmltodict.parse(response)
try:
for i in records[][][][][][]:
for key, value in i.items():
if value == :
taxid = i[]
taxid = taxid.split()[1]
return taxid
except:
for i in records[][][][][0][][]:
for key, value in i.items():
if value == :
taxid = i[]
taxid = taxid.split()[1]
return taxid
return |
def add_mutations_to_nglview(self, view, alignment_type=, alignment_ids=None,
seqprop=None, structprop=None, chain_id=None, use_representatives=False,
grouped=False, color=, unique_colors=True,
opacity_range=(0.8,1), scale_range=(1,5)):
if use_representatives:
if seqprop and structprop and chain_id:
raise ValueError(
)
else:
if not seqprop or not structprop or not chain_id:
raise ValueError(
)
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
log.debug(.format(seqprop.id, structprop.id, chain_id))
single, fingerprint = self.sequence_mutation_summary(alignment_type=alignment_type, alignment_ids=alignment_ids)
if not grouped:
single_lens = {k: len(v) for k, v in single.items()}
single_map_to_structure = {}
for k, v in single_lens.items():
resnum = int(k[1])
resnum_to_structure = self.map_seqprop_resnums_to_structprop_resnums(resnums=resnum,
seqprop=seqprop,
structprop=structprop,
chain_id=chain_id,
use_representatives=use_representatives)
if resnum not in resnum_to_structure:
log.warning(.format(resnum, structprop.id))
continue
new_key = resnum_to_structure[resnum]
single_map_to_structure[new_key] = v
structprop.add_scaled_residues_highlight_to_nglview(view=view,
structure_resnums=single_map_to_structure,
chain=chain_id,
color=color,
unique_colors=unique_colors,
opacity_range=opacity_range,
scale_range=scale_range)
else:
log.warning()
fingerprint_lens = {k: len(v) for k, v in fingerprint.items()}
fingerprint_map_to_structure = {}
for k, v in fingerprint_lens.items():
k_list = [int(x[1]) for x in k]
resnums_to_structure = self.map_seqprop_resnums_to_structprop_resnums(resnums=k_list,
seqprop=seqprop,
structprop=structprop,
chain_id=chain_id,
use_representatives=use_representatives)
new_key = tuple(y for y in resnums_to_structure.values())
fingerprint_map_to_structure[new_key] = v
structprop.add_scaled_residues_highlight_to_nglview(view=view,
structure_resnums=fingerprint_map_to_structure,
chain=chain_id,
color=color,
unique_colors=unique_colors,
opacity_range=opacity_range,
scale_range=scale_range) | Add representations to an NGLWidget view object for residues that are mutated in the
``sequence_alignments`` attribute.
Args:
view (NGLWidget): NGLWidget view object
alignment_type (str): Specified alignment type contained in the ``annotation`` field of an alignment object,
``seqalign`` or ``structalign`` are the current types.
alignment_ids (str, list): Specified alignment ID or IDs to use
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): ID of the structure's chain to get annotation from
use_representatives (bool): If the representative sequence/structure/chain IDs should be used
grouped (bool): If groups of mutations should be colored and sized together
color (str): Color of the mutations (overridden if unique_colors=True)
unique_colors (bool): If each mutation/mutation group should be colored uniquely
opacity_range (tuple): Min/max opacity values (mutations that show up more will be opaque)
scale_range (tuple): Min/max size values (mutations that show up more will be bigger) | ### Input:
Add representations to an NGLWidget view object for residues that are mutated in the
``sequence_alignments`` attribute.
Args:
view (NGLWidget): NGLWidget view object
alignment_type (str): Specified alignment type contained in the ``annotation`` field of an alignment object,
``seqalign`` or ``structalign`` are the current types.
alignment_ids (str, list): Specified alignment ID or IDs to use
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): ID of the structure's chain to get annotation from
use_representatives (bool): If the representative sequence/structure/chain IDs should be used
grouped (bool): If groups of mutations should be colored and sized together
color (str): Color of the mutations (overridden if unique_colors=True)
unique_colors (bool): If each mutation/mutation group should be colored uniquely
opacity_range (tuple): Min/max opacity values (mutations that show up more will be opaque)
scale_range (tuple): Min/max size values (mutations that show up more will be bigger)
### Response:
def add_mutations_to_nglview(self, view, alignment_type=, alignment_ids=None,
seqprop=None, structprop=None, chain_id=None, use_representatives=False,
grouped=False, color=, unique_colors=True,
opacity_range=(0.8,1), scale_range=(1,5)):
if use_representatives:
if seqprop and structprop and chain_id:
raise ValueError(
)
else:
if not seqprop or not structprop or not chain_id:
raise ValueError(
)
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
log.debug(.format(seqprop.id, structprop.id, chain_id))
single, fingerprint = self.sequence_mutation_summary(alignment_type=alignment_type, alignment_ids=alignment_ids)
if not grouped:
single_lens = {k: len(v) for k, v in single.items()}
single_map_to_structure = {}
for k, v in single_lens.items():
resnum = int(k[1])
resnum_to_structure = self.map_seqprop_resnums_to_structprop_resnums(resnums=resnum,
seqprop=seqprop,
structprop=structprop,
chain_id=chain_id,
use_representatives=use_representatives)
if resnum not in resnum_to_structure:
log.warning(.format(resnum, structprop.id))
continue
new_key = resnum_to_structure[resnum]
single_map_to_structure[new_key] = v
structprop.add_scaled_residues_highlight_to_nglview(view=view,
structure_resnums=single_map_to_structure,
chain=chain_id,
color=color,
unique_colors=unique_colors,
opacity_range=opacity_range,
scale_range=scale_range)
else:
log.warning()
fingerprint_lens = {k: len(v) for k, v in fingerprint.items()}
fingerprint_map_to_structure = {}
for k, v in fingerprint_lens.items():
k_list = [int(x[1]) for x in k]
resnums_to_structure = self.map_seqprop_resnums_to_structprop_resnums(resnums=k_list,
seqprop=seqprop,
structprop=structprop,
chain_id=chain_id,
use_representatives=use_representatives)
new_key = tuple(y for y in resnums_to_structure.values())
fingerprint_map_to_structure[new_key] = v
structprop.add_scaled_residues_highlight_to_nglview(view=view,
structure_resnums=fingerprint_map_to_structure,
chain=chain_id,
color=color,
unique_colors=unique_colors,
opacity_range=opacity_range,
scale_range=scale_range) |
def update(self, friendly_name=values.unset, identity=values.unset,
deployment_sid=values.unset, enabled=values.unset):
data = values.of({
: friendly_name,
: identity,
: deployment_sid,
: enabled,
})
payload = self._version.update(
,
self._uri,
data=data,
)
return DeviceInstance(
self._version,
payload,
fleet_sid=self._solution[],
sid=self._solution[],
) | Update the DeviceInstance
:param unicode friendly_name: A human readable description for this Device.
:param unicode identity: An identifier of the Device user.
:param unicode deployment_sid: The unique SID of the Deployment group.
:param bool enabled: The enabled
:returns: Updated DeviceInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceInstance | ### Input:
Update the DeviceInstance
:param unicode friendly_name: A human readable description for this Device.
:param unicode identity: An identifier of the Device user.
:param unicode deployment_sid: The unique SID of the Deployment group.
:param bool enabled: The enabled
:returns: Updated DeviceInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceInstance
### Response:
def update(self, friendly_name=values.unset, identity=values.unset,
deployment_sid=values.unset, enabled=values.unset):
data = values.of({
: friendly_name,
: identity,
: deployment_sid,
: enabled,
})
payload = self._version.update(
,
self._uri,
data=data,
)
return DeviceInstance(
self._version,
payload,
fleet_sid=self._solution[],
sid=self._solution[],
) |
def spin1_polar(self):
return coordinates.cartesian_to_spherical_polar(
self.spin1x, self.spin1y, self.spin1z) | Returns the polar spin angle of mass 1. | ### Input:
Returns the polar spin angle of mass 1.
### Response:
def spin1_polar(self):
return coordinates.cartesian_to_spherical_polar(
self.spin1x, self.spin1y, self.spin1z) |
def run(self, *coros: CoroWrapper):
if not self.running:
raise RuntimeError("not running!")
async def wrapper():
results = []
for coro in coros:
try:
if inspect.isawaitable(coro):
results.append(await coro)
elif inspect.isfunction(coro):
res = coro()
if inspect.isawaitable(res):
results.append(await res)
else:
results.append(res)
else:
raise RuntimeError(
"don't know how to run {}".format(coro))
except Exception as ex:
logger.error("Error while running coroutine {}: {}".format(coro.__name__, ex.__repr__()))
raise ex
if len(results) == 1:
return results[0]
return results
if coros:
what = wrapper()
else:
what = self.runFut
return self.loop.run_until_complete(what) | Runs an arbitrary list of coroutines in order and then quits the loop,
if not running as a context manager. | ### Input:
Runs an arbitrary list of coroutines in order and then quits the loop,
if not running as a context manager.
### Response:
def run(self, *coros: CoroWrapper):
if not self.running:
raise RuntimeError("not running!")
async def wrapper():
results = []
for coro in coros:
try:
if inspect.isawaitable(coro):
results.append(await coro)
elif inspect.isfunction(coro):
res = coro()
if inspect.isawaitable(res):
results.append(await res)
else:
results.append(res)
else:
raise RuntimeError(
"don't know how to run {}".format(coro))
except Exception as ex:
logger.error("Error while running coroutine {}: {}".format(coro.__name__, ex.__repr__()))
raise ex
if len(results) == 1:
return results[0]
return results
if coros:
what = wrapper()
else:
what = self.runFut
return self.loop.run_until_complete(what) |
def delete(self, wait_for_deletion=True):
if self.exists():
try:
self._api.objects_delete(self._bucket, self._key)
except Exception as e:
raise e
if wait_for_deletion:
for _ in range(_MAX_POLL_ATTEMPTS):
objects = Objects(self._bucket, prefix=self.key, delimiter=,
context=self._context)
if any(o.key == self.key for o in objects):
time.sleep(_POLLING_SLEEP)
continue
break
else:
logging.error(,
_MAX_POLL_ATTEMPTS) | Deletes this object from its bucket.
Args:
wait_for_deletion: If True, we poll until this object no longer appears in
objects.list operations for this bucket before returning.
Raises:
Exception if there was an error deleting the object. | ### Input:
Deletes this object from its bucket.
Args:
wait_for_deletion: If True, we poll until this object no longer appears in
objects.list operations for this bucket before returning.
Raises:
Exception if there was an error deleting the object.
### Response:
def delete(self, wait_for_deletion=True):
if self.exists():
try:
self._api.objects_delete(self._bucket, self._key)
except Exception as e:
raise e
if wait_for_deletion:
for _ in range(_MAX_POLL_ATTEMPTS):
objects = Objects(self._bucket, prefix=self.key, delimiter=,
context=self._context)
if any(o.key == self.key for o in objects):
time.sleep(_POLLING_SLEEP)
continue
break
else:
logging.error(,
_MAX_POLL_ATTEMPTS) |
def run_checked (self):
self.start_time = time.time()
self.setName("Status")
wait_seconds = 1
first_wait = True
while not self.stopped(wait_seconds):
self.log_status()
if first_wait:
wait_seconds = self.wait_seconds
first_wait = False | Print periodic status messages. | ### Input:
Print periodic status messages.
### Response:
def run_checked (self):
self.start_time = time.time()
self.setName("Status")
wait_seconds = 1
first_wait = True
while not self.stopped(wait_seconds):
self.log_status()
if first_wait:
wait_seconds = self.wait_seconds
first_wait = False |
def hquad(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH, etaV, zetaH,
zetaV, xdirect, quadargs, use_ne_eval, msrc, mrec):
r
rtol, atol, limit, a, b, pts_per_dec = quadargs
la = np.log10(a)
lb = np.log10(b)
ilambd = np.logspace(la, lb, (lb-la)*pts_per_dec + 1)
PJ0, PJ1, PJ0b = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH,
etaV, zetaH, zetaV,
np.atleast_2d(ilambd), ab, xdirect,
msrc, mrec, use_ne_eval)
if PJ0 is not None:
sPJ0r = iuSpline(np.log(ilambd), PJ0.real)
sPJ0i = iuSpline(np.log(ilambd), PJ0.imag)
else:
sPJ0r = None
sPJ0i = None
if PJ1 is not None:
sPJ1r = iuSpline(np.log(ilambd), PJ1.real)
sPJ1i = iuSpline(np.log(ilambd), PJ1.imag)
else:
sPJ1r = None
sPJ1i = None
if PJ0b is not None:
sPJ0br = iuSpline(np.log(ilambd), PJ0b.real)
sPJ0bi = iuSpline(np.log(ilambd), PJ0b.imag)
else:
sPJ0br = None
sPJ0bi = None
fEM = np.zeros(off.size, dtype=complex)
conv = True
iinp = {: a, : b, : atol, : rtol, : limit}
for i in range(off.size):
fEM[i], tc = quad(sPJ0r, sPJ0i, sPJ1r, sPJ1i, sPJ0br, sPJ0bi, ab,
off[i], factAng[i], iinp)
conv *= tc
return fEM, 1, conv | r"""Hankel Transform using the ``QUADPACK`` library.
This routine uses the ``scipy.integrate.quad`` module, which in turn makes
use of the Fortran library ``QUADPACK`` (``qagse``).
It is massively (orders of magnitudes) slower than either ``fht`` or
``hqwe``, and is mainly here for completeness and comparison purposes. It
always uses interpolation in the wavenumber domain, hence it generally will
not be as precise as the other methods. However, it might work in some
areas where the others fail.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
Returns
-------
fEM : array
Returns frequency-domain EM response.
kcount : int
Kernel count. For HQUAD, this is 1.
conv : bool
If true, QUAD converged. If not, <htarg> might have to be adjusted. | ### Input:
r"""Hankel Transform using the ``QUADPACK`` library.
This routine uses the ``scipy.integrate.quad`` module, which in turn makes
use of the Fortran library ``QUADPACK`` (``qagse``).
It is massively (orders of magnitudes) slower than either ``fht`` or
``hqwe``, and is mainly here for completeness and comparison purposes. It
always uses interpolation in the wavenumber domain, hence it generally will
not be as precise as the other methods. However, it might work in some
areas where the others fail.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
Returns
-------
fEM : array
Returns frequency-domain EM response.
kcount : int
Kernel count. For HQUAD, this is 1.
conv : bool
If true, QUAD converged. If not, <htarg> might have to be adjusted.
### Response:
def hquad(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH, etaV, zetaH,
zetaV, xdirect, quadargs, use_ne_eval, msrc, mrec):
r
rtol, atol, limit, a, b, pts_per_dec = quadargs
la = np.log10(a)
lb = np.log10(b)
ilambd = np.logspace(la, lb, (lb-la)*pts_per_dec + 1)
PJ0, PJ1, PJ0b = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH,
etaV, zetaH, zetaV,
np.atleast_2d(ilambd), ab, xdirect,
msrc, mrec, use_ne_eval)
if PJ0 is not None:
sPJ0r = iuSpline(np.log(ilambd), PJ0.real)
sPJ0i = iuSpline(np.log(ilambd), PJ0.imag)
else:
sPJ0r = None
sPJ0i = None
if PJ1 is not None:
sPJ1r = iuSpline(np.log(ilambd), PJ1.real)
sPJ1i = iuSpline(np.log(ilambd), PJ1.imag)
else:
sPJ1r = None
sPJ1i = None
if PJ0b is not None:
sPJ0br = iuSpline(np.log(ilambd), PJ0b.real)
sPJ0bi = iuSpline(np.log(ilambd), PJ0b.imag)
else:
sPJ0br = None
sPJ0bi = None
fEM = np.zeros(off.size, dtype=complex)
conv = True
iinp = {: a, : b, : atol, : rtol, : limit}
for i in range(off.size):
fEM[i], tc = quad(sPJ0r, sPJ0i, sPJ1r, sPJ1i, sPJ0br, sPJ0bi, ab,
off[i], factAng[i], iinp)
conv *= tc
return fEM, 1, conv |
def _Tension(T):
if 248.15 <= T <= Tc:
Tr = T/Tc
return 1e-3*(235.8*(1-Tr)**1.256*(1-0.625*(1-Tr)))
else:
raise NotImplementedError("Incoming out of bound") | Equation for the surface tension
Parameters
----------
T : float
Temperature, [K]
Returns
-------
σ : float
Surface tension, [N/m]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 248.15 ≤ T ≤ 647
* Estrapolate to -25ºC in supercooled liquid metastable state
Examples
--------
>>> _Tension(300)
0.0716859625
>>> _Tension(450)
0.0428914992
References
----------
IAPWS, Revised Release on Surface Tension of Ordinary Water Substance
June 2014, http://www.iapws.org/relguide/Surf-H2O.html | ### Input:
Equation for the surface tension
Parameters
----------
T : float
Temperature, [K]
Returns
-------
σ : float
Surface tension, [N/m]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 248.15 ≤ T ≤ 647
* Estrapolate to -25ºC in supercooled liquid metastable state
Examples
--------
>>> _Tension(300)
0.0716859625
>>> _Tension(450)
0.0428914992
References
----------
IAPWS, Revised Release on Surface Tension of Ordinary Water Substance
June 2014, http://www.iapws.org/relguide/Surf-H2O.html
### Response:
def _Tension(T):
if 248.15 <= T <= Tc:
Tr = T/Tc
return 1e-3*(235.8*(1-Tr)**1.256*(1-0.625*(1-Tr)))
else:
raise NotImplementedError("Incoming out of bound") |
def getback(config, force=False):
repo = config.repo
active_branch = repo.active_branch
if active_branch.name == "master":
error_out("YouRepo is "dirty". ({})t know how to do this "natively" with GitPython.
merged_branches = [
x.strip()
for x in repo.git.branch("--merged").splitlines()
if x.strip() and not x.strip().startswith("*")
]
was_merged = branch_name in merged_branches
certain = was_merged or force
if not certain:
certain = (
input("Are you certain {} is actually merged? [Y/n] ".format(branch_name))
.lower()
.strip()
!= "n"
)
if not certain:
return 1
if was_merged:
repo.git.branch("-d", branch_name)
else:
repo.git.branch("-D", branch_name)
fork_remote = None
for remote in repo.remotes:
if remote.name == state.get("FORK_NAME"):
fork_remote = remote
break
if fork_remote:
fork_remote.push(":" + branch_name)
info_out("Remote branch on fork deleted too.") | Goes back to the master branch, deletes the current branch locally
and remotely. | ### Input:
Goes back to the master branch, deletes the current branch locally
and remotely.
### Response:
def getback(config, force=False):
repo = config.repo
active_branch = repo.active_branch
if active_branch.name == "master":
error_out("YouRepo is "dirty". ({})t know how to do this "natively" with GitPython.
merged_branches = [
x.strip()
for x in repo.git.branch("--merged").splitlines()
if x.strip() and not x.strip().startswith("*")
]
was_merged = branch_name in merged_branches
certain = was_merged or force
if not certain:
certain = (
input("Are you certain {} is actually merged? [Y/n] ".format(branch_name))
.lower()
.strip()
!= "n"
)
if not certain:
return 1
if was_merged:
repo.git.branch("-d", branch_name)
else:
repo.git.branch("-D", branch_name)
fork_remote = None
for remote in repo.remotes:
if remote.name == state.get("FORK_NAME"):
fork_remote = remote
break
if fork_remote:
fork_remote.push(":" + branch_name)
info_out("Remote branch on fork deleted too.") |
def get_request_kwargs(self):
kwargs = dict(stream=True, timeout=self.aggregate.config["timeout"])
if self.proxy:
kwargs["proxies"] = {self.proxytype: self.proxy}
if self.scheme == u"https" and self.aggregate.config["sslverify"]:
kwargs[] = self.aggregate.config["sslverify"]
else:
kwargs[] = False
return kwargs | Construct keyword parameters for Session.request() and
Session.resolve_redirects(). | ### Input:
Construct keyword parameters for Session.request() and
Session.resolve_redirects().
### Response:
def get_request_kwargs(self):
kwargs = dict(stream=True, timeout=self.aggregate.config["timeout"])
if self.proxy:
kwargs["proxies"] = {self.proxytype: self.proxy}
if self.scheme == u"https" and self.aggregate.config["sslverify"]:
kwargs[] = self.aggregate.config["sslverify"]
else:
kwargs[] = False
return kwargs |
def was_modified_since(header=None, mtime=0, size=0):
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False | Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about. | ### Input:
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
### Response:
def was_modified_since(header=None, mtime=0, size=0):
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False |
def add_parameters(self,template_file,in_file=None,pst_path=None):
assert os.path.exists(template_file),"template file not found".format(template_file)
assert template_file != in_file
parnme = pst_utils.parse_tpl_file(template_file)
new_parnme = [p for p in parnme if p not in self.parameter_data.parnme]
if len(new_parnme) == 0:
warnings.warn("no new parameters found in template file {0}".format(template_file),PyemuWarning)
new_par_data = None
else:
new_par_data = pst_utils.populate_dataframe(new_parnme,pst_utils.pst_config["par_fieldnames"],
pst_utils.pst_config["par_defaults"],
pst_utils.pst_config["par_dtype"])
new_par_data.loc[new_parnme,"parnme"] = new_parnme
self.parameter_data = self.parameter_data.append(new_par_data)
if in_file is None:
in_file = template_file.replace(".tpl",)
if pst_path is not None:
template_file = os.path.join(pst_path,os.path.split(template_file)[-1])
in_file = os.path.join(pst_path, os.path.split(in_file)[-1])
self.template_files.append(template_file)
self.input_files.append(in_file)
return new_par_data | add new parameters to a control file
Parameters
----------
template_file : str
template file
in_file : str(optional)
model input file. If None, template_file.replace('.tpl','') is used
pst_path : str(optional)
the path to append to the template_file and in_file in the control file. If
not None, then any existing path in front of the template or in file is split off
and pst_path is prepended. Default is None
Returns
-------
new_par_data : pandas.DataFrame
the data for the new parameters that were added. If no new parameters are in the
new template file, returns None
Note
----
populates the new parameter information with default values | ### Input:
add new parameters to a control file
Parameters
----------
template_file : str
template file
in_file : str(optional)
model input file. If None, template_file.replace('.tpl','') is used
pst_path : str(optional)
the path to append to the template_file and in_file in the control file. If
not None, then any existing path in front of the template or in file is split off
and pst_path is prepended. Default is None
Returns
-------
new_par_data : pandas.DataFrame
the data for the new parameters that were added. If no new parameters are in the
new template file, returns None
Note
----
populates the new parameter information with default values
### Response:
def add_parameters(self,template_file,in_file=None,pst_path=None):
assert os.path.exists(template_file),"template file not found".format(template_file)
assert template_file != in_file
parnme = pst_utils.parse_tpl_file(template_file)
new_parnme = [p for p in parnme if p not in self.parameter_data.parnme]
if len(new_parnme) == 0:
warnings.warn("no new parameters found in template file {0}".format(template_file),PyemuWarning)
new_par_data = None
else:
new_par_data = pst_utils.populate_dataframe(new_parnme,pst_utils.pst_config["par_fieldnames"],
pst_utils.pst_config["par_defaults"],
pst_utils.pst_config["par_dtype"])
new_par_data.loc[new_parnme,"parnme"] = new_parnme
self.parameter_data = self.parameter_data.append(new_par_data)
if in_file is None:
in_file = template_file.replace(".tpl",)
if pst_path is not None:
template_file = os.path.join(pst_path,os.path.split(template_file)[-1])
in_file = os.path.join(pst_path, os.path.split(in_file)[-1])
self.template_files.append(template_file)
self.input_files.append(in_file)
return new_par_data |
def is_unlocked(self):
if self.is_public:
return True
if not self.is_protected:
return True
return self._key.unlocked | ``False`` if this is a private key that is protected with a passphrase and has not yet been unlocked, otherwise ``True`` | ### Input:
``False`` if this is a private key that is protected with a passphrase and has not yet been unlocked, otherwise ``True``
### Response:
def is_unlocked(self):
if self.is_public:
return True
if not self.is_protected:
return True
return self._key.unlocked |
def stop_broker(host, connection, stop_command, verbose):
_, stdout, stderr = connection.sudo_command(stop_command)
if verbose:
report_stdout(host, stdout)
report_stderr(host, stderr) | Execute the stop | ### Input:
Execute the stop
### Response:
def stop_broker(host, connection, stop_command, verbose):
_, stdout, stderr = connection.sudo_command(stop_command)
if verbose:
report_stdout(host, stdout)
report_stderr(host, stderr) |
def submit_mult_calcs(calc_suite_specs, exec_options=None):
if exec_options is None:
exec_options = dict()
if exec_options.pop(, False):
print(_print_suite_summary(calc_suite_specs))
_user_verify()
calc_suite = CalcSuite(calc_suite_specs)
calcs = calc_suite.create_calcs()
if not calcs:
raise AospyException(
"The specified combination of parameters yielded zero "
"calculations. Most likely, one of the parameters is "
"inadvertently empty."
)
return _exec_calcs(calcs, **exec_options) | Generate and execute all specified computations.
Once the calculations are prepped and submitted for execution, any
calculation that triggers any exception or error is skipped, and the rest
of the calculations proceed unaffected. This prevents an error in a single
calculation from crashing a large suite of calculations.
Parameters
----------
calc_suite_specs : dict
The specifications describing the full set of calculations to be
generated and potentially executed. Accepted keys and their values:
library : module or package comprising an aospy object library
The aospy object library for these calculations.
projects : list of aospy.Proj objects
The projects to permute over.
models : 'all', 'default', or list of aospy.Model objects
The models to permute over. If 'all', use all models in the
``models`` attribute of each ``Proj``. If 'default', use all
models in the ``default_models`` attribute of each ``Proj``.
runs : 'all', 'default', or list of aospy.Run objects
The runs to permute over. If 'all', use all runs in the
``runs`` attribute of each ``Model``. If 'default', use all
runs in the ``default_runs`` attribute of each ``Model``.
variables : list of aospy.Var objects
The variables to be calculated.
regions : 'all' or list of aospy.Region objects
The region(s) over which any regional reductions will be performed.
If 'all', use all regions in the ``regions`` attribute of each
``Proj``.
date_ranges : 'default' or a list of tuples
The range of dates (inclusive) over which to perform calculations.
If 'default', use the ``default_start_date`` and
``default_end_date`` attribute of each ``Run``. Else provide a
list of tuples, each containing a pair of start and end dates,
such as ``date_ranges=[(start, end)]`` where ``start`` and
``end`` are each ``datetime.datetime`` objects, partial
datetime strings (e.g. '0001'), ``np.datetime64`` objects, or
``cftime.datetime`` objects.
output_time_intervals : {'ann', season-string, month-integer}
The sub-annual time interval over which to aggregate.
- 'ann' : Annual mean
- season-string : E.g. 'JJA' for June-July-August
- month-integer : 1 for January, 2 for February, etc. Each one is
a separate reduction, e.g. [1, 2] would produce averages (or
other specified time reduction) over all Januaries, and
separately over all Februaries.
output_time_regional_reductions : list of reduction string identifiers
Unlike most other keys, these are not permuted over when creating
the :py:class:`aospy.Calc` objects that execute the calculations;
each :py:class:`aospy.Calc` performs all of the specified
reductions. Accepted string identifiers are:
- Gridpoint-by-gridpoint output:
- 'av' : Gridpoint-by-gridpoint time-average
- 'std' : Gridpoint-by-gridpoint temporal standard deviation
- 'ts' : Gridpoint-by-gridpoint time-series
- Averages over each region specified via `region`:
- 'reg.av', 'reg.std', 'reg.ts' : analogous to 'av', 'std', 'ts'
output_vertical_reductions : {None, 'vert_av', 'vert_int'}, optional
How to reduce the data vertically:
- None : no vertical reduction
- 'vert_av' : mass-weighted vertical average
- 'vert_int' : mass-weighted vertical integral
input_time_intervals : {'annual', 'monthly', 'daily', '#hr'}
A string specifying the time resolution of the input data. In
'#hr' above, the '#' stands for a number, e.g. 3hr or 6hr, for
sub-daily output. These are the suggested specifiers, but others
may be used if they are also used by the DataLoaders for the given
Runs.
input_time_datatypes : {'inst', 'ts', 'av'}
What the time axis of the input data represents:
- 'inst' : Timeseries of instantaneous values
- 'ts' : Timeseries of averages over the period of each time-index
- 'av' : A single value averaged over a date range
input_vertical_datatypes : {False, 'pressure', 'sigma'}, optional
The vertical coordinate system used by the input data:
- False : not defined vertically
- 'pressure' : pressure coordinates
- 'sigma' : hybrid sigma-pressure coordinates
input_time_offsets : {None, dict}, optional
How to offset input data in time to correct for metadata errors
- None : no time offset applied
- dict : e.g. ``{'hours': -3}`` to offset times by -3 hours
See :py:meth:`aospy.utils.times.apply_time_offset`.
exec_options : dict or None (default None)
Options regarding how the calculations are reported, submitted, and
saved. If None, default settings are used for all options. Currently
supported options (each should be either `True` or `False`):
- prompt_verify : (default False) If True, print summary of
calculations to be performed and prompt user to confirm before
submitting for execution.
- parallelize : (default False) If True, submit calculations in
parallel.
- client : distributed.Client or None (default None) The
dask.distributed Client used to schedule computations. If None
and parallelize is True, a LocalCluster will be started.
- write_to_tar : (default True) If True, write results of calculations
to .tar files, one for each :py:class:`aospy.Run` object.
These tar files have an identical directory structures the
standard output relative to their root directory, which is
specified via the `tar_direc_out` argument of each Proj
object's instantiation.
Returns
-------
A list of the return values from each :py:meth:`aospy.Calc.compute` call
If a calculation ran without error, this value is the
:py:class:`aospy.Calc` object itself, with the results of its
calculations saved in its ``data_out`` attribute. ``data_out`` is a
dictionary, with the keys being the temporal-regional reduction
identifiers (e.g. 'reg.av'), and the values being the corresponding
result.
If any error occurred during a calculation, the return value is None.
Raises
------
AospyException
If the ``prompt_verify`` option is set to True and the user does not
respond affirmatively to the prompt. | ### Input:
Generate and execute all specified computations.
Once the calculations are prepped and submitted for execution, any
calculation that triggers any exception or error is skipped, and the rest
of the calculations proceed unaffected. This prevents an error in a single
calculation from crashing a large suite of calculations.
Parameters
----------
calc_suite_specs : dict
The specifications describing the full set of calculations to be
generated and potentially executed. Accepted keys and their values:
library : module or package comprising an aospy object library
The aospy object library for these calculations.
projects : list of aospy.Proj objects
The projects to permute over.
models : 'all', 'default', or list of aospy.Model objects
The models to permute over. If 'all', use all models in the
``models`` attribute of each ``Proj``. If 'default', use all
models in the ``default_models`` attribute of each ``Proj``.
runs : 'all', 'default', or list of aospy.Run objects
The runs to permute over. If 'all', use all runs in the
``runs`` attribute of each ``Model``. If 'default', use all
runs in the ``default_runs`` attribute of each ``Model``.
variables : list of aospy.Var objects
The variables to be calculated.
regions : 'all' or list of aospy.Region objects
The region(s) over which any regional reductions will be performed.
If 'all', use all regions in the ``regions`` attribute of each
``Proj``.
date_ranges : 'default' or a list of tuples
The range of dates (inclusive) over which to perform calculations.
If 'default', use the ``default_start_date`` and
``default_end_date`` attribute of each ``Run``. Else provide a
list of tuples, each containing a pair of start and end dates,
such as ``date_ranges=[(start, end)]`` where ``start`` and
``end`` are each ``datetime.datetime`` objects, partial
datetime strings (e.g. '0001'), ``np.datetime64`` objects, or
``cftime.datetime`` objects.
output_time_intervals : {'ann', season-string, month-integer}
The sub-annual time interval over which to aggregate.
- 'ann' : Annual mean
- season-string : E.g. 'JJA' for June-July-August
- month-integer : 1 for January, 2 for February, etc. Each one is
a separate reduction, e.g. [1, 2] would produce averages (or
other specified time reduction) over all Januaries, and
separately over all Februaries.
output_time_regional_reductions : list of reduction string identifiers
Unlike most other keys, these are not permuted over when creating
the :py:class:`aospy.Calc` objects that execute the calculations;
each :py:class:`aospy.Calc` performs all of the specified
reductions. Accepted string identifiers are:
- Gridpoint-by-gridpoint output:
- 'av' : Gridpoint-by-gridpoint time-average
- 'std' : Gridpoint-by-gridpoint temporal standard deviation
- 'ts' : Gridpoint-by-gridpoint time-series
- Averages over each region specified via `region`:
- 'reg.av', 'reg.std', 'reg.ts' : analogous to 'av', 'std', 'ts'
output_vertical_reductions : {None, 'vert_av', 'vert_int'}, optional
How to reduce the data vertically:
- None : no vertical reduction
- 'vert_av' : mass-weighted vertical average
- 'vert_int' : mass-weighted vertical integral
input_time_intervals : {'annual', 'monthly', 'daily', '#hr'}
A string specifying the time resolution of the input data. In
'#hr' above, the '#' stands for a number, e.g. 3hr or 6hr, for
sub-daily output. These are the suggested specifiers, but others
may be used if they are also used by the DataLoaders for the given
Runs.
input_time_datatypes : {'inst', 'ts', 'av'}
What the time axis of the input data represents:
- 'inst' : Timeseries of instantaneous values
- 'ts' : Timeseries of averages over the period of each time-index
- 'av' : A single value averaged over a date range
input_vertical_datatypes : {False, 'pressure', 'sigma'}, optional
The vertical coordinate system used by the input data:
- False : not defined vertically
- 'pressure' : pressure coordinates
- 'sigma' : hybrid sigma-pressure coordinates
input_time_offsets : {None, dict}, optional
How to offset input data in time to correct for metadata errors
- None : no time offset applied
- dict : e.g. ``{'hours': -3}`` to offset times by -3 hours
See :py:meth:`aospy.utils.times.apply_time_offset`.
exec_options : dict or None (default None)
Options regarding how the calculations are reported, submitted, and
saved. If None, default settings are used for all options. Currently
supported options (each should be either `True` or `False`):
- prompt_verify : (default False) If True, print summary of
calculations to be performed and prompt user to confirm before
submitting for execution.
- parallelize : (default False) If True, submit calculations in
parallel.
- client : distributed.Client or None (default None) The
dask.distributed Client used to schedule computations. If None
and parallelize is True, a LocalCluster will be started.
- write_to_tar : (default True) If True, write results of calculations
to .tar files, one for each :py:class:`aospy.Run` object.
These tar files have an identical directory structures the
standard output relative to their root directory, which is
specified via the `tar_direc_out` argument of each Proj
object's instantiation.
Returns
-------
A list of the return values from each :py:meth:`aospy.Calc.compute` call
If a calculation ran without error, this value is the
:py:class:`aospy.Calc` object itself, with the results of its
calculations saved in its ``data_out`` attribute. ``data_out`` is a
dictionary, with the keys being the temporal-regional reduction
identifiers (e.g. 'reg.av'), and the values being the corresponding
result.
If any error occurred during a calculation, the return value is None.
Raises
------
AospyException
If the ``prompt_verify`` option is set to True and the user does not
respond affirmatively to the prompt.
### Response:
def submit_mult_calcs(calc_suite_specs, exec_options=None):
if exec_options is None:
exec_options = dict()
if exec_options.pop(, False):
print(_print_suite_summary(calc_suite_specs))
_user_verify()
calc_suite = CalcSuite(calc_suite_specs)
calcs = calc_suite.create_calcs()
if not calcs:
raise AospyException(
"The specified combination of parameters yielded zero "
"calculations. Most likely, one of the parameters is "
"inadvertently empty."
)
return _exec_calcs(calcs, **exec_options) |
def Concat(self: Iterable, *others):
return concat_generator(self, *[unbox_if_flow(other) for other in others]) | [
{
'self': [1, 2, 3],
':args': [[4, 5, 6], [7, 8, 9]],
'assert': lambda ret: list(ret) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
}
] | ### Input:
[
{
'self': [1, 2, 3],
':args': [[4, 5, 6], [7, 8, 9]],
'assert': lambda ret: list(ret) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
}
]
### Response:
def Concat(self: Iterable, *others):
return concat_generator(self, *[unbox_if_flow(other) for other in others]) |
def delete(self, obj, id):
self.url = .format(self.base_url, obj, id)
self.method =
self.resp = requests.delete(url=self.url,
auth=self.auth,
headers=self.headers, cert=self.ca_cert)
return self.__process_resp__(obj) | Function delete
Delete an object by id
@param obj: object name ('hosts', 'puppetclasses'...)
@param id: the id of the object (name or id)
@return RETURN: the server response | ### Input:
Function delete
Delete an object by id
@param obj: object name ('hosts', 'puppetclasses'...)
@param id: the id of the object (name or id)
@return RETURN: the server response
### Response:
def delete(self, obj, id):
self.url = .format(self.base_url, obj, id)
self.method =
self.resp = requests.delete(url=self.url,
auth=self.auth,
headers=self.headers, cert=self.ca_cert)
return self.__process_resp__(obj) |
def translate_codons(sequence):
sequence
return .join([gencode.get(sequence[3*i:3*i+3],) for i in range(len(sequence)//3)]) | Return the translated protein from 'sequence' assuming +1 reading frame
Source - http://adamcoster.com/2011/01/13/python-clean-up-and-translate-nucleotide-sequences/ | ### Input:
Return the translated protein from 'sequence' assuming +1 reading frame
Source - http://adamcoster.com/2011/01/13/python-clean-up-and-translate-nucleotide-sequences/
### Response:
def translate_codons(sequence):
sequence
return .join([gencode.get(sequence[3*i:3*i+3],) for i in range(len(sequence)//3)]) |
def matrix(self) -> np.ndarray:
num_qubits = self.num_qubits()
if num_qubits is None:
raise ValueError()
num_dim = 2 ** num_qubits
result = np.zeros((num_dim, num_dim), dtype=np.complex128)
for gate, coefficient in self.items():
result += protocols.unitary(gate) * coefficient
return result | Reconstructs matrix of self using unitaries of underlying gates.
Raises:
TypeError: if any of the gates in self does not provide a unitary. | ### Input:
Reconstructs matrix of self using unitaries of underlying gates.
Raises:
TypeError: if any of the gates in self does not provide a unitary.
### Response:
def matrix(self) -> np.ndarray:
num_qubits = self.num_qubits()
if num_qubits is None:
raise ValueError()
num_dim = 2 ** num_qubits
result = np.zeros((num_dim, num_dim), dtype=np.complex128)
for gate, coefficient in self.items():
result += protocols.unitary(gate) * coefficient
return result |
def set_difficulty(self, difficulty):
if not is_string(difficulty):
raise InvalidArgument()
if difficulty.lower() not in [, , ]:
raise InvalidArgument()
self.my_osid_object_form._my_map[][][] = difficulty | stub | ### Input:
stub
### Response:
def set_difficulty(self, difficulty):
if not is_string(difficulty):
raise InvalidArgument()
if difficulty.lower() not in [, , ]:
raise InvalidArgument()
self.my_osid_object_form._my_map[][][] = difficulty |
def convert_table(shell_output, delimiter=, output=):
import re
gap_pattern = re.compile(delimiter)
output_lines = shell_output.splitlines()
column_headers = gap_pattern.split(output_lines[0])
blank_index = column_headers.index()
if blank_index > -1:
column_headers.pop(blank_index)
indices = []
for i in range(len(column_headers)):
if i + 1 < len(column_headers):
indices.append((
output_lines[0].find(column_headers[i]),
output_lines[0].find(column_headers[i + 1])
))
else:
indices.append((
output_lines[0].find(column_headers[i]),
-1
))
python_list = []
csv_string =
if output == :
pass
elif output == :
python_list.append(column_headers)
elif output == :
for i in range(len(column_headers)):
if i:
csv_string +=
csv_string += column_headers[i]
else:
raise ValueError()
for i in range(1, len(output_lines)):
if output == :
row_details = {}
for j in range(len(column_headers)):
row_details[column_headers[j]] = output_lines[i][indices[j][0]:indices[j][1]].rstrip()
python_list.append(row_details)
elif output == :
row_list = []
for j in range(len(column_headers)):
row_list.append(output_lines[i][indices[j][0]:indices[j][1]]).rstrip()
python_list.append(row_list)
elif output == :
csv_string +=
for j in range(len(column_headers)):
if j:
csv_string +=
csv_string += output_lines[i][indices[j][0]:indices[j][1]].rstrip()
if csv_string:
return csv_string
return python_list | a method to convert a STDOUT shell table into a python data structure
:param shell_output: string from STDOUT with headers
:param delimiter: string with regex pattern delimiting headers
:param output: string with type of structure to output (dict, list or csv)
:return: list of dictionaries or list of lists or string with csv format | ### Input:
a method to convert a STDOUT shell table into a python data structure
:param shell_output: string from STDOUT with headers
:param delimiter: string with regex pattern delimiting headers
:param output: string with type of structure to output (dict, list or csv)
:return: list of dictionaries or list of lists or string with csv format
### Response:
def convert_table(shell_output, delimiter=, output=):
import re
gap_pattern = re.compile(delimiter)
output_lines = shell_output.splitlines()
column_headers = gap_pattern.split(output_lines[0])
blank_index = column_headers.index()
if blank_index > -1:
column_headers.pop(blank_index)
indices = []
for i in range(len(column_headers)):
if i + 1 < len(column_headers):
indices.append((
output_lines[0].find(column_headers[i]),
output_lines[0].find(column_headers[i + 1])
))
else:
indices.append((
output_lines[0].find(column_headers[i]),
-1
))
python_list = []
csv_string =
if output == :
pass
elif output == :
python_list.append(column_headers)
elif output == :
for i in range(len(column_headers)):
if i:
csv_string +=
csv_string += column_headers[i]
else:
raise ValueError()
for i in range(1, len(output_lines)):
if output == :
row_details = {}
for j in range(len(column_headers)):
row_details[column_headers[j]] = output_lines[i][indices[j][0]:indices[j][1]].rstrip()
python_list.append(row_details)
elif output == :
row_list = []
for j in range(len(column_headers)):
row_list.append(output_lines[i][indices[j][0]:indices[j][1]]).rstrip()
python_list.append(row_list)
elif output == :
csv_string +=
for j in range(len(column_headers)):
if j:
csv_string +=
csv_string += output_lines[i][indices[j][0]:indices[j][1]].rstrip()
if csv_string:
return csv_string
return python_list |
def execute(self, operation, params=()):
conn = self._assert_open()
conn._try_activate_cursor(self)
self._execute(operation, params)
return self | Execute the query
:param operation: SQL statement
:type operation: str | ### Input:
Execute the query
:param operation: SQL statement
:type operation: str
### Response:
def execute(self, operation, params=()):
conn = self._assert_open()
conn._try_activate_cursor(self)
self._execute(operation, params)
return self |
def build_and_run(self, images):
from harpoon.ship.builder import Builder
Builder().make_image(self, images)
try:
Runner().run_container(self, images)
except DockerAPIError as error:
raise BadImage("Failed to start the container", error=error) | Make this image and run it | ### Input:
Make this image and run it
### Response:
def build_and_run(self, images):
from harpoon.ship.builder import Builder
Builder().make_image(self, images)
try:
Runner().run_container(self, images)
except DockerAPIError as error:
raise BadImage("Failed to start the container", error=error) |
def bind(mod_path, with_path=None):
if with_path:
if os.path.isdir(with_path):
sys.path.insert(0, with_path)
else:
sys.path.insert(0, with_path.rsplit(, 2)[0])
pass
mod = importlib.import_module(mod_path)
settings = Settings()
for v in dir(mod):
if v[0] == or type(getattr(mod, v)).__name__ == :
continue
setattr(settings, v, getattr(mod, v))
pass
Settings._path = mod_path
Settings._wrapped = settings
return settings | bind user variable to `_wrapped`
.. note::
you don't need call this method by yourself.
program will call it in `cliez.parser.parse`
.. expection::
if path is not correct,will cause an `ImportError`
:param str mod_path: module path, *use dot style,'mod.mod1'*
:param str with_path: add path to `sys.path`,
if path is file,use its parent.
:return: A instance of `Settings` | ### Input:
bind user variable to `_wrapped`
.. note::
you don't need call this method by yourself.
program will call it in `cliez.parser.parse`
.. expection::
if path is not correct,will cause an `ImportError`
:param str mod_path: module path, *use dot style,'mod.mod1'*
:param str with_path: add path to `sys.path`,
if path is file,use its parent.
:return: A instance of `Settings`
### Response:
def bind(mod_path, with_path=None):
if with_path:
if os.path.isdir(with_path):
sys.path.insert(0, with_path)
else:
sys.path.insert(0, with_path.rsplit(, 2)[0])
pass
mod = importlib.import_module(mod_path)
settings = Settings()
for v in dir(mod):
if v[0] == or type(getattr(mod, v)).__name__ == :
continue
setattr(settings, v, getattr(mod, v))
pass
Settings._path = mod_path
Settings._wrapped = settings
return settings |
def _spec_fft(self, complex_data):
return np.fft.fftshift( np.fft.fft(complex_data), 1) | Calculates the DFT of the complex_data along axis = 1. This assumes complex_data is a 2D array.
This uses numpy and the code is straight forward
np.fft.fftshift( np.fft.fft(complex_data), 1)
Note that we automatically shift the FFT frequency bins so that along the frequency axis,
"negative" frequencies are first, then the central frequency, followed by "positive" frequencies. | ### Input:
Calculates the DFT of the complex_data along axis = 1. This assumes complex_data is a 2D array.
This uses numpy and the code is straight forward
np.fft.fftshift( np.fft.fft(complex_data), 1)
Note that we automatically shift the FFT frequency bins so that along the frequency axis,
"negative" frequencies are first, then the central frequency, followed by "positive" frequencies.
### Response:
def _spec_fft(self, complex_data):
return np.fft.fftshift( np.fft.fft(complex_data), 1) |
def unflatten(flat_weights):
if flat_weights.columns.contains("key"):
weights = {}
for key in flat_weights.loc[:, "key"].unique():
flt_wts = flat_weights.loc[flat_weights.loc[:, "key"] == key, :]
flt_wts = flt_wts.drop(labels="key", axis=1)
wts = flt_wts.pivot_table(index=["date", "contract"],
columns=["generic"],
values=["weight"])
wts.columns = wts.columns.droplevel(0)
weights[key] = wts
else:
weights = flat_weights.pivot_table(index=["date", "contract"],
columns=["generic"],
values=["weight"])
weights.columns = weights.columns.droplevel(0)
return weights | Pivot weights from long DataFrame into weighting matrix.
Parameters
----------
flat_weights: pandas.DataFrame
A long DataFrame of weights, where columns are "date", "contract",
"generic", "weight" and optionally "key". If "key" column is
present a dictionary of unflattened DataFrames is returned with the
dictionary keys corresponding to the "key" column and each sub
DataFrame containing rows for this key.
Returns
-------
A DataFrame or dict of DataFrames of instrument weights with a MultiIndex
where the top level contains pandas.Timestamps and the second level is
instrument names. The columns consist of generic names. If dict is returned
the dict keys correspond to the "key" column of the input.
Example
-------
>>> import pandas as pd
>>> from pandas import Timestamp as TS
>>> import mapping.util as util
>>> long_wts = pd.DataFrame(
... {"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
... "contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
... "generic": ["CL1", "CL2"] * 4,
... "weight": [1, 0, 0, 1, 1, 0, 0, 1]}
... ).loc[:, ["date", "contract", "generic", "weight"]]
>>> util.unflatten(long_wts)
See also: calc_rets() | ### Input:
Pivot weights from long DataFrame into weighting matrix.
Parameters
----------
flat_weights: pandas.DataFrame
A long DataFrame of weights, where columns are "date", "contract",
"generic", "weight" and optionally "key". If "key" column is
present a dictionary of unflattened DataFrames is returned with the
dictionary keys corresponding to the "key" column and each sub
DataFrame containing rows for this key.
Returns
-------
A DataFrame or dict of DataFrames of instrument weights with a MultiIndex
where the top level contains pandas.Timestamps and the second level is
instrument names. The columns consist of generic names. If dict is returned
the dict keys correspond to the "key" column of the input.
Example
-------
>>> import pandas as pd
>>> from pandas import Timestamp as TS
>>> import mapping.util as util
>>> long_wts = pd.DataFrame(
... {"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
... "contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
... "generic": ["CL1", "CL2"] * 4,
... "weight": [1, 0, 0, 1, 1, 0, 0, 1]}
... ).loc[:, ["date", "contract", "generic", "weight"]]
>>> util.unflatten(long_wts)
See also: calc_rets()
### Response:
def unflatten(flat_weights):
if flat_weights.columns.contains("key"):
weights = {}
for key in flat_weights.loc[:, "key"].unique():
flt_wts = flat_weights.loc[flat_weights.loc[:, "key"] == key, :]
flt_wts = flt_wts.drop(labels="key", axis=1)
wts = flt_wts.pivot_table(index=["date", "contract"],
columns=["generic"],
values=["weight"])
wts.columns = wts.columns.droplevel(0)
weights[key] = wts
else:
weights = flat_weights.pivot_table(index=["date", "contract"],
columns=["generic"],
values=["weight"])
weights.columns = weights.columns.droplevel(0)
return weights |
def pauling_stability_ratio(self):
if self._pauling_stability_ratio is None:
if self.ce_symbol in [, ]:
self._pauling_stability_ratio = 0.0
else:
mindist_anions = 1000000.0
mindist_cation_anion = 1000000.0
for ipt1 in range(len(self.points)):
pt1 = np.array(self.points[ipt1])
mindist_cation_anion = min(mindist_cation_anion,
np.linalg.norm(pt1-self.central_site))
for ipt2 in range(ipt1+1, len(self.points)):
pt2 = np.array(self.points[ipt2])
mindist_anions = min(mindist_anions,
np.linalg.norm(pt1-pt2))
anion_radius = mindist_anions / 2.0
cation_radius = mindist_cation_anion - anion_radius
self._pauling_stability_ratio = cation_radius / anion_radius
return self._pauling_stability_ratio | Returns the theoretical Pauling stability ratio (rC/rA) for this environment. | ### Input:
Returns the theoretical Pauling stability ratio (rC/rA) for this environment.
### Response:
def pauling_stability_ratio(self):
if self._pauling_stability_ratio is None:
if self.ce_symbol in [, ]:
self._pauling_stability_ratio = 0.0
else:
mindist_anions = 1000000.0
mindist_cation_anion = 1000000.0
for ipt1 in range(len(self.points)):
pt1 = np.array(self.points[ipt1])
mindist_cation_anion = min(mindist_cation_anion,
np.linalg.norm(pt1-self.central_site))
for ipt2 in range(ipt1+1, len(self.points)):
pt2 = np.array(self.points[ipt2])
mindist_anions = min(mindist_anions,
np.linalg.norm(pt1-pt2))
anion_radius = mindist_anions / 2.0
cation_radius = mindist_cation_anion - anion_radius
self._pauling_stability_ratio = cation_radius / anion_radius
return self._pauling_stability_ratio |
def _to_dict(self):
_dict = {}
if hasattr(self, ) and self.count is not None:
_dict[] = self.count
if hasattr(self, ) and self.relevance is not None:
_dict[] = self.relevance
if hasattr(self, ) and self.text is not None:
_dict[] = self.text
if hasattr(self, ) and self.emotion is not None:
_dict[] = self.emotion._to_dict()
if hasattr(self, ) and self.sentiment is not None:
_dict[] = self.sentiment._to_dict()
return _dict | Return a json dictionary representing this model. | ### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
_dict = {}
if hasattr(self, ) and self.count is not None:
_dict[] = self.count
if hasattr(self, ) and self.relevance is not None:
_dict[] = self.relevance
if hasattr(self, ) and self.text is not None:
_dict[] = self.text
if hasattr(self, ) and self.emotion is not None:
_dict[] = self.emotion._to_dict()
if hasattr(self, ) and self.sentiment is not None:
_dict[] = self.sentiment._to_dict()
return _dict |
def filter_spent_outputs(self, outputs):
links = [o.to_dict() for o in outputs]
txs = list(query.get_spending_transactions(self.connection, links))
spends = {TransactionLink.from_dict(input_[])
for tx in txs
for input_ in tx[]}
return [ff for ff in outputs if ff not in spends] | Remove outputs that have been spent
Args:
outputs: list of TransactionLink | ### Input:
Remove outputs that have been spent
Args:
outputs: list of TransactionLink
### Response:
def filter_spent_outputs(self, outputs):
links = [o.to_dict() for o in outputs]
txs = list(query.get_spending_transactions(self.connection, links))
spends = {TransactionLink.from_dict(input_[])
for tx in txs
for input_ in tx[]}
return [ff for ff in outputs if ff not in spends] |
def _EntriesGenerator(self):
try:
fsapfs_file_entry = self._file_system.GetAPFSFileEntryByPathSpec(
self.path_spec)
except errors.PathSpecError:
return
location = getattr(self.path_spec, , None)
for fsapfs_sub_file_entry in fsapfs_file_entry.sub_file_entries:
directory_entry = fsapfs_sub_file_entry.name
if location == self._file_system.PATH_SEPARATOR:
directory_entry = self._file_system.JoinPath([directory_entry])
else:
directory_entry = self._file_system.JoinPath([
location, directory_entry])
yield apfs_path_spec.APFSPathSpec(
identifier=fsapfs_sub_file_entry.identifier, location=directory_entry,
parent=self.path_spec.parent) | Retrieves directory entries.
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
APFSPathSpec: APFS path specification. | ### Input:
Retrieves directory entries.
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
APFSPathSpec: APFS path specification.
### Response:
def _EntriesGenerator(self):
try:
fsapfs_file_entry = self._file_system.GetAPFSFileEntryByPathSpec(
self.path_spec)
except errors.PathSpecError:
return
location = getattr(self.path_spec, , None)
for fsapfs_sub_file_entry in fsapfs_file_entry.sub_file_entries:
directory_entry = fsapfs_sub_file_entry.name
if location == self._file_system.PATH_SEPARATOR:
directory_entry = self._file_system.JoinPath([directory_entry])
else:
directory_entry = self._file_system.JoinPath([
location, directory_entry])
yield apfs_path_spec.APFSPathSpec(
identifier=fsapfs_sub_file_entry.identifier, location=directory_entry,
parent=self.path_spec.parent) |
def dipole_k(src, rec, depth, res, freq, wavenumber, ab=11, aniso=None,
epermH=None, epermV=None, mpermH=None, mpermV=None, verb=2):
r
off, angle = get_off_ang(src, rec, nsrc, nrec, verb)
factAng = kernel.angle_factor(angle, ab, msrc, mrec)
lsrc, zsrc = get_layer_nr(src, depth)
lrec, zrec = get_layer_nr(rec, depth)
if off.size == 1 and np.ndim(wavenumber) == 2:
PJ0 = np.zeros((freq.size, wavenumber.shape[0], wavenumber.shape[1]),
dtype=complex)
PJ1 = np.zeros((freq.size, wavenumber.shape[0], wavenumber.shape[1]),
dtype=complex)
else:
PJ0 = np.zeros((freq.size, off.size, wavenumber.size), dtype=complex)
PJ1 = np.zeros((freq.size, off.size, wavenumber.size), dtype=complex)
if ab_calc not in [36, ]:
J0, J1, J0b = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH,
etaV, zetaH, zetaV,
np.atleast_2d(wavenumber), ab_calc,
False, msrc, mrec, False)
if J1 is not None:
PJ1 += factAng[:, np.newaxis]*J1
if ab in [11, 12, 21, 22, 14, 24, 15, 25]:
PJ1 /= off[:, None]
if J0 is not None:
PJ0 += J0
if J0b is not None:
PJ0 += factAng[:, np.newaxis]*J0b
printstartfinish(verb, t0, 1)
return np.squeeze(PJ0), np.squeeze(PJ1) | r"""Return the electromagnetic wavenumber-domain field.
Calculate the electromagnetic wavenumber-domain field due to infinitesimal
small electric or magnetic dipole source(s), measured by infinitesimal
small electric or magnetic dipole receiver(s); sources and receivers are
directed along the principal directions x, y, or z, and all sources are at
the same depth, as well as all receivers are at the same depth.
See Also
--------
dipole : Electromagnetic field due to an electromagnetic source (dipoles).
bipole : Electromagnetic field due to an electromagnetic source (bipoles).
fem : Electromagnetic frequency-domain response.
tem : Electromagnetic time-domain response.
Parameters
----------
src, rec : list of floats or arrays
Source and receiver coordinates (m): [x, y, z].
The x- and y-coordinates can be arrays, z is a single value.
The x- and y-coordinates must have the same dimension.
The x- and y-coordinates only matter for the angle-dependent factor.
Sources or receivers placed on a layer interface are considered in the
upper layer.
depth : list
Absolute layer interfaces z (m); #depth = #res - 1
(excluding +/- infinity).
res : array_like
Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.
freq : array_like
Frequencies f (Hz), used to calculate etaH/V and zetaH/V.
wavenumber : array
Wavenumbers lambda (1/m)
ab : int, optional
Source-receiver configuration, defaults to 11.
+---------------+-------+------+------+------+------+------+------+
| | electric source | magnetic source |
+===============+=======+======+======+======+======+======+======+
| | **x**| **y**| **z**| **x**| **y**| **z**|
+---------------+-------+------+------+------+------+------+------+
| | **x** | 11 | 12 | 13 | 14 | 15 | 16 |
+ **electric** +-------+------+------+------+------+------+------+
| | **y** | 21 | 22 | 23 | 24 | 25 | 26 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 31 | 32 | 33 | 34 | 35 | 36 |
+---------------+-------+------+------+------+------+------+------+
| | **x** | 41 | 42 | 43 | 44 | 45 | 46 |
+ **magnetic** +-------+------+------+------+------+------+------+
| | **y** | 51 | 52 | 53 | 54 | 55 | 56 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 61 | 62 | 63 | 64 | 65 | 66 |
+---------------+-------+------+------+------+------+------+------+
aniso : array_like, optional
Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
Defaults to ones.
epermH, epermV : array_like, optional
Relative horizontal/vertical electric permittivities
epsilon_h/epsilon_v (-);
#epermH = #epermV = #res. Default is ones.
mpermH, mpermV : array_like, optional
Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
#mpermH = #mpermV = #res. Default is ones.
verb : {0, 1, 2, 3, 4}, optional
Level of verbosity, default is 2:
- 0: Print nothing.
- 1: Print warnings.
- 2: Print additional runtime and kernel calls
- 3: Print additional start/stop, condensed parameter information.
- 4: Print additional full parameter information
Returns
-------
PJ0, PJ1 : array
Wavenumber-domain EM responses:
- PJ0: Wavenumber-domain solution for the kernel with a Bessel
function of the first kind of order zero.
- PJ1: Wavenumber-domain solution for the kernel with a Bessel
function of the first kind of order one.
Examples
--------
>>> import numpy as np
>>> from empymod.model import dipole_k
>>> src = [0, 0, 100]
>>> rec = [5000, 0, 200]
>>> depth = [0, 300, 1000, 1050]
>>> res = [1e20, .3, 1, 50, 1]
>>> freq = 1
>>> wavenrs = np.logspace(-3.7, -3.6, 10)
>>> PJ0, PJ1 = dipole_k(src, rec, depth, res, freq, wavenrs, verb=0)
>>> print(PJ0)
[ -1.02638329e-08 +4.91531529e-09j -1.05289724e-08 +5.04222413e-09j
-1.08009148e-08 +5.17238608e-09j -1.10798310e-08 +5.30588284e-09j
-1.13658957e-08 +5.44279805e-09j -1.16592877e-08 +5.58321732e-09j
-1.19601897e-08 +5.72722830e-09j -1.22687889e-08 +5.87492067e-09j
-1.25852765e-08 +6.02638626e-09j -1.29098481e-08 +6.18171904e-09j]
>>> print(PJ1)
[ 1.79483705e-10 -6.59235332e-10j 1.88672497e-10 -6.93749344e-10j
1.98325814e-10 -7.30068377e-10j 2.08466693e-10 -7.68286748e-10j
2.19119282e-10 -8.08503709e-10j 2.30308887e-10 -8.50823701e-10j
2.42062030e-10 -8.95356636e-10j 2.54406501e-10 -9.42218177e-10j
2.67371420e-10 -9.91530051e-10j 2.80987292e-10 -1.04342036e-09j] | ### Input:
r"""Return the electromagnetic wavenumber-domain field.
Calculate the electromagnetic wavenumber-domain field due to infinitesimal
small electric or magnetic dipole source(s), measured by infinitesimal
small electric or magnetic dipole receiver(s); sources and receivers are
directed along the principal directions x, y, or z, and all sources are at
the same depth, as well as all receivers are at the same depth.
See Also
--------
dipole : Electromagnetic field due to an electromagnetic source (dipoles).
bipole : Electromagnetic field due to an electromagnetic source (bipoles).
fem : Electromagnetic frequency-domain response.
tem : Electromagnetic time-domain response.
Parameters
----------
src, rec : list of floats or arrays
Source and receiver coordinates (m): [x, y, z].
The x- and y-coordinates can be arrays, z is a single value.
The x- and y-coordinates must have the same dimension.
The x- and y-coordinates only matter for the angle-dependent factor.
Sources or receivers placed on a layer interface are considered in the
upper layer.
depth : list
Absolute layer interfaces z (m); #depth = #res - 1
(excluding +/- infinity).
res : array_like
Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.
freq : array_like
Frequencies f (Hz), used to calculate etaH/V and zetaH/V.
wavenumber : array
Wavenumbers lambda (1/m)
ab : int, optional
Source-receiver configuration, defaults to 11.
+---------------+-------+------+------+------+------+------+------+
| | electric source | magnetic source |
+===============+=======+======+======+======+======+======+======+
| | **x**| **y**| **z**| **x**| **y**| **z**|
+---------------+-------+------+------+------+------+------+------+
| | **x** | 11 | 12 | 13 | 14 | 15 | 16 |
+ **electric** +-------+------+------+------+------+------+------+
| | **y** | 21 | 22 | 23 | 24 | 25 | 26 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 31 | 32 | 33 | 34 | 35 | 36 |
+---------------+-------+------+------+------+------+------+------+
| | **x** | 41 | 42 | 43 | 44 | 45 | 46 |
+ **magnetic** +-------+------+------+------+------+------+------+
| | **y** | 51 | 52 | 53 | 54 | 55 | 56 |
+ **receiver** +-------+------+------+------+------+------+------+
| | **z** | 61 | 62 | 63 | 64 | 65 | 66 |
+---------------+-------+------+------+------+------+------+------+
aniso : array_like, optional
Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
Defaults to ones.
epermH, epermV : array_like, optional
Relative horizontal/vertical electric permittivities
epsilon_h/epsilon_v (-);
#epermH = #epermV = #res. Default is ones.
mpermH, mpermV : array_like, optional
Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
#mpermH = #mpermV = #res. Default is ones.
verb : {0, 1, 2, 3, 4}, optional
Level of verbosity, default is 2:
- 0: Print nothing.
- 1: Print warnings.
- 2: Print additional runtime and kernel calls
- 3: Print additional start/stop, condensed parameter information.
- 4: Print additional full parameter information
Returns
-------
PJ0, PJ1 : array
Wavenumber-domain EM responses:
- PJ0: Wavenumber-domain solution for the kernel with a Bessel
function of the first kind of order zero.
- PJ1: Wavenumber-domain solution for the kernel with a Bessel
function of the first kind of order one.
Examples
--------
>>> import numpy as np
>>> from empymod.model import dipole_k
>>> src = [0, 0, 100]
>>> rec = [5000, 0, 200]
>>> depth = [0, 300, 1000, 1050]
>>> res = [1e20, .3, 1, 50, 1]
>>> freq = 1
>>> wavenrs = np.logspace(-3.7, -3.6, 10)
>>> PJ0, PJ1 = dipole_k(src, rec, depth, res, freq, wavenrs, verb=0)
>>> print(PJ0)
[ -1.02638329e-08 +4.91531529e-09j -1.05289724e-08 +5.04222413e-09j
-1.08009148e-08 +5.17238608e-09j -1.10798310e-08 +5.30588284e-09j
-1.13658957e-08 +5.44279805e-09j -1.16592877e-08 +5.58321732e-09j
-1.19601897e-08 +5.72722830e-09j -1.22687889e-08 +5.87492067e-09j
-1.25852765e-08 +6.02638626e-09j -1.29098481e-08 +6.18171904e-09j]
>>> print(PJ1)
[ 1.79483705e-10 -6.59235332e-10j 1.88672497e-10 -6.93749344e-10j
1.98325814e-10 -7.30068377e-10j 2.08466693e-10 -7.68286748e-10j
2.19119282e-10 -8.08503709e-10j 2.30308887e-10 -8.50823701e-10j
2.42062030e-10 -8.95356636e-10j 2.54406501e-10 -9.42218177e-10j
2.67371420e-10 -9.91530051e-10j 2.80987292e-10 -1.04342036e-09j]
### Response:
def dipole_k(src, rec, depth, res, freq, wavenumber, ab=11, aniso=None,
epermH=None, epermV=None, mpermH=None, mpermV=None, verb=2):
r
off, angle = get_off_ang(src, rec, nsrc, nrec, verb)
factAng = kernel.angle_factor(angle, ab, msrc, mrec)
lsrc, zsrc = get_layer_nr(src, depth)
lrec, zrec = get_layer_nr(rec, depth)
if off.size == 1 and np.ndim(wavenumber) == 2:
PJ0 = np.zeros((freq.size, wavenumber.shape[0], wavenumber.shape[1]),
dtype=complex)
PJ1 = np.zeros((freq.size, wavenumber.shape[0], wavenumber.shape[1]),
dtype=complex)
else:
PJ0 = np.zeros((freq.size, off.size, wavenumber.size), dtype=complex)
PJ1 = np.zeros((freq.size, off.size, wavenumber.size), dtype=complex)
if ab_calc not in [36, ]:
J0, J1, J0b = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH,
etaV, zetaH, zetaV,
np.atleast_2d(wavenumber), ab_calc,
False, msrc, mrec, False)
if J1 is not None:
PJ1 += factAng[:, np.newaxis]*J1
if ab in [11, 12, 21, 22, 14, 24, 15, 25]:
PJ1 /= off[:, None]
if J0 is not None:
PJ0 += J0
if J0b is not None:
PJ0 += factAng[:, np.newaxis]*J0b
printstartfinish(verb, t0, 1)
return np.squeeze(PJ0), np.squeeze(PJ1) |
def fromBox(self, name, strings, objects, proto):
objects[name] = getattr(proto, self.attr) | Retreive an attribute from the C{proto} parameter. | ### Input:
Retreive an attribute from the C{proto} parameter.
### Response:
def fromBox(self, name, strings, objects, proto):
objects[name] = getattr(proto, self.attr) |
def _inner_read_config(path):
global _project_dir
full_path = os.path.join(_project_dir, path)
return read_fbcode_builder_config(full_path) | Helper to read a named config file.
The grossness with the global is a workaround for this python bug:
https://bugs.python.org/issue21591
The bug prevents us from defining either a local function or a lambda
in the scope of read_fbcode_builder_config below. | ### Input:
Helper to read a named config file.
The grossness with the global is a workaround for this python bug:
https://bugs.python.org/issue21591
The bug prevents us from defining either a local function or a lambda
in the scope of read_fbcode_builder_config below.
### Response:
def _inner_read_config(path):
global _project_dir
full_path = os.path.join(_project_dir, path)
return read_fbcode_builder_config(full_path) |
def _make_tooth_template(self):
period_arc = (2 * pi) / self.tooth_count
tooth_arc = period_arc * self.spacing_ratio
outer_radius = self.effective_radius + (self.tooth_height / 2)
face_angle_rad = radians(self.face_angle)
side_angle = face_angle_rad - (tooth_arc / 2)
side_tangent_radius = sin(face_angle_rad) * self.effective_radius
extra_side_angle = side_angle + acos(side_tangent_radius / outer_radius)
tooth = cadquery.Workplane(, origin=(0, 0, -self.width / 2)) \
.moveTo(
side_tangent_radius * cos(side_angle),
side_tangent_radius * sin(side_angle)
)
opposite_point = (
-side_tangent_radius * cos(side_angle),
side_tangent_radius * sin(side_angle)
)
if self.face_angle:
tooth = tooth.lineTo(*opposite_point)
tooth = tooth.lineTo(
-cos(extra_side_angle) * outer_radius,
sin(extra_side_angle) * outer_radius
)
opposite_point = (
cos(extra_side_angle) * outer_radius,
sin(extra_side_angle) * outer_radius
)
if self.flat_top:
tooth = tooth.lineTo(*opposite_point)
else:
tooth = tooth.threePointArc((0, outer_radius), opposite_point)
tooth = tooth.close().extrude(self.width)
return tooth | Builds a single tooth including the cylinder with tooth faces
tangential to its circumference. | ### Input:
Builds a single tooth including the cylinder with tooth faces
tangential to its circumference.
### Response:
def _make_tooth_template(self):
period_arc = (2 * pi) / self.tooth_count
tooth_arc = period_arc * self.spacing_ratio
outer_radius = self.effective_radius + (self.tooth_height / 2)
face_angle_rad = radians(self.face_angle)
side_angle = face_angle_rad - (tooth_arc / 2)
side_tangent_radius = sin(face_angle_rad) * self.effective_radius
extra_side_angle = side_angle + acos(side_tangent_radius / outer_radius)
tooth = cadquery.Workplane(, origin=(0, 0, -self.width / 2)) \
.moveTo(
side_tangent_radius * cos(side_angle),
side_tangent_radius * sin(side_angle)
)
opposite_point = (
-side_tangent_radius * cos(side_angle),
side_tangent_radius * sin(side_angle)
)
if self.face_angle:
tooth = tooth.lineTo(*opposite_point)
tooth = tooth.lineTo(
-cos(extra_side_angle) * outer_radius,
sin(extra_side_angle) * outer_radius
)
opposite_point = (
cos(extra_side_angle) * outer_radius,
sin(extra_side_angle) * outer_radius
)
if self.flat_top:
tooth = tooth.lineTo(*opposite_point)
else:
tooth = tooth.threePointArc((0, outer_radius), opposite_point)
tooth = tooth.close().extrude(self.width)
return tooth |
def make_script_sig(stack_script, redeem_script):
stack_script += .format(
serialization.hex_serialize(redeem_script))
return serialization.serialize(stack_script) | str, str -> bytearray | ### Input:
str, str -> bytearray
### Response:
def make_script_sig(stack_script, redeem_script):
stack_script += .format(
serialization.hex_serialize(redeem_script))
return serialization.serialize(stack_script) |
def plot_slippage_sweep(returns, positions, transactions,
slippage_params=(3, 8, 10, 12, 15, 20, 50),
ax=None, **kwargs):
if ax is None:
ax = plt.gca()
slippage_sweep = pd.DataFrame()
for bps in slippage_params:
adj_returns = txn.adjust_returns_for_slippage(returns, positions,
transactions, bps)
label = str(bps) + " bps"
slippage_sweep[label] = ep.cum_returns(adj_returns, 1)
slippage_sweep.plot(alpha=1.0, lw=0.5, ax=ax)
ax.set_title()
ax.set_ylabel()
ax.legend(loc=, frameon=True, framealpha=0.5)
return ax | Plots equity curves at different per-dollar slippage assumptions.
Parameters
----------
returns : pd.Series
Timeseries of portfolio returns to be adjusted for various
degrees of slippage.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
slippage_params: tuple
Slippage pameters to apply to the return time series (in
basis points).
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on. | ### Input:
Plots equity curves at different per-dollar slippage assumptions.
Parameters
----------
returns : pd.Series
Timeseries of portfolio returns to be adjusted for various
degrees of slippage.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
slippage_params: tuple
Slippage pameters to apply to the return time series (in
basis points).
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
### Response:
def plot_slippage_sweep(returns, positions, transactions,
slippage_params=(3, 8, 10, 12, 15, 20, 50),
ax=None, **kwargs):
if ax is None:
ax = plt.gca()
slippage_sweep = pd.DataFrame()
for bps in slippage_params:
adj_returns = txn.adjust_returns_for_slippage(returns, positions,
transactions, bps)
label = str(bps) + " bps"
slippage_sweep[label] = ep.cum_returns(adj_returns, 1)
slippage_sweep.plot(alpha=1.0, lw=0.5, ax=ax)
ax.set_title()
ax.set_ylabel()
ax.legend(loc=, frameon=True, framealpha=0.5)
return ax |
def get_review_history_statuses(instance, reverse=False):
review_history = getReviewHistory(instance, reverse=reverse)
return map(lambda event: event["review_state"], review_history) | Returns a list with the statuses of the instance from the review_history | ### Input:
Returns a list with the statuses of the instance from the review_history
### Response:
def get_review_history_statuses(instance, reverse=False):
review_history = getReviewHistory(instance, reverse=reverse)
return map(lambda event: event["review_state"], review_history) |
def set_delivery(self, order_id, delivery_data):
delivery_data[] = order_id
return self._post(
,
data=delivery_data
) | 修改货架
:param order_id: 订单ID
:param delivery_data: 商品物流信息
:return: 返回的 JSON 数据包 | ### Input:
修改货架
:param order_id: 订单ID
:param delivery_data: 商品物流信息
:return: 返回的 JSON 数据包
### Response:
def set_delivery(self, order_id, delivery_data):
delivery_data[] = order_id
return self._post(
,
data=delivery_data
) |
def GetCellValueNoFail (self, column, row = None):
if row == None:
(row, column) = ParseCellSpec(column)
cell = GetCellValue(self, column, row)
if cell == None:
raise ValueError("cell %d:%d does not exist" % (column, row))
return cell | get a cell, if it does not exist fail
note that column at row START AT 1 same as excel | ### Input:
get a cell, if it does not exist fail
note that column at row START AT 1 same as excel
### Response:
def GetCellValueNoFail (self, column, row = None):
if row == None:
(row, column) = ParseCellSpec(column)
cell = GetCellValue(self, column, row)
if cell == None:
raise ValueError("cell %d:%d does not exist" % (column, row))
return cell |
def get_name_cost( db, name ):
lastblock = db.lastblock
namespace_id = get_namespace_from_name( name )
if namespace_id is None or len(namespace_id) == 0:
log.debug("No namespace " % namespace_id)
return None
namespace = db.get_namespace( namespace_id )
if namespace is None:
log.debug("Namespace is being revealed".format(namespace_id))
namespace = db.get_namespace_reveal( namespace_id )
if namespace is None:
log.debug("No namespace " % namespace_id)
return None
name_fee = price_name( get_name_from_fq_name( name ), namespace, lastblock )
name_fee_units = None
if namespace[] == NAMESPACE_VERSION_PAY_WITH_STACKS:
name_fee_units = TOKEN_TYPE_STACKS
else:
name_fee_units =
name_fee = int(math.ceil(name_fee))
log.debug("Cost of at %s is %s units of %s" % (name, lastblock, name_fee, name_fee_units))
return {: name_fee, : name_fee_units} | Get the cost of a name, given the fully-qualified name.
Do so by finding the namespace it belongs to (even if the namespace is being imported).
Return {'amount': ..., 'units': ...} on success
Return None if the namespace has not been declared | ### Input:
Get the cost of a name, given the fully-qualified name.
Do so by finding the namespace it belongs to (even if the namespace is being imported).
Return {'amount': ..., 'units': ...} on success
Return None if the namespace has not been declared
### Response:
def get_name_cost( db, name ):
lastblock = db.lastblock
namespace_id = get_namespace_from_name( name )
if namespace_id is None or len(namespace_id) == 0:
log.debug("No namespace " % namespace_id)
return None
namespace = db.get_namespace( namespace_id )
if namespace is None:
log.debug("Namespace is being revealed".format(namespace_id))
namespace = db.get_namespace_reveal( namespace_id )
if namespace is None:
log.debug("No namespace " % namespace_id)
return None
name_fee = price_name( get_name_from_fq_name( name ), namespace, lastblock )
name_fee_units = None
if namespace[] == NAMESPACE_VERSION_PAY_WITH_STACKS:
name_fee_units = TOKEN_TYPE_STACKS
else:
name_fee_units =
name_fee = int(math.ceil(name_fee))
log.debug("Cost of at %s is %s units of %s" % (name, lastblock, name_fee, name_fee_units))
return {: name_fee, : name_fee_units} |
def _handle_create_cfn_template_request(self, app_id, semver, key, logical_id):
create_cfn_template = (lambda app_id, semver: self._sar_client.create_cloud_formation_template(
ApplicationId=self._sanitize_sar_str_param(app_id),
SemanticVersion=self._sanitize_sar_str_param(semver)
))
response = self._sar_service_call(create_cfn_template, logical_id, app_id, semver)
self._applications[key] = response[self.TEMPLATE_URL_KEY]
if response[] != "ACTIVE":
self._in_progress_templates.append((response[self.APPLICATION_ID_KEY], response[])) | Method that handles the create_cloud_formation_template API call to the serverless application repo
:param string app_id: ApplicationId
:param string semver: SemanticVersion
:param string key: The dictionary key consisting of (ApplicationId, SemanticVersion)
:param string logical_id: the logical_id of this application resource | ### Input:
Method that handles the create_cloud_formation_template API call to the serverless application repo
:param string app_id: ApplicationId
:param string semver: SemanticVersion
:param string key: The dictionary key consisting of (ApplicationId, SemanticVersion)
:param string logical_id: the logical_id of this application resource
### Response:
def _handle_create_cfn_template_request(self, app_id, semver, key, logical_id):
create_cfn_template = (lambda app_id, semver: self._sar_client.create_cloud_formation_template(
ApplicationId=self._sanitize_sar_str_param(app_id),
SemanticVersion=self._sanitize_sar_str_param(semver)
))
response = self._sar_service_call(create_cfn_template, logical_id, app_id, semver)
self._applications[key] = response[self.TEMPLATE_URL_KEY]
if response[] != "ACTIVE":
self._in_progress_templates.append((response[self.APPLICATION_ID_KEY], response[])) |
def get(self, name):
if not isvalidinterface(name):
return None
config = self.get_block(r % name)
resp = dict()
resp.update(self._parse_bpduguard(config))
resp.update(self._parse_portfast(config))
resp.update(self._parse_portfast_type(config))
return resp | Returns the specified interfaces STP configuration resource
The STP interface resource contains the following
* name (str): The interface name
* portfast (bool): The spanning-tree portfast admin state
* bpduguard (bool): The spanning-tree bpduguard admin state
* portfast_type (str): The spanning-tree portfast <type> value.
Valid values include "edge", "network", "normal"
Args:
name (string): The interface identifier to retrieve the config
for. Note: Spanning-tree interfaces are only supported on
Ethernet and Port-Channel interfaces
Returns:
dict: A resource dict object that represents the interface
configuration.
None: If the specified interace is not a STP port | ### Input:
Returns the specified interfaces STP configuration resource
The STP interface resource contains the following
* name (str): The interface name
* portfast (bool): The spanning-tree portfast admin state
* bpduguard (bool): The spanning-tree bpduguard admin state
* portfast_type (str): The spanning-tree portfast <type> value.
Valid values include "edge", "network", "normal"
Args:
name (string): The interface identifier to retrieve the config
for. Note: Spanning-tree interfaces are only supported on
Ethernet and Port-Channel interfaces
Returns:
dict: A resource dict object that represents the interface
configuration.
None: If the specified interace is not a STP port
### Response:
def get(self, name):
if not isvalidinterface(name):
return None
config = self.get_block(r % name)
resp = dict()
resp.update(self._parse_bpduguard(config))
resp.update(self._parse_portfast(config))
resp.update(self._parse_portfast_type(config))
return resp |
def fullname(self):
prefix = ""
if self.parent:
if self.parent.fullname:
prefix = self.parent.fullname + ":"
else:
return ""
return prefix + self.name | includes the full path with parent names | ### Input:
includes the full path with parent names
### Response:
def fullname(self):
prefix = ""
if self.parent:
if self.parent.fullname:
prefix = self.parent.fullname + ":"
else:
return ""
return prefix + self.name |
def _GetReportService(self):
if not self._report_service:
self._report_service = self._ad_manager_client.GetService(
, self._version, self._server)
return self._report_service | Lazily initializes a report service client. | ### Input:
Lazily initializes a report service client.
### Response:
def _GetReportService(self):
if not self._report_service:
self._report_service = self._ad_manager_client.GetService(
, self._version, self._server)
return self._report_service |
def motion_sensor(self, enabled):
if enabled is True:
value = CONST.SETTINGS_MOTION_POLICY_ON
elif enabled is False:
value = CONST.SETTINGS_MOTION_POLICY_OFF
else:
raise SkybellException(ERROR.INVALID_SETTING_VALUE,
(CONST.SETTINGS_MOTION_POLICY, enabled))
self._set_setting({CONST.SETTINGS_MOTION_POLICY: value}) | Set the motion sensor state. | ### Input:
Set the motion sensor state.
### Response:
def motion_sensor(self, enabled):
if enabled is True:
value = CONST.SETTINGS_MOTION_POLICY_ON
elif enabled is False:
value = CONST.SETTINGS_MOTION_POLICY_OFF
else:
raise SkybellException(ERROR.INVALID_SETTING_VALUE,
(CONST.SETTINGS_MOTION_POLICY, enabled))
self._set_setting({CONST.SETTINGS_MOTION_POLICY: value}) |
def parse_multipart_headers(iterable):
result = []
for line in iterable:
line = to_native(line)
line, line_terminated = _line_parse(line)
if not line_terminated:
raise ValueError()
if not line:
break
elif line[0] in and result:
key, value = result[-1]
result[-1] = (key, value + + line[1:])
else:
parts = line.split(, 1)
if len(parts) == 2:
result.append((parts[0].strip(), parts[1].strip()))
return Headers(result) | Parses multipart headers from an iterable that yields lines (including
the trailing newline symbol). The iterable has to be newline terminated.
The iterable will stop at the line where the headers ended so it can be
further consumed.
:param iterable: iterable of strings that are newline terminated | ### Input:
Parses multipart headers from an iterable that yields lines (including
the trailing newline symbol). The iterable has to be newline terminated.
The iterable will stop at the line where the headers ended so it can be
further consumed.
:param iterable: iterable of strings that are newline terminated
### Response:
def parse_multipart_headers(iterable):
result = []
for line in iterable:
line = to_native(line)
line, line_terminated = _line_parse(line)
if not line_terminated:
raise ValueError()
if not line:
break
elif line[0] in and result:
key, value = result[-1]
result[-1] = (key, value + + line[1:])
else:
parts = line.split(, 1)
if len(parts) == 2:
result.append((parts[0].strip(), parts[1].strip()))
return Headers(result) |
def difflag1(self):
if self.ncols > 1:
raise H2OValueError("Only single-column frames supported")
if self.types[self.columns[0]] not in {"real", "int", "bool"}:
raise H2OValueError("Numeric column expected")
fr = H2OFrame._expr(expr=ExprNode("difflag1", self), cache=self._ex._cache)
return fr | Conduct a diff-1 transform on a numeric frame column.
:returns: an H2OFrame where each element is equal to the corresponding element in the source
frame minus the previous-row element in the same frame. | ### Input:
Conduct a diff-1 transform on a numeric frame column.
:returns: an H2OFrame where each element is equal to the corresponding element in the source
frame minus the previous-row element in the same frame.
### Response:
def difflag1(self):
if self.ncols > 1:
raise H2OValueError("Only single-column frames supported")
if self.types[self.columns[0]] not in {"real", "int", "bool"}:
raise H2OValueError("Numeric column expected")
fr = H2OFrame._expr(expr=ExprNode("difflag1", self), cache=self._ex._cache)
return fr |
def play_NoteContainer(self, notecontainer):
if len(notecontainer) <= 1:
[self.play_Note(x) for x in notecontainer]
else:
self.play_Note(notecontainer[0])
self.set_deltatime(0)
[self.play_Note(x) for x in notecontainer[1:]] | Convert a mingus.containers.NoteContainer to the equivalent MIDI
events and add it to the track_data.
Note.channel and Note.velocity can be set as well. | ### Input:
Convert a mingus.containers.NoteContainer to the equivalent MIDI
events and add it to the track_data.
Note.channel and Note.velocity can be set as well.
### Response:
def play_NoteContainer(self, notecontainer):
if len(notecontainer) <= 1:
[self.play_Note(x) for x in notecontainer]
else:
self.play_Note(notecontainer[0])
self.set_deltatime(0)
[self.play_Note(x) for x in notecontainer[1:]] |
def is_trusted_subject(request):
logging.debug(.format(.join(request.all_subjects_set)))
logging.debug(.format(.join(get_trusted_subjects())))
return not request.all_subjects_set.isdisjoint(get_trusted_subjects()) | Determine if calling subject is fully trusted. | ### Input:
Determine if calling subject is fully trusted.
### Response:
def is_trusted_subject(request):
logging.debug(.format(.join(request.all_subjects_set)))
logging.debug(.format(.join(get_trusted_subjects())))
return not request.all_subjects_set.isdisjoint(get_trusted_subjects()) |
def start_numbered_list(self):
self._ordered = True
self.start_container(List, stylename=)
self.set_next_paragraph_style(
if self._item_level <= 0
else ) | Start a numbered list. | ### Input:
Start a numbered list.
### Response:
def start_numbered_list(self):
self._ordered = True
self.start_container(List, stylename=)
self.set_next_paragraph_style(
if self._item_level <= 0
else ) |
def IOW(type, nr, size):
return IOC(IOC_WRITE, type, nr, IOC_TYPECHECK(size)) | An ioctl with write parameters.
size (ctype type or instance)
Type/structure of the argument passed to ioctl's "arg" argument. | ### Input:
An ioctl with write parameters.
size (ctype type or instance)
Type/structure of the argument passed to ioctl's "arg" argument.
### Response:
def IOW(type, nr, size):
return IOC(IOC_WRITE, type, nr, IOC_TYPECHECK(size)) |
def codemirror_field_css_bundle(field):
manifesto = CodemirrorAssetTagRender()
manifesto.register_from_fields(field)
try:
bundle_name = manifesto.css_bundle_names()[0]
except IndexError:
msg = ("Given field with configuration name does not have a "
"Javascript bundle name")
raise CodeMirrorFieldBundleError(msg.format(field.config_name))
return bundle_name | Filter to get CodeMirror CSS bundle name needed for a single field.
Example:
::
{% load djangocodemirror_tags %}
{{ form.myfield|codemirror_field_css_bundle }}
Arguments:
field (djangocodemirror.fields.CodeMirrorField): A form field.
Raises:
CodeMirrorFieldBundleError: Raised if Codemirror configuration from
field does not have a bundle name.
Returns:
string: Bundle name to load with webassets. | ### Input:
Filter to get CodeMirror CSS bundle name needed for a single field.
Example:
::
{% load djangocodemirror_tags %}
{{ form.myfield|codemirror_field_css_bundle }}
Arguments:
field (djangocodemirror.fields.CodeMirrorField): A form field.
Raises:
CodeMirrorFieldBundleError: Raised if Codemirror configuration from
field does not have a bundle name.
Returns:
string: Bundle name to load with webassets.
### Response:
def codemirror_field_css_bundle(field):
manifesto = CodemirrorAssetTagRender()
manifesto.register_from_fields(field)
try:
bundle_name = manifesto.css_bundle_names()[0]
except IndexError:
msg = ("Given field with configuration name does not have a "
"Javascript bundle name")
raise CodeMirrorFieldBundleError(msg.format(field.config_name))
return bundle_name |
def compress_pdf(filepath, output_path, ghostscript_binary):
if not filepath.endswith(PDF_EXTENSION):
raise ValueError("Filename must end with .pdf!\n%s does not." % filepath)
try:
file_size = os.stat(filepath).st_size
if file_size < FILE_SIZE_LOWER_LIMIT:
LOGGER.info(NOT_COMPRESSING.format(filepath, file_size, FILE_SIZE_LOWER_LIMIT))
process = subprocess.Popen([, filepath, output_path])
else:
LOGGER.info(COMPRESSING.format(filepath))
process = subprocess.Popen(
[ghostscript_binary, "-sDEVICE=pdfwrite",
"-dCompatabilityLevel=1.4", "-dPDFSETTINGS=/ebook",
"-dNOPAUSE", "-dQUIET", "-dBATCH",
"-sOutputFile=%s" % output_path, filepath]
)
except FileNotFoundError:
msg = GS_NOT_INSTALLED.format(ghostscript_binary)
raise FileNotFoundError(msg)
process.communicate()
LOGGER.info(FILE_DONE.format(output_path)) | Compress a single PDF file.
Args:
filepath (str): Path to the PDF file.
output_path (str): Output path.
ghostscript_binary (str): Name/alias of the Ghostscript binary.
Raises:
ValueError
FileNotFoundError | ### Input:
Compress a single PDF file.
Args:
filepath (str): Path to the PDF file.
output_path (str): Output path.
ghostscript_binary (str): Name/alias of the Ghostscript binary.
Raises:
ValueError
FileNotFoundError
### Response:
def compress_pdf(filepath, output_path, ghostscript_binary):
if not filepath.endswith(PDF_EXTENSION):
raise ValueError("Filename must end with .pdf!\n%s does not." % filepath)
try:
file_size = os.stat(filepath).st_size
if file_size < FILE_SIZE_LOWER_LIMIT:
LOGGER.info(NOT_COMPRESSING.format(filepath, file_size, FILE_SIZE_LOWER_LIMIT))
process = subprocess.Popen([, filepath, output_path])
else:
LOGGER.info(COMPRESSING.format(filepath))
process = subprocess.Popen(
[ghostscript_binary, "-sDEVICE=pdfwrite",
"-dCompatabilityLevel=1.4", "-dPDFSETTINGS=/ebook",
"-dNOPAUSE", "-dQUIET", "-dBATCH",
"-sOutputFile=%s" % output_path, filepath]
)
except FileNotFoundError:
msg = GS_NOT_INSTALLED.format(ghostscript_binary)
raise FileNotFoundError(msg)
process.communicate()
LOGGER.info(FILE_DONE.format(output_path)) |
def using_hg(cwd):
try:
hg_log = shell_out(["hg", "log"], cwd=cwd)
return True
except (CalledProcessError, OSError):
return False | Test whether the directory cwd is contained in a mercurial
repository. | ### Input:
Test whether the directory cwd is contained in a mercurial
repository.
### Response:
def using_hg(cwd):
try:
hg_log = shell_out(["hg", "log"], cwd=cwd)
return True
except (CalledProcessError, OSError):
return False |
def add(self, name, nestable, **kw):
self.checkpoints[name] = self.nest
self.nest = copy.copy(self.nest)
return self.nest.add(name, nestable, **kw) | Adds a level to the nesting and creates a checkpoint that can be
reverted to later for aggregation by calling :meth:`SConsWrap.pop`.
:param name: Identifier for the nest level
:param nestable: A nestable object - see
:meth:`Nest.add() <nestly.core.Nest.add>`.
:param kw: Additional parameters to pass to
:meth:`Nest.add() <nestly.core.Nest.add>`. | ### Input:
Adds a level to the nesting and creates a checkpoint that can be
reverted to later for aggregation by calling :meth:`SConsWrap.pop`.
:param name: Identifier for the nest level
:param nestable: A nestable object - see
:meth:`Nest.add() <nestly.core.Nest.add>`.
:param kw: Additional parameters to pass to
:meth:`Nest.add() <nestly.core.Nest.add>`.
### Response:
def add(self, name, nestable, **kw):
self.checkpoints[name] = self.nest
self.nest = copy.copy(self.nest)
return self.nest.add(name, nestable, **kw) |
def _create_penwidth_combo(self):
choices = map(unicode, xrange(12))
self.pen_width_combo = \
_widgets.PenWidthComboBox(self, choices=choices,
style=wx.CB_READONLY, size=(50, -1))
self.pen_width_combo.SetToolTipString(_(u"Border width"))
self.AddControl(self.pen_width_combo)
self.Bind(wx.EVT_COMBOBOX, self.OnLineWidth, self.pen_width_combo) | Create pen width combo box | ### Input:
Create pen width combo box
### Response:
def _create_penwidth_combo(self):
choices = map(unicode, xrange(12))
self.pen_width_combo = \
_widgets.PenWidthComboBox(self, choices=choices,
style=wx.CB_READONLY, size=(50, -1))
self.pen_width_combo.SetToolTipString(_(u"Border width"))
self.AddControl(self.pen_width_combo)
self.Bind(wx.EVT_COMBOBOX, self.OnLineWidth, self.pen_width_combo) |
def choose_1_from_each(lists):
if len(lists) == 0:
yield []
else:
for el in lists[0]:
for next_list in choose_1_from_each(lists[1:]):
yield [el] + next_list | Takes a list of lists and returns a list of lists with one item
from each list. This new list should be the length of each list multiplied
by the others. 18 for an list with lists of 3, 2 and 3. Also the lenght
of each sub list should be same as the length of lists passed in.
Args:
lists(list of Lists): A list of lists
Returns:
list of lists: returns a list of lists constructions of one item from each
list in lists. | ### Input:
Takes a list of lists and returns a list of lists with one item
from each list. This new list should be the length of each list multiplied
by the others. 18 for an list with lists of 3, 2 and 3. Also the lenght
of each sub list should be same as the length of lists passed in.
Args:
lists(list of Lists): A list of lists
Returns:
list of lists: returns a list of lists constructions of one item from each
list in lists.
### Response:
def choose_1_from_each(lists):
if len(lists) == 0:
yield []
else:
for el in lists[0]:
for next_list in choose_1_from_each(lists[1:]):
yield [el] + next_list |
def renew_voms_proxy(passwd="", vo=None, lifetime="196:00"):
with tmp_file() as (_, tmp):
with open(tmp, "w") as f:
f.write(passwd)
cmd = "cat | voms-proxy-init --valid ".format(tmp, lifetime)
if vo:
cmd += " -voms ".format(vo)
code, out, _ = interruptable_popen(cmd, shell=True, executable="/bin/bash",
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if code != 0:
raise Exception("proxy renewal failed: {}".format(out)) | Renews the voms proxy using a password *passwd*, an optional virtual organization name *vo*, and
a default *lifetime* of 8 days. The password is written to a temporary file first and piped into
the renewal commad to ensure it is not visible in the process list. | ### Input:
Renews the voms proxy using a password *passwd*, an optional virtual organization name *vo*, and
a default *lifetime* of 8 days. The password is written to a temporary file first and piped into
the renewal commad to ensure it is not visible in the process list.
### Response:
def renew_voms_proxy(passwd="", vo=None, lifetime="196:00"):
with tmp_file() as (_, tmp):
with open(tmp, "w") as f:
f.write(passwd)
cmd = "cat | voms-proxy-init --valid ".format(tmp, lifetime)
if vo:
cmd += " -voms ".format(vo)
code, out, _ = interruptable_popen(cmd, shell=True, executable="/bin/bash",
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if code != 0:
raise Exception("proxy renewal failed: {}".format(out)) |
def present(name,
block_icmp=None,
prune_block_icmp=False,
default=None,
masquerade=False,
ports=None,
prune_ports=False,
port_fwd=None,
prune_port_fwd=False,
services=None,
prune_services=False,
interfaces=None,
prune_interfaces=False,
sources=None,
prune_sources=False,
rich_rules=None,
prune_rich_rules=False):
ret = _present(name, block_icmp, prune_block_icmp, default, masquerade, ports, prune_ports,
port_fwd, prune_port_fwd, services, prune_services, interfaces, prune_interfaces,
sources, prune_sources, rich_rules, prune_rich_rules)
if ret[] != {}:
__salt__[]()
return ret | Ensure a zone has specific attributes.
name
The zone to modify.
default : None
Set this zone as the default zone if ``True``.
masquerade : False
Enable or disable masquerade for a zone.
block_icmp : None
List of ICMP types to block in the zone.
prune_block_icmp : False
If ``True``, remove all but the specified block_icmp from the zone.
ports : None
List of ports to add to the zone.
prune_ports : False
If ``True``, remove all but the specified ports from the zone.
port_fwd : None
List of port forwards to add to the zone.
prune_port_fwd : False
If ``True``, remove all but the specified port_fwd from the zone.
services : None
List of services to add to the zone.
prune_services : False
If ``True``, remove all but the specified services from the zone.
.. note:: Currently defaults to True for compatibility, but will be changed to False in a future release.
interfaces : None
List of interfaces to add to the zone.
prune_interfaces : False
If ``True``, remove all but the specified interfaces from the zone.
sources : None
List of sources to add to the zone.
prune_sources : False
If ``True``, remove all but the specified sources from the zone.
rich_rules : None
List of rich rules to add to the zone.
prune_rich_rules : False
If ``True``, remove all but the specified rich rules from the zone. | ### Input:
Ensure a zone has specific attributes.
name
The zone to modify.
default : None
Set this zone as the default zone if ``True``.
masquerade : False
Enable or disable masquerade for a zone.
block_icmp : None
List of ICMP types to block in the zone.
prune_block_icmp : False
If ``True``, remove all but the specified block_icmp from the zone.
ports : None
List of ports to add to the zone.
prune_ports : False
If ``True``, remove all but the specified ports from the zone.
port_fwd : None
List of port forwards to add to the zone.
prune_port_fwd : False
If ``True``, remove all but the specified port_fwd from the zone.
services : None
List of services to add to the zone.
prune_services : False
If ``True``, remove all but the specified services from the zone.
.. note:: Currently defaults to True for compatibility, but will be changed to False in a future release.
interfaces : None
List of interfaces to add to the zone.
prune_interfaces : False
If ``True``, remove all but the specified interfaces from the zone.
sources : None
List of sources to add to the zone.
prune_sources : False
If ``True``, remove all but the specified sources from the zone.
rich_rules : None
List of rich rules to add to the zone.
prune_rich_rules : False
If ``True``, remove all but the specified rich rules from the zone.
### Response:
def present(name,
block_icmp=None,
prune_block_icmp=False,
default=None,
masquerade=False,
ports=None,
prune_ports=False,
port_fwd=None,
prune_port_fwd=False,
services=None,
prune_services=False,
interfaces=None,
prune_interfaces=False,
sources=None,
prune_sources=False,
rich_rules=None,
prune_rich_rules=False):
ret = _present(name, block_icmp, prune_block_icmp, default, masquerade, ports, prune_ports,
port_fwd, prune_port_fwd, services, prune_services, interfaces, prune_interfaces,
sources, prune_sources, rich_rules, prune_rich_rules)
if ret[] != {}:
__salt__[]()
return ret |
def schedule_job(self, j):
job_id = uuid.uuid4().hex
j.job_id = job_id
session = self.sessionmaker()
orm_job = ORMJob(
id=job_id,
state=j.state,
app=self.app,
namespace=self.namespace,
obj=j)
session.add(orm_job)
try:
session.commit()
except Exception as e:
logging.error(
"Got an error running session.commit(): {}".format(e))
return job_id | Add the job given by j to the job queue.
Note: Does not actually run the job. | ### Input:
Add the job given by j to the job queue.
Note: Does not actually run the job.
### Response:
def schedule_job(self, j):
job_id = uuid.uuid4().hex
j.job_id = job_id
session = self.sessionmaker()
orm_job = ORMJob(
id=job_id,
state=j.state,
app=self.app,
namespace=self.namespace,
obj=j)
session.add(orm_job)
try:
session.commit()
except Exception as e:
logging.error(
"Got an error running session.commit(): {}".format(e))
return job_id |
def fallback_to_sshv1(self, ctx):
command = self.get_command(version=1)
ctx.spawn_session(command)
return True | Fallback to SSHv1. | ### Input:
Fallback to SSHv1.
### Response:
def fallback_to_sshv1(self, ctx):
command = self.get_command(version=1)
ctx.spawn_session(command)
return True |
def tile_is_valid(self):
return self.tile_info is not None \
and (self.datetime == self.date or self.datetime == self.parse_datetime(self.tile_info[])) | Checks if tile has tile info and valid timestamp
:return: `True` if tile is valid and `False` otherwise
:rtype: bool | ### Input:
Checks if tile has tile info and valid timestamp
:return: `True` if tile is valid and `False` otherwise
:rtype: bool
### Response:
def tile_is_valid(self):
return self.tile_info is not None \
and (self.datetime == self.date or self.datetime == self.parse_datetime(self.tile_info[])) |
def split(df):
idx = df.index.argmax() + 1
down = df.iloc[:idx]
up = df.iloc[idx:][::-1]
return down, up | Returns a tuple with down/up-cast. | ### Input:
Returns a tuple with down/up-cast.
### Response:
def split(df):
idx = df.index.argmax() + 1
down = df.iloc[:idx]
up = df.iloc[idx:][::-1]
return down, up |
def serialize(element):
if getattr(_THREAD, "serializer", None) is None:
_THREAD.serializer = XMPPSerializer("jabber:client")
_THREAD.serializer.emit_head(None, None)
return _THREAD.serializer.emit_stanza(element) | Serialize an XMPP element.
Utility function for debugging or logging.
:Parameters:
- `element`: the element to serialize
:Types:
- `element`: :etree:`ElementTree.Element`
:Return: serialized element
:Returntype: `unicode` | ### Input:
Serialize an XMPP element.
Utility function for debugging or logging.
:Parameters:
- `element`: the element to serialize
:Types:
- `element`: :etree:`ElementTree.Element`
:Return: serialized element
:Returntype: `unicode`
### Response:
def serialize(element):
if getattr(_THREAD, "serializer", None) is None:
_THREAD.serializer = XMPPSerializer("jabber:client")
_THREAD.serializer.emit_head(None, None)
return _THREAD.serializer.emit_stanza(element) |
def get(self, template_ids, session, fields=[]):
request = TOPRequest()
request[] = template_ids
if not fields:
fields = self.fields
request[] = fields
self.create(self.execute(request, session), fields=[, ], models={:DeliveryTemplate})
return self.delivery_templates | taobao.delivery.template.get 获取用户指定运费模板信息
获取用户指定运费模板信息 | ### Input:
taobao.delivery.template.get 获取用户指定运费模板信息
获取用户指定运费模板信息
### Response:
def get(self, template_ids, session, fields=[]):
request = TOPRequest()
request[] = template_ids
if not fields:
fields = self.fields
request[] = fields
self.create(self.execute(request, session), fields=[, ], models={:DeliveryTemplate})
return self.delivery_templates |
def start(job_label, runas=None):
***
service = _service_by_name(job_label)
if service:
cmd = .format(service[], runas=runas)
return not __salt__[](cmd, runas=runas, python_shell=False)
return False | Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service label>
salt '*' service.start org.ntp.ntpd
salt '*' service.start /System/Library/LaunchDaemons/org.ntp.ntpd.plist | ### Input:
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service label>
salt '*' service.start org.ntp.ntpd
salt '*' service.start /System/Library/LaunchDaemons/org.ntp.ntpd.plist
### Response:
def start(job_label, runas=None):
***
service = _service_by_name(job_label)
if service:
cmd = .format(service[], runas=runas)
return not __salt__[](cmd, runas=runas, python_shell=False)
return False |
def init(name, *args):
matcher = get(name)
if not matcher:
raise ValueError(.format(name))
return matcher(*args) | Initializes a matcher instance passing variadic arguments to
its constructor. Acts as a delegator proxy.
Arguments:
name (str): matcher class name or alias to execute.
*args (mixed): variadic argument
Returns:
matcher: matcher instance.
Raises:
ValueError: if matcher was not found. | ### Input:
Initializes a matcher instance passing variadic arguments to
its constructor. Acts as a delegator proxy.
Arguments:
name (str): matcher class name or alias to execute.
*args (mixed): variadic argument
Returns:
matcher: matcher instance.
Raises:
ValueError: if matcher was not found.
### Response:
def init(name, *args):
matcher = get(name)
if not matcher:
raise ValueError(.format(name))
return matcher(*args) |
def conjugate(self):
stine_l = np.conjugate(self._data[0])
stine_r = None
if self._data[1] is not None:
stine_r = np.conjugate(self._data[1])
return Stinespring((stine_l, stine_r), self.input_dims(),
self.output_dims()) | Return the conjugate of the QuantumChannel. | ### Input:
Return the conjugate of the QuantumChannel.
### Response:
def conjugate(self):
stine_l = np.conjugate(self._data[0])
stine_r = None
if self._data[1] is not None:
stine_r = np.conjugate(self._data[1])
return Stinespring((stine_l, stine_r), self.input_dims(),
self.output_dims()) |
def generate(bits, progress_func=None):
key = rsa.generate_private_key(
public_exponent=65537, key_size=bits, backend=default_backend()
)
return RSAKey(key=key) | Generate a new private RSA key. This factory function can be used to
generate a new host key or authentication key.
:param int bits: number of bits the generated key should be.
:param progress_func: Unused
:return: new `.RSAKey` private key | ### Input:
Generate a new private RSA key. This factory function can be used to
generate a new host key or authentication key.
:param int bits: number of bits the generated key should be.
:param progress_func: Unused
:return: new `.RSAKey` private key
### Response:
def generate(bits, progress_func=None):
key = rsa.generate_private_key(
public_exponent=65537, key_size=bits, backend=default_backend()
)
return RSAKey(key=key) |
def combine_slices(slice_datasets, rescale=None):
s coordinate system.
Returns a two-tuple containing the 3D-ndarray and the affine matrix.
If `rescale` is set to `None` (the default), then the image array dtype
will be preserved, unless any of the DICOM images contain either the
`Rescale Slope
<https://dicom.innolitics.com/ciods/ct-image/ct-image/00281053>`_ or the
`Rescale Intercept <https://dicom.innolitics.com/ciods/ct-image/ct-image/00281052>`_
attributes. If either of these attributes are present, they will be
applied to each slice individually.
If `rescale` is `True` the voxels will be cast to `float32`, if set to
`False`, the original dtype will be preserved even if DICOM rescaling information is present.
The returned array has the column-major byte-order.
This function requires that the datasets:
- Be in same series (have the same
`Series Instance UID <https://dicom.innolitics.com/ciods/ct-image/general-series/0020000e>`_,
`Modality <https://dicom.innolitics.com/ciods/ct-image/general-series/00080060>`_,
and `SOP Class UID <https://dicom.innolitics.com/ciods/ct-image/sop-common/00080016>`_).
- The binary storage of each slice must be the same (have the same
`Bits Allocated <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280100>`_,
`Bits Stored <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280101>`_,
`High Bit <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280102>`_, and
`Pixel Representation <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280103>`_).
- The image slice must approximately form a grid. This means there can not
be any missing internal slices (missing slices on the ends of the dataset
are not detected).
- It also means that each slice must have the same
`Rows <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280010>`_,
`Columns <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280011>`_,
`Pixel Spacing <https://dicom.innolitics.com/ciods/ct-image/image-plane/00280030>`_, and
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute values.
- The direction cosines derived from the
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute must, within 1e-4, have a magnitude of 1. The cosines must
also be approximately perpendicular (their dot-product must be within
1e-4 of 0). Warnings are displayed if any of these approximations are
below 1e-8, however, since we have seen real datasets with values up to
1e-4, we let them pass.
- The `Image Position (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200032>`_
values must approximately form a line.
If any of these conditions are not met, a `dicom_numpy.DicomImportException` is raised.
'
if len(slice_datasets) == 0:
raise DicomImportException("Must provide at least one DICOM dataset")
_validate_slices_form_uniform_grid(slice_datasets)
voxels = _merge_slice_pixel_arrays(slice_datasets, rescale)
transform = _ijk_to_patient_xyz_transform_matrix(slice_datasets)
return voxels, transform | Given a list of pydicom datasets for an image series, stitch them together into a
three-dimensional numpy array. Also calculate a 4x4 affine transformation
matrix that converts the ijk-pixel-indices into the xyz-coordinates in the
DICOM patient's coordinate system.
Returns a two-tuple containing the 3D-ndarray and the affine matrix.
If `rescale` is set to `None` (the default), then the image array dtype
will be preserved, unless any of the DICOM images contain either the
`Rescale Slope
<https://dicom.innolitics.com/ciods/ct-image/ct-image/00281053>`_ or the
`Rescale Intercept <https://dicom.innolitics.com/ciods/ct-image/ct-image/00281052>`_
attributes. If either of these attributes are present, they will be
applied to each slice individually.
If `rescale` is `True` the voxels will be cast to `float32`, if set to
`False`, the original dtype will be preserved even if DICOM rescaling information is present.
The returned array has the column-major byte-order.
This function requires that the datasets:
- Be in same series (have the same
`Series Instance UID <https://dicom.innolitics.com/ciods/ct-image/general-series/0020000e>`_,
`Modality <https://dicom.innolitics.com/ciods/ct-image/general-series/00080060>`_,
and `SOP Class UID <https://dicom.innolitics.com/ciods/ct-image/sop-common/00080016>`_).
- The binary storage of each slice must be the same (have the same
`Bits Allocated <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280100>`_,
`Bits Stored <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280101>`_,
`High Bit <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280102>`_, and
`Pixel Representation <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280103>`_).
- The image slice must approximately form a grid. This means there can not
be any missing internal slices (missing slices on the ends of the dataset
are not detected).
- It also means that each slice must have the same
`Rows <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280010>`_,
`Columns <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280011>`_,
`Pixel Spacing <https://dicom.innolitics.com/ciods/ct-image/image-plane/00280030>`_, and
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute values.
- The direction cosines derived from the
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute must, within 1e-4, have a magnitude of 1. The cosines must
also be approximately perpendicular (their dot-product must be within
1e-4 of 0). Warnings are displayed if any of these approximations are
below 1e-8, however, since we have seen real datasets with values up to
1e-4, we let them pass.
- The `Image Position (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200032>`_
values must approximately form a line.
If any of these conditions are not met, a `dicom_numpy.DicomImportException` is raised. | ### Input:
Given a list of pydicom datasets for an image series, stitch them together into a
three-dimensional numpy array. Also calculate a 4x4 affine transformation
matrix that converts the ijk-pixel-indices into the xyz-coordinates in the
DICOM patient's coordinate system.
Returns a two-tuple containing the 3D-ndarray and the affine matrix.
If `rescale` is set to `None` (the default), then the image array dtype
will be preserved, unless any of the DICOM images contain either the
`Rescale Slope
<https://dicom.innolitics.com/ciods/ct-image/ct-image/00281053>`_ or the
`Rescale Intercept <https://dicom.innolitics.com/ciods/ct-image/ct-image/00281052>`_
attributes. If either of these attributes are present, they will be
applied to each slice individually.
If `rescale` is `True` the voxels will be cast to `float32`, if set to
`False`, the original dtype will be preserved even if DICOM rescaling information is present.
The returned array has the column-major byte-order.
This function requires that the datasets:
- Be in same series (have the same
`Series Instance UID <https://dicom.innolitics.com/ciods/ct-image/general-series/0020000e>`_,
`Modality <https://dicom.innolitics.com/ciods/ct-image/general-series/00080060>`_,
and `SOP Class UID <https://dicom.innolitics.com/ciods/ct-image/sop-common/00080016>`_).
- The binary storage of each slice must be the same (have the same
`Bits Allocated <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280100>`_,
`Bits Stored <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280101>`_,
`High Bit <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280102>`_, and
`Pixel Representation <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280103>`_).
- The image slice must approximately form a grid. This means there can not
be any missing internal slices (missing slices on the ends of the dataset
are not detected).
- It also means that each slice must have the same
`Rows <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280010>`_,
`Columns <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280011>`_,
`Pixel Spacing <https://dicom.innolitics.com/ciods/ct-image/image-plane/00280030>`_, and
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute values.
- The direction cosines derived from the
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute must, within 1e-4, have a magnitude of 1. The cosines must
also be approximately perpendicular (their dot-product must be within
1e-4 of 0). Warnings are displayed if any of these approximations are
below 1e-8, however, since we have seen real datasets with values up to
1e-4, we let them pass.
- The `Image Position (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200032>`_
values must approximately form a line.
If any of these conditions are not met, a `dicom_numpy.DicomImportException` is raised.
### Response:
def combine_slices(slice_datasets, rescale=None):
s coordinate system.
Returns a two-tuple containing the 3D-ndarray and the affine matrix.
If `rescale` is set to `None` (the default), then the image array dtype
will be preserved, unless any of the DICOM images contain either the
`Rescale Slope
<https://dicom.innolitics.com/ciods/ct-image/ct-image/00281053>`_ or the
`Rescale Intercept <https://dicom.innolitics.com/ciods/ct-image/ct-image/00281052>`_
attributes. If either of these attributes are present, they will be
applied to each slice individually.
If `rescale` is `True` the voxels will be cast to `float32`, if set to
`False`, the original dtype will be preserved even if DICOM rescaling information is present.
The returned array has the column-major byte-order.
This function requires that the datasets:
- Be in same series (have the same
`Series Instance UID <https://dicom.innolitics.com/ciods/ct-image/general-series/0020000e>`_,
`Modality <https://dicom.innolitics.com/ciods/ct-image/general-series/00080060>`_,
and `SOP Class UID <https://dicom.innolitics.com/ciods/ct-image/sop-common/00080016>`_).
- The binary storage of each slice must be the same (have the same
`Bits Allocated <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280100>`_,
`Bits Stored <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280101>`_,
`High Bit <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280102>`_, and
`Pixel Representation <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280103>`_).
- The image slice must approximately form a grid. This means there can not
be any missing internal slices (missing slices on the ends of the dataset
are not detected).
- It also means that each slice must have the same
`Rows <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280010>`_,
`Columns <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280011>`_,
`Pixel Spacing <https://dicom.innolitics.com/ciods/ct-image/image-plane/00280030>`_, and
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute values.
- The direction cosines derived from the
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute must, within 1e-4, have a magnitude of 1. The cosines must
also be approximately perpendicular (their dot-product must be within
1e-4 of 0). Warnings are displayed if any of these approximations are
below 1e-8, however, since we have seen real datasets with values up to
1e-4, we let them pass.
- The `Image Position (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200032>`_
values must approximately form a line.
If any of these conditions are not met, a `dicom_numpy.DicomImportException` is raised.
'
if len(slice_datasets) == 0:
raise DicomImportException("Must provide at least one DICOM dataset")
_validate_slices_form_uniform_grid(slice_datasets)
voxels = _merge_slice_pixel_arrays(slice_datasets, rescale)
transform = _ijk_to_patient_xyz_transform_matrix(slice_datasets)
return voxels, transform |
def move_siblings(
start, end, new_,
keep_start_boundary=False,
keep_end_boundary=False
):
old_ = start.getparent()
if keep_start_boundary:
new_.append(copy(start))
else:
if start.tail:
new_.text = start.tail
for node in start.itersiblings():
if node is not end:
new_.append(node)
elif node is end:
if keep_end_boundary:
new_.append(copy(node))
break
old_.replace(start, new_)
old_.remove(end) | a helper function that will replace a start/end node pair
by a new containing element, effectively moving all in-between siblings
This is particularly helpful to replace for /for loops in tables
with the content resulting from the iteration
This function call returns None. The parent xml tree is modified in place
@param start: the starting xml node
@type start: lxml.etree.Element
@param end: the ending xml node
@type end: lxml.etree.Element
@param new_: the new xml element that will replace the start/end pair
@type new_: lxlm.etree.Element
@param keep_start_boundary: Flag to let the function know if it copies
your start tag to the new_ node or not, Default value is False
@type keep_start_boundary: bool
@param keep_end_boundary: Flag to let the function know if it copies
your end tag to the new_ node or not, Default value is False
@type keep_end_boundary: bool
@returns: None | ### Input:
a helper function that will replace a start/end node pair
by a new containing element, effectively moving all in-between siblings
This is particularly helpful to replace for /for loops in tables
with the content resulting from the iteration
This function call returns None. The parent xml tree is modified in place
@param start: the starting xml node
@type start: lxml.etree.Element
@param end: the ending xml node
@type end: lxml.etree.Element
@param new_: the new xml element that will replace the start/end pair
@type new_: lxlm.etree.Element
@param keep_start_boundary: Flag to let the function know if it copies
your start tag to the new_ node or not, Default value is False
@type keep_start_boundary: bool
@param keep_end_boundary: Flag to let the function know if it copies
your end tag to the new_ node or not, Default value is False
@type keep_end_boundary: bool
@returns: None
### Response:
def move_siblings(
    start, end, new_,
    keep_start_boundary=False,
    keep_end_boundary=False
):
    """Replace the span delimited by *start* and *end* with *new_*.

    Every sibling strictly between *start* and *end* is moved into
    *new_*; the pair itself is copied in only when the corresponding
    ``keep_*_boundary`` flag is set. The parent tree is modified in
    place and the function returns None.
    """
    parent = start.getparent()
    if keep_start_boundary:
        new_.append(copy(start))
    elif start.tail:
        # Without the boundary, preserve the text that trailed it.
        new_.text = start.tail
    for sibling in start.itersiblings():
        if sibling is end:
            if keep_end_boundary:
                new_.append(copy(sibling))
            break
        # Appending re-parents the node, moving it out of the old tree.
        new_.append(sibling)
    parent.replace(start, new_)
    parent.remove(end)
def AdditiveGaussianNoise(loc=0, scale=0, per_channel=False, name=None, deterministic=False, random_state=None):
    """Return an AddElementwise augmenter that adds gaussian noise N(loc, scale) to images."""
    # Normalize loc/scale: numbers stay fixed, (a, b) tuples become uniform
    # ranges, lists become per-image choices, StochasticParameters pass through.
    loc2 = iap.handle_continuous_param(loc, "loc", value_range=None, tuple_to_uniform=True, list_to_choice=True)
    # scale is a standard deviation, so it must be >= 0.
    scale2 = iap.handle_continuous_param(scale, "scale", value_range=(0, None), tuple_to_uniform=True,
                                         list_to_choice=True)
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)
    return AddElementwise(iap.Normal(loc=loc2, scale=scale2), per_channel=per_channel, name=name,
                          deterministic=deterministic, random_state=random_state) | Add gaussian noise (aka white noise) to images.
dtype support::
See ``imgaug.augmenters.arithmetic.AddElementwise``.
Parameters
----------
loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Mean of the normal distribution that generates the noise.
* If a number, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Standard deviation of the normal distribution that generates the noise.
Must be ``>= 0``. If 0 then only `loc` will be used.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
per_channel : bool or float, optional
Whether to use the same noise value per pixel for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images.
>>> aug = iaa.AdditiveGaussianNoise(scale=(0, 0.1*255))
adds gaussian noise from the distribution ``N(0, s)`` to images,
where s is sampled per image from the range ``0 <= s <= 0.1*255``.
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=True)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images,
where the noise value is different per pixel *and* channel (e.g. a
different one for red, green and blue channels for the same pixel).
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=0.5)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images,
where the noise value is sometimes (50 percent of all cases) the same
per pixel for all channels and sometimes different (other 50 percent). | ### Input:
Add gaussian noise (aka white noise) to images.
dtype support::
See ``imgaug.augmenters.arithmetic.AddElementwise``.
Parameters
----------
loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Mean of the normal distribution that generates the noise.
* If a number, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Standard deviation of the normal distribution that generates the noise.
Must be ``>= 0``. If 0 then only `loc` will be used.
* If an int or float, exactly that value will be used.
* If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will
be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
per_channel : bool or float, optional
Whether to use the same noise value per pixel for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images.
>>> aug = iaa.AdditiveGaussianNoise(scale=(0, 0.1*255))
adds gaussian noise from the distribution ``N(0, s)`` to images,
where s is sampled per image from the range ``0 <= s <= 0.1*255``.
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=True)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images,
where the noise value is different per pixel *and* channel (e.g. a
different one for red, green and blue channels for the same pixel).
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=0.5)
adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images,
where the noise value is sometimes (50 percent of all cases) the same
per pixel for all channels and sometimes different (other 50 percent).
### Response:
def AdditiveGaussianNoise(loc=0, scale=0, per_channel=False, name=None, deterministic=False, random_state=None):
    """Create an augmenter that adds gaussian (white) noise to images.

    `loc` and `scale` may be numbers, (a, b) tuples, lists, or
    StochasticParameters; they are normalized by
    ``iap.handle_continuous_param`` and fed into a Normal distribution
    whose samples are added elementwise to each image.
    """
    loc_param = iap.handle_continuous_param(
        loc, "loc", value_range=None, tuple_to_uniform=True, list_to_choice=True)
    scale_param = iap.handle_continuous_param(
        scale, "scale", value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)
    noise = iap.Normal(loc=loc_param, scale=scale_param)
    return AddElementwise(noise, per_channel=per_channel, name=name,
                          deterministic=deterministic, random_state=random_state)
def add_column(self, width):
    """Add a new rightmost column of *width* to the table; return it as a |_Column|."""
    tblGrid = self._tbl.tblGrid
    gridCol = tblGrid.add_gridCol()
    gridCol.w = width
    # Every existing row needs a matching new cell of the same width.
    for tr in self._tbl.tr_lst:
        tc = tr.add_tc()
        tc.width = width
    return _Column(gridCol, self) | Return a |_Column| object of *width*, newly added rightmost to the
table. | ### Input:
Return a |_Column| object of *width*, newly added rightmost to the
table.
### Response:
def add_column(self, width):
    """Append a rightmost column of *width* to the table and return it as a |_Column|."""
    grid = self._tbl.tblGrid
    new_col = grid.add_gridCol()
    new_col.w = width
    # Give each existing row a matching cell of the same width.
    for row in self._tbl.tr_lst:
        new_cell = row.add_tc()
        new_cell.width = width
    return _Column(new_col, self)
def meta(self):
    """Derive (and cache in self._meta) metadata from the geolocation coordinates.

    NOTE(review): the string literals were stripped from this extract
    (empty ``geo_data[]`` subscripts and bare ``:`` dict entries), so the
    block is not valid Python as shown; the original keys (presumably
    'lat'/'lon' lookups and names like 'earth_mask', 'yaw_flip', ...)
    must be restored before use.
    """
    if self._meta is None:
        # Latitude field: drives the earth/space mask and nadir pixel search.
        lat = self.geo_data[]
        earth_mask = self._get_earth_mask(lat)
        crow, ccol = self._get_nadir_pixel(earth_mask=earth_mask,
                                           sector=self.sector)
        # Nadir latitude/longitude only exist when a nadir pixel was found.
        lat0 = lat.values[crow, ccol] if crow is not None else None
        yaw_flip = self._is_yaw_flip(lat)
        del lat  # release the (potentially large) array early
        lon = self.geo_data[]
        lon0 = lon.values[crow, ccol] if crow is not None else None
        area_def_uni = self._get_area_def_uniform_sampling(
            lon0=lon0, channel=self.gvar_channel)
        del lon
        self._meta = {: earth_mask,
                      : yaw_flip,
                      : lat0,
                      : lon0,
                      : crow,
                      : ccol,
                      : area_def_uni}
    return self._meta | Derive metadata from the coordinates
Derive metadata from the coordinates
### Response:
def meta(self):
    """Derive metadata from the coordinates, caching the result in self._meta.

    NOTE(review): string literals (geo_data keys, dict keys) were lost in
    this extract -- this block is not valid Python as shown; restore the
    original literals before use.
    """
    if self._meta is None:
        lat = self.geo_data[]
        earth_mask = self._get_earth_mask(lat)
        crow, ccol = self._get_nadir_pixel(earth_mask=earth_mask,
                                           sector=self.sector)
        lat0 = lat.values[crow, ccol] if crow is not None else None
        yaw_flip = self._is_yaw_flip(lat)
        del lat  # free memory before loading longitudes
        lon = self.geo_data[]
        lon0 = lon.values[crow, ccol] if crow is not None else None
        area_def_uni = self._get_area_def_uniform_sampling(
            lon0=lon0, channel=self.gvar_channel)
        del lon
        self._meta = {: earth_mask,
                      : yaw_flip,
                      : lat0,
                      : lon0,
                      : crow,
                      : ccol,
                      : area_def_uni}
    return self._meta
def expanded_by(self, n):
    """Return a new Rect with all four sides pushed outward by *n* points."""
    return Rect(self.left - n, self.top - n, self.right + n, self.bottom + n) | Return a rectangle with extended borders.
Create a new rectangle that is wider and taller than the
immediate one. All sides are extended by "n" points. | ### Input:
Return a rectangle with extended borders.
Create a new rectangle that is wider and taller than the
immediate one. All sides are extended by "n" points.
### Response:
def expanded_by(self, n):
    """Return a copy of this rectangle grown by *n* points on every side."""
    left, top = self.left - n, self.top - n
    right, bottom = self.right + n, self.bottom + n
    return Rect(left, top, right, bottom)
def add_markup( self ):
    """Finish the markup section being accumulated and store it in self.markups.

    No-op unless both a section name (self.markup) and content lines exist.
    """
    if self.markup and self.markup_lines:
        marks = self.markup_lines
        # Drop a trailing all-whitespace line, if any.
        # NOTE(review): `string.strip` is the Python 2 string-module form.
        if len( marks ) > 0 and not string.strip( marks[-1] ):
            self.markup_lines = marks[:-1]
        m = DocMarkup( self.markup, self.markup_lines )
        self.markups.append( m )
        # Reset accumulators for the next section.
        self.markup = None
        self.markup_lines = [] | add a new markup section
add a new markup section
### Response:
def add_markup( self ):
    """Flush the markup section currently being accumulated, if any."""
    if self.markup and self.markup_lines:
        lines = self.markup_lines
        # Trim one trailing all-whitespace line before storing the section.
        if len( lines ) > 0 and not string.strip( lines[-1] ):
            self.markup_lines = lines[:-1]
        section = DocMarkup( self.markup, self.markup_lines )
        self.markups.append( section )
        # Reset the accumulators for the next section.
        self.markup = None
        self.markup_lines = []
self.markup_lines = [] |
def json_doc_to_xml(json_obj, lang=, custom_namespace=None):
    """Convert a root-level Open511 JSON document (dict) to an lxml Element.

    NOTE(review): string literals were stripped from this extract (the
    ``lang=`` default, the ``in json_obj`` membership key, the ``.pop()``
    keys, the pagination element name) -- not valid Python as shown;
    restore the original literals (likely 'meta', 'version',
    'pagination' and a default language code) before use.
    """
    if not in json_obj:
        raise Exception("This function requires a conforming Open511 JSON document with a section.")
    # Work on a shallow copy so the caller's dict is not mutated.
    json_obj = dict(json_obj)
    meta = json_obj.pop()
    elem = get_base_open511_element(lang=lang, version=meta.pop())
    pagination = json_obj.pop(, None)
    json_struct_to_xml(json_obj, elem, custom_namespace=custom_namespace)
    if pagination:
        elem.append(json_struct_to_xml(pagination, , custom_namespace=custom_namespace))
    # Metadata elements are appended last.
    json_struct_to_xml(meta, elem)
    return elem | Converts a Open511 JSON document to XML.
lang: the appropriate language code
Takes a dict deserialized from JSON, returns an lxml Element.
Accepts only the full root-level JSON object from an Open511 response. | ### Input:
Converts a Open511 JSON document to XML.
lang: the appropriate language code
Takes a dict deserialized from JSON, returns an lxml Element.
Accepts only the full root-level JSON object from an Open511 response.
### Response:
def json_doc_to_xml(json_obj, lang=, custom_namespace=None):
    """Convert an Open511 JSON document to XML; returns an lxml Element.

    NOTE(review): the quoted keys/defaults were lost in this extract --
    this block is not valid Python as shown; restore them before use.
    """
    if not in json_obj:
        raise Exception("This function requires a conforming Open511 JSON document with a section.")
    json_obj = dict(json_obj)  # shallow copy; do not mutate the caller's dict
    meta = json_obj.pop()
    elem = get_base_open511_element(lang=lang, version=meta.pop())
    pagination = json_obj.pop(, None)
    json_struct_to_xml(json_obj, elem, custom_namespace=custom_namespace)
    if pagination:
        elem.append(json_struct_to_xml(pagination, , custom_namespace=custom_namespace))
    json_struct_to_xml(meta, elem)
    return elem
def get_zipped_file(url, encoding_error_opt=):
    """Download a ZIP archive from *url* and return the text of its first member.

    Python 2 code (urllib2 / cStringIO / unicode).
    NOTE(review): the default for ``encoding_error_opt`` was stripped in
    this extract (likely 'ignore' or 'replace'); the signature is not
    valid Python as shown.
    """
    remotezip = urllib2.urlopen(url)
    # Load the archive fully into memory: ZipFile needs a seekable file.
    raw_contents = cStringIO.StringIO(remotezip.read())
    target_zip = zipfile.ZipFile(raw_contents)
    # The source site serves single-file archives -- take the first member.
    first_filename = target_zip.namelist()[0]
    return unicode(target_zip.read(first_filename), errors=encoding_error_opt) | Download and unzip the report file at the given URL.
Downloads and unzips the CO-TRACER archive at the given URL. This is not
intended for data outside of the CO-TRACER official site and it will
automatically extract the first file found in the downloaded zip archive
as the CO-TRACER website produces single file archives. Note that the
contents of that file are loaded directly into memory.
@param url: The URL to download the archive from.
@type url: str
@return: The contents of the first file found in the provided archive.
@rtype: str | ### Input:
Download and unzip the report file at the given URL.
Downloads and unzips the CO-TRACER archive at the given URL. This is not
intended for data outside of the CO-TRACER official site and it will
automatically extract the first file found in the downloaded zip archive
as the CO-TRACER website produces single file archives. Note that the
contents of that file are loaded directly into memory.
@param url: The URL to download the archive from.
@type url: str
@return: The contents of the first file found in the provided archive.
@rtype: str
### Response:
def get_zipped_file(url, encoding_error_opt=):
    """Download and unzip the archive at *url*; return its first file decoded to unicode.

    NOTE(review): the stripped ``encoding_error_opt=`` default makes this
    signature invalid as shown; restore the original errors-mode string.
    """
    remotezip = urllib2.urlopen(url)
    raw_contents = cStringIO.StringIO(remotezip.read())  # in-memory, seekable
    target_zip = zipfile.ZipFile(raw_contents)
    first_filename = target_zip.namelist()[0]  # archives contain a single file
    return unicode(target_zip.read(first_filename), errors=encoding_error_opt)
def interstore(self, dest, *others, **kwargs):
    """Store the intersection of this zset and *others* under *dest*; return a ZSet for it."""
    keys = [self.key]
    keys.extend([other.key for other in others])
    # kwargs are forwarded to the underlying ZINTERSTORE call.
    self.database.zinterstore(dest, keys, **kwargs)
    return self.database.ZSet(dest) | Store the intersection of the current zset and one or more
others in a new key.
:param dest: the name of the key to store intersection
:param others: One or more :py:class:`ZSet` instances
:returns: A :py:class:`ZSet` referencing ``dest``. | ### Input:
Store the intersection of the current zset and one or more
others in a new key.
:param dest: the name of the key to store intersection
:param others: One or more :py:class:`ZSet` instances
:returns: A :py:class:`ZSet` referencing ``dest``.
### Response:
def interstore(self, dest, *others, **kwargs):
    """Store the intersection of this zset with *others* under key *dest*.

    :param dest: name of the destination key.
    :param others: other ZSet instances to intersect with.
    :param kwargs: extra options forwarded to the ZINTERSTORE call.
    :returns: a ZSet wrapper referencing *dest*.
    """
    all_keys = [self.key] + [member.key for member in others]
    self.database.zinterstore(dest, all_keys, **kwargs)
    return self.database.ZSet(dest)
async def wait_done(self):
    """Wait until the job set is finished; return immediately if already done."""
    if self._active_jobs > 0:
        # Register a future that is resolved when the last job completes.
        future = self._loop.create_future()
        self._waiters.append(future)
        await future | Waits until the job set is finished. Returns immediately if the job set
is already finished. | ### Input:
Waits until the job set is finished. Returns immediately if the job set
is already finished.
### Response:
async def wait_done(self):
    """Suspend until the job set has finished.

    Returns right away -- without registering a waiter -- when no jobs
    are currently active.
    """
    if self._active_jobs <= 0:
        return
    waiter = self._loop.create_future()
    self._waiters.append(waiter)
    await waiter
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.