code stringlengths 26 79.6k | docstring stringlengths 1 46.9k |
|---|---|
def casefold_parts(self, parts):
    """Return *parts* lower-cased on a Windows filesystem, unchanged otherwise."""
    if not self.filesystem.is_windows_fs:
        return parts
    # Windows paths are case-insensitive: fold every component.
    return [part.lower() for part in parts]
def get(self, key):
    """Return the value mapped to *key*, or ``None`` when absent.

    Warning: a clone of the stored value is returned -- put a modified
    value back to make changes visible to all nodes.  Lookup uses the
    binary form of the key (``__hash__``/``__eq__`` of the serialized
    key, not the class implementations).

    :param key: (object), the specified key.
    :return: (object), the value for the specified key.
    """
    check_not_none(key, "key can't be None")
    return self._get_internal(self._to_data(key))
**Warning:
This method returns a clone of original value, modifying the returned value does not change the actual value in
the map. One should put modified value back to make changes visible to all nodes.**
>>> value = map.get(key)
>>> value.update_some_property()
>>> map.put(key,value)
**Warning 2: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), the specified key.
:return: (object), the value for the specified key. |
def append(self, node):
    """Append *node* as a child of this node, or recurse into the first
    still-active (no elapsed time) children until its parent is found."""
    is_direct_child = node.parent == self.key and not self.elapsed_time
    if is_direct_child:
        self.children.append(node)
        return
    for candidate in self.children:
        if not candidate.elapsed_time:
            candidate.append(node)
def _populate_worksheet(self, workbook, worksheet):
    """Write chart data to *worksheet* in the bubble-chart layout.

    One three-column table per series: X values in column A, Y values in
    column B, bubble sizes in column C; the series label goes in the
    heading cell of the values column.

    NOTE(review): the number-format dict keys and the bubble-size heading
    string were lost in extraction -- restore before use.
    """
    chart_num_format = workbook.add_format(
        {: self._chart_data.number_format}
    )
    for series in self._chart_data:
        series_num_format = (
            workbook.add_format({: series.number_format})
        )
        offset = self.series_table_row_offset(series)
        worksheet.write_column(
            offset+1, 0, series.x_values, chart_num_format
        )
        worksheet.write(offset, 1, series.name)
        worksheet.write_column(
            offset+1, 1, series.y_values, series_num_format
        )
        worksheet.write(offset, 2, )
        worksheet.write_column(
            offset+1, 2, series.bubble_sizes, chart_num_format
        ) | Write chart data contents to *worksheet* in the bubble chart layout.
Write the data for each series to a separate three-column table with
X values in column A, Y values in column B, and bubble sizes in
column C. Place the series label in the first (heading) cell of the
values column. |
def get_container_id(self, container_id=None):
    """Return a container id, shared between functions needing one.

    First preference goes to a *container_id* provided by the user at
    runtime; second preference goes to the container_id the client was
    instantiated with.  Exits via ``bot.exit`` when neither exists.

    Parameters
    ==========
    container_id: image uri to parse (required)
    """
    # `is None` (identity), not `== None`: the idiomatic and correct test.
    if container_id is None and self.container_id is None:
        bot.exit()
    return container_id or self.container_id
container_id. First preference goes to a container_id provided by
the user at runtime. Second preference goes to the container_id
instantiated with the client.
Parameters
==========
container_id: image uri to parse (required) |
def replace(self, expression: Expression, max_count: int=math.inf) -> Union[Expression, Sequence[Expression]]:
    """Apply the replacement rules to *expression* until no rule matches.

    Args:
        expression:
            The expression the replacement rules are applied to.
        max_count:
            Upper bound on the number of rule applications; without a
            bound, rewriting runs to fixpoint (which may not terminate
            for a non-confluent rule set).

    Returns:
        The rewritten expression; may be a sequence of expressions when
        a rule replaces the root with one.
    """
    applications = 0
    changed = True
    while changed and applications < max_count:
        changed = False
        for candidate, position in preorder_iter_with_position(expression):
            try:
                replacement, substitution = next(iter(self.matcher.match(candidate)))
                rewritten = replacement(**substitution)
                expression = functions.replace(expression, position, rewritten)
                changed = True
                break
            except StopIteration:
                # no rule matches this subexpression; keep scanning
                pass
        applications += 1
    return expression
Args:
expression:
The expression to which the replacement rules are applied.
max_count:
If given, at most *max_count* applications of the rules are performed. Otherwise, the rules
are applied until there is no more match. If the set of replacement rules is not confluent,
the replacement might not terminate without a *max_count* set.
Returns:
The resulting expression after the application of the replacement rules. This can also be a sequence of
expressions, if the root expression is replaced with a sequence of expressions by a rule. |
def average_duration(total_duration, visits):
    """Safely calculate and format an average duration.

    :param total_duration: total duration (numeric / Decimal-compatible).
    :param visits: visit count; a falsy count yields a zero duration.
    :return: the ``str`` form of the resulting ``timedelta``.
    """
    seconds = int(round(total_duration / Decimal(visits))) if visits else 0
    return str(timedelta(seconds=seconds))
def key_from_protobuf(pb):
    """Factory: create a ``Key`` from a Cloud Datastore protobuf.

    :type pb: :class:`.entity_pb2.Key`
    :param pb: The Protobuf representing the key.
    :rtype: :class:`google.cloud.datastore.key.Key`
    :returns: a new `Key` instance
    """
    path_args = []
    for element in pb.path:
        path_args.append(element.kind)
        # unset protobuf scalar fields are falsy, so only set ids/names
        # make it into the path (id before name, as in the flat path)
        path_args.extend(
            part for part in (element.id, element.name) if part)
    project = pb.partition_id.project_id or None
    namespace = pb.partition_id.namespace_id or None
    return Key(*path_args, namespace=namespace, project=project)
The protobuf should be one returned from the Cloud Datastore
Protobuf API.
:type pb: :class:`.entity_pb2.Key`
:param pb: The Protobuf representing the key.
:rtype: :class:`google.cloud.datastore.key.Key`
:returns: a new `Key` instance |
def _recursive_split(self, bbox, zoom_level, column, row):
    """Recursively collect OSM-grid bounding boxes intersecting the area.

    :param bbox: Bounding box (BBox)
    :param zoom_level: OSM zoom level (int)
    :param column: Column in the OSM grid (int)
    :param row: Row in the OSM grid (int)

    NOTE(review): the info-dict keys were lost in extraction -- restore
    before use.
    """
    if zoom_level == self.zoom_level:
        # target zoom reached: record this tile and stop recursing
        self.bbox_list.append(bbox)
        self.info_list.append({: zoom_level,
                                : column,
                                : row})
        return
    # split into a 2x2 partition and recurse into intersecting quadrants
    bbox_partition = bbox.get_partition(2, 2)
    for i, j in itertools.product(range(2), range(2)):
        if self._intersects_area(bbox_partition[i][j]):
            self._recursive_split(bbox_partition[i][j], zoom_level + 1, 2 * column + i, 2 * row + 1 - j) | Method that recursively creates bounding boxes of OSM grid that intersect the area.
:param bbox: Bounding box
:type bbox: BBox
:param zoom_level: OSM zoom level
:type zoom_level: int
:param column: Column in the OSM grid
:type column: int
:param row: Row in the OSM grid
:type row: int |
def download_docs(client, output_filename=None, expanded=False):
    """Download all documents of a LuminosoClient project to a JSON-lines
    (.jsons) file, one document per line.

    NOTE(review): several string literals (project-name key, filename
    format templates, open() mode and encoding) were lost in extraction
    -- restore before use.
    """
    if output_filename is None:
        # derive a filename from the project name, avoiding collisions
        projname = _sanitize_filename(client.get()[])
        output_filename = .format(projname)
        counter = 0
        while os.access(output_filename, os.F_OK):
            counter += 1
            output_filename = .format(projname, counter)
    print(.format(output_filename))
    with open(output_filename, , encoding=) as out:
        for doc in iterate_docs(client, expanded=expanded, progress=True):
            print(json.dumps(doc, ensure_ascii=False), file=out) | Given a LuminosoClient pointing to a project and a filename to write to,
retrieve all its documents in batches, and write them to a JSON lines
(.jsons) file with one document per line. |
def get_default_configfile_path():
    """Return the default configuration-file path.

    Typically a user-local configuration file, e.g.
    ``~/.config/dwave/dwave.conf``.

    Returns:
        str: Configuration file path.
    """
    config_dir = homebase.user_config_dir(
        app_author=CONF_AUTHOR, app_name=CONF_APP, roaming=False,
        use_virtualenv=False, create=False)
    return os.path.join(config_dir, CONF_FILENAME)
Typically returns a user-local configuration file; e.g:
``~/.config/dwave/dwave.conf``.
Returns:
str:
Configuration file path.
Examples:
This example displays the default configuration file on an Ubuntu Unix system
running IPython 2.7.
>>> import dwave.cloud as dc
>>> # Display paths
>>> dc.config.get_configfile_paths(only_existing=False) # doctest: +SKIP
['/etc/xdg/xdg-ubuntu/dwave/dwave.conf',
'/usr/share/upstart/xdg/dwave/dwave.conf',
'/etc/xdg/dwave/dwave.conf',
'/home/mary/.config/dwave/dwave.conf',
'./dwave.conf']
>>> # Find default configuration path
>>> dc.config.get_default_configfile_path() # doctest: +SKIP
'/home/mary/.config/dwave/dwave.conf' |
def run(cmd):
    """Prepare and run a subprocess cmd, returning a CompletedProcess.

    NOTE(review): the join separator literal (likely ' ') was lost in
    extraction -- restore before use.
    """
    print("Preparing the following cmd:")
    cmd = prepare_subprocess_cmd(cmd)
    print("Running the following cmd:")
    print(.join(cmd))
    return subprocess.run(cmd, stdout=sys.stdout, stderr=sys.stderr) | Prepare and run a subprocess cmd, returning a CompletedProcess. |
def update_ride(api_client, ride_status, ride_id):
    """Use an UberRidesClient to update a sandbox ride status and print
    the result.

    Parameters
        api_client (UberRidesClient): authorized client, 'request' scope.
        ride_status (str): new ride status to update to.
        ride_id (str): unique identifier of the ride.

    NOTE(review): the success-message format string was lost in
    extraction -- restore before use.
    """
    try:
        update_product = api_client.update_sandbox_ride(ride_id, ride_status)
    except (ClientError, ServerError) as error:
        fail_print(error)
    else:
        message =
        message = message.format(update_product.status_code, ride_status)
        success_print(message) | Use an UberRidesClient to update ride status and print the results.
Parameters
api_client (UberRidesClient)
An authorized UberRidesClient with 'request' scope.
ride_status (str)
New ride status to update to.
ride_id (str)
Unique identifier for ride to update. |
def set(self, folder: str, subscribed: bool) -> None:
    """Set the subscribed status of *folder* by adding or removing it."""
    action = self.add if subscribed else self.remove
    action(folder)
def _assert_all_loadable_terms_specialized_to(self, domain):
for term in self.graph.node:
if isinstance(term, LoadableTerm):
assert term.domain is domain | Make sure that we've specialized all loadable terms in the graph. |
def variance(data, data_mean=None):
    """Return the (population) variance of a sequence of numbers.

    :param data: Non-empty sequence of numbers.
    :param data_mean: Precomputed mean of the sequence; when ``None`` it
        is computed via ``mean(data)``.
    """
    # `is None` rather than truthiness: a legitimate precomputed mean of
    # 0 must not trigger a recomputation (`data_mean or mean(data)` did).
    if data_mean is None:
        data_mean = mean(data)
    return sum((x - data_mean) ** 2 for x in data) / len(data)
:param data_mean: Precomputed mean of the sequence. |
def get_item(self):
    """Return the item to send back into the workflow generator.

    When the original input was a list, finished, error-free
    ``WorkflowItem`` entries are unwrapped to their results and
    everything else is passed through; otherwise the single wrapped
    item is returned.
    """
    if not self.was_list:
        return self[0]
    result = ResultList()
    for entry in self:
        unwrap = (isinstance(entry, WorkflowItem)
                  and entry.done and not entry.error)
        result.append(entry.result if unwrap else entry)
    return result
def add_all_at(self, index, items):
    """Add all elements of *items* into this list at *index*.

    Elements at and after that position are shifted right; insertion
    order follows the collection's iterator.

    :param index: (int), position at which the first element is added.
    :param items: (Collection), the elements to be added to the list.
    :return: (bool), ``true`` if this call changed the list.
    """
    check_not_none(items, "Value cant be None")
    # NOTE(review): the serialization loop was missing (`data_items` and
    # `item` were unbound names); reconstructed in the style of the
    # sibling collection methods.
    data_items = []
    for item in items:
        data_items.append(self._to_data(item))
    return self._encode_invoke(list_add_all_with_index_codec, index=index, value_list=data_items)
positions and following elements are shifted to the right, if any. The order of new elements is determined by the
specified collection's iterator.
:param index: (int), the specified index at which the first element of specified collection is added.
:param items: (Collection), the specified collection which includes the elements to be added to list.
:return: (bool), ``true`` if this call changed the list, ``false`` otherwise. |
def color_stream_st(istream=sys.stdin, save_palette=False, **kwargs):
    """Read file names from *istream* and print each file's palette.

    Extraction failures are reported to stderr and the file is skipped.
    When *save_palette* is set, each palette is also saved as an image.
    """
    for raw_line in istream:
        filename = raw_line.strip()
        try:
            palette = extract_colors(filename, **kwargs)
        except Exception as exc:
            print(filename, exc, file=sys.stderr)
            continue
        print_colors(filename, palette)
        if save_palette:
            save_palette_as_image(filename, palette)
def load_resource(resource_url: str, forceupdate: bool = False):
    """Load a BEL Resource file.

    Downloads the gzipped JSON-lines file, checks that the first line
    carries a ``metadata`` object, then dispatches to the namespace or
    ortholog loader.

    Args:
        resource_url: URL from which to download the resource to load into the BEL API
        forceupdate: force full update - e.g. don't leave Elasticsearch
            indexes alone if their version ID matches
    """
    log.info(f"Loading resource {resource_url}")
    fo = None
    try:
        fo = bel.utils.download_file(resource_url)
        if not fo:
            log.error(f"Could not download and open file {resource_url}")
            return "Failed to download resource_url"
        fo.seek(0)
        with gzip.open(fo, "rt") as f:
            metadata = json.loads(f.__next__())
        if "metadata" not in metadata:
            log.error(f"Missing metadata entry for {resource_url}")
            return "Cannot load resource file - missing metadata object in first line of file"
        if metadata["metadata"]["type"] == "namespace":
            bel.resources.namespace.load_terms(fo, metadata, forceupdate)
        elif metadata["metadata"]["type"] == "ortholog":
            bel.resources.ortholog.load_orthologs(fo, metadata)
    finally:
        # BUG FIX: `fo` can be None/falsy when the download failed (the
        # early return above); closing it unconditionally raised
        # AttributeError and masked the real error.
        if fo:
            fo.close()
Forceupdate will create a new index in Elasticsearch regardless of whether
an index with the resource version already exists.
Args:
resource_url: URL from which to download the resource to load into the BEL API
forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches |
def format_table(self, width=None,
                 min_label_width=10, min_progress_width=10):
    """Format the entire table of progress bars.

    Field widths are computed first so columns stay aligned across
    lines; the formatted lines are returned as a list of strings.
    """
    if not self._lines:
        return []
    if width is None:
        width = shutil.get_terminal_size()[0]
    label_w, progress_w, summary_w = self.calculate_field_widths(
        width=width,
        min_label_width=min_label_width,
        min_progress_width=min_progress_width
    )
    formatted = []
    for bar in self._lines:
        formatted.append(bar.format_status(
            label_width=label_w,
            progress_width=progress_w,
            summary_width=summary_w
        ))
    return formatted
The function first computes the widths of the fields so they can be
aligned across lines and then returns formatted lines as a list of
strings. |
def delete_collection_namespaced_config_map(self, namespace, **kwargs):
    """Delete a collection of ConfigMap objects in *namespace*.

    Synchronous by default; with ``async_req=True`` the request thread is
    returned instead of the V1Status data.

    NOTE(review): the kwargs key literals (likely
    '_return_http_data_only' and 'async_req') were lost in extraction --
    restore before use.
    """
    kwargs[] = True
    if kwargs.get():
        return self.delete_collection_namespaced_config_map_with_http_info(namespace, **kwargs)
    else:
        (data) = self.delete_collection_namespaced_config_map_with_http_info(namespace, **kwargs)
        return data | delete_collection_namespaced_config_map # noqa: E501
delete collection of ConfigMap # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_config_map(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread. |
def get_queues(self):
    """Return a list of currently active Celery queues.

    NOTE(review): the ``result.get(...)`` key (likely 'name') was lost in
    extraction -- restore before use.
    """
    res = self.celery.control.inspect().active_queues() or dict()
    return [result.get() for host in res.values() for result in host] | Return a list of current active Celery queues. |
def cmd(*args, **kwargs):
    """Decorate a callable, replacing it with a manufactured command class.

    Extends ``CommandDecorator`` so the same ``cmd`` works both as a
    bare decorator (``@cmd``) and as a decorator factory
    (``@cmd(root=True)``), enabling composition of configuration via
    partials and similar helpers.
    """
    if args and callable(args[0]):
        # used as a bare decorator: first positional arg is the callable
        return CommandDecorator(*args[1:], **kwargs)(args[0])
    # used as a decorator factory: defer decoration
    return CommandDecorator(*args, **kwargs)
class.
Extends the interface of ``CommandDecorator``, allowing the same
``cmd`` to be used as a decorator or as a decorator factory::
@cmd(root=True)
def build():
...
@build.register
@cmd
def deploy():
...
Further enables composition of configuration, for example via
partials, as helpers. |
def visit_Output(self, node, frame):
    """Same as ``CodeGenerator.visit_Output`` but without calling
    ``to_string`` on output nodes; constant output nodes are folded at
    compile time when ``finalize`` allows it.

    NOTE(review): several string literals (getattr attribute names, the
    output-check writeline, etc.) were lost in extraction, and the body
    appears truncated mid-loop -- restore before use.
    """
    if self.has_known_extends and frame.require_output_check:
        return
    finalize = self.environment.finalize
    finalize_context = getattr(finalize, , False)
    finalize_eval = getattr(finalize, , False)
    finalize_env = getattr(finalize, , False)
    if finalize is not None:
        if finalize_context or finalize_eval:
            # finalize needs runtime context: no compile-time folding
            const_finalize = None
        elif finalize_env:
            def const_finalize(x):
                return finalize(self.environment, x)
        else:
            const_finalize = finalize
    else:
        def const_finalize(x):
            return x
    outdent_later = False
    if frame.require_output_check:
        self.writeline()
        self.indent()
        outdent_later = True
    body = []
    for child in node.nodes:
        try:
            if const_finalize is None:
                raise nodes.Impossible()
            const = child.as_const(frame.eval_ctx)
            if not has_safe_repr(const):
                raise nodes.Impossible()
        except nodes.Impossible:
            # cannot constant-fold: emit this node at runtime
            body.append(child)
            continue
        if outdent_later:
            self.outdent() | Same as :meth:`CodeGenerator.visit_Output`, but do not call
``to_string`` on output nodes in generated code. |
def _send_outgoing_route_refresh_msg(self, rr_msg):
    """Send the given ROUTE-REFRESH message to the peer, updating the
    sent-refresh counter and (for demarcation == 1 with a non-zero
    configured limit) scheduling the max-EOR timer.

    :param rr_msg: (RouteRefresh) route refresh message to send to peer.

    NOTE(review): the LOG.debug format strings and the timer-name
    literal were lost in extraction -- restore before use.
    """
    assert rr_msg.type == BGP_MSG_ROUTE_REFRESH
    self._protocol.send(rr_msg)
    LOG.debug(,
              self._neigh_conf.ip_address, rr_msg)
    if rr_msg.demarcation == 0:
        self.state.incr(PeerCounterNames.SENT_REFRESH)
    elif (rr_msg.demarcation == 1 and
            self._common_conf.refresh_max_eor_time != 0):
        eor_timer = self._common_conf.refresh_max_eor_time
        self._spawn_after(, eor_timer,
                          self._enqueue_eor_msg, rr_msg)
        LOG.debug() | Sends given message `rr_msg` to peer.
Parameters:
- rr_msg: (RouteRefresh) route refresh message to send to peer.
Update appropriate counters and set appropriate timers. |
def switch(self, val=None):
    """Set the state of the switch; a no-op unless it is armed.

    :param val: Boolean state to set; ``None`` toggles the current state.
    :return: Boolean. ``True`` when the switch was armed and the
        operation was applied, ``False`` otherwise.
    """
    if not self._armed:
        return False
    self._switched = (not self._switched) if val is None else val
    self._armed = False
    return True
the function does nothing.
:param val: Boolean. The value to set the switch state to. When None,
the switch will be set to the opposite of its current state.
:return: Boolean. Returns True if operation was successful (i.e. the
switch was armed before this was called) |
def element_tree_collection_to_records(tree):
    """Convert an ElementTree of a MARCXML ``<collection>`` into BibRecord
    records.

    Each child ``<record>`` element is serialized back to MARCXML and
    parsed with ``create_record``; when parsing reports errors, the
    status is printed.
    """
    from .bibrecord import create_record
    records = []
    collection = tree.getroot()
    # Iterate the element directly: Element.getchildren() was deprecated
    # and removed in Python 3.9.
    for record_element in collection:
        marcxml = ET.tostring(record_element, encoding="utf-8")
        record, status, errors = create_record(marcxml)
        if errors:
            print(str(status))
        records.append(record)
    return records
This function is for a tree root of collection as such:
<collection>
<record>
<!-- MARCXML -->
</record>
<record> ... </record>
</collection> |
def get_urls(self):
    """Add calendar and calendar-data URLs to the default admin URLs.

    NOTE(review): the url prefix/regex/name string literals were lost in
    extraction; ``django.conf.urls.patterns`` is also removed in modern
    Django -- restore/port before use.
    """
    from django.conf.urls import patterns, url
    urls = super(EventAdmin, self).get_urls()
    my_urls = patterns(
        ,
        url(
            r,
            self.admin_site.admin_view(self.calendar),
            name=
        ),
        url(
            r,
            self.admin_site.admin_view(self.calendar_data),
            name=
        ),
    )
    return my_urls + urls | Add a calendar URL. |
def _slugify_foreign_key(schema):
    """Slugify the resource names referenced by each foreign key.

    NOTE(review): the dict key literals (likely 'foreignKeys',
    'reference', 'resource') were lost in extraction -- restore before
    use.
    """
    for foreign_key in schema.get(, []):
        foreign_key[][] = _slugify_resource_name(
            foreign_key[].get(, ))
    return schema | Slugify foreign key |
def check_for_usable_restore_env(self):
    """Check if the current env can be used to restore files."""
    self.check_for_usable_environment()
    if os.path.isdir(self.mackup_folder):
        return
    utils.error("Unable to find the Mackup folder: {}\n"
                "You might want to back up some files or get your"
                " storage directory synced first."
                .format(self.mackup_folder))
def deploy_media():
    """Deploy MEDIA_ROOT unversioned on the host.

    NOTE(review): Python 2 print statements remain, and several string
    literals (the membership-test literal, path-join separators, the
    'media' path segment) were lost in extraction -- restore before use.
    """
    if not env.MEDIA_URL or not env.MEDIA_ROOT or in env.MEDIA_URL: return
    local_dir = env.MEDIA_ROOT
    remote_dir = .join([deployment_root(),])
    media_url = env.MEDIA_URL[1:]
    if media_url:
        remote_dir = .join([remote_dir,media_url])
    if env.verbosity:
        print env.host,"DEPLOYING media",remote_dir
    deployed = deploy_files(local_dir,remote_dir)
    sudo("chown -R www-data:sudo %s" % remote_dir)
    sudo("chmod -R ug+w %s"% remote_dir)
    return deployed | Deploy MEDIA_ROOT unversioned on host |
def _do_code_exchange(self, request,
                       extra_id_token_claims=None
                       ):
    """Handle a token request exchanging an authorization code for an
    access token (grant_type=authorization_code).

    :param request: parsed http request parameters
    :param extra_id_token_claims: extra claims for the signed ID Token --
        dict-like, or a callable accepting (user_id, client_id).
    :return: a token response with a signed ID Token, an Access Token and
        a Refresh Token
    :raise InvalidTokenRequest: if the token request is invalid

    NOTE(review): many dict-key string literals (e.g. 'code',
    'client_id', 'redirect_uri', 'refresh_token', 'id_token') and the
    log/format strings were lost in extraction -- restore before use.
    """
    token_request = AccessTokenRequest().from_dict(request)
    try:
        token_request.verify()
    except MessageException as e:
        raise InvalidTokenRequest(str(e), token_request) from e
    authentication_request = self.authz_state.get_authorization_request_for_code(token_request[])
    if token_request[] != authentication_request[]:
        logger.info(%s\%s\%s\,
                    token_request[], authentication_request[], token_request[])
        raise InvalidAuthorizationCode(.format(token_request[]))
    if token_request[] != authentication_request[]:
        raise InvalidTokenRequest(.format(token_request[],
                                          authentication_request[]),
                                  token_request)
    sub = self.authz_state.get_subject_identifier_for_code(token_request[])
    user_id = self.authz_state.get_user_id_for_subject_identifier(sub)
    response = AccessTokenResponse()
    access_token = self.authz_state.exchange_code_for_token(token_request[])
    self._add_access_token_to_response(response, access_token)
    refresh_token = self.authz_state.create_refresh_token(access_token.value)
    if refresh_token is not None:
        response[] = refresh_token
    if extra_id_token_claims is None:
        extra_id_token_claims = {}
    elif callable(extra_id_token_claims):
        extra_id_token_claims = extra_id_token_claims(user_id, authentication_request[])
    requested_claims = self._get_requested_claims_in(authentication_request, )
    user_claims = self.userinfo.get_claims_for(user_id, requested_claims)
    response[] = self._create_signed_id_token(authentication_request[], sub,
                                              user_claims,
                                              authentication_request.get(),
                                              None, access_token.value,
                                              extra_id_token_claims)
    logger.debug(,
                 response[], requested_claims, user_claims, extra_id_token_claims)
    return response | Handles a token request for exchanging an authorization code for an access token
(grant_type=authorization_code).
:param request: parsed http request parameters
:param extra_id_token_claims: any extra parameters to include in the signed ID Token, either as a dict-like
object or as a callable object accepting the local user identifier and client identifier which returns
any extra claims which might depend on the user id and/or client id.
:return: a token response containing a signed ID Token, an Access Token, and a Refresh Token
:raise InvalidTokenRequest: if the token request is invalid |
def get_authorizations_for_resource_and_function(self, resource_id, function_id):
    """Get ``Authorizations`` associated with a given resource and
    function, sorted ascending.

    arg: resource_id (osid.id.Id): a resource ``Id``
    arg: function_id (osid.id.Id): a function ``Id``
    return: (osid.authorization.AuthorizationList)
    raise: NullArgument, OperationFailed, PermissionDenied

    NOTE(review): the runtime app/collection names and the query/sort
    field-name literals were lost in extraction -- restore before use.
    """
    collection = JSONClientValidated(,
                                     collection=,
                                     runtime=self._runtime)
    result = collection.find(
        dict({: str(resource_id),
              : str(function_id)},
             **self._view_filter())).sort(, ASCENDING)
    return objects.AuthorizationList(result, runtime=self._runtime) | Gets a list of ``Authorizations`` associated with a given resource.
Authorizations related to the given resource, including those
related through an ``Agent,`` are returned. In plenary mode, the
returned list contains all known authorizations or an error
results. Otherwise, the returned list may contain only those
authorizations that are accessible through this session.
arg: resource_id (osid.id.Id): a resource ``Id``
arg: function_id (osid.id.Id): a function ``Id``
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NullArgument - ``resource_id`` or ``function_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def sum_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0):
    r"""Calculate a multi-dimensional sum filter.

    Convenience implementation employing convolve: the footprint,
    mirrored along every axis, is used directly as the convolution
    kernel, so each output element is the sum of the input values the
    footprint selects.

    Parameters
    ----------
    input : array-like
        Input array to filter.
    size : scalar or tuple, optional
        Shape taken from the input array at every element position;
        either `size` or `footprint` must be defined.
    footprint : array, optional
        Boolean array specifying (implicitly) a shape and which elements
        within it get passed to the filter function.
    output : array, optional
        Array in which to store the filter output.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        How the array borders are handled; `cval` applies to 'constant'.
    cval : scalar, optional
        Value to fill past edges when ``mode`` is 'constant'. Default 0.0.
    origin : scalar, optional
        Placement of the filter. Default 0.

    Returns
    -------
    sum_filter : ndarray
        Returned array of same shape as `input`.
    """
    # (the stray bare `r` statement -- a docstring remnant that raised
    # NameError at runtime -- has been folded back into the docstring)
    footprint = __make_footprint(input, size, footprint)
    # Mirror the footprint along every axis (correlation -> convolution).
    # Index with a *tuple* of slices: indexing an ndarray with a list of
    # slices is deprecated and removed in modern NumPy.
    slicer = tuple(slice(None, None, -1) for _ in range(footprint.ndim))
    return convolve(input, footprint[slicer], output, mode, cval, origin)
Calculates a multi-dimensional sum filter.
Parameters
----------
input : array-like
input array to filter
size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
Returns
-------
sum_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
Convenience implementation employing convolve.
See Also
--------
scipy.ndimage.filters.convolve : Convolve an image with a kernel. |
def get_css_classes(document, style):
    """Return the CSS classes for *style* as a single joined string.

    :Args:
      - document (:class:`ooxml.doc.Document`): Document object
      - style (:class:`ooxml.doc.Style`): Style object

    NOTE(review): the format/join string literals were lost in
    extraction, and the ``[-1:]`` slices disagree with the stated
    "all base styles" behavior -- verify against upstream before use.
    """
    lst = [st.lower() for st in get_all_styles(document, style)[-1:]] + \
        [.format(st.lower()) for st in get_all_styles(document, style)[-1:]]
    return .join(lst) | Returns CSS classes for this style.
This function will check all the styles specified style is based on and return their CSS classes.
:Args:
- document (:class:`ooxml.doc.Document`): Document object
- style (:class:`ooxml.doc.Style`): Style object
:Returns:
String representing all the CSS classes for this element.
>>> get_css_classes(doc, st)
'header1 normal' |
def store(self, image, file):
    """Store the actual data *file* of the given *image*.

    :param image: :class:`sqlalchemy_imageattach.entity.Image` to store
    :param file: file-like object to put (must be readable)

    NOTE(review): the TypeError message literals and the getattr
    attribute name (likely 'read') were lost in extraction -- restore
    before use.
    """
    from .entity import Image
    if not isinstance(image, Image):
        raise TypeError(
             + repr(image))
    elif not callable(getattr(file, , None)):
        raise TypeError(
             + repr(file))
    self.put_file(file, image.object_type, image.object_id,
                  image.width, image.height, image.mimetype,
                  not image.original) | Stores the actual data ``file`` of the given ``image``.
::
with open(imagefile, 'rb') as f:
store.store(image, f)
:param image: the image to store its actual data file
:type image: :class:`sqlalchemy_imageattach.entity.Image`
:param file: the image file to put
:type file: file-like object, :class:`file` |
def do_authentication(environ, start_response, authn_context, key,
                      redirect_uri):
    """Display the login form for the first usable authentication method,
    or respond 401 when the broker offers none."""
    logger.debug("Do authentication")
    auth_info = AUTHN_BROKER.pick(authn_context)
    if not len(auth_info):
        resp = Unauthorized("No usable authentication method")
        return resp(environ, start_response)
    method, reference = auth_info[0]
    logger.debug("Authn chosen: %s (ref=%s)", method, reference)
    return method(environ, start_response, reference, key, redirect_uri)
def livehtml(context):
    "Launch webserver on http://localhost:8000 with rendered documentation"
    # NOTE(review): the builder name and the command-line format string
    # were lost in extraction -- restore before use.
    builder =
    outputdir = os.path.join(DOCS_BUILDDIR, builder)
    cmdline = .format(builder, DOCS_SRCDIR, outputdir)
    context.run(cmdline, pty=True) | Launch webserver on http://localhost:8000 with rendered documentation |
def load_csv_data(fname, tag):
    """Load data from a comma separated SuperMAG file.

    Parameters
    ------------
    fname : (str)
        CSV SuperMAG file name
    tag : (str)
        File type: 'indices', 'all', 'stations', or '' (magnetometer
        measurements only)

    Returns
    --------
    data : (pandas.DataFrame)

    NOTE(review): the re.split pattern, read_csv parse_dates key, and
    index_col literals were lost in extraction -- restore before use.
    """
    import re
    if tag == "stations":
        ddict = dict()
        dkeys = list()
        date_list = list()
        with open(fname, "r") as fopen:
            # year is encoded in the filename's final underscore segment
            dtime = pds.datetime.strptime(fname.split("_")[-1].split(".")[0],
                                          "%Y")
            for fline in fopen.readlines():
                sline = [ll for ll in re.split(r, fline) if len(ll) > 0]
                if len(ddict.items()) == 0:
                    # first line: column headers become dict keys
                    for kk in sline:
                        kk = re.sub("-", "_", kk)
                        ddict[kk] = list()
                        dkeys.append(kk)
                else:
                    date_list.append(dtime)
                    for i,ll in enumerate(sline):
                        if i >= 1 and i <= 4:
                            ddict[dkeys[i]].append(float(ll))
                        elif i == 6:
                            ddict[dkeys[i]].append(int(ll))
                        elif i < len(dkeys):
                            ddict[dkeys[i]].append(ll)
                        else:
                            # overflow tokens join the last column's value
                            ddict[dkeys[-1]][-1] += " {:s}".format(ll)
        data = pds.DataFrame(ddict, index=date_list, columns=ddict.keys())
    else:
        def parse_smag_date(dd):
            return pysat.datetime.strptime(dd, "%Y-%m-%d %H:%M:%S")
        data = pds.read_csv(fname, parse_dates={:[0]},
                            date_parser=parse_smag_date, index_col=)
    return data | Load data from a comma separated SuperMAG file
Parameters
------------
fname : (str)
CSV SuperMAG file name
tag : (str)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements).
Returns
--------
data : (pandas.DataFrame)
Pandas DataFrame |
def status(name, location=):
    r'''Determine the status of a scheduled task (one of Unknown,
    Disabled, Queued, Ready, Running).

    :param str name: the task name
    :param str location: the task-scheduler folder; default is the root
    :return: the current status string

    NOTE(review): the default value for *location*, the not-found format
    string, and the original raw docstring were mangled in extraction --
    restore before use.
    '''
    if name not in list_tasks(location):
        return .format(name, location)
    with salt.utils.winapi.Com():
        task_service = win32com.client.Dispatch("Schedule.Service")
        task_service.Connect()
        task_folder = task_service.GetFolder(location)
        task = task_folder.GetTask(name)
        return states[task.State] | r'''
Determine the status of a task. Is it Running, Queued, Ready, etc.
:param str name: The name of the task for which to return the status
:param str location: A string value representing the location of the task.
Default is '\\' which is the root for the task scheduler
(C:\Windows\System32\tasks).
:return: The current status of the task. Will be one of the following:
- Unknown
- Disabled
- Queued
- Ready
- Running
:rtype: string
CLI Example:
.. code-block:: bash
salt 'minion-id' task.list_status <task_name> |
def has_button(self, button):
    """Check if this device has a given button.

    Args:
        button (int): Button to check for (see ``input.h``).
    Returns:
        bool: ``True`` if the device has this button.
    Raises:
        AssertionError: if libinput reports a negative return code.

    NOTE(review): the assert message literal was lost in extraction,
    leaving a dangling comma -- restore before use.
    """
    rc = self._libinput.libinput_device_pointer_has_button(
        self._handle, button)
    assert rc >= 0,
    return bool(rc) | Check if this device has a given button.
Args:
button (int): Button to check for, see ``input.h`` for button
definitions.
Returns:
bool: :obj:`True` if the device has this button, :obj:`False` if
it does not.
Raises:
AssertionError |
def save(self):
    """Save locally cached DesignDocument changes to the remote database
    (creating it when absent), after validating views and indexes
    against the declared query language.

    NOTE(review): the language-key literals in ``self.get(, None)`` and
    the view/index field-key literals were lost in extraction -- restore
    before use.
    """
    if self.views:
        if self.get(, None) != QUERY_LANGUAGE:
            # non-query language docs must not carry query-index views
            for view_name, view in self.iterviews():
                if isinstance(view, QueryIndexView):
                    raise CloudantDesignDocumentException(104, view_name)
        else:
            for view_name, view in self.iterviews():
                if not isinstance(view, QueryIndexView):
                    raise CloudantDesignDocumentException(105, view_name)
    if self.indexes:
        if self.get(, None) != QUERY_LANGUAGE:
            for index_name, search in self.iterindexes():
                if not isinstance(search[], STRTYPE):
                    raise CloudantDesignDocumentException(106, index_name)
        else:
            for index_name, index in self.iterindexes():
                if not isinstance(index[], dict):
                    raise CloudantDesignDocumentException(107, index_name)
    for prop in self._nested_object_names:
        # drop empty nested objects before the remote save
        if not getattr(self, prop):
            self.__delitem__(prop)
    super(DesignDocument, self).save()
    for prop in self._nested_object_names:
        # restore the nested containers locally after the save
        getattr(self, prop, self.setdefault(prop, dict())) | Saves changes made to the locally cached DesignDocument object's data
structures to the remote database. If the design document does not
exist remotely then it is created in the remote database. If the object
does exist remotely then the design document is updated remotely. In
either case the locally cached DesignDocument object is also updated
accordingly based on the successful response of the operation. |
def format(self, record):
    """Format the specified record as text.

    Modified from logging/__init__.py in the Python 2.7 standard library.
    The record's attribute dictionary is used as the operand to a string
    formatting operation. ``record.message`` is computed via
    ``getMessage()``; if ``usesTime()`` is true the event time is
    formatted; records at ERROR level or above use ``self._err_format``,
    others use ``self._fmt``; any exception info is appended.
    """
    record.message = record.getMessage()
    if self.usesTime():
        record.asctime = self.formatTime(record, self.datefmt)
    # Stripped literal restored: select the error format for levels at
    # or above ERROR (logging.getLevelName('ERROR') == 40).
    if record.levelno >= logging.getLevelName('ERROR'):
        s = self._err_format % record.__dict__
    else:
        s = self._fmt % record.__dict__
    if record.exc_info:
        record.exc_text = self.formatException(record.exc_info)
    if record.exc_text:
        if s[-1:] == "\n":
            s = s[:-1]
        try:
            # Stripped literal restored: newline separator before the
            # traceback text (mirrors the stdlib implementation).
            s = s + '\n' + record.exc_text
        except UnicodeError:
            # Stripped literal restored: 'replace' error handler, as in
            # the stdlib fallback for non-decodable traceback bytes.
            s = s + record.exc_text.decode(sys.getfilesystemencoding(),
                                           'replace')
        record.exc_text = None
    return s
def query(self, queryString):
    """Execute a query against the specified object and return data that
    matches the specified criteria."""
    self._setHeaders()
    service = self._sforce.service
    return service.query(queryString)
def _read_para_unassigned(self, code, cbit, clen, *, desc, length, version):
unassigned = dict(
type=desc,
critical=cbit,
length=clen,
contents=self._read_fileng(clen),
)
plen = length - clen
if plen:
self._read_fileng(plen)
return unassigned | Read HIP unassigned parameters.
Structure of HIP unassigned parameters [RFC 5201][RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type |C| Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
/ Contents /
/ +-+-+-+-+-+-+-+-+
| | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 para.type Parameter Type
1 15 para.critical Critical Bit
2 16 para.length Length of Contents
4 32 para.contents Contents
- - - Padding |
def _mergeGoSymbols(self, jsons = []):
symbols = {}
symbols["types"] = []
symbols["funcs"] = []
symbols["vars"] = []
for file_json in jsons:
symbols["types"] += file_json["types"]
symbols["funcs"] += file_json["funcs"]
symbols["vars"] += file_json["vars"]
return symbols | Exported symbols for a given package does not have any prefix.
So I can drop all import paths that are file specific and merge
all symbols.
Assuming all files in the given package has mutual exclusive symbols. |
def _format_num(self, value):
if value is True or value is False:
raise TypeError()
return self.num_type(value) | Return the number value for value, given this field's `num_type`. |
def records(self):
    # Collect unique, non-trivial records parsed from this sentence.
    # NOTE(review): two string literals were stripped from this copy (the
    # CONTROL_RE.sub replacement string and the members of the set
    # literal on the `all(...)` line); restore them before running.
    compounds = ModelList()
    seen_labels = set()
    # Remove control characters from tokens before parsing.
    tagged_tokens = [(CONTROL_RE.sub(, token), tag) for token, tag in self.tagged_tokens]
    for parser in self.parsers:
        for record in parser.parse(tagged_tokens):
            p = record.serialize()
            if not p:
                continue
            # Skip exact duplicates.
            if record in compounds:
                continue
            # Skip records that carry only already-seen labels.
            if all(k in {, } for k in p.keys()) and set(record.labels).issubset(seen_labels):
                continue
            seen_labels.update(record.labels)
            compounds.append(record)
    return compounds | Return a list of records for this sentence.
def get_instances(feature_name):
    """Return all instances that compute `feature_name`."""
    return [ft for ft in AncillaryFeature.features
            if ft.feature_name == feature_name]
def get_image(self, filename: str=None) -> None:
    # Download the photo associated with a Savings Goal and write it to
    # `filename` (defaults to "<goal name>.png").
    # NOTE(review): the JSON key subscripted from response.json() and the
    # open() mode string (presumably 'wb', since base64-decoded bytes are
    # written) were stripped from this copy; restore them before running.
    if filename is None:
        filename = "{0}.png".format(self.name)
    endpoint = "/account/{0}/savings-goals/{1}/photo".format(
        self._account_uid,
        self.uid
    )
    response = get(
        _url(endpoint, self._sandbox),
        headers=self._auth_headers
    )
    response.raise_for_status()
    base64_image = response.json()[]
    with open(filename, ) as file:
        file.write(b64decode(base64_image)) | Download the photo associated with a Savings Goal.
def _get_arg_tokens(cli):
    # Build the token list for the arg-prompt display: a prefix token,
    # the current argument as text, and a suffix token.
    # NOTE(review): the prefix/suffix string literals inside the first
    # and last tuples were stripped from this copy; restore them before
    # running.
    arg = cli.input_processor.arg
    return [
        (Token.Prompt.Arg, ),
        (Token.Prompt.Arg.Text, str(arg)),
        (Token.Prompt.Arg, ),
    ] | Tokens for the arg-prompt.
def _spawn(self):
self.queue = Queue(maxsize=self.num_threads * 10)
for i in range(self.num_threads):
t = Thread(target=self._consume)
t.daemon = True
t.start() | Initialize the queue and the threads. |
def process_configs(file_lookup, app_config_format, pipeline_config):
    # Build per-environment application configs plus a pipeline entry.
    # NOTE(review): string literals were stripped from this copy (the log
    # message formats and the pipeline dict keys -- the empty argument
    # lists / subscripts below are not valid Python); restore them before
    # running.
    app_configs = collections.defaultdict(dict)
    for env in ENVS:
        file_json = app_config_format.format(env=env)
        try:
            env_config = file_lookup.json(filename=file_json)
            app_configs[env] = apply_region_configs(env_config)
        except FileNotFoundError:
            # Missing env config is logged but does not abort the others.
            LOG.critical(, env)
            continue
    try:
        app_configs[] = file_lookup.json(filename=pipeline_config)
    except FileNotFoundError:
        # Fall back to a default pipeline definition.
        LOG.warning()
        app_configs[] = {: [, ]}
    LOG.debug(, app_configs)
    return app_configs | Processes the configs from lookup sources.
Args:
file_lookup (FileLookup): Source to look for file/config
app_config_format (str): The format for application config files.
pipeline_config (str): Name/path of the pipeline config
Returns:
dict: Retreived application config |
def set_widgets(self):
    # Populate the Field tab widgets for the keyword wizard.
    # NOTE(review): this copy is incomplete -- the loop that iterates the
    # layer's fields (which would provide `field`, `layer_field_types`
    # and `default_item`, and make the bare `continue` legal) is missing,
    # and the get_existing_keyword(...) argument literal was stripped.
    # Do not assume this block is runnable as-is.
    self.clear_further_steps()
    purpose = self.parent.step_kw_purpose.selected_purpose()
    subcategory = self.parent.step_kw_subcategory.selected_subcategory()
    unit = self.parent.step_kw_unit.selected_unit()
    layer_mode = self.parent.step_kw_layermode.selected_layermode()
    if field.type() not in layer_field_types:
        continue
    field_name = field.name()
    item = QListWidgetItem(field_name, self.lstFields)
    item.setData(Qt.UserRole, field_name)
    if default_item:
        self.lstFields.setCurrentItem(default_item)
    self.lblDescribeField.clear()
    # Pre-select fields already recorded in the layer's keywords.
    field_keyword = self.parent.field_keyword_for_the_layer()
    inasafe_field_keywords = self.parent.get_existing_keyword(
        )
    if inasafe_field_keywords:
        fields = inasafe_field_keywords.get(field_keyword)
        if isinstance(fields, str):
            fields = [fields]
        if fields:
            option_fields = []
            for index in range(self.lstFields.count()):
                option_fields.append(
                    str(self.lstFields.item(index).text()))
            for field in fields:
                if field in option_fields:
                    self.lstFields.item(option_fields.index(
                        field)).setSelected(True)
    self.auto_select_one_item(self.lstFields)
    # Enable Next only when at least one field is selected.
    if self.selected_fields():
        self.parent.pbnNext.setEnabled(True)
    else:
        self.parent.pbnNext.setEnabled(False) | Set widgets on the Field tab.
def get_addon_name(addonxml):
    """Parse the addon name from the given addon.xml filename.

    :param addonxml: path to a Kodi addon.xml file whose root <addon>
        element carries a ``name`` attribute.
    :return: the value of the root element's name attribute.
    """
    xml = parse(addonxml)
    # Stripped literals restored: the document root of addon.xml is the
    # single <addon> element; its display name is the 'name' attribute.
    addon_node = xml.getElementsByTagName('addon')[0]
    return addon_node.getAttribute('name')
def get_data(self, minion):
    # Return the roster entry for `minion`, merged over the master-opts
    # defaults; False when the entry has an unsupported type.
    # NOTE(review): the __opts__.get(...) key and the dict key used for a
    # bare-string entry were stripped from this copy; restore them before
    # running.
    ret = copy.deepcopy(__opts__.get(, {}))
    if isinstance(self.raw[minion], six.string_types):
        # A bare string is shorthand for the connection target.
        ret.update({: self.raw[minion]})
        return ret
    elif isinstance(self.raw[minion], dict):
        ret.update(self.raw[minion])
        return ret
    return False | Return the configured ip
def _hide_column(self, column):
    """Hide a column by renaming it with a '__' prefix."""
    column = _ensure_string_from_expression(column)
    # Stripped literal restored: per this method's own documentation the
    # hidden name is the column name prefixed with '__'; _find_valid_name
    # avoids collisions with an existing name.
    new_name = self._find_valid_name('__' + column)
    self._rename(column, new_name)
def get_version_string(check_name):
    """Get the version string for the given check, or None if absent."""
    match = VERSION.search(read_version_file(check_name))
    if match is None:
        return None
    return match.group(1)
def add(self, num_particles, D):
    """Add `num_particles` particles with diffusion coefficient `D` at
    random positions (generated within `self.box` using `self.rs`)."""
    new_particles = self._generate(num_particles, D, box=self.box,
                                   rs=self.rs)
    self._plist += new_particles
def unique_list_dicts(dlist, key):
    """Return the dictionaries from `dlist` that are unique by `key`.

    Later duplicates replace earlier ones; first-seen key order is kept.

    :param dlist: list of dictionaries
    :param key: dictionary key to deduplicate on
    :return list: deduplicated dictionaries
    """
    deduped = {entry[key]: entry for entry in dlist}
    return list(deduped.values())
def wait_any(futures, timeout=None):
    """Wait for the completion of any (the first) one of multiple futures.

    :param list futures: A list of :class:`Future`\\s
    :param timeout:
        The maximum time to wait. With ``None``, will block indefinitely.
    :type timeout: float or None
    :returns:
        One of the futures from the provided list -- the first one to
        become complete (or any of the ones that were already complete).
    :raises WaitTimeout: if a timeout is provided and hit
    """
    # Fast path: return any future that is already complete.
    for fut in futures:
        if fut.complete:
            return fut
    wait = _Wait(futures)
    for fut in futures:
        fut._waits.add(wait)
    # Event.wait() returns True when the event was set (a future
    # completed) and False on timeout. The original raised WaitTimeout on
    # True -- i.e. on *success* -- inverting the documented behavior.
    if not wait.done.wait(timeout):
        raise errors.WaitTimeout()
    return wait.completed_future
def _insert_pairs(self, ids_and_pairs):
    # Insert new (index, (key, value)) tuples into self._pairs and shift
    # the stored ids in self._key_ids to account for the insertions.
    # (Python 2 code: uses dict.itervalues().)
    ids_to_insert = [x[0] for x in ids_and_pairs]
    for ids in self._key_ids.itervalues():
        for i, id in enumerate(ids):
            # Each existing id moves right by the number of inserted ids
            # at or before it; assumes ids_to_insert is sorted (bisect).
            ids[i] += bisect(ids_to_insert, id)
    for i, pair in ids_and_pairs:
        self._pairs.insert(i, pair) | Insert some new pairs, and keep the _key_ids updated.
Params:
ids_and_pairs -- A list of (index, (key, value)) tuples. |
def _simplify_blocks(self, stack_pointer_tracker=None):
for key in self._blocks:
ail_block = self._blocks[key]
simplified = self._simplify_block(ail_block, stack_pointer_tracker=stack_pointer_tracker)
self._blocks[key] = simplified
self._update_graph() | Simplify all blocks in self._blocks.
:param stack_pointer_tracker: The RegisterDeltaTracker analysis instance.
:return: None |
def articles(self):
    # Try to scrape the correct articles for singular and plural from
    # the first scraped element (vandale.nl).
    # NOTE(review): every regex pattern literal and the split() separator
    # were stripped from this copy; the calls below are not runnable
    # as-is and must be restored from the upstream source.
    result = [None, None]
    element = self._first()
    if element:
        if re.search(, element, re.U):
            result[0] = re.findall(, element, re.U)[0].split()
        if re.search(, element, re.U):
            result[1] = []
    return result | Tries to scrape the correct articles for singular and plural from vandale.nl.
def pullPath_copy(self, d_msg, **kwargs):
    # Handle the "copy" pull operation: unpack the transport metadata,
    # delegate to pullPath_core, and report copy success/failure.
    # NOTE(review): every dict-key string literal was stripped from this
    # copy (the d_msg[...], d_meta[...] and d_curl[...] subscripts are
    # empty); restore them before running.
    d_meta = d_msg[]
    d_local = d_meta[]
    str_localPath = d_local[]
    d_remote = d_meta[]
    d_transport = d_meta[]
    d_copy = d_transport[]
    d_curl = {}
    d_curl[] = self.pullPath_core()
    d_curl[] = {}
    # Propagate the core operation's status into the copy status.
    d_curl[][] = d_curl[][]
    if not d_curl[][]:
        d_curl[][] = "Copy on remote server failed!"
    else:
        d_curl[][] = "Copy on remote server success!"
    return d_curl | Handle the "copy" pull operation
def to_dict(self):
    # Create a JSON-serializable representation of the device Specs:
    # per-qubit specs keyed by qubit id, per-edge specs keyed by
    # "<q1>-<q2>".
    # NOTE(review): the dict-key string literals were stripped from this
    # copy (judging by the attribute names: the section keys and per-spec
    # keys such as f1QRB, fRO, T1, T2, fBellState, fCZ, ...); restore
    # them before running.
    return {
        : {
            "{}".format(qs.id): {
                : qs.f1QRB,
                : qs.fRO,
                : qs.T1,
                : qs.T2,
                : qs.fActiveReset
            } for qs in self.qubits_specs
        },
        : {
            "{}-{}".format(*es.targets): {
                : es.fBellState,
                : es.fCZ,
                : es.fCZ_std_err,
                : es.fCPHASE
            } for es in self.edges_specs
        }
    } | Create a JSON-serializable representation of the device Specs.
The dictionary representation is of the form::
{
'1Q': {
"0": {
"f1QRB": 0.99,
"T1": 20e-6,
...
},
"1": {
"f1QRB": 0.989,
"T1": 19e-6,
...
},
...
},
'2Q': {
"1-4": {
"fBellState": 0.93,
"fCZ": 0.92,
"fCZ_std_err": 0.03,
"fCPHASE": 0.91
},
"1-5": {
"fBellState": 0.9,
"fCZ": 0.89,
"fCZ_std_err": 0.05,
"fCPHASE": 0.88
},
...
},
...
}
:return: A dctionary representation of self.
:rtype: Dict[str, Any] |
def attr_to_path(node):
    # Compute the module path and final object for an attribute AST node.
    # NOTE(review): the string literals inside the tuples on the last two
    # lines were stripped from this copy; restore them before running.
    def get_intrinsic_path(modules, attr):
        # Recursively walk Name/Attribute nodes, returning the resolved
        # object and the tuple of names leading to it.
        if isinstance(attr, ast.Name):
            return modules[demangle(attr.id)], (demangle(attr.id),)
        elif isinstance(attr, ast.Attribute):
            module, path = get_intrinsic_path(modules, attr.value)
            return module[attr.attr], path + (attr.attr,)
    obj, path = get_intrinsic_path(MODULES, node)
    if not obj.isliteral():
        # Non-literal objects get an extra path component spliced in
        # before the final name.
        path = path[:-1] + (, path[-1])
    return obj, (, ) + path | Compute path and final object for an attribute node
def add_answer(self, vote, rationale):
    """Add an answer.

    Args:
        vote (int): the option that the student voted for
        rationale (str): the reason why the student voted for the option
    """
    answer = {VOTE_KEY: vote, RATIONALE_KEY: rationale}
    self.raw_answers.append(answer)
def _parse_arg(self, var, arg, scope):
    # Parse a single mixin argument: resolve variable references through
    # `scope` and return a Variable, or None when resolution fails.
    # NOTE(review): the SyntaxError message literal was stripped from
    # this copy (empty call below).
    if isinstance(var, Variable):
        # Declared parameter: substitute the passed value if one is given.
        if arg:
            if utility.is_variable(arg[0]):
                tmp = scope.variables(arg[0])
                if not tmp:
                    return None
                val = tmp.value
            else:
                val = arg
            var = Variable(var.tokens[:-1] + [val])
    else:
        # Bare name: it must be a variable, and an argument is required.
        if utility.is_variable(var):
            if arg is None:
                raise SyntaxError()
            elif utility.is_variable(arg[0]):
                tmp = scope.variables(arg[0])
                if not tmp:
                    return None
                val = tmp.value
            else:
                val = arg
            var = Variable([var, None, val])
        else:
            return None
    return var | Parse a single argument to mixin.
args:
var (Variable object): variable
arg (mixed): argument
scope (Scope object): current scope
returns:
Variable object or None |
def get_by_email(cls, email):
    """Return the first User with the given email address, or None."""
    matches = cls.query().filter(cls.email == email)
    return matches.first()
def steem_instance(self):
    """Return the cached Steem instance if it already exists; otherwise
    pick a good node via ``self.util.goodnode`` and instantiate the
    Steem class, retrying up to ``default.max_retry`` times. Returns
    False if every attempt fails."""
    if self.s:
        return self.s
    for attempt in range(default.max_retry):
        node = self.util.goodnode(self.nodes)
        try:
            self.s = Steem(keys=self.keys, nodes=[node])
        except Exception as error:
            self.util.retry("COULD NOT GET STEEM INSTANCE",
                            error, attempt, default.wait_time)
            self.s = None
        else:
            return self.s
    return False
def _collect_external_resources(self, resource_attr):
    """Collect external resources set on `resource_attr` of all models,
    preserving first-seen order and dropping duplicates."""
    collected = []

    def _add(resource):
        if resource not in collected:
            collected.append(resource)

    models = sorted(Model.model_class_reverse_map.items(),
                    key=lambda item: item[0])
    for _, model_cls in models:
        external = getattr(model_cls, resource_attr, None)
        if isinstance(external, string_types):
            _add(external)
        elif isinstance(external, list):
            for entry in external:
                _add(entry)
    return collected
def mouseMoveEvent(self, event):
    # Drag the selection rectangle: resize the region so its bottom-right
    # corner follows the cursor, repaint, then defer to the base class.
    w = event.pos().x() - self._region.x()
    h = event.pos().y() - self._region.y()
    self._region.setWidth(w)
    self._region.setHeight(h)
    self.repaint()
    super(XSnapshotWidget, self).mouseMoveEvent(event) | Drags the selection view for this widget.
:param event | <QMouseMoveEvent> |
def _get_module_folders(self, temp_repo):
paths = (
os.path.join(temp_repo, path) for path in os.listdir(temp_repo)
if self._is_module_included(path)
)
return (path for path in paths if os.path.isdir(path)) | Get a list of module paths contained in a temp directory.
:param string temp_repo: the folder containing the modules. |
def get_line(s, bold=False, underline=False, blinking=False, color=None,
             bgcolor=None, update_line=False):
    """Return `s` wrapped in the requested terminal formatting escape
    codes (color, background color, bold, underline, blinking), always
    terminated by the attribute-reset code."""
    parts = []
    if update_line:
        parts.append(_UPDATE_LINE)
    for attr in (color, bgcolor):
        if attr:
            parts.append(attr)
    if bold:
        parts.append(_TURN_BOLD_MODE_ON)
    if underline:
        parts.append(_TURN_UNDERLINE_MODE_ON)
    if blinking:
        parts.append(_TURN_BLINKING_MODE_ON)
    parts.append(s)
    parts.append(_TURN_OFF_CHARACTER_ATTS)
    # Stripped literal restored: escape sequences are concatenated with
    # no separator (the original `.join(parts)` was a SyntaxError).
    result = ''.join(parts)
    return result
def vcf_to_df_worker(arg):
    # Convert one CANVAS VCF into a dict of per-gene mean/median copy
    # numbers (single-thread worker; `arg` is (canvasvcf, exonbed, i)).
    # NOTE(review): two string literals were stripped from this copy (the
    # rsplit separator and the sample-key dict key); restore them before
    # running.
    canvasvcf, exonbed, i = arg
    logging.debug("Working on job {}: {}".format(i, canvasvcf))
    samplekey = op.basename(canvasvcf).split(".")[0].rsplit(, 1)[0]
    d = {: samplekey}
    exons = BedTool(exonbed)
    cn = parse_segments(canvasvcf)
    overlaps = exons.intersect(cn, wao=True)
    gcn_store = {}
    for ov in overlaps:
        gene_name = "|".join((ov.fields[6], ov.fields[3], ov.fields[5]))
        if gene_name not in gcn_store:
            gcn_store[gene_name] = defaultdict(int)
        cn = ov.fields[-2]
        if cn == ".":
            # "." means no overlapping segment: skip.
            continue
        cn = int(cn)
        # Cap the copy number at 10.
        if cn > 10:
            cn = 10
        amt = int(ov.fields[-1])
        gcn_store[gene_name][cn] += amt
    for k, v in sorted(gcn_store.items()):
        v_mean, v_median = counter_mean_and_median(v)
        d[k + ".avgcn"] = v_mean
        d[k + ".medcn"] = v_median
    cleanup()
    return d | Convert CANVAS vcf to a dict, single thread
def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
    # Fetch a list of items from a feed/category/special-items URL and
    # return the decoded JSON payload.
    # NOTE(review): all query-parameter key strings (and the excludeRead
    # value, leaving a bare assignment) were stripped from this copy;
    # restore them before running.
    parameters = {}
    if excludeRead:
        parameters[] =
    if continuation:
        # Continuation token from a previous call, to fetch more items.
        parameters[] = continuation
    parameters[] = loadLimit
    if since:
        parameters[] = since
    if until:
        parameters[] = until
    contentJson = self.httpGet(url, parameters)
    return json.loads(contentJson, strict=False) | A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)
Returns a dict with
:param id: (str, feed's id)
:param continuation: (str, to be used to fetch more items)
:param items: array of dits with :
- update (update timestamp)
- author (str, username)
- title (str, page title)
- id (str)
- content (dict with content and direction)
- categories (list of categories including states or ones provided by the feed owner) |
def read_plain_text(fname, encoding="utf-8"):
    """Read a file and return its lines as a list of strings without
    trailing newlines; a missing final newline is tolerated, and a file
    ending in a newline yields a trailing empty string."""
    with io.open(fname, encoding=encoding) as handle:
        lines = list(handle)
    if not lines:
        return []
    # Normalize so every entry (including a synthetic final one) ends in
    # a newline, then strip the newlines uniformly.
    if lines[-1].endswith("\n"):
        lines.append("\n")
    else:
        lines[-1] += "\n"
    return [line[:-1] for line in lines]
def set_mode_loiter(self):
    """Enter LOITER mode, via the MAVLink 1.0 long command or the legacy
    action message depending on protocol version."""
    if not self.mavlink10():
        # Legacy protocol: send the numeric LOITER action.
        MAV_ACTION_LOITER = 27
        self.mav.action_send(self.target_system, self.target_component,
                             MAV_ACTION_LOITER)
        return
    self.mav.command_long_send(self.target_system, self.target_component,
                               mavlink.MAV_CMD_NAV_LOITER_UNLIM,
                               0, 0, 0, 0, 0, 0, 0, 0)
def define_format(self, plotStyle, plotSize):
    # Configure matplotlib rcParams and the plot color palette for the
    # requested style and size preset.
    # NOTE(review): every dict-key string literal and color value was
    # stripped from this copy (empty subscripts, colon-only dict entries,
    # bare assignments for iron_color/silver_color); this block is not
    # runnable until they are restored from the upstream source.
    sizing_dict = {}
    sizing_dict[] = (14, 8)
    sizing_dict[] = 15
    sizing_dict[] = 20
    sizing_dict[] = 24
    sizing_dict[] = 14
    sizing_dict[] = 14
    self.colorVector = {
        :,
        :,
        :,
        :,
        :,
        :,
        :,
        :,
        :,
        :,
        :}
    if plotStyle == None:
        self.ColorVector = [None, None, None]
    elif plotStyle == :
        plt.style.use()
    elif plotStyle == :
        # Dark style: override foreground colors with iron/silver tones.
        plt.style.use()
        iron_color =
        silver_color =
        sizing_dict[] = iron_color
        sizing_dict[] = iron_color
        sizing_dict[] = silver_color
        sizing_dict[] = silver_color
        sizing_dict[] = silver_color
        sizing_dict[] = silver_color
        sizing_dict[] = silver_color
        sizing_dict[] = silver_color
    elif plotStyle == :
        plt.style.use()
    else:
        # Any other value is passed straight through to matplotlib.
        plt.style.use(plotStyle)
    if plotSize == :
        rcParams.update(sizing_dict)
    elif type(plotSize) is dict:
        # Caller-supplied sizes override the defaults.
        sizing_dict.update(plotSize)
        rcParams.update(sizing_dict)
| Seaborn color blind
#0072B2 dark blue
#009E73 green
#D55E00 orangish
#CC79A7 pink
#F0E442 yellow
#56B4E9 cyan
#bcbd22 olive #adicional
#7f7f7f grey
#FFB5B8 skin |
def evidence(ns_run, logw=None, simulate=False):
    r"""Bayesian evidence :math:`\log \mathcal{Z}`.

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples; computed from the run if not given.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be calculated.

    Returns
    -------
    float
    """
    # Fix: the original body contained a stray bare `r` statement (the
    # detached prefix of this raw docstring), which raised NameError at
    # call time.
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # logsumexp gives log(sum(exp(logw))) stably; exponentiate for Z.
    return np.exp(scipy.special.logsumexp(logw))
def load_plugins(self, raise_error=False):
    # Load the `plotters` and `defaultParams` attributes from plugin
    # entry points, honoring an include/exclude environment variable.
    # NOTE(review): many string literals were stripped from this copy
    # (the env-var name, the include/exclude prefixes matching the [4:]
    # and [3:] slices, dict keys, separators and parts of the messages);
    # this block is not runnable until they are restored.
    pm_env = os.getenv(, ).split()
    include_pms = [s[4:] for s in pm_env if s.startswith()]
    exclude_pms = [s[3:] for s in pm_env if s.startswith()]
    logger = logging.getLogger(__name__)
    plotters = self[]
    def_plots = {: list(plotters)}
    defaultParams = self.defaultParams
    def_keys = {: defaultParams}
    def register_pm(ep, name):
        # Decide whether plugin `name` from entry point `ep` is enabled
        # according to the include/exclude lists.
        full_name = % (ep.module_name, name)
        ret = True
        if pm_env == []:
            ret = False
        elif name in exclude_pms or full_name in exclude_pms:
            ret = False
        elif include_pms and (name not in include_pms and
                              full_name not in include_pms):
            ret = False
        if not ret:
            logger.debug(, full_name)
        return ret
    for ep in self._load_plugin_entrypoints():
        plugin_mod = ep.load()
        rc = plugin_mod.rcParams
        # Only take plotters that pass the include/exclude filter.
        plugin_plotters = {
            key: val for key, val in rc.get(, {}).items()
            if register_pm(ep, key)}
        already_defined = set(plotters).intersection(plugin_plotters)
        if already_defined:
            # Plotter name collision across plugins: warn or raise.
            msg = ("Error while loading psyplot plugin %s! The "
                   "following plotters have already been "
                   "defined") % ep
            msg += if not raise_error else
            msg += + .join(chain.from_iterable(
                (( % (key, plugin)
                  for plugin, keys in def_plots.items() if key in keys)
                 for key in already_defined)))
            if raise_error:
                raise ImportError(msg)
            else:
                warn(msg)
        for d in plugin_plotters.values():
            d[] = ep.module_name
        plotters.update(plugin_plotters)
        def_plots[ep] = list(plugin_plotters)
        plugin_defaultParams = rc.defaultParams
        already_defined = set(defaultParams).intersection(
            plugin_defaultParams) - {}
        if already_defined:
            # Default-key collision across plugins: warn or raise.
            msg = ("Error while loading psyplot plugin %s! The "
                   "following default keys have already been "
                   "defined:") % ep
            msg += + .join(chain.from_iterable(
                (( % (key, plugin)
                  for plugin, keys in def_keys.items() if key in keys)
                 for key in already_defined)))
            if raise_error:
                raise ImportError(msg)
            else:
                warn(msg)
        # Merge the plugin's default keys and deprecation maps.
        update_keys = set(plugin_defaultParams) - {}
        def_keys[ep] = update_keys
        self.defaultParams.update(
            {key: plugin_defaultParams[key] for key in update_keys})
        super(RcParams, self).update({key: rc[key] for key in update_keys})
        self._deprecated_ignore_map.update(rc._deprecated_ignore_map)
        self._deprecated_map.update(rc._deprecated_map) | Load the plotters and defaultParams from the plugins
This method loads the `plotters` attribute and `defaultParams`
attribute from the plugins that use the entry point specified by
`group`. Entry points must be objects (or modules) that have a
`defaultParams` and a `plotters` attribute.
Parameters
----------
raise_error: bool
If True, an error is raised when multiple plugins define the same
plotter or rcParams key. Otherwise only a warning is raised |
def App(apptype, data_flow_kernel=None, walltime=60, cache=False, executors=):
    # Decorator factory returning a PythonApp or BashApp wrapper
    # (deprecated in Parsl 0.8 in favor of python_app/bash_app).
    # NOTE(review): string literals were stripped from this copy -- the
    # `executors` default, the apptype comparison values (presumably
    # 'python' and 'bash') and parts of the warning/error message text;
    # restore them before running.
    from parsl.app.python import PythonApp
    from parsl.app.bash import BashApp
    logger.warning("The decorator will be deprecated in Parsl 0.8. Please use or instead.")
    if apptype == :
        app_class = PythonApp
    elif apptype == :
        app_class = BashApp
    else:
        raise InvalidAppTypeError("Invalid apptype requested {}; must be or ".format(apptype))
    def wrapper(f):
        # Bind the decorated function into the chosen app class.
        return app_class(f,
                         data_flow_kernel=data_flow_kernel,
                         walltime=walltime,
                         cache=cache,
                         executors=executors)
    return wrapper | The App decorator function.
Args:
- apptype (string) : Apptype can be bash|python
Kwargs:
- data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for
managing this app. This can be omitted only
after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`.
- walltime (int) : Walltime for app in seconds,
default=60
- executors (str|list) : Labels of the executors that this app can execute over. Default is 'all'.
- cache (Bool) : Enable caching of the app call
default=False
Returns:
A PythonApp or BashApp object, which when called runs the apps through the executor. |
def support_autoupload_enable(self, **kwargs):
    """Auto Generated Code.

    Build the support/autoupload/enable NETCONF payload and pass it to
    the callback; ``kwargs['callback']`` overrides the default
    ``self._callback``.
    """
    config = ET.Element("config")
    support = ET.SubElement(config, "support",
                            xmlns="urn:brocade.com:mgmt:brocade-ras")
    autoupload = ET.SubElement(support, "autoupload")
    enable = ET.SubElement(autoupload, "enable")
    # Stripped literal restored: pop the optional 'callback' override,
    # falling back to the instance default.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def system_find_apps(input_params={}, always_retry=True, **kwargs):
    """Invokes the /system/findApps API method.

    For more info, see:
    https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method%3A-%2Fsystem%2FfindApps
    """
    # Stripped literal restored from the endpoint named in this method's
    # own documentation. (The mutable default {} is kept for interface
    # compatibility; it is never mutated here.)
    return DXHTTPRequest('/system/findApps', input_params,
                         always_retry=always_retry, **kwargs)
def items(self):
    """Return (name, value) pairs for all declared columns; attributes
    missing on the instance yield None."""
    pairs = []
    for column in self.__table__._columns:
        pairs.append((column.name, getattr(self, column.name, None)))
    return pairs
def stop(self, id):
    """Stop the tracker with the given id via a DELETE request."""
    endpoint = _path(self.adapter, id)
    return self._delete(endpoint)
def create_subject(self,
                   authc_token=None,
                   account_id=None,
                   existing_subject=None,
                   subject_context=None):
    # Create a Subject either for a freshly authenticated account (no
    # subject_context given) or from an explicit subject_context.
    if subject_context is None:
        # Post-authentication path: build a fully populated context.
        context = self.create_subject_context(existing_subject)
        context.authenticated = True
        context.authentication_token = authc_token
        context.account_id = account_id
        if (existing_subject):
            context.subject = existing_subject
    else:
        # Anonymous/explicit-context path: work on a shallow copy so the
        # caller's context object is not mutated.
        context = copy.copy(subject_context)
    # Fill in anything the context may be missing (security manager,
    # referenced session, remembered identifiers).
    context = self.ensure_security_manager(context)
    context = self.resolve_session(context)
    context = self.resolve_identifiers(context)
    subject = self.do_create_subject(context)
    # Persist subject state so it is accessible to future requests.
    self.save(subject)
    return subject | Creates a ``Subject`` instance for the user represented by the given method
arguments.
It is an overloaded method, due to porting java to python, and is
consequently highly likely to be refactored.
It gets called in one of two ways:
1) when creating an anonymous subject, passing create_subject
a subject_context argument
2) following a after successful login, passing all but the context argument
This implementation functions as follows:
- Ensures that the ``SubjectContext`` exists and is as populated as it can be,
using heuristics to acquire data that may not have already been available
to it (such as a referenced session or remembered identifiers).
- Calls subject_context.do_create_subject to perform the Subject
instance creation
- Calls subject.save to ensure the constructed Subject's state is
accessible for future requests/invocations if necessary
- Returns the constructed Subject instance
:type authc_token: subject_abcs.AuthenticationToken
:param account_id: the identifiers of a newly authenticated user
:type account: SimpleIdentifierCollection
:param existing_subject: the existing Subject instance that initiated the
authentication attempt
:type subject: subject_abcs.Subject
:type subject_context: subject_abcs.SubjectContext
:returns: the Subject instance that represents the context and session
data for the newly authenticated subject |
def file(self):
    # Return the formatted photo path, derived from format.id and the
    # source photo filename; a settings-level hook can override naming.
    # NOTE(review): the separator literal between the format id and the
    # source filename was stripped from this copy ("+ +" below is not
    # valid Python); restore it before running.
    if photos_settings.FORMATED_PHOTO_FILENAME is not None:
        return photos_settings.FORMATED_PHOTO_FILENAME(self)
    source_file = path.split(self.photo.image.name)
    return path.join(source_file[0], str(self.format.id) + + source_file[1]) | Method returns formated photo path - derived from format.id and source Photo filename
def form_valid(self, post_form, attachment_formset, poll_option_formset, **kwargs):
    """Process valid forms; persist poll options unless previewing."""
    # Poll options are only saved when a formset was provided and this
    # is not a preview submission.
    save_poll_option_formset = poll_option_formset is not None \
        and not self.preview
    valid = super().form_valid(
        post_form, attachment_formset, poll_option_formset=poll_option_formset, **kwargs)
    if save_poll_option_formset:
        poll_option_formset.topic = self.forum_post.topic
        # Stripped literals restored: the cleaned-data keys mirror the
        # keyword-argument names they feed.
        poll_option_formset.save(
            poll_question=post_form.cleaned_data.pop('poll_question', None),
            poll_max_options=post_form.cleaned_data.pop('poll_max_options', None),
            poll_duration=post_form.cleaned_data.pop('poll_duration', None),
            poll_user_changes=post_form.cleaned_data.pop('poll_user_changes', None),
        )
    return valid
def map_with_obj_deep(f, dct):
    """Deep map over `dct`: apply f(key, value) to values at every level
    of nested dicts and lists, keeping keys unchanged.

    :param f: 2-ary function expecting a key and value, returning the
        modified value
    :param dct: dict for deep processing
    :return: modified dct with matching props mapped
    """
    def _keep_key(k, v):
        return [k, f(k, v)]
    return _map_deep(_keep_key, dct)
def create_execution_state(self, topologyName, executionState):
    # Serialize the execution-state protobuf and create its zookeeper
    # node, translating kazoo errors into StateExceptions.
    if not executionState or not executionState.IsInitialized():
        raise_(StateException("Execution State protobuf not init properly",
                              StateException.EX_TYPE_PROTOBUF_ERROR), sys.exc_info()[2])
    path = self.get_execution_state_path(topologyName)
    LOG.info("Adding topology: {0} to path: {1}".format(
        topologyName, path))
    executionStateString = executionState.SerializeToString()
    try:
        # makepath=True creates any missing parent znodes.
        self.client.create(path, value=executionStateString, makepath=True)
        return True
    except NoNodeError:
        raise_(StateException("NoNodeError while creating execution state",
                              StateException.EX_TYPE_NO_NODE_ERROR), sys.exc_info()[2])
    except NodeExistsError:
        raise_(StateException("NodeExistsError while creating execution state",
                              StateException.EX_TYPE_NODE_EXISTS_ERROR), sys.exc_info()[2])
    except ZookeeperError:
        raise_(StateException("Zookeeper while creating execution state",
                              StateException.EX_TYPE_ZOOKEEPER_ERROR), sys.exc_info()[2])
    except Exception:
        # Unknown failure: propagate unchanged.
        raise | create execution state
def count(cls, user_id):
    """Count sessions with the given user_id."""
    sessions = cls.query.with_entities(cls.user_id)
    return sessions.filter_by(user_id=user_id).count()
def badges(request):
    # Render a form listing users with badges, or return a .zip file
    # containing their rendered badge SVGs when the form validates.
    # NOTE(review): the Content-Disposition header value literal was
    # stripped from this copy (bare assignment below), leaving invalid
    # Python; restore it (an attachment filename) before running.
    category = request.GET.getlist("category", [])
    product = request.GET.getlist("product", [])
    status = request.GET.get("status")
    form = forms.InvoicesWithProductAndStatusForm(
        request.POST or None,
        category=category,
        product=product,
        status=status,
    )
    if form.is_valid():
        # Stream a zip of one SVG badge per invoiced user.
        response = HttpResponse()
        response["Content-Type"] = "application.zip"
        response["Content-Disposition"] =
        z = zipfile.ZipFile(response, "w")
        for invoice in form.cleaned_data["invoice"]:
            user = invoice.user
            badge = render_badge(user)
            z.writestr("badge_%d.svg" % user.id, badge.encode("utf-8"))
        return response
    data = {
        "form": form,
    }
    return render(request, "registrasion/badges.html", data) | Either displays a form containing a list of users with badges to
render, or returns a .zip file containing their badges. |
def save(self, *args, **kwargs):
    """Serializer save that addresses prefetch issues.

    On updates with older DRF (< 3.5) the saved instance is re-fetched
    from the view so stale prefetched relations are not returned.
    """
    # Stripped literals restored: the 'instance' attribute name and the
    # 'view' context key.
    update = getattr(self, 'instance', None) is not None
    instance = super(
        WithDynamicSerializerMixin,
        self
    ).save(
        *args,
        **kwargs
    )
    view = self._context.get('view')
    if view and update:
        if int(DRF_VERSION[0]) <= 3 and int(DRF_VERSION[1]) < 5:
            # Reload to avoid a stale prefetch cache on the saved object.
            instance = self.instance = view.get_object()
    return instance
def to_dict(self, drop_null=True, camel=False):
    # Serialize self (a Body/BodyChild tree) into plain dicts and lists.
    #
    # Args:
    #     drop_null: bool, default True. Remove 'empty' attributes
    #         (but keep False and 0 values).
    #     camel: bool, default False. Convert keys to camelCase.
    def to_dict(obj, drop_null, camel):
        # Recursive worker; intentionally shadows the method name.
        if isinstance(obj, (Body, BodyChild)):
            obj = obj.__dict__
        if isinstance(obj, dict):
            data = {}
            for attr, val in six.iteritems(obj):
                if camel:
                    attr = _snake_to_camel(attr)
                # Booleans and zero are always kept; other values are
                # kept only if truthy after recursive serialization.
                valid_null = (isinstance(val, bool) or val == 0 or
                              (val and to_dict(val, drop_null, camel)))
                if not drop_null or (drop_null and valid_null):
                    data[attr] = to_dict(val, drop_null, camel)
            return data
        elif isinstance(obj, list):
            data = []
            for val in obj:
                valid_null = (isinstance(val, bool) or val == 0 or
                              (val and to_dict(val, drop_null, camel)))
                if not drop_null or (drop_null and valid_null):
                    data.append(to_dict(val, drop_null, camel))
            return data
        else:
            # Scalars pass through unchanged.
            return obj
    return to_dict(self, drop_null, camel) | Serialize self as dict.
Args:
drop_null: bool, default True. Remove 'empty' attributes.
camel: bool, default True. Convert keys to camelCase.
Return:
dict: object params. |
def cell_array_generator(self, key):
    # Generator over the cells selected by `key` (a tuple of integers
    # and/or slices): expands the first slice found against the grid
    # shape, recursing with nested generators for any remaining slices.
    # (Python 2 code: uses xrange and a SliceType alias.)
    for i, key_ele in enumerate(key):
        if type(key_ele) is SliceType:
            # Expand this slice against the grid's extent on axis i.
            slc_keys = xrange(*key_ele.indices(self.dict_grid.shape[i]))
            key_list = list(key)
            key_list[i] = None
            has_subslice = any(type(ele) is SliceType for ele in key_list)
            for slc_key in slc_keys:
                key_list[i] = slc_key
                if has_subslice:
                    # Further slices remain: yield a nested generator.
                    yield self.cell_array_generator(key_list)
                else:
                    yield self[tuple(key_list)]
            # Only the first slice is expanded at this level.
            break | Generator traversing cells specified in key
Parameters
----------
key: Iterable of Integer or slice
\tThe key specifies the cell keys of the generator |
def RegisterAnyElement(cls):
    # For every entry in cls.types_dict whose TypeCode is already
    # registered in Any.serialmap, append the wrapper class to that
    # TypeCode class's seriallist and re-register the type; this enables
    # Any serialization of wrapper instances.
    for k,v in cls.types_dict.items():
        what = Any.serialmap.get(k)
        if what is None: continue
        if v in what.__class__.seriallist: continue
        what.__class__.seriallist.append(v)
        RegisterType(what.__class__, clobber=1, **what.__dict__) | If find registered TypeCode instance, add Wrapper class
to TypeCode class serialmap and Re-RegisterType. Provides
Any serialzation of any instances of the Wrapper. |
async def stepper_step(self, command):
    """Activate a stepper motor motion (FirmataPlus feature).

    :param command: {"method": "stepper_step",
                     "params": [SPEED, NUMBER_OF_STEPS]}
    :returns: No message returned.
    """
    speed, num_steps = int(command[0]), int(command[1])
    await self.core.stepper_step(speed, num_steps)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.