code | docstring |
|---|---|
def delete_profile_extension_members(self, profile_extension, query_column, ids_to_delete):
profile_extension = profile_extension.get_soap_object(self.client)
    result = self.call(
        'deleteProfileExtensionRecords', profile_extension, query_column,
        ids_to_delete)
    if hasattr(result, '__iter__'):
        return [DeleteResult(delete_result) for delete_result in result]
    return [DeleteResult(result)] | Responsys.deleteProfileExtensionRecords call
Accepts:
InteractObject profile_extension
list ids_to_delete
string query_column
default: 'RIID'
Returns list of DeleteResults |
def makeHandler(self, dialect):
    if dialect not in self.dialects:
        m = ("The dialect specified, '{}', wasn't found; it's possible your "
             "URL is malformed and we didn't understand it.").format(dialect)
        raise ValueError(m)
else:
klass = self._plugins.get(dialect)
self._dialect_handlers[dialect] = klass(self.master, self.dialects[dialect])
return self._dialect_handlers[dialect] | create and cache the handler object for this dialect |
def format_value(self):
return format_value(
self.data_type,
self.data,
self.parent.stringpool_main.getString
) | Return the formatted (interpreted) data according to `data_type`. |
def _normalize_to_unit(self, value, unit):
    if unit == 'ms':
        return value / 1000.0
    if unit == 'us':
        return value / 1000000.0
    if unit == 'KB':
        return value * 1024
    if unit == 'MB':
        return value * 1024 * 1024
    if unit == 'GB':
        return value * 1024 * 1024 * 1024
    if unit == 'TB':
        return value * 1024 * 1024 * 1024 * 1024
return value | Normalize the value to the unit returned.
We use base-1000 for second-based units, and base-1024 for
byte-based units. Sadly, the Nagios-Plugins specification doesn't
disambiguate base-1000 (KB) and base-1024 (KiB). |
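A minimal sketch of the unit table as reconstructed above; the unit names are assumptions inferred from the docstring's base-1000/base-1024 split:

```python
# Unit names are assumed; only the scaling behaviour comes from the docstring.
FACTORS = {'ms': 1e-3, 'us': 1e-6, 'KB': 1024,
           'MB': 1024 ** 2, 'GB': 1024 ** 3, 'TB': 1024 ** 4}

def normalize(value, unit):
    return value * FACTORS.get(unit, 1)

assert normalize(1500, 'ms') == 1.5   # base-1000 for second-based units
assert normalize(2, 'KB') == 2048     # base-1024 for byte-based units
```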
def addInternalLink(self, link):
if isinstance(link, InternalLink):
self.internalLinks.append(link)
else:
        raise InternalLinkError(
            'Expected InternalLink, got %s' % type(link)) | Appends InternalLink |
def rotate(self, azimuth, axis=None):
target = self._target
y_axis = self._n_pose[:3, 1].flatten()
if axis is not None:
y_axis = axis
x_rot_mat = transformations.rotation_matrix(azimuth, y_axis, target)
self._n_pose = x_rot_mat.dot(self._n_pose)
y_axis = self._pose[:3, 1].flatten()
if axis is not None:
y_axis = axis
x_rot_mat = transformations.rotation_matrix(azimuth, y_axis, target)
self._pose = x_rot_mat.dot(self._pose) | Rotate the trackball about the "Up" axis by azimuth radians.
Parameters
----------
azimuth : float
The number of radians to rotate. |
def add_subgroups(self, subgroups):
if subgroups is None:
subgroups = {}
_subgroups = {}
for sg in subgroups:
assert isinstance(sg, SubGroupDefinition)
_subgroups[sg.name] = sg
self.subgroups = _subgroups | Add a list of SubGroupDefinition objects to this composite.
Note that in contrast to :meth:`BaseTrack`, which takes a single
dictionary indicating the particular subgroups for the track, this
method takes a list of :class:`SubGroupDefinition` objects representing
the allowed subgroups for the composite.
:param subgroups:
List of SubGroupDefinition objects. |
def floats(self, n: int = 2) -> List[float]:
nums = [self.random.random()
for _ in range(10 ** int(n))]
return nums | Generate a list of random float numbers.
:param n: Raise 10 to the 'n' power.
:return: The list of floating-point numbers. |
def add_jac(self, m, val, row, col):
    assert m in ('Fx', 'Fy', 'Gx', 'Gy', 'Fx0', 'Fy0', 'Gx0', 'Gy0'), \
        'Wrong Jacobian matrix name <{0}>'.format(m)
    if isinstance(val, (int, float)):
        val = val * ones(len(row), 1)
    self._temp[m]['I'] = matrix([self._temp[m]['I'], matrix(row)])
    self._temp[m]['J'] = matrix([self._temp[m]['J'], matrix(col)])
    self._temp[m]['V'] = matrix([self._temp[m]['V'], matrix(val)]) | Add tuples (val, row, col) to the Jacobian matrix ``m``
Implemented in numpy.arrays for temporary storage. |
def copy(self):
return Retry(max_tries=self.max_tries, delay=self.delay, backoff=self.backoff,
max_jitter=self.max_jitter / 100.0, max_delay=self.max_delay, sleep_func=self.sleep_func,
deadline=self.deadline, retry_exceptions=self.retry_exceptions) | Return a clone of this retry manager |
def accel_fl(q: np.ndarray):
dims: int = len(q)
B: int = dims // 3
f = force(q)
a = np.zeros(dims)
for i in range(B):
a[slices[i]] = f[slices[i]] / mass[i]
    return a | Acceleration in the earth-sun system using Fluxion potential energy |
def lint_cli(ctx, exclude, skip_untracked, commit_only):
if ctx.invoked_subcommand:
return
from peltak.logic import lint
lint.lint(exclude, skip_untracked, commit_only) | Run pep8 and pylint on all project files.
You can configure the linting paths using the lint.paths config variable.
This should be a list of paths that will be linted. If a path to a directory
is given, all files in that directory and its subdirectories will be
used.
The pep8 and pylint config paths are by default stored in ops/tools/pep8.ini
and ops/tools/pylint.ini. You can customise those paths in your config with
lint.pep8_cfg and lint.pylint_cfg variables.
**Config Example**::
\b
lint:
pylint_cfg: 'ops/tools/pylint.ini'
pep8_cfg: 'ops/tools/pep8.ini'
paths:
- 'src/mypkg'
**Examples**::
\b
$ peltak lint # Run linter in default mode, skip untracked
$ peltak lint --commit # Lint only files staged for commit
$ peltak lint --all # Lint all files, including untracked.
$ peltak lint --pretend # Print the list of files to lint
$ peltak lint -e "*.tox*" # Don't lint files inside .tox directory |
def validate_content(*objs):
from .main import Collection, Module
validator = {
Collection: cnxml.validate_collxml,
Module: cnxml.validate_cnxml,
}[type(objs[0])]
return validator(*[obj.file for obj in objs]) | Runs the correct validator for given `obj`ects. Assumes all same type |
def is_best_response(self, own_action, opponents_actions, tol=None):
if tol is None:
tol = self.tol
payoff_vector = self.payoff_vector(opponents_actions)
payoff_max = payoff_vector.max()
if isinstance(own_action, numbers.Integral):
return payoff_vector[own_action] >= payoff_max - tol
else:
return np.dot(own_action, payoff_vector) >= payoff_max - tol | Return True if `own_action` is a best response to
`opponents_actions`.
Parameters
----------
own_action : scalar(int) or array_like(float, ndim=1)
An integer representing a pure action, or an array of floats
representing a mixed action.
opponents_actions : see `best_response`
tol : scalar(float), optional(default=None)
Tolerance level used in determining best responses. If None,
default to the value of the `tol` attribute.
Returns
-------
bool
True if `own_action` is a best response to
`opponents_actions`; False otherwise. |
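A short self-contained check of the best-response comparison, using a hypothetical payoff vector (the surrounding class is not needed for the arithmetic itself):

```python
import numpy as np

payoff_vector = np.array([1.0, 3.0, 5.0])  # hypothetical payoffs for 3 pure actions
tol = 1e-8
payoff_max = payoff_vector.max()

print(payoff_vector[2] >= payoff_max - tol)              # True: pure action 2 is best
mixed = np.array([0.0, 0.5, 0.5])                        # a mixed action
print(np.dot(mixed, payoff_vector) >= payoff_max - tol)  # False: 4.0 < 5.0
```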
def set_name(self, name):
path = self._filename.split("/")
parts = path[-1].split(".")
if len(parts) == 1:
path[-1] = name
else:
path[-1] = name + "." + parts[-1]
    return File("/".join(path)) | RETURN NEW FILE WITH GIVEN NAME, KEEPING THE ORIGINAL EXTENSION |
def _vector_pattern_uniform_op_right(func):
@wraps(func)
def verif(self, patt):
if isinstance(patt, numbers.Number):
return TransversePatternUniform(func(self, self._tdsphere, patt),
func(self, self._pdsphere, patt),
doublesphere=True)
else:
raise TypeError(err_msg[])
return verif | decorator for operator overloading when VectorPatternUniform is on
the right |
def statvfs(path):
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')
    try:
        stv = os.statvfs(path)
        return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
                    'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files',
                    'f_flag', 'f_frsize', 'f_namemax'))
    except (OSError, IOError):
        raise CommandExecutionError('Could not statvfs \'{0}\''.format(path))
return False | .. versionadded:: 2014.1.0
Perform a statvfs call against the filesystem that the file resides on
CLI Example:
.. code-block:: bash
salt '*' file.statvfs /path/to/file |
def get_folders(self):
    endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders'
r = requests.get(endpoint, headers=self._headers)
if check_response(r):
return Folder._json_to_folders(self, r.json()) | Returns a list of all folders for this account
Returns:
List[:class:`Folder <pyOutlook.core.folder.Folder>`] |
def UploadArtifactYamlFile(file_content,
overwrite=True,
overwrite_system_artifacts=False):
loaded_artifacts = []
registry_obj = artifact_registry.REGISTRY
registry_obj.GetArtifacts(reload_datastore_artifacts=True)
new_artifacts = registry_obj.ArtifactsFromYaml(file_content)
new_artifact_names = set()
for artifact_value in new_artifacts:
artifact_registry.ValidateSyntax(artifact_value)
new_artifact_names.add(artifact_value.name)
artifact_coll = artifact_registry.ArtifactCollection(ARTIFACT_STORE_ROOT_URN)
current_artifacts = list(artifact_coll)
filtered_artifacts = [
art for art in current_artifacts if art.name not in new_artifact_names
]
artifact_coll.Delete()
with data_store.DB.GetMutationPool() as pool:
for artifact_value in filtered_artifacts:
artifact_coll.Add(artifact_value, mutation_pool=pool)
for artifact_value in new_artifacts:
registry_obj.RegisterArtifact(
artifact_value,
source="datastore:%s" % ARTIFACT_STORE_ROOT_URN,
overwrite_if_exists=overwrite,
overwrite_system_artifacts=overwrite_system_artifacts)
artifact_coll.Add(artifact_value, mutation_pool=pool)
if data_store.RelationalDBEnabled():
data_store.REL_DB.WriteArtifact(artifact_value)
loaded_artifacts.append(artifact_value)
name = artifact_value.name
logging.info("Uploaded artifact %s to %s", name, ARTIFACT_STORE_ROOT_URN)
for artifact_value in loaded_artifacts:
artifact_registry.ValidateDependencies(artifact_value) | Upload a yaml or json file as an artifact to the datastore. |
def _environment_sanity_check(environment):
    assert issubclass(environment.undefined, Undefined), \
        'undefined must be a subclass of undefined because defaults depend on it.'
    assert environment.block_start_string != \
        environment.variable_start_string != \
        environment.comment_start_string, \
        'block, variable and comment start strings must be different'
    assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
        'newline_sequence set to unknown line ending string.'
return environment | Perform a sanity check on the environment. |
def filter(self, query, inplace=True):
    with LogDataChanges(self, filter_action='filter', filter_query=query):
        result = self.data.query(
            'not ({0})'.format(query),
            inplace=inplace,
        )
return result | Use a query statement to filter data. Note that you specify the data
to be removed!
Parameters
----------
query : string
The query string to be evaluated. Is directly provided to
pandas.DataFrame.query
inplace : bool
if True, change the container dataframe in place (defaults to True)
Returns
-------
result : :py:class:`pandas.DataFrame`
DataFrame that contains the result of the filter application |
def main():
opts = [
Option(),
Option(),
Option(),
Option(, group=, mutually_exclusive=True),
Option(, group=, mutually_exclusive=True),
Option(, group=),
Option(, group=),
Option(, group=),
Option(, mutually_exclusive=True, dest=),
Option(, mutually_exclusive=True, dest=),
]
myconf = Config(options=opts)
if len(sys.argv) == 1:
sys.argv.append()
myconf.parse() | Simple tests. |
def destroy_balancer(balancer_id, profile, **libcloud_kwargs):
conn = _get_driver(profile=profile)
libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
balancer = conn.get_balancer(balancer_id)
return conn.destroy_balancer(balancer, **libcloud_kwargs) | Destroy a load balancer
:param balancer_id: LoadBalancer ID which should be used
:type balancer_id: ``str``
:param profile: The profile key
:type profile: ``str``
:param libcloud_kwargs: Extra arguments for the driver's destroy_balancer method
:type libcloud_kwargs: ``dict``
:return: ``True`` if the destroy was successful, otherwise ``False``.
:rtype: ``bool``
CLI Example:
.. code-block:: bash
salt myminion libcloud_storage.destroy_balancer balancer_1 profile1 |
def jsonrpc_method(name,
authenticated=False,
                   authentication_arguments=['username', 'password'],
safe=False,
validate=False,
site=default_site):
def decorator(func):
arg_names = getargspec(func)[0][1:]
        X = {'name': name, 'arg_names': arg_names}
        if authenticated:
            if authenticated is True or six.callable(authenticated):
                X['arg_names'] = authentication_arguments + X['arg_names']
                X['name'] = _inject_args(X['name'], ('String', 'String'))
from django.contrib.auth import authenticate as _authenticate
from django.contrib.auth.models import User
else:
authenticate = authenticated
@six.wraps(func)
def _func(request, *args, **kwargs):
            user = getattr(request, 'user', None)
            is_authenticated = getattr(user, 'is_authenticated',
                                       lambda: False)
if ((user is not None and six.callable(is_authenticated) and
not is_authenticated()) or user is None):
user = None
try:
creds = args[:len(authentication_arguments)]
if len(creds) == 0:
raise IndexError
ret_func.json_validate = validate
site.register(method, ret_func)
return ret_func
return decorator | Wraps a function turns it into a json-rpc method. Adds several attributes
to the function specific to the JSON-RPC machinery and adds it to the default
jsonrpc_site if one isn't provided. You must import the module containing
these functions in your urls.py.
name
The name of your method. IE: `namespace.methodName` The method name
can include type information, like `ns.method(String, Array) -> Nil`.
authenticated=False
Adds `username` and `password` arguments to the beginning of your
method if the user hasn't already been authenticated. These will
be used to authenticate the user against `django.contrib.authenticate`
If you use HTTP auth or other authentication middleware, `username`
and `password` will not be added, and this method will only check
against `request.user.is_authenticated`.
You may pass a callable to replace `django.contrib.auth.authenticate`
as the authentication method. It must return either a User or `None`
and take the keyword arguments `username` and `password`.
safe=False
Designates whether or not your method may be accessed by HTTP GET.
By default this is turned off.
validate=False
Validates the arguments passed to your method based on type
information provided in the signature. Supply type information by
including types in your method declaration. Like so:
@jsonrpc_method('myapp.specialSauce(Array, String)', validate=True)
def special_sauce(self, ingredients, instructions):
return SpecialSauce(ingredients, instructions)
Calls to `myapp.specialSauce` will now check each arguments type
before calling `special_sauce`, throwing an `InvalidParamsError`
when it encounters a discrepancy. This can significantly reduce the
amount of code required to write JSON-RPC services.
site=default_site
Defines which site the jsonrpc method will be added to. Can be any
object that provides a `register(name, func)` method. |
def get_rarity_info(self, rarity: str):
for c in self.constants.rarities:
if c.name == rarity:
return c | Returns card info from constants
Parameters
---------
rarity: str
A rarity name
Returns None or Constants |
def clamp(color, min_v, max_v):
h, s, v = rgb_to_hsv(*map(down_scale, color))
min_v, max_v = map(down_scale, (min_v, max_v))
v = min(max(min_v, v), max_v)
return tuple(map(up_scale, hsv_to_rgb(h, s, v))) | Clamps a color such that the value is between min_v and max_v. |
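A runnable sketch of `clamp` using `colorsys`; the `down_scale`/`up_scale` helpers are assumptions (0-255 channel to 0-1 float and back):

```python
from colorsys import rgb_to_hsv, hsv_to_rgb

def down_scale(x):
    return x / 255.0            # assumed: 0-255 channel -> 0-1 float

def up_scale(x):
    return int(round(x * 255))  # assumed: 0-1 float -> 0-255 channel

def clamp(color, min_v, max_v):
    h, s, v = rgb_to_hsv(*map(down_scale, color))
    min_v, max_v = map(down_scale, (min_v, max_v))
    v = min(max(min_v, v), max_v)
    return tuple(map(up_scale, hsv_to_rgb(h, s, v)))

print(clamp((255, 255, 255), 50, 200))  # (200, 200, 200): value capped at 200
```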
def _init_map(self, record_types=None, **kwargs):
osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
    self._my_map['rubricId'] = self._rubric_default
    self._my_map['assignedBankIds'] = [str(kwargs['bank_id'])]
    self._my_map['levelId'] = self._level_default
    if self._supports_simple_sequencing():
        self._my_map['childIds'] = [] | Initialize form map |
def getresponse(self):
resp = self.http.getresponse()
self.log.info("resp is %s", str(resp))
if resp.status < 400:
return resp
else:
errtext = resp.read()
        content_type = resp.getheader('Content-Type', '')
raise HttpError(code=resp.status, content_type=content_type, content=errtext) | Pass-thru method to make this class behave a little like HTTPConnection |
def cli(yaml_paths, pptx_template_path, font_size, master_slide_idx, slide_layout_idx, dst_dir, font_name,
slide_txt_alignment, validate):
dst_dir = Path(dst_dir)
pptx_template_path = Path(pptx_template_path)
pptx_template = pick_master_slide(pptx_template_path)
yamlfiles = []
for yaml_path in yaml_paths:
yaml_path = Path(yaml_path)
if yaml_path.is_dir():
yamlfiles.extend([yml for yml in yaml_path.iterdir()])
else:
yamlfiles.append(yaml_path)
if validate:
exit_fail = False
for yamlfile in yamlfiles:
try:
validate_yaml_file(SCHEMA_FOR_YAML, Path(yamlfile))
msg = f"VALIDATE: Validation of {yamlfile} passed"
click.echo(click.style(msg, fg="blue"))
except jsonschema.exceptions.ValidationError as err:
msg = f"ERR: {yamlfile} {str(err.message)} {err.path}"
click.echo(click.style(msg, fg="red"), nl=True)
exit_fail = True
except Exception:
raise
if exit_fail:
sys.exit(1)
for yamlfile in yamlfiles:
try:
r = build_slide(
Path(yamlfile),
pptx_template,
master_slide_idx,
slide_layout_idx,
font_size,
dst_dir,
font_name,
slide_txt_alignment
)
msg = f"PPTX: {r}"
click.echo(click.style(msg, fg="green"))
except Exception:
raise | A powerpoint builder
https://github.com/sukujgrg/pptx-builder-from-yaml |
def Memory_setPressureNotificationsSuppressed(self, suppressed):
    assert isinstance(suppressed, (bool,)
        ), "Argument must be of type bool. Received type: %s" % type(
        suppressed)
    subdom_funcs = self.synchronous_command(
        'Memory.setPressureNotificationsSuppressed', suppressed=suppressed)
return subdom_funcs | Function path: Memory.setPressureNotificationsSuppressed
Domain: Memory
Method name: setPressureNotificationsSuppressed
Parameters:
Required arguments:
'suppressed' (type: boolean) -> If true, memory pressure notifications will be suppressed.
No return value.
Description: Enable/disable suppressing memory pressure notifications in all processes. |
def follow_link_by_selector(self, selector):
elem = find_element_by_jquery(world.browser, selector)
    href = elem.get_attribute('href')
world.browser.get(href) | Navigate to the href of the element matching the CSS selector.
N.B. this does not click the link, but changes the browser's URL. |
def get_all_tags(filters=None, region=None, key=None, keyid=None, profile=None):
{"tag:Name": myInstanceNameTag, resource-type: instance}
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ret = conn.get_all_tags(filters)
tags = {}
for t in ret:
if t.res_id not in tags:
tags[t.res_id] = {}
tags[t.res_id][t.name] = t.value
return tags
except boto.exception.BotoServerError as e:
log.error(e)
return {} | Describe all tags matching the filter criteria, or all tags in the account otherwise.
.. versionadded:: 2018.3.0
filters
(dict) - Additional constraints on which volumes to return. Note that valid filters vary
extensively depending on the resource type. When in doubt, search first without a filter
and then use the returned data to help fine-tune your search. You can generally garner the
resource type from its ID (e.g. `vol-XXXXX` is a volume, `i-XXXXX` is an instance, etc.).
CLI Example:
.. code-block:: bash
salt-call boto_ec2.get_all_tags '{"tag:Name": myInstanceNameTag, resource-type: instance}' |
def expire_data(self):
while self.sample_storage_size() > self.samples_cap:
record = self.database[self.sample_collection].find().sort(,pymongo.ASCENDING).limit(1)[0]
self.remove_sample(record[]) | Expire data within the samples collection. |
def fetch(self):
try:
        with self.gen_lock(lock_type='update'):
            log.debug('Fetching %s remote \'%s\'', self.role, self.id)
            return self._fetch()
    except GitLockError as exc:
        if exc.errno == errno.EEXIST:
            log.warning(
                'Update lock file is present for %s remote \'%s\', '
                'skipping. If this warning persists, it is possible that '
                'the update process was interrupted, but the lock could '
                'also have been set manually. Removing %s or running '
                '\'salt-run cache.clear_git_lock %s type=update\' will '
                'allow updates to continue for this remote.',
                self.role,
                self.id,
                self._get_lock_file(lock_type='update'),
                self.role,
            )
return False | Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
This function requires that a _fetch() function be implemented in a
sub-class. |
def get_ipv4fs_table(self):
ipv4fs_table = self._global_tables.get(RF_IPv4_FLOWSPEC)
if not ipv4fs_table:
ipv4fs_table = IPv4FlowSpecTable(self._core_service,
self._signal_bus)
self._global_tables[RF_IPv4_FLOWSPEC] = ipv4fs_table
self._tables[(None, RF_IPv4_FLOWSPEC)] = ipv4fs_table
return ipv4fs_table | Returns global IPv4 Flow Specification table.
Creates the table if it does not exist. |
def get_files():
sources = []
headers = ["datatable/include/datatable.h"]
assert os.path.isfile(headers[0])
for dirpath, _, filenames in os.walk("c"):
for f in filenames:
fullname = os.path.join(dirpath, f)
if f.endswith(".h") or f.endswith(".inc"):
headers.append(fullname)
elif f.endswith(".c") or f.endswith(".cc"):
sources.append(fullname)
return (sources, headers) | Return the list of all source/header files in `c/` directory.
The files will have pathnames relative to the current folder, for example
"c/csv/reader_utils.cc". |
def Pack(self, msg, type_url_prefix='type.googleapis.com/'):
    if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/':
        self.type_url = '%s/%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
    else:
        self.type_url = '%s%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
self.value = msg.SerializeToString() | Packs the specified message into current Any message. |
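A dependency-free sketch of how the `type_url` is composed; `type.googleapis.com/` is the usual protobuf default prefix, and `DESCRIPTOR.full_name` is stood in by a plain string:

```python
def make_type_url(full_name, type_url_prefix='type.googleapis.com/'):
    # Insert a '/' between prefix and message name only when missing.
    if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/':
        return '%s/%s' % (type_url_prefix, full_name)
    return '%s%s' % (type_url_prefix, full_name)

print(make_type_url('google.protobuf.Duration'))
# type.googleapis.com/google.protobuf.Duration
print(make_type_url('google.protobuf.Duration', 'example.com'))
# example.com/google.protobuf.Duration
```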
def update_dois(self):
    dois = record_get_field_instances(self.record, '024', ind1="7")
    all_dois = {}
    for field in dois:
        subs = field_get_subfield_instances(field)
        subs_dict = dict(subs)
        if subs_dict.get('a'):
            if subs_dict['a'] in all_dois:
                record_delete_field(self.record, tag='024', ind1='7', field_position_global=field[4])
                continue
            all_dois[subs_dict['a']] = field | Remove duplicate BibMatch DOIs. |
def send_registration_mail(email, *, request, **kwargs):
render_to_mail(
"registration/email_registration_email",
{"url": get_confirmation_url(email, request, **kwargs)},
to=[email],
).send() | send_registration_mail(email, *, request, **kwargs)
Sends the registration mail
* ``email``: The email address where the registration link should be
sent to.
* ``request``: A HTTP request instance, used to construct the complete
URL (including protocol and domain) for the registration link.
* Additional keyword arguments for ``get_confirmation_url`` respectively
``get_confirmation_code``.
The mail is rendered using the following two templates:
* ``registration/email_registration_email.txt``: The first line of this
template will be the subject, the third to the last line the body of the
email.
* ``registration/email_registration_email.html``: The body of the HTML
version of the mail. This template is **NOT** available by default and
is not required either. |
def store(self, value, context=None):
if isinstance(value, (str, unicode)):
value = self.valueFromString(value)
if self.testFlag(self.Flags.I18n):
if not isinstance(value, dict):
context = context or orb.Context()
return {context.locale: value}
else:
return value
else:
return value | Converts the value to one that is safe to store on a record within
the record values dictionary
:param value | <variant>
:return <variant> |
def code(self):
    code = getattr(self, '_code', None)
if not code:
if self.has_body():
code = 200
else:
code = 204
return code | the http status code to return to the client, by default, 200 if a body is present otherwise 204 |
def get_rosetta_sequence_to_atom_json_map(self):
import json
if not self.rosetta_to_atom_sequence_maps and self.rosetta_sequences:
raise Exception()
d = {}
for c, sm in self.rosetta_to_atom_sequence_maps.iteritems():
for k, v in sm.map.iteritems():
d[k] = v
return json.dumps(d, indent = 4, sort_keys = True) | Returns the mapping from Rosetta residue IDs to PDB ATOM residue IDs in JSON format. |
def cylinder(cls, **kwargs):
    s = kwargs.get('start', Vector(0.0, -1.0, 0.0))
    e = kwargs.get('end', Vector(0.0, 1.0, 0.0))
    if isinstance(s, list):
        s = Vector(*s)
    if isinstance(e, list):
        e = Vector(*e)
    r = kwargs.get('radius', 1.0)
    slices = kwargs.get('slices', 16)
ray = e.minus(s)
axisZ = ray.unit()
isY = (math.fabs(axisZ.y) > 0.5)
axisX = Vector(float(isY), float(not isY), 0).cross(axisZ).unit()
axisY = axisX.cross(axisZ).unit()
start = Vertex(s, axisZ.negated())
end = Vertex(e, axisZ.unit())
polygons = []
def point(stack, angle, normalBlend):
out = axisX.times(math.cos(angle)).plus(
axisY.times(math.sin(angle)))
pos = s.plus(ray.times(stack)).plus(out.times(r))
normal = out.times(1.0 - math.fabs(normalBlend)).plus(
axisZ.times(normalBlend))
return Vertex(pos, normal)
dt = math.pi * 2.0 / float(slices)
for i in range(0, slices):
t0 = i * dt
i1 = (i + 1) % slices
t1 = i1 * dt
polygons.append(Polygon([start.clone(),
point(0., t0, -1.),
point(0., t1, -1.)]))
polygons.append(Polygon([point(0., t1, 0.),
point(0., t0, 0.),
point(1., t0, 0.),
point(1., t1, 0.)]))
polygons.append(Polygon([end.clone(),
point(1., t1, 1.),
point(1., t0, 1.)]))
return CSG.fromPolygons(polygons) | Returns a cylinder.
Kwargs:
start (list): Start of cylinder, default [0, -1, 0].
end (list): End of cylinder, default [0, 1, 0].
radius (float): Radius of cylinder, default 1.0.
slices (int): Number of slices, default 16. |
def __apply(self, migration=None, run_all=False):
out = StringIO()
trace = None
    migrate_kwargs = {
        'interactive': False,
        'stdout': out,
        'database': self._database_name,
    }
    if migration is not None:
        migrate_kwargs.update({
            'app_label': migration[0],
            'migration_name': migration[1],
        })
    elif not run_all:
        raise ValueError('Either "migration" must be given or "run_all" must be True.')
start = self._timer()
try:
call_command("migrate", **migrate_kwargs)
except Exception:
        trace = ''.join(traceback.format_exception(*sys.exc_info()))
finally:
end = self._timer()
successes, failure = self._parse_migrate_output(out.getvalue())
    self._migration_state.append({
        'database': self._database_name,
        'migration': 'all' if run_all else (migration[0], migration[1]),
        'duration': end - start,
        'output': _remove_escape_characters(out.getvalue()),
        'succeeded_migrations': successes,
        'failed_migration': failure,
        'traceback': trace,
        'success': failure is None and trace is None,
    })
    if failure is not None:
        raise CommandError("Migration failed for app '{}' - migration '{}'.\n".format(*failure))
    elif trace is not None:
        raise CommandError("Migrations failed unexpectedly. See self.state['traceback'] for details.") | If a migration is supplied, runs that migration and appends to state.
If run_all==True, runs all migrations.
Raises a ValueError if neither "migration" nor "run_all" are provided. |
def on_connection_open(self, connection):
    LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel() | This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection |
async def get_departures(self):
from .common import CommonFunctions
common = CommonFunctions(self.loop, self.session)
departures = []
    endpoint = '{0}/StopVisit/GetDepartures/{1}'.format(BASE_URL,
                                                        str(self.stopid))
    data = await common.api_call(endpoint)
    for entries in data or []:
        try:
            data = entries['MonitoredVehicleJourney']
            if self.destination is not None:
                if data['DestinationName'] == self.destination:
                    data = entries['MonitoredVehicleJourney']
                    line = data['PublishedLineName']
                    destinationname = data['DestinationName']
                    monitored = data['MonitoredCall']
                    time = monitored['ExpectedDepartureTime']
                    departures.append({"time": time,
                                       "line": line,
                                       "destination": destinationname})
            else:
                data = entries['MonitoredVehicleJourney']
                line = data['PublishedLineName']
                destinationname = data['DestinationName']
                monitored = data['MonitoredCall']
                time = monitored['ExpectedDepartureTime']
                departures.append({"time": time,
                                   "line": line,
                                   "destination": destinationname})
        except (TypeError, KeyError, IndexError) as error:
            LOGGER.error('Error parsing departure data, %s', error)
    self._departures = await common.sort_data(departures, 'time') | Get departure info from stopid. |
def build_docs(location="doc-source", target=None, library="icetea_lib"):
cmd_ar = ["sphinx-apidoc", "-o", location, library]
try:
print("Generating api docs.")
retcode = check_call(cmd_ar)
except CalledProcessError as error:
print("Documentation build failed. Return code: {}".format(error.returncode))
return 3
except OSError as error:
print(error)
print("Documentation build failed. Are you missing Sphinx? Please install sphinx using "
".")
return 3
target = "doc{}html".format(os.sep) if target is None else target
cmd_ar = ["sphinx-build", "-b", "html", location, target]
try:
print("Building html documentation.")
retcode = check_call(cmd_ar)
except CalledProcessError as error:
print("Documentation build failed. Return code: {}".format(error.returncode))
return 3
except OSError as error:
print(error)
print("Documentation build failed. Are you missing Sphinx? Please install sphinx using "
".")
return 3
print("Documentation built.")
return 0 | Build documentation for Icetea. Start by autogenerating module documentation
and finish by building html.
:param location: Documentation source
:param target: Documentation target path
:param library: Library location for autodoc.
:return: 3 if something fails, 0 if successful. |
def _receive(self, msg):
msg = self._convert(msg)
if msg is None:
return
str_msg = self.verbose and self._msg_to_str(msg)
if self.verbose and log.is_debug():
        log.debug('%s', str_msg)
if self.pre_routing:
self.pre_routing.receive(msg)
receiver, msg = self.routing.receive(msg)
if receiver:
receiver.receive(msg)
if self.verbose:
        log.info('%s -> %s to %s', str_msg[:128], msg,
                 repr(receiver)) | Receive a message from the input source and perhaps raise an Exception. |
def get_ribo_counts(ribo_fileobj, transcript_name, read_lengths, read_offsets):
read_counts = {}
total_reads = 0
for record in ribo_fileobj.fetch(transcript_name):
query_length = record.query_length
position_ref = record.pos + 1
for index, read_length in enumerate(read_lengths):
position = position_ref
if read_length == 0 or read_length == query_length:
position += read_offsets[index]
else:
continue
total_reads += 1
try:
read_counts[position]
except KeyError:
read_counts[position] = {1: 0, 2: 0, 3: 0}
rem = position % 3
if rem == 0:
read_counts[position][3] += 1
else:
read_counts[position][rem] += 1
    log.debug('Total read counts: {}'.format(total_reads))
    log.debug('Read counts for {}: {}'.format(transcript_name, read_counts))
return read_counts, total_reads | For each mapped read of the given transcript in the BAM file
(pysam AlignmentFile object), return the position (+1) and the
corresponding frame (1, 2 or 3) to which it aligns.
Keyword arguments:
ribo_fileobj -- file object - BAM file opened using pysam AlignmentFile
transcript_name -- Name of transcript to get counts for
read_length (optional) -- If provided, get counts only for reads of this length. |
def _get_select_commands(self, source, tables):
row_queries = {tbl: self.select_all(tbl, execute=False) for tbl in
                   tqdm(tables, total=len(tables), desc='{}'.format(source))}
for tbl, command in row_queries.items():
if isinstance(command, str):
row_queries[tbl] = [command]
return [(tbl, cmd) for tbl, cmds in row_queries.items() for cmd in cmds] | Create select queries for all of the tables from a source database.
:param source: Source database name
:param tables: Iterable of table names
:return: Dictionary of table keys, command values |
def identify_image(image):
try:
attrs = tuple(getattr(image, attr) for attr in UNIQUE_IMAGE_ATTRIBUTES)
except AttributeError:
attrs = tuple(image.get(attr, None) for attr in UNIQUE_IMAGE_ATTRIBUTES)
ui = UniqueImage(*attrs)
return ui._replace(
unified=ui.unified or False, additional_variants=ui.additional_variants or []
) | Provides a tuple of image's UNIQUE_IMAGE_ATTRIBUTES. Note:
this is not guaranteed to be unique (and will often not be)
for pre-1.1 metadata, as subvariant did not exist. Provided as
a function so consumers can use it on plain image dicts read from
the metadata or PDC. |
def reproject_geometry(
geometry, src_crs=None, dst_crs=None, error_on_clip=False, validity_check=True,
antimeridian_cutting=False
):
src_crs = _validated_crs(src_crs)
dst_crs = _validated_crs(dst_crs)
def _reproject_geom(geometry, src_crs, dst_crs):
if geometry.is_empty:
return geometry
else:
out_geom = to_shape(
transform_geom(
src_crs.to_dict(),
dst_crs.to_dict(),
mapping(geometry),
antimeridian_cutting=antimeridian_cutting
)
)
return _repair(out_geom) if validity_check else out_geom
if src_crs == dst_crs or geometry.is_empty:
return _repair(geometry)
elif (
dst_crs.is_epsg_code and
dst_crs.get("init") in CRS_BOUNDS and
dst_crs.get("init") != "epsg:4326"
):
wgs84_crs = CRS().from_epsg(4326)
crs_bbox = box(*CRS_BOUNDS[dst_crs.get("init")])
geometry_4326 = _reproject_geom(geometry, src_crs, wgs84_crs)
if error_on_clip and not geometry_4326.within(crs_bbox):
raise RuntimeError("geometry outside target CRS bounds")
return _reproject_geom(crs_bbox.intersection(geometry_4326), wgs84_crs, dst_crs)
else:
return _reproject_geom(geometry, src_crs, dst_crs) | Reproject a geometry to target CRS.
Also, clips geometry if it lies outside the destination CRS boundary.
Supported destination CRSes for clipping: 4326 (WGS84), 3857 (Spherical
Mercator) and 3035 (ETRS89 / ETRS-LAEA).
Parameters
----------
geometry : ``shapely.geometry``
src_crs : ``rasterio.crs.CRS`` or EPSG code
CRS of source data
dst_crs : ``rasterio.crs.CRS`` or EPSG code
target CRS
error_on_clip : bool
raises a ``RuntimeError`` if a geometry is outside of CRS bounds
(default: False)
validity_check : bool
checks if reprojected geometry is valid and throws ``TopologicalError``
if invalid (default: True)
antimeridian_cutting : bool
cut geometry at Antimeridian; can result in a multipart output geometry
Returns
-------
geometry : ``shapely.geometry`` |
def checkCursor(self):
if self.nRows == 0 or self.cursorRowIndex <= 0:
self.cursorRowIndex = 0
elif self.cursorRowIndex >= self.nRows:
self.cursorRowIndex = self.nRows-1
if self.cursorVisibleColIndex <= 0:
self.cursorVisibleColIndex = 0
elif self.cursorVisibleColIndex >= self.nVisibleCols:
self.cursorVisibleColIndex = self.nVisibleCols-1
if self.topRowIndex <= 0:
self.topRowIndex = 0
elif self.topRowIndex > self.nRows-1:
self.topRowIndex = self.nRows-1
x = self.cursorVisibleColIndex - self.leftVisibleColIndex
y = self.cursorRowIndex - self.topRowIndex + 1
if y < 1:
self.topRowIndex = self.cursorRowIndex
elif y > self.nVisibleRows:
self.topRowIndex = self.cursorRowIndex-self.nVisibleRows+1
if x <= 0:
self.leftVisibleColIndex = self.cursorVisibleColIndex
else:
while True:
if self.leftVisibleColIndex == self.cursorVisibleColIndex:
break
self.calcColLayout()
mincolidx, maxcolidx = min(self.visibleColLayout.keys()), max(self.visibleColLayout.keys())
                if self.cursorVisibleColIndex < mincolidx:
                    self.leftVisibleColIndex -= max((mincolidx - self.cursorVisibleColIndex)//2, 1)
                    continue
                elif self.cursorVisibleColIndex > maxcolidx:
                    self.leftVisibleColIndex += max((self.cursorVisibleColIndex - maxcolidx)//2, 1)
                    continue
cur_x, cur_w = self.visibleColLayout[self.cursorVisibleColIndex]
if cur_x+cur_w < self.vd.windowWidth:
break
self.leftVisibleColIndex += 1 | Keep cursor in bounds of data and screen. |
def update_payment_request(self, tid, currency=None, amount=None,
action=None, ledger=None, callback_uri=None,
display_message_uri=None, capture_id=None,
additional_amount=None, text=None, refund_id=None,
required_scope=None, required_scope_text=None, line_items=None):
    arguments = {'ledger': ledger,
                 'display_message_uri': display_message_uri,
                 'callback_uri': callback_uri,
                 'currency': currency,
                 'amount': amount,
                 'additional_amount': additional_amount,
                 'capture_id': capture_id,
                 'action': action,
                 'text': text,
                 'refund_id': refund_id}
    if required_scope:
        arguments['required_scope'] = required_scope
        arguments['required_scope_text'] = required_scope_text
    if line_items:
        arguments['line_items'] = line_items
    arguments = {k: v for k, v in arguments.items() if v is not None}
    return self.do_req('PUT',
                       self.merchant_api_base_url + '/payment_request/' +
                       tid + '/', arguments) | Update payment request, reauthorize, capture, release or abort
It is possible to update ledger and the callback URIs for a payment
request. Changes are always appended to the open report of a ledger,
and notifications are sent to the callback registered at the time of
notification.
Capturing an authorized payment or reauthorizing is done with the
action field.
The call is idempotent; that is, if one posts the same amount,
additional_amount and capture_id twice with action CAPTURE, only one
capture is performed. Similarly, if one posts twice with action CAPTURE
without any amount stated, to capture the full amount, only one full
capture is performed.
Arguments:
ledger:
Log entries will be added to the open report on the specified
ledger
display_message_uri:
Messages that can be used to inform the POS operator about the
progress of the payment request will be POSTed to this URI if
provided
callback_uri:
If provided, mCASH will POST to this URI when the status of the
payment request changes, using the message mechanism described
in the introduction. The data in the "object" part of the
message is the same as what can be retrieved by calling GET on
the "/payment_request/<tid>/outcome/" resource URI.
currency:
3 chars https://en.wikipedia.org/wiki/ISO_4217
amount:
The base amount of the payment
additional_amount:
Typically cash withdrawal or gratuity
capture_id:
Local id for capture. Must be set if amount is set, otherwise
capture_id must be unset.
tid:
Transaction id assigned by mCASH
refund_id:
Refund id needed when doing partial refund
text:
For example reason for refund.
action:
Action to perform.
required_scope:
Scopes required to fulfill payment
line_items:
An updated line_items. Will fail if line_items
already set in the payment request or if the sum of the totals
is different from the original amount.
required_scope_text:
Text that is shown to user when asked for permission. |
def create_snapshot(self, systemId, snapshotSpecificationObject):
self.conn.connection._check_login()
    response = self.conn.connection._do_post("{}/{}{}/{}".format(self.conn.connection._api_url, "instances/System::", systemId, "action/snapshotVolumes"), json=snapshotSpecificationObject.__to_dict__())
return response | Create snapshot for list of volumes
:param systemID: Cluster ID
:param snapshotSpecificationObject: Of class SnapshotSpecification
:rtype: SnapshotGroupId |
def login(self, login, password, set_auth=False):
rv = self.session.post(
self.host,
dumps({
"method": "common.db.login",
"params": [login, password]
}),
)
    rv = loads(rv.content)['result']
if set_auth:
self.set_auth(
SessionAuth(login, *rv)
)
return rv | Attempts a login to the remote server
and on success returns user id and session
or None
Warning: Do not depend on this. This will be deprecated
with SSO.
param set_auth: sets the authentication on the client |
def putParamset(self, paramset, data={}):
try:
if paramset in self._PARAMSETS and data:
self._proxy.putParamset(self._ADDRESS, paramset, data)
self.updateParamsets()
return True
else:
return False
except Exception as err:
LOG.error("HMGeneric.putParamset: Exception: " + str(err))
return False | Some devices act upon changes to paramsets.
A "putted" paramset must not contain all keys available in the specified paramset,
just the ones which are writable and should be changed. |
def get_outputs_from_cm(index, cm):
return tuple(i for i in range(cm.shape[0]) if cm[index][i]) | Return indices of the outputs of node with the given index. |
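A quick demonstration on a hypothetical 3-node connectivity matrix, where `cm[i][j] != 0` means node `i` feeds node `j`:

```python
import numpy as np

cm = np.array([[0, 1, 1],
               [0, 0, 1],
               [0, 0, 0]])

def get_outputs_from_cm(index, cm):
    return tuple(i for i in range(cm.shape[0]) if cm[index][i])

print(get_outputs_from_cm(0, cm))  # (1, 2)
print(get_outputs_from_cm(2, cm))  # ()
```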
def load_scenario(self, scenario_name, **kwargs):
scenario = self._known_scenarios.get(scenario_name)
if scenario is None:
raise ArgumentError("Unknown scenario %s" % scenario_name, known_scenarios=list(self._known_scenarios))
scenario(**kwargs) | Load a scenario into the emulated object.
Scenarios are specific states of an an object that can be customized
with keyword parameters. Typical examples are:
- data logger with full storage
- device with low battery indication on
Args:
scenario_name (str): The name of the scenario that we wish to
load.
**kwargs: Any arguments that should be passed to configure
the scenario. These arguments will be passed directly
to the scenario handler. |
def to_api_data(self, restrict_keys=None):
cc = self._cc
        data = {
            cc('column_hidden'): self._column_hidden,
            cc('row_hidden'): self._row_hidden,
            cc('formulas'): self._formulas,
            cc('formulas_local'): self._formulas_local,
            cc('formulas_r1_c1'): self._formulas_r1_c1,
            cc('number_format'): self._number_format,
            cc('values'): self._values,
        }
if restrict_keys:
for key in list(data.keys()):
if key not in restrict_keys:
del data[key]
return data | Returns a dict to communicate with the server
:param restrict_keys: a set of keys to restrict the returned data to
:rtype: dict |
def get_sample_frame(self):
for frame in self.frames:
return frame.open()
for res in self.results.values():
return res.open()
return None | Return first available image in observation result |
def get_single_allele_from_reads(allele_reads):
allele_reads = list(allele_reads)
if len(allele_reads) == 0:
raise ValueError("Expected non-empty list of AlleleRead objects")
seq = allele_reads[0].allele
if any(read.allele != seq for read in allele_reads):
raise ValueError("Expected all AlleleRead objects to have same allele , got %s" % (
seq, allele_reads))
return seq | Given a sequence of AlleleRead objects, which are expected to all have
the same allele, return that allele. |
def delete(self):
self._engine.data[self.typeof] = \
[loopback for loopback in self._engine.data.get(self.typeof, [])
         if loopback.get('address') != self.address]
self._engine.update() | Delete a loopback cluster virtual interface from this engine.
Changes to the engine configuration are done immediately.
You can find cluster virtual loopbacks by iterating at the
engine level::
for loopbacks in engine.loopback_interface:
...
:raises UpdateElementFailed: failure to delete loopback interface
:return: None |
def write_display(self):
for i, value in enumerate(self.buffer):
self._device.write8(i, value) | Write display buffer to display hardware. |
def char_sets():
    if not hasattr(char_sets, 'setlist'):
        clist = []
        try:
            data = requests.get(
                'http://www.iana.org/assignments/character-sets/character-sets-1.csv'
            )
        except requests.exceptions.RequestException:
            return []
        for line in data.iter_lines():
            if line:
                line = line.decode("utf-8")
                if line.count(',') > 0:
                    vals = line.split(',')
                    if vals[0]:
                        clist.append(vals[0])
                    else:
                        clist.append(vals[1])
        char_sets.setlist = clist
return char_sets.setlist | Return a list of the IANA Character Sets, or an empty list if the
IANA website is unreachable.
Store it as a function attribute so that we only build the list once. |
def get(self, reset=True):
t = (builtin_time.clock()-self.clock)*1000
if reset is True:
self.reset()
return(t) | Get time since last initialisation / reset.
Parameters
----------
reset = bool, optional
Should the clock be reset after returning time?
Returns
----------
float
Time passed in milliseconds.
Example
----------
>>> import neurokit as nk
>>> time_passed_since_neurobox_loading = nk.time.get()
>>> nk.time.reset()
>>> time_passed_since_reset = nk.time.get()
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- time |
def check_fast(self, r, k=None):
n = self.n
if not k: k = self.k
g = self.g[k]
if isinstance(r, _str):
r = [ord(x) for x in r]
r = Polynomial([GF2int(x) for x in r])
sz = self._syndromes(r, k=k)
return sz.coefficients.count(GF2int(0)) == len(sz) | Fast check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes to detect if the message was tampered, or after decoding to check that the message was fully recovered.
returns True/False |
def create_point(self, x, y):
if (not isinstance(x, six.integer_types) or
not isinstance(y, six.integer_types)):
raise ValueError("The coordinates must be longs.")
return _ECDSA_Point(SECP256k1.curve, x, y) | Create an ECDSA point on the SECP256k1 curve with the given coords.
:param x: The x coordinate on the curve
:type x: long
:param y: The y coodinate on the curve
:type y: long |
def timeseries(self, start, end, **kwargs):
    self._check_geo_param(kwargs)
    kwargs['start'] = start
    kwargs['end'] = end
    kwargs['token'] = self.token
    return self._get_response('stations/timeseries', kwargs) | Returns a time series of observations at a user specified location for a specified time. Users must specify
at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa',
'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See
below mandatory and optional parameters. Also see the metadata() function for station IDs.
Arguments:
----------
start: string, mandatory
Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC
e.g., start='201306011800'
end: string, mandatory
End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC
e.g., end='201306011800'
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'
showemptystations: string, optional
Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are
omitted by default.
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: string, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: string, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars.
status: string, optional
A value of either active or inactive returns stations currently set as active or inactive in the archive.
Omitting this param returns all stations. e.g. status='active'
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of time series observations through the get_response() function.
Raises:
-------
None. |
def GetFileEntryByPathSpec(self, path_spec):
    row_index = getattr(path_spec, 'row_index', None)
    row_condition = getattr(path_spec, 'row_condition', None)
if row_index is None and row_condition is None:
return sqlite_blob_file_entry.SQLiteBlobFileEntry(
self._resolver_context, self, path_spec, is_root=True,
is_virtual=True)
return sqlite_blob_file_entry.SQLiteBlobFileEntry(
self._resolver_context, self, path_spec) | Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
FileEntry: a file entry or None. |
def _canonicalize_query(self, query):
def transform_query(q):
for encoder in self.query_encoders:
q = encoder.encode(q,[])
if isinstance(q, dict):
nq = {}
for key,value in q.items():
new_key = key
                if isinstance(value,dict) and len(value) == 1 and list(value.keys())[0].startswith('$'):
                    if list(value.keys())[0] in ('$all','$in'):
                        if list(value.values())[0] and isinstance(list(value.values())[0][0],Document):
                            if self._use_pk_based_refs:
                                new_key+='.pk'
                            else:
                                new_key+='.__ref__'
                elif isinstance(value,Document):
                    if self._use_pk_based_refs:
                        new_key+='.pk'
                    else:
                        new_key+='.__ref__'
nq[new_key] = transform_query(value)
return nq
elif isinstance(q, (list,QuerySet,tuple)):
return [transform_query(x) for x in q]
elif isinstance(q,Document):
collection = self.get_collection_for_obj(q)
if self._use_pk_based_refs:
return q.pk
else:
return "%s:%s" % (collection,q.pk)
else:
return q
return transform_query(query) | Transform the query dictionary to replace e.g. documents with __ref__ fields. |
def dragMoveEvent( self, event ):
filt = self.dragDropFilter()
if ( not filt ):
super(XCalendarWidget, self).dragMoveEvent(event)
return
filt(self, event) | Processes the drag drop event using the filter set by the \
setDragDropFilter
:param event | <QDragEvent> |
def cmd_map(self, args):
from MAVProxy.modules.mavproxy_map import mp_slipmap
if len(args) < 1:
print("usage: map <icon|set>")
elif args[0] == "icon":
if len(args) < 3:
print("Usage: map icon <lat> <lon> <icon>")
else:
lat = args[1]
lon = args[2]
            flag = 'flag.png'
            if len(args) > 3:
                flag = args[3] + '.png'
            icon = self.map.icon(flag)
            self.map.add_object(mp_slipmap.SlipIcon('icon - %s [%u]' % (str(flag), self.icon_counter),
(float(lat),float(lon)),
icon, layer=3, rotation=0, follow=False))
self.icon_counter += 1
elif args[0] == "set":
self.map_settings.command(args[1:])
self.map.add_object(mp_slipmap.SlipBrightness(self.map_settings.brightness))
elif args[0] == "sethome":
self.cmd_set_home(args)
elif args[0] == "sethomepos":
self.cmd_set_homepos(args)
elif args[0] == "setorigin":
self.cmd_set_origin(args)
elif args[0] == "setoriginpos":
self.cmd_set_originpos(args)
elif args[0] == "zoom":
self.cmd_zoom(args)
elif args[0] == "center":
self.cmd_center(args)
elif args[0] == "follow":
self.cmd_follow(args)
else:
print("usage: map <icon|set>") | map commands |
async def i2c_read_request(self, command):
device_address = int(command[0])
register = int(command[1])
number_of_bytes = int(command[2])
if command[3] == "0":
read_type = Constants.I2C_READ_CONTINUOUSLY
elif command[3] == "1":
read_type = Constants.I2C_READ
elif command[3] == "2":
read_type = Constants.I2C_READ | Constants.I2C_END_TX_MASK
elif command[3] == "3":
read_type = Constants.I2C_READ_CONTINUOUSLY | Constants.I2C_END_TX_MASK
else:
read_type = Constants.I2C_STOP_READING
await self.core.i2c_read_request(device_address, register, number_of_bytes, read_type,
self.i2c_read_request_callback)
await asyncio.sleep(.1) | This method sends an I2C read request to Firmata. It is qualified by a single shot, continuous
read, or stop reading command.
Special Note: for the read type supply one of the following string values:
"0" = I2C_READ
"1" = I2C_READ | I2C_END_TX_MASK"
"2" = I2C_READ_CONTINUOUSLY
"3" = I2C_READ_CONTINUOUSLY | I2C_END_TX_MASK
"4" = I2C_STOP_READING
:param command: {"method": "i2c_read_request", "params": [I2C_ADDRESS, I2C_REGISTER,
NUMBER_OF_BYTES, I2C_READ_TYPE ]}
:returns: {"method": "i2c_read_request_reply", "params": [DATA]} |
def read_image(image, path=''):
return imageio.imread(os.path.join(path, image)) | Read one image.
Parameters
-----------
image : str
The image file name.
path : str
The image folder path.
Returns
-------
numpy.array
The image. |
def save(package, data, params={}, is_public=False):
for key, value in params.items():
if isinstance(value, np.ndarray):
value = value.astype(float)
params[key] = value.tolist()
build_from_node(package, nodes.DataNode(None, None, data, params))
push(.format(package), is_public=is_public) | Build and push data to Quilt registry at user/package/data_node,
associating params as metadata for the data node.
:param package: short package specifier string, i.e. 'team:user/pkg/subpath'
:param data: data to save (np.ndarray or pd.DataFrame)
:param params: metadata dictionary
:param is_public: boolean kwarg to push the packages publicly |
def _DropCommonSuffixes(filename):
    for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
                   'inl.h', 'impl.h', 'internal.h'):
        if (filename.endswith(suffix) and len(filename) > len(suffix) and
                filename[-len(suffix) - 1] in ('-', '_')):
            return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0] | Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed. |
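The docstring examples can be checked directly against the suffix list reconstructed above (a standalone copy, so it runs on its own):

```python
import os

def drop_common_suffixes(filename):
    for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
                   'inl.h', 'impl.h', 'internal.h'):
        # Strip the suffix only when it is joined by '-' or '_'.
        if (filename.endswith(suffix) and len(filename) > len(suffix) and
                filename[-len(suffix) - 1] in ('-', '_')):
            return filename[:-len(suffix) - 1]
    return os.path.splitext(filename)[0]

assert drop_common_suffixes('foo/foo-inl.h') == 'foo/foo'
assert drop_common_suffixes('foo/bar/foo.cc') == 'foo/bar/foo'
assert drop_common_suffixes('foo/foo_unusualinternal.h') == 'foo/foo_unusualinternal'
```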
def psd(self):
if not self._psd:
errMsg = "The PSD has not been set in the metricParameters "
errMsg += "instance."
raise ValueError(errMsg)
return self._psd | A pyCBC FrequencySeries holding the appropriate PSD.
Return the PSD used in the metric calculation. |
def fail_remaining(self):
self._failed.update(self._graph.nodes)
self._graph = Graph()
self._running = set() | Mark all unfinished tasks (including currently running ones) as
failed. |
def _render_our_module_flags(self, module, output_lines, prefix=''):
flags = self._get_flags_defined_by_module(module)
if flags:
self._render_module_flags(module, flags, output_lines, prefix) | Returns a help string for a given module. |
def _kamb_count(cos_dist, sigma=3):
n = float(cos_dist.size)
dist = _kamb_radius(n, sigma)
count = (cos_dist >= dist).astype(float)
return count, _kamb_units(n, dist) | Original Kamb kernel function (raw count within radius). |
def cancel_order(self, order_id, stock):
    url_fragment = 'venues/{venue}/stocks/{stock}/orders/{order_id}'.format(
venue=self.venue,
stock=stock,
order_id=order_id,
)
url = urljoin(self.base_url, url_fragment)
return self.session.delete(url).json() | Cancel An Order
https://starfighter.readme.io/docs/cancel-an-order |
def _flush_graph_val(self):
if not self._graphvals2set:
return
delafter = {}
for graph, key, branch, turn, tick, value in self._graphvals2set:
if (graph, key, branch) in delafter:
delafter[graph, key, branch] = min((
(turn, tick),
delafter[graph, key, branch]
))
else:
delafter[graph, key, branch] = (turn, tick)
self.sqlmany(
,
*((graph, key, branch, turn, turn, tick)
for ((graph, key, branch), (turn, tick)) in delafter.items())
)
self.sqlmany(, *self._graphvals2set)
self._graphvals2set = [] | Send all new and changed graph values to the database. |
def run(self, target, payload, instance_id=None, hook_id=None, **kwargs):
requests.post(
url=target,
data=json.dumps(payload),
        headers={
            'Content-Type': 'application/json',
            'Authorization': 'Token %s' % settings.HOOK_AUTH_TOKEN
        }
) | target: the url to receive the payload.
payload: a python primitive data structure
instance_id: a possibly None "trigger" instance ID
hook_id: the ID of defining Hook object |
def make_release(cts):
make_release_version = __version__
colorama.init()
text.title("Minchin for Python v{}".format(make_release_version))
print()
text.subtitle("Configuration")
print("base dir -> {}".format(here_directory()))
print("source -> .\{}\\".format(source_directory().relative_to(here_directory())))
print("test dir -> .\{}\\".format(test_directory().relative_to(here_directory())))
print("version file -> .\{}".format(version_file().relative_to(here_directory())))
print()
text.subtitle("Git -- Clean directory?")
print()
text.subtitle("Sort Import Statements")
print()
text.subtitle("Run Tests")
print()
text.subtitle("Update Version Number")
new_version = update_version_number(None)
print()
text.subtitle("Add Release to Changelog")
print()
text.subtitle("Build Documentation")
print()
text.query_yes_quit()
text.subtitle("Build Distributions")
build_distribution()
for server in [
"pypi",
]:
for file_format in ["tar.gz", "whl"]:
print()
text.subtitle("Test {} Build {}".format(file_format, server))
check_local_install(new_version, file_format, server) | Make and upload the release.
Changelog:
- v0.2.1 -- 2016-11-18 -- specify downloading of non-cached version of the
  package so multiple formats can be properly and
  individually tested.
- 0.2.2 -- 2016-11-28 -- move configuration to top of file |
async def _process_latching(self, key, latching_entry):
if latching_entry[Constants.LATCH_CALLBACK]:
if latching_entry[Constants.LATCH_CALLBACK_TYPE]:
await latching_entry[Constants.LATCH_CALLBACK] \
([key, latching_entry[Constants.LATCHED_DATA], time.time()])
else:
latching_entry[Constants.LATCH_CALLBACK] \
([key, latching_entry[Constants.LATCHED_DATA], time.time()])
self.latch_map[key] = [0, 0, 0, 0, 0, None]
else:
updated_latch_entry = latching_entry
updated_latch_entry[Constants.LATCH_STATE] = \
Constants.LATCH_LATCHED
updated_latch_entry[Constants.LATCHED_DATA] = \
latching_entry[Constants.LATCHED_DATA]
updated_latch_entry[Constants.LATCHED_TIME_STAMP] = time.time()
self.latch_map[key] = updated_latch_entry | This is a private utility method.
This method process latching events and either returns them via
callback or stores them in the latch map
:param key: Encoded pin
:param latching_entry: a latch table entry
:returns: Callback or store data in latch map |
def _sanitize(self, value):
if isinstance(value, six.binary_type):
value = value.decode()
if isinstance(value, six.text_type):
        new_value = ''.join(ch for ch in value if self._valid_char(ch))
else:
return value
return value if value == new_value else new_value | Remove the control characters that are not allowed in XML:
https://www.w3.org/TR/xml/#charsets
Leave all other characters. |
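A sketch of what `_valid_char` plausibly checks, following the XML 1.0 `Char` production referenced in the docstring (the range table comes from that spec, not from the original source):

```python
# Code points allowed by XML 1.0:
# #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
VALID_RANGES = [(0x9, 0xA), (0xD, 0xD), (0x20, 0xD7FF),
                (0xE000, 0xFFFD), (0x10000, 0x10FFFF)]

def valid_char(ch):
    cp = ord(ch)
    return any(lo <= cp <= hi for lo, hi in VALID_RANGES)

print(''.join(ch for ch in 'ok\x00\x08text' if valid_char(ch)))  # 'oktext'
```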
def download_file_from_google_drive(ID, destination):
def save_response_content(response, destination, chunk_size=32 * 1024):
        total_size = int(response.headers.get('content-length', 0))
with open(destination, "wb") as f:
            for chunk in tqdm(response.iter_content(chunk_size), total=total_size, unit='B', unit_scale=True,
                              desc=destination):
if chunk:
f.write(chunk)
def get_confirm_token(response):
for key, value in response.cookies.items():
            if key.startswith('download_warning'):
return value
return None
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
    response = session.get(URL, params={'id': ID}, stream=True)
token = get_confirm_token(response)
if token:
        params = {'id': ID, 'confirm': token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination) | Download file from Google Drive.
See ``tl.files.load_celebA_dataset`` for example.
Parameters
--------------
ID : str
The driver ID.
destination : str
The destination for save file. |
def offset_range(self, start, end):
assert start <= end, \
"Locations should always have start < end, got start=%d, end=%d" % (
start, end)
if start < self.start or end > self.end:
raise ValueError("Range (%d, %d) falls outside %s" % (
start, end, self))
if self.on_forward_strand:
return (start - self.start, end - self.start)
else:
return (self.end - end, self.end - start) | Database start/end entries are always ordered such that
start < end. This makes computing a relative position (e.g. of a stop
codon relative to its transcript) complicated since the "end"
position of a backwards locus is actually earlier on the strand.
This function correctly selects a start vs. end value depending
on this locuses's strand and determines that position's offset from
the earliest position in this locus. |
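A worked numeric example of the strand-dependent offset, with the locus bounds as plain variables (no class needed):

```python
locus_start, locus_end = 100, 200  # database order: start < end always

def offset_range(start, end, on_forward_strand):
    if on_forward_strand:
        return (start - locus_start, end - locus_start)
    return (locus_end - end, locus_end - start)

print(offset_range(100, 103, True))   # (0, 3): near the locus start
print(offset_range(197, 200, False))  # (0, 3): 200 is earliest on the reverse strand
```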
def GET_user_profile( self, path_info, user_id ):
if not check_name(user_id) and not check_subdomain(user_id):
        return self._reply_json({'error': 'Invalid name or subdomain'}, status_code=400)
blockstackd_url = get_blockstackd_url()
resp = blockstackd_client.resolve_profile(user_id, hostport=blockstackd_url)
if json_is_error(resp):
        self._reply_json({'error': resp['error']}, status_code=404)
return
    self._reply_json(resp['profile'])
return | Get a user profile.
Reply the profile on success
Return 404 on failure to load |
def list_xz (archive, compression, cmd, verbosity, interactive):
cmdlist = [cmd]
    cmdlist.append('-l')
    if verbosity > 1:
        cmdlist.append('-v')
cmdlist.append(archive)
return cmdlist | List a XZ archive. |
def imagetransformer_base_10l_8h_big_uncond_dr03_dan_64_2d():
hparams = image_transformer2d_base()
hparams.unconditional = True
hparams.hidden_size = 512
hparams.batch_size = 1
hparams.img_len = 64
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 1
hparams.max_length = 3075
hparams.max_length = 14000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.1
hparams.dec_attention_type = cia.AttentionType.LOCAL_2D
hparams.query_shape = (16, 16)
hparams.memory_flange = (8, 8)
return hparams | big 1d model for unconditional generation on imagenet. |
def newer_pairwise_group(sources_groups, targets):
if len(sources_groups) != len(targets):
raise ValueError(" and must be the same length")
n_sources = []
n_targets = []
for i in range(len(sources_groups)):
if newer_group(sources_groups[i], targets[i]):
n_sources.append(sources_groups[i])
n_targets.append(targets[i])
return n_sources, n_targets | Walk both arguments in parallel, testing if each source group is newer
than its corresponding target. Returns a pair of lists (sources_groups,
targets) where sources is newer than target, according to the semantics
of 'newer_group()'. |
def get_unread(self, request, notifications, mark_as_read):
notifications = notifications.filter(is_read=False)
serializer = UnreadNotificationSerializer(list(notifications),
many=True,
context=self.get_serializer_context())
if mark_as_read:
notifications.update(is_read=True)
return Response(serializer.data) | return unread notifications and mark as read
(unless read=false param is passed) |
def delete_lifecycle(self, policy=None, params=None):
return self.transport.perform_request(
"DELETE", _make_path("_ilm", "policy", policy), params=params
) | `<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifecycle.html>`_
:arg policy: The name of the index lifecycle policy |
def geocentric_to_ecef(latitude, longitude, altitude):
r = earth_geo_radius + altitude
x = r * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude))
y = r * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude))
z = r * np.sin(np.deg2rad(latitude))
return x, y, z | Convert geocentric coordinates into ECEF
Parameters
----------
latitude : float or array_like
Geocentric latitude (degrees)
longitude : float or array_like
Geocentric longitude (degrees)
altitude : float or array_like
Height (km) above presumed spherical Earth with radius 6371 km.
Returns
-------
x, y, z
numpy arrays of x, y, z locations in km |
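A quick usage check with scalars; a point on the equator at the prime meridian should land on the x-axis (`earth_geo_radius` assumed to be the 6371 km named in the docstring):

```python
import numpy as np

earth_geo_radius = 6371.0  # km
lat, lon, alt = 0.0, 0.0, 0.0

r = earth_geo_radius + alt
x = r * np.cos(np.deg2rad(lat)) * np.cos(np.deg2rad(lon))
y = r * np.cos(np.deg2rad(lat)) * np.sin(np.deg2rad(lon))
z = r * np.sin(np.deg2rad(lat))
print(x, y, z)  # 6371.0 0.0 0.0
```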
def delete(overlay):
    ret = list()
    old_overlays = list_local()
    cmd = 'layman --quietness=0 --delete {0}'.format(overlay)
    delete_attempt = __salt__['cmd.run_all'](cmd, python_shell=False)
    if delete_attempt['retcode'] != 0:
        raise salt.exceptions.CommandExecutionError(delete_attempt['stderr'])
new_overlays = list_local()
ret = [overlay for overlay in old_overlays if overlay not in new_overlays]
return ret | Remove the given overlay from the your locally installed overlays.
Specify 'ALL' to remove all overlays.
Return a list of the overlays(s) that were removed:
CLI Example:
.. code-block:: bash
salt '*' layman.delete <overlay name> |
def shape_vecs(*args):
ret_args = []
flat_vecs = True
for arg in args:
if type(arg) is numpy.ndarray:
if len(arg.shape) == 1:
arg = shape_vec(arg)
else:
flat_vecs = False
ret_args.append(arg)
return flat_vecs, ret_args | Reshape all ndarrays with ``shape==(n,)`` to ``shape==(n,1)``.
Recognizes ndarrays and ignores all others. |
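A small illustration of the reshape that `shape_vec` presumably performs on flat arrays:

```python
import numpy as np

v = np.arange(3)                  # shape (3,)
col = v.reshape((v.shape[0], 1))  # assumed shape_vec behaviour: shape (3, 1)
print(col.shape)                  # (3, 1)

# A non-ndarray argument is appended unchanged; flat_vecs only drops to
# False when an ndarray that is not 1-D is encountered.
```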
def _first_of_month(self, day_of_week):
dt = self.start_of("day")
if day_of_week is None:
return dt.set(day=1)
month = calendar.monthcalendar(dt.year, dt.month)
calendar_day = (day_of_week - 1) % 7
if month[0][calendar_day] > 0:
day_of_month = month[0][calendar_day]
else:
day_of_month = month[1][calendar_day]
return dt.set(day=day_of_month) | Modify to the first occurrence of a given day of the week
in the current month. If no day_of_week is provided,
modify to the first day of the month. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
:type day_of_week: int
:rtype: DateTime |
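A standalone look at the `calendar.monthcalendar` lookup used above, assuming pendulum-style constants where `MONDAY == 1` (slots outside the month are 0, so the first week may not contain the target weekday):

```python
import calendar

month = calendar.monthcalendar(2019, 3)  # March 2019 starts on a Friday
calendar_day = (1 - 1) % 7               # MONDAY assumed == 1 -> column 0

# Week 0 holds the day only when its slot is non-zero; otherwise use week 1.
day = month[0][calendar_day] if month[0][calendar_day] > 0 else month[1][calendar_day]
print(day)  # 4 -- the first Monday of March 2019
```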
def closed(self, reason):
self.server.remove_connection(self)
self.protocol.connection_lost(reason)
if not isinstance(reason, ConnectionClosed):
logger.warn("connection closed, reason: %s" % str(reason))
else:
logger.info("connection closed") | Callback performed when the transport is closed. |