code | docstring |
|---|---|
def learn(self, x):
y = self.encode(x)
self.update_statistics([y])
self.update_weights([x],[y])
return y | Encodes an input array, and performs weight updates and updates to the activity
statistics according to the respective methods implemented below. |
def _process_properties(self, properties):
if properties is not None:
self._properties = {}
for p in properties:
d = p.split()
            self._properties[d[0]] = d[1] | Transforms the command-line properties into a Python dictionary
:return: None; the parsed properties are stored on ``self._properties`` |
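
A minimal usage sketch with hypothetical inputs: each property arrives as a single "key value" string and is split on whitespace, so values themselves must not contain spaces.

properties = ["host localhost", "port 8080"]
parsed = {}
for p in properties:
    d = p.split()
    parsed[d[0]] = d[1]
assert parsed == {"host": "localhost", "port": "8080"}
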
def config_new(args):
cfg = config_template(args)
while True:
try:
edited = fccore.edit_text(cfg)
if edited == cfg:
eprint("No edits made, method config not installed ...")
break
if __EDITME__ in edited:
eprint("Edit is incomplete, method config not installed ...")
time.sleep(1)
continue
args.config = cfg = edited
config_put(args)
return True
except FireCloudServerError as fce:
__pretty_print_fc_exception(fce)
return False | Attempt to install a new method config into a workspace, by: generating
a template from a versioned method in the methods repo, then launching
a local editor (respecting the $EDITOR environment variable) to fill in
the incomplete input/output fields. Returns True if the config was
successfully installed, otherwise False |
def _update_param(self):
        if self._gamma_update is not None:
            self._gamma = self._gamma_update(self._gamma)
        if self._lambda_update is not None:
            self._lambda_param = self._lambda_update(self._lambda_param) | Update parameters
This method updates the values of the algorithm parameters with the
update methods provided. |
def get_course_data_sharing_consent(username, course_id, enterprise_customer_uuid):
    DataSharingConsent = apps.get_model('consent', 'DataSharingConsent')
return DataSharingConsent.objects.proxied_get(
username=username,
course_id=course_id,
enterprise_customer__uuid=enterprise_customer_uuid
) | Get the data sharing consent object associated with a certain user of a customer for a course.
:param username: The user that grants consent.
:param course_id: The course for which consent is granted.
:param enterprise_customer_uuid: The consent requester.
:return: The data sharing consent object |
def set_read_only(self, value):
if self.__read_only__ != value:
self.__read_only__ = value
self._update_read_only() | Sets whether model could be modified or not |
def create_from_pybankid_exception(cls, exception):
return cls(
"{0}: {1}".format(exception.__class__.__name__, str(exception)),
_exception_class_to_status_code.get(exception.__class__),
) | Class method for initiating from a `PyBankID` exception.
:param bankid.exceptions.BankIDError exception:
:return: The wrapped exception.
:rtype: :py:class:`~FlaskPyBankIDError` |
def evaluate_binop_logical(self, operation, left, right, **kwargs):
        if operation not in self.binops_logical:
            raise ValueError("Invalid logical binary operation '{}'".format(operation))
result = self.binops_logical[operation](left, right)
return bool(result) | Evaluate given logical binary operation with given operands. |
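
The `binops_logical` table itself is not shown in this snippet; below is a minimal sketch of what it could look like (the operation names here are assumptions, not the library's actual keys):

import operator

binops_logical = {
    "OP_AND": lambda left, right: left and right,
    "OP_OR": lambda left, right: left or right,
    "OP_XOR": operator.xor,
}
assert bool(binops_logical["OP_OR"](True, False)) is True
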
def _resubscribe(self, soft=False):
if self.bitfinex_config:
self.send(**self.bitfinex_config)
q_list = []
while True:
try:
identifier, q = self.channel_configs.popitem(last=True if soft else False)
except KeyError:
break
q_list.append((identifier, q.copy()))
            if identifier == 'auth':
                self.send(**q, auth=True)
                continue
            if soft:
                q['event'] = 'unsubscribe'
self.send(**q)
if soft:
for identifier, q in reversed(q_list):
self.channel_configs[identifier] = q
self.send(**q)
else:
for identifier, q in q_list:
self.channel_configs[identifier] = q | Resubscribes to all channels found in self.channel_configs.
:param soft: if True, unsubscribes first.
:return: None |
def dump_file_by_path(self, path, **kwargs):
file = self.state.fs.get(path)
if file is None:
return None
return file.concretize(**kwargs) | Returns the concrete content for a file by path.
:param path: file path as string
:param kwargs: passed to state.solver.eval
:return: file contents as string |
def _prepare_io_handler(self, handler):
logger.debug(" preparing handler: {0!r}".format(handler))
ret = handler.prepare()
logger.debug(" prepare result: {0!r}".format(ret))
if isinstance(ret, HandlerReady):
del self._unprepared_handlers[handler]
prepared = True
elif isinstance(ret, PrepareAgain):
if ret.timeout is not None:
if self._timeout is not None:
self._timeout = min(self._timeout, ret.timeout)
else:
self._timeout = ret.timeout
prepared = False
else:
raise TypeError("Unexpected result type from prepare()")
return prepared | Call the `interfaces.IOHandler.prepare` method and
remove the handler from unprepared handler list when done. |
def fail_fast_imap(pool, call, items):
result_queue = LightQueue(maxsize=len(items))
spawned_threads = set()
def handle_result(finished_thread):
try:
thread_result = finished_thread.wait()
spawned_threads.remove(finished_thread)
result_queue.put((thread_result, None))
except Exception:
spawned_threads.remove(finished_thread)
result_queue.put((None, sys.exc_info()))
for item in items:
gt = pool.spawn(call, item)
spawned_threads.add(gt)
gt.link(handle_result)
while spawned_threads:
result, exc_info = result_queue.get()
if exc_info is not None:
for ongoing_thread in spawned_threads:
ongoing_thread.kill()
eventlet.getcurrent().throw(*exc_info)
yield result | Run a function against each item in a given list, yielding each
function result in turn, where the function call is handled in a
:class:`~eventlet.greenthread.GreenThread` spawned by the provided pool.
If any function raises an exception, all other ongoing threads are killed,
and the exception is raised to the caller.
This function is similar to :meth:`~eventlet.greenpool.GreenPool.imap`.
:param pool: Pool to spawn function threads from
:type pool: eventlet.greenpool.GreenPool
:param call: Function call to make, expecting to receive an item from the
given list |
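
A hypothetical caller, assuming `fail_fast_imap` above is importable; results arrive in completion order, and any exception raised by `fetch` kills the remaining green threads and propagates here:

import eventlet

def fetch(item):
    eventlet.sleep(0.01)  # stand-in for real I/O
    return item * 2

pool = eventlet.GreenPool(10)
results = sorted(fail_fast_imap(pool, fetch, [1, 2, 3]))
assert results == [2, 4, 6]
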
def _prob_match(self, features):
probs = self.kernel.predict_proba(features)
classes = list(self.kernel.classes_)
match_class_position = classes.index(1)
return probs[:, match_class_position] | Compute match probabilities.
Parameters
----------
features : numpy.ndarray
The data to train the model on.
Returns
-------
numpy.ndarray
The match probabilities. |
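
The reason for indexing into `classes_` rather than hard-coding column 1: `predict_proba` orders its columns by the sorted `classes_` attribute, so the positive class is not guaranteed to be the second column. A standalone sklearn sketch:

import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0.1], [0.9], [0.2], [0.8]])
y = np.array([0, 1, 0, 1])
kernel = LogisticRegression().fit(X, y)
match_col = list(kernel.classes_).index(1)
match_probs = kernel.predict_proba(X)[:, match_col]
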
def find_genus(files, database, threads=12):
genus_dict = dict()
    tmpdir = str(time.time()).split('.')[-1]
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
for file_name, fasta in files.items():
        mash.screen(database, fasta,
                    threads=threads,
                    w='',
                    i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            genus = screen_output[0].query_id.split('/')[-3]
            if genus == 'Shigella':
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            genus_dict[file_name] = 'NA'
shutil.rmtree(tmpdir)
return genus_dict | Uses MASH to find the genus of fasta files.
:param files: File dictionary returned by filer method.
:param database: Path to reduced refseq database sketch.
:param threads: Number of threads to run mash with.
:return: genus_dict: Dictionary of genus for each sample. Will return NA if genus could not be found. |
def reject(self, f, *args):
match = self.match(f, *args)
if match:
token = self.peek(0)
raise errors.EfilterParseError(
query=self.tokenizer.source, token=token,
message="Was not expecting a %s here." % token.name) | Like 'match', but throw a parse error if 'f' matches.
This is useful when a parser wants to be strict about specific things
being prohibited. For example, DottySQL bans the use of SQL keywords as
variable names. |
def __look_up_geom(self, geomType):
if geomType.lower() == "point":
return "esriGeometryPoint"
elif geomType.lower() == "polyline":
return "esriGeometryPolyline"
elif geomType.lower() == "polygon":
return "esriGeometryPolygon"
elif geomType.lower() == "multipoint":
return "esriGeometryMultipoint"
else:
return None | compares the geometry object's type verse the JSOn
specs for geometry types
Inputs:
geomType - string - geometry object's type
Returns:
string JSON geometry type or None if not an allowed type |
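
The if/elif chain above is equivalent to a dictionary lookup, which is the more idiomatic form; a sketch (not the library's actual code):

_ESRI_GEOM_TYPES = {
    "point": "esriGeometryPoint",
    "polyline": "esriGeometryPolyline",
    "polygon": "esriGeometryPolygon",
    "multipoint": "esriGeometryMultipoint",
}

def look_up_geom(geom_type):
    return _ESRI_GEOM_TYPES.get(geom_type.lower())
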
def guard_sample(analysis_request):
if analysis_request.getDateSampled() and analysis_request.getSampler():
return True
current_user = api.get_current_user()
return "Sampler" in current_user.getRolesInContext(analysis_request) | Returns whether 'sample' transition can be performed or not. Returns
True only if the analysis request has the DateSampled and Sampler set or if
the user belongs to the Samplers group |
def plot_ants_plane(off_screen=False, notebook=None):
airplane = vtki.PolyData(planefile)
airplane.points /= 10
ant = vtki.PolyData(antfile)
ant.rotate_x(90)
ant.translate([90, 60, 15])
ant_copy = ant.copy()
ant_copy.translate([30, 0, -10])
plotter = vtki.Plotter(off_screen=off_screen, notebook=notebook)
    plotter.add_mesh(ant, 'r')
    plotter.add_mesh(ant_copy, 'b')
    plane_scalars = airplane.points[:, 1]
    plotter.add_mesh(airplane, scalars=plane_scalars, stitle='Plane Y\nLocation')
    plotter.add_text('Ant and Plane Example')
plotter.plot() | Demonstrate how to create a plot class to plot multiple meshes while
adding scalars and text.
Plot two ants and airplane |
def migrated(name,
remote_addr,
cert,
key,
verify_cert,
src_remote_addr,
stop_and_start=False,
src_cert=None,
src_key=None,
src_verify_cert=None):
    ret = {
        'name': name,
        'remote_addr': remote_addr,
        'cert': cert,
        'key': key,
        'verify_cert': verify_cert,
        'src_remote_addr': src_remote_addr,
        'stop_and_start': stop_and_start,
        'src_cert': src_cert,
        'src_key': src_key,
        'changes': {}
    }
    dest_container = None
    try:
        dest_container = __salt__['lxd.container_get'](
            name, remote_addr, cert, key,
            verify_cert, _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, six.text_type(e))
    except SaltInvocationError as e:
        # Container not found on the destination; we need to migrate it.
        pass
    if dest_container is not None:
        return _success(
            ret,
            'Container "{0}" already migrated'.format(name)
        )
    if src_verify_cert is None:
        src_verify_cert = verify_cert
    try:
        __salt__['lxd.container_get'](
            name, src_remote_addr, src_cert, src_key, src_verify_cert, _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, six.text_type(e))
    except SaltInvocationError as e:
        return _error(ret, 'Source container "{0}" not found'.format(name))
    if __opts__['test']:
        ret['changes']['migrated'] = (
            'Would migrate the container "{0}" from "{1}" to "{2}"'
        ).format(name, src_remote_addr, remote_addr)
        return _unchanged(ret, ret['changes']['migrated'])
    try:
        __salt__['lxd.container_migrate'](
            name, stop_and_start, remote_addr, cert, key,
            verify_cert, src_remote_addr, src_cert, src_key, src_verify_cert
        )
    except CommandExecutionError as e:
        return _error(ret, six.text_type(e))
    ret['changes']['migrated'] = (
        'Migrated the container "{0}" from "{1}" to "{2}"'
    ).format(name, src_remote_addr, remote_addr)
    return _success(ret, ret['changes']['migrated']) | Ensure a container is migrated to another host
If the container is running, it either must be shut down
first (use stop_and_start=True) or criu must be installed
on the source and destination machines.
For this operation both certs need to be authenticated,
use :mod:`lxd.authenticate <salt.states.lxd.authenticate>`
to authenticate your cert(s).
name :
The container to migrate
remote_addr :
A URL to the destination remote server
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM formatted SSL certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM formatted SSL key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert; True by default,
but in most cases you want to set it to False, as LXD
normally uses self-signed certificates.
src_remote_addr :
A URL to the source remote server
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
stop_and_start:
Stop the container before migrating and start it afterwards
src_cert :
PEM formatted SSL certificate; if None we copy "cert"
Examples:
~/.config/lxc/client.crt
src_key :
PEM formatted SSL key; if None we copy "key"
Examples:
~/.config/lxc/client.key
src_verify_cert :
Whether to verify the cert; if None we copy "verify_cert" |
def hicpro_mapping_chart (self):
keys = OrderedDict()
keys[] = { : , : }
keys[] = { : , : }
keys[] = { : , : }
data = [{},{}]
for s_name in self.hicpro_data:
for r in [1,2]:
data[r-1][.format(s_name, r)] = {
: self.hicpro_data[s_name][.format(r)],
: self.hicpro_data[s_name][.format(r)],
: int(self.hicpro_data[s_name][.format(r)]) - int(self.hicpro_data[s_name][.format(r)])
}
config = {
: ,
: ,
: ,
: ,
: [
{: , : },
{: , : }
]
}
return bargraph.plot(data, [keys, keys], config) | Generate the HiC-Pro Aligned reads plot |
def find(soup, name=None, attrs=None, recursive=True, text=None, **kwargs):
tags = find_all(
soup, name, attrs or {}, recursive, text, 1, **kwargs
)
if tags:
return tags[0] | Modified find method; see `find_all`, above. |
def timestamp_pb(self):
inst = self if self.tzinfo is not None else self.replace(tzinfo=pytz.UTC)
delta = inst - _UTC_EPOCH
seconds = int(delta.total_seconds())
nanos = self._nanosecond or self.microsecond * 1000
return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos) | Return a timestamp message.
Returns:
(:class:`~google.protobuf.timestamp_pb2.Timestamp`): Timestamp message |
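
The seconds/nanos split used above, shown standalone; this assumes `_UTC_EPOCH` is `datetime(1970, 1, 1, tzinfo=pytz.UTC)`:

import datetime
import pytz

_UTC_EPOCH = datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)
dt = datetime.datetime(2020, 1, 1, microsecond=500000, tzinfo=pytz.UTC)
seconds = int((dt - _UTC_EPOCH).total_seconds())
nanos = dt.microsecond * 1000
assert (seconds, nanos) == (1577836800, 500000000)
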
def bind(cls):
super(cls, cls).bind()
cls.search_btn_el.bind("click", cls.start)
cls.input_el.bind("keypress", func_on_enter(cls.start)) | Bind the buttons to adapter's event handler. |
def error_response(self, e):
self.add_compliance_header()
return self.make_response(*e.image_server_response(self.api_version)) | Make response for an IIIFError e.
Also add compliance header. |
def run(self):
self.close_connection = False
try:
while True:
self.started_response = False
self.status = ""
self.outheaders = []
self.sent_headers = False
self.chunked_write = False
self.write_buffer = StringIO.StringIO()
self.content_length = None
ENVIRON = self.environ = self.connection_environ.copy()
self.environ.update(self.server_environ)
request_line = yield self.connfh.readline()
if request_line == "\r\n":
tolerance = 5
while tolerance and request_line == "\r\n":
request_line = yield self.connfh.readline()
tolerance -= 1
if not tolerance:
return
method, path, req_protocol = request_line.strip().split(" ", 2)
ENVIRON["REQUEST_METHOD"] = method
ENVIRON["CONTENT_LENGTH"] =
scheme, location, path, params, qs, frag = urlparse(path)
if frag:
yield self.simple_response("400 Bad Request",
"Illegal
return
if scheme:
ENVIRON["wsgi.url_scheme"] = scheme
if params:
path = path + ";" + params
ENVIRON["SCRIPT_NAME"] = ""
atoms = [unquote(x) for x in quoted_slash.split(path)]
path = "%2F".join(atoms)
ENVIRON["PATH_INFO"] = path
ENVIRON["QUERY_STRING"] = qs
rp = int(req_protocol[5]), int(req_protocol[7])
server_protocol = ENVIRON["ACTUAL_SERVER_PROTOCOL"]
sp = int(server_protocol[5]), int(server_protocol[7])
if sp[0] != rp[0]:
yield self.simple_response("505 HTTP Version Not Supported")
return
ENVIRON["SERVER_PROTOCOL"] = req_protocol
self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
if location:
ENVIRON["SERVER_NAME"] = location
try:
while True:
line = yield self.connfh.readline()
                        if line == "\r\n":
break
                        if line[0] in " \t":
assert not self.sent_headers
self.sent_headers = True
yield sockets.SendAll(self.conn,
self.render_headers()+self.write_buffer.getvalue()
)
offset = response.filelike.tell()
if self.chunked_write:
fsize = os.fstat(response.filelike.fileno()).st_size
yield sockets.SendAll(self.conn, hex(int(fsize-offset))+"\r\n")
yield self.conn.sendfile(
response.filelike,
blocksize=response.blocksize,
offset=offset,
length=self.content_length,
timeout=self.sendfile_timeout
)
if self.chunked_write:
yield sockets.SendAll(self.conn, "\r\n")
if hasattr(socket, "TCP_CORK"):
self.conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 0)
else:
for chunk in response:
if chunk:
                            assert self.started_response, "App sended a value but hasn't called start_response"
if self.chunked_write:
yield sockets.SendAll(self.conn, "0\r\n\r\n")
if self.close_connection:
return
except (socket.error, OSError, pywinerror), e:
errno = e.args[0]
if errno not in useless_socket_errors:
yield self.simple_response("500 Internal Server Error",
format_exc())
return
except (OperationTimeout, ConnectionClosed, SocketError):
return
except (KeyboardInterrupt, SystemExit, GeneratorExit, MemoryError):
raise
except:
if not self.started_response:
yield self.simple_response(
"500 Internal Server Error",
format_exc()
)
else:
print "*" * 60
traceback.print_exc()
print "*" * 60
sys.exc_clear()
finally:
self.conn.close()
ENVIRON = self.environ = None | A bit bulky atm... |
def _make_key(self):
value = (self._opener,
self._args,
if self._mode == else self._mode,
tuple(sorted(self._kwargs.items())))
return _HashedSequence(value) | Make a key for caching files in the LRU cache. |
def word2vec(
train,
output,
size=100,
window=5,
sample="1e-3",
hs=0,
negative=5,
threads=12,
iter_=5,
min_count=5,
alpha=0.025,
debug=2,
binary=1,
cbow=1,
save_vocab=None,
read_vocab=None,
verbose=False,
):
command = ["word2vec"]
args = [
"-train",
"-output",
"-size",
"-window",
"-sample",
"-hs",
"-negative",
"-threads",
"-iter",
"-min-count",
"-alpha",
"-debug",
"-binary",
"-cbow",
]
values = [
train,
output,
size,
window,
sample,
hs,
negative,
threads,
iter_,
min_count,
alpha,
debug,
binary,
cbow,
]
for arg, value in zip(args, values):
command.append(arg)
command.append(str(value))
if save_vocab is not None:
command.append("-save-vocab")
command.append(str(save_vocab))
if read_vocab is not None:
command.append("-read-vocab")
command.append(str(read_vocab))
run_cmd(command, verbose=verbose) | word2vec execution
Parameters for training:
train <file>
Use text data from <file> to train the model
output <file>
Use <file> to save the resulting word vectors / word clusters
size <int>
Set size of word vectors; default is 100
window <int>
Set max skip length between words; default is 5
sample <float>
Set threshold for occurrence of words. Those that appear with
higher frequency in the training data will be randomly
down-sampled; default is 0 (off), useful value is 1e-5
hs <int>
Use Hierarchical Softmax; default is 1 (0 = not used)
negative <int>
Number of negative examples; default is 0, common values are 5 - 10
(0 = not used)
threads <int>
Use <int> threads (default 1)
min_count <int>
This will discard words that appear less than <int> times; default
is 5
alpha <float>
Set the starting learning rate; default is 0.025
debug <int>
Set the debug mode (default = 2 = more info during training)
binary <int>
Save the resulting vectors in binary mode; default is 0 (off)
cbow <int>
Use the continuous bag-of-words model; default is 1 (use 0 for
skip-gram model)
save_vocab <file>
The vocabulary will be saved to <file>
read_vocab <file>
The vocabulary will be read from <file>, not constructed from the
training data
verbose
Print output from training |
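
A hypothetical invocation; the file paths are placeholders, and the `word2vec` binary must be on PATH for `run_cmd` to find it:

word2vec(
    train="corpus.txt",
    output="vectors.bin",
    size=300,
    window=8,
    negative=10,
    iter_=10,
    min_count=5,
    verbose=True,
)
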
def get(self, name, default=None):
session = self.__get_session_from_db()
return session.get(name, default) | Gets the object for "name", or None if there's no such object. If
"default" is provided, return it if no object is found. |
def get_computation(self,
message: Message,
                        transaction_context: BaseTransactionContext) -> BaseComputation:
if self.computation_class is None:
raise AttributeError("No `computation_class` has been set for this State")
else:
computation = self.computation_class(self, message, transaction_context)
return computation | Return a computation instance for the given `message` and `transaction_context` |
def _GetUsernameFromProfilePath(self, path):
        while path and path[-1] == '\\':
            path = path[:-1]
        if path:
            _, _, path = path.rpartition('\\')
return path | Retrieves the username from a Windows profile path.
Trailing path segment separators are ignored.
Args:
path (str): a Windows path with '\\' as path segment separator.
Returns:
str: basename which is the last path segment. |
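
A behaviour sketch of the same logic, shown standalone so the trailing-separator handling is easy to test:

def username_from_profile_path(path):
    while path and path[-1] == '\\':
        path = path[:-1]
    return path.rpartition('\\')[2] if path else path

assert username_from_profile_path('C:\\Users\\hans\\') == 'hans'
assert username_from_profile_path('C:\\Users\\ana') == 'ana'
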
def is_depsignal_handler(class_, signal_name, cb, *, defer=False):
try:
handlers = get_magic_attr(cb)
except AttributeError:
return False
return _depsignal_spec(class_, signal_name, cb, defer) in handlers | Return true if `cb` has been decorated with :func:`depsignal` for the given
signal, class and connection mode. |
def _handle_next_task(self):
self._idle_since = None
while True:
self._purge_children()
try:
task_id, status, expl, missing, new_requirements = (
self._task_result_queue.get(
timeout=self._config.wait_interval))
except Queue.Empty:
return
task = self._scheduled_tasks[task_id]
if not task or task_id not in self._running_tasks:
continue
external_task_retryable = _is_external(task) and self._config.retry_external_tasks
if status == FAILED and not external_task_retryable:
self._email_task_failure(task, expl)
new_deps = []
if new_requirements:
new_req = [load_task(module, name, params)
for module, name, params in new_requirements]
for t in new_req:
self.add(t)
new_deps = [t.task_id for t in new_req]
self._add_task(worker=self._id,
task_id=task_id,
status=status,
expl=json.dumps(expl),
resources=task.process_resources(),
runnable=None,
params=task.to_str_params(),
family=task.task_family,
module=task.task_module,
new_deps=new_deps,
assistant=self._assistant,
retry_policy_dict=_get_retry_policy_dict(task))
self._running_tasks.pop(task_id)
if missing:
reschedule = True
for task_id in missing:
self.unfulfilled_counts[task_id] += 1
if (self.unfulfilled_counts[task_id] >
self._config.max_reschedules):
reschedule = False
if reschedule:
self.add(task)
self.run_succeeded &= (status == DONE) or (len(new_deps) > 0)
return | We have to catch three ways a task can be "done":
1. normal execution: the task runs/fails and puts a result back on the queue,
2. new dependencies: the task yielded new deps that were not complete and
will be rescheduled and dependencies added,
3. child process dies: we need to catch this separately. |
def local_open(url):
scheme, server, path, param, query, frag = urlparse(url)
filename = url2pathname(path)
if os.path.isfile(filename):
return urllib2.urlopen(url)
    elif path.endswith('/') and os.path.isdir(filename):
        files = []
        for f in os.listdir(filename):
            if f == 'index.html':
                with open(os.path.join(filename, f), 'r') as fp:
                    body = fp.read()
                break
            elif os.path.isdir(os.path.join(filename, f)):
                f += '/'
            files.append("<a href=%r>%s</a>" % (f, f))
        else:
            body = ("<html><head><title>%s</title>" % url) + \
                "</head><body>%s</body></html>" % '\n'.join(files)
        status, message = 200, "OK"
    else:
        status, message, body = 404, "Path not found", "Not found"
    headers = {'content-type': 'text/html'}
    return HTTPError(url, status, message, headers, StringIO(body)) | Read a local path, with special support for directories |
def _encode_long(name, value, dummy0, dummy1):
try:
return b"\x12" + name + _PACK_LONG(value)
except struct.error:
raise OverflowError("BSON can only handle up to 8-byte ints") | Encode a python long (python 2.x) |
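
A sketch of the assumed `_PACK_LONG` helper: BSON element type `0x12` is a little-endian signed 8-byte integer, so packing with `struct` raises `struct.error` exactly when the value exceeds int64 range:

import struct

_PACK_LONG = struct.Struct("<q").pack  # assumed definition

try:
    _PACK_LONG(2 ** 63)  # one past the int64 maximum
except struct.error:
    pass  # the case the OverflowError above guards against
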
def _update_message(self, sending_cluster):
objective_cluster = self.objective[sending_cluster.cluster_variables]
for current_intersect in sending_cluster.intersection_sets_for_cluster_c:
objective_cluster += self.objective[current_intersect]
updated_results = []
objective = []
for current_intersect in sending_cluster.intersection_sets_for_cluster_c:
phi = objective_cluster.maximize(list(sending_cluster.cluster_variables - current_intersect),
inplace=False)
intersection_length = len(sending_cluster.intersection_sets_for_cluster_c)
phi *= (1 / intersection_length)
objective.append(phi)
updated_results.append(phi + -1 * (self.objective[current_intersect] + -1 * sending_cluster.
message_from_cluster[current_intersect]))
index = -1
cluster_potential = copy.deepcopy(sending_cluster.cluster_potential)
for current_intersect in sending_cluster.intersection_sets_for_cluster_c:
index += 1
sending_cluster.message_from_cluster[current_intersect] = updated_results[index]
self.objective[current_intersect] = objective[index]
cluster_potential += (-1) * updated_results[index]
self.objective[sending_cluster.cluster_variables] = cluster_potential | This is the message-update method.
Parameters
----------
sending_cluster: The resulting messages are lambda_{c-->s} from the given
cluster 'c' to all of its intersection_sets 's'.
Here 's' are the elements of intersection_sets_for_cluster_c.
Reference
---------
Fixing Max-Product: Convergent Message-Passing Algorithms for MAP LP Relaxations
by Amir Globerson and Tommi Jaakkola.
Section 6, Page: 5; Beyond pairwise potentials: Generalized MPLP
Later Modified by Sontag in "Introduction to Dual decomposition for Inference" Pg: 7 & 17 |
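
For reference, the update implemented above is the generalized MPLP message from the cited paper (Globerson & Jaakkola, Section 6): for each intersection set $s \in S(c)$ of cluster $c$,

$$\lambda_{c \to s}(x_s) = -\lambda_s^{-c}(x_s) + \frac{1}{|S(c)|} \max_{x_{c \setminus s}} \Big[\theta_c(x_c) + \sum_{\hat{s} \in S(c)} \lambda_{\hat{s}}^{-c}(x_{\hat{s}})\Big]$$

where $\theta_c$ is the cluster potential and $\lambda_s^{-c}$ (the sum of messages into $s$ from clusters other than $c$) corresponds to `self.objective[current_intersect] - sending_cluster.message_from_cluster[current_intersect]` in the code.
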
def distance_to_semi_arc(alon, alat, aazimuth, plons, plats):
if type(plons) is float:
plons = numpy.array([plons])
plats = numpy.array([plats])
azimuth_to_target = azimuth(alon, alat, plons, plats)
idx = numpy.nonzero(numpy.cos(
numpy.radians((aazimuth-azimuth_to_target))) > 0.0)
idx_not = numpy.nonzero(numpy.cos(
numpy.radians((aazimuth-azimuth_to_target))) <= 0.0)
idx_ll_quadr = numpy.nonzero(
(numpy.cos(numpy.radians((aazimuth-azimuth_to_target))) <= 0.0) &
(numpy.sin(numpy.radians((aazimuth-azimuth_to_target))) > 0.0))
distance = numpy.zeros_like(plons)
if len(idx):
distance_to_target = geodetic_distance(alon, alat,
plons[idx], plats[idx])
t_angle = (azimuth_to_target[idx] - aazimuth + 360) % 360
angle = numpy.arccos((numpy.sin(numpy.radians(t_angle)) *
numpy.sin(distance_to_target /
EARTH_RADIUS)))
distance[idx] = (numpy.pi / 2 - angle) * EARTH_RADIUS
if len(idx_not):
distance[idx_not] = geodetic_distance(alon, alat,
plons[idx_not], plats[idx_not])
distance[idx_ll_quadr] = -1 * distance[idx_ll_quadr]
return distance | In this method we use a reference system centerd on (alon, alat) and with
the y-axis corresponding to aazimuth direction to calculate the minimum
distance from a semiarc with generates in (alon, alat).
Parameters are the same as for :func:`distance_to_arc`. |
def set_timeout(name, value, power='ac', scheme=None):
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}}
    name = name.lower()
    if name not in ['monitor', 'disk', 'standby', 'hibernate']:
        ret['result'] = False
        ret['comment'] = '"{0}" is not a valid setting'.format(name)
        log.debug(ret['comment'])
        return ret
    power = power.lower()
    if power not in ['ac', 'dc']:
        ret['result'] = False
        ret['comment'] = '"{0}" is not a power type'.format(power)
        log.debug(ret['comment'])
        return ret
    old = __salt__['powercfg.get_{0}_timeout'.format(name)](scheme=scheme)
    if old[power] == value:
        ret['comment'] = '{0} timeout on {1} power is already set to {2}' \
            .format(name.capitalize(), power.upper(), value)
        return ret
    else:
        ret['comment'] = '{0} timeout on {1} power will be set to {2}' \
            .format(name.capitalize(), power.upper(), value)
    if __opts__['test']:
        ret['result'] = None
        return ret
    __salt__['powercfg.set_{0}_timeout'.format(name)](
        timeout=value,
        power=power,
        scheme=scheme)
    new = __salt__['powercfg.get_{0}_timeout'.format(name)](scheme=scheme)
    changes = salt.utils.data.compare_dicts(old, new)
    if changes:
        ret['changes'] = {name: changes}
        ret['comment'] = '{0} timeout on {1} power set to {2}' \
            .format(name.capitalize(), power.upper(), value)
        log.debug(ret['comment'])
    else:
        ret['changes'] = {}
        ret['comment'] = 'Failed to set {0} timeout on {1} power to {2}' \
            .format(name, power.upper(), value)
        log.debug(ret['comment'])
        ret['result'] = False
    return ret | Set the sleep timeouts of specific items such as disk, monitor, etc.
Args:
name (str)
The setting to change, can be one of the following:
- ``monitor``
- ``disk``
- ``standby``
- ``hibernate``
value (int):
The amount of time in minutes before the item will timeout
power (str):
Set the value for AC or DC power. Default is ``ac``. Valid options
are:
- ``ac`` (AC Power)
- ``dc`` (Battery)
scheme (str):
The scheme to use, leave as ``None`` to use the current. Default is
``None``. This can be the GUID or the Alias for the Scheme. Known
Aliases are:
- ``SCHEME_BALANCED`` - Balanced
- ``SCHEME_MAX`` - Power saver
- ``SCHEME_MIN`` - High performance
CLI Example:
.. code-block:: yaml
# Set monitor timeout to 30 minutes on Battery
monitor:
powercfg.set_timeout:
- value: 30
- power: dc
# Set disk timeout to 10 minutes on AC Power
disk:
powercfg.set_timeout:
- value: 10
- power: ac |
def fast_compare(tree1, tree2):
geta = ast.AST.__getattribute__
work = [(tree1, tree2)]
pop = work.pop
extend = work.extend
exception = TypeError, AttributeError
zipl = zip_longest
type_ = type
list_ = list
while work:
n1, n2 = pop()
try:
            f1 = geta(n1, '_fields')
            f2 = geta(n2, '_fields')
except exception:
if type_(n1) is list_:
extend(zipl(n1, n2))
continue
if n1 == n2:
continue
return False
else:
            f1 = [x for x in f1 if x != 'ctx']
            if f1 != [x for x in f2 if x != 'ctx']:
return False
extend((geta(n1, fname), geta(n2, fname)) for fname in f1)
return True | This is optimized to compare two AST trees for equality.
It makes several assumptions that are currently true for
AST trees used by rtrip, and it doesn't examine the _attributes. |
def _call_cmd_line(self):
try:
logging.info("Calling Popen with: {}".format(self.args))
p = Popen(self.args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError:
raise(RuntimeError("No such command found in PATH"))
self.stdout, self.stderr = p.communicate("\n".encode())
self.stdout = self.stdout.decode("utf-8")
self.stderr = self.stderr.decode("utf-8")
self.returncode = p.returncode | Run the command line tool. |
def wait_for_operation_to_complete(
has_operation_completed, retries=10, delay_bw_retries=5,
delay_before_attempts=10, failover_exc=exception.IloError,
failover_msg=("Operation did not complete even after multiple "
"attempts."), is_silent_loop_exit=False):
retry_count = retries
time.sleep(delay_before_attempts)
while retry_count:
try:
LOG.debug("Calling , retries left: %d",
has_operation_completed.__name__, retry_count)
if has_operation_completed():
break
except exception.IloError:
pass
time.sleep(delay_bw_retries)
retry_count -= 1
else:
LOG.debug("Max retries exceeded with: ",
has_operation_completed.__name__)
if not is_silent_loop_exit:
raise failover_exc(failover_msg) | Attempts the provided operation for a specified number of times.
If it runs out of attempts, then it raises an exception. On success,
it breaks out of the loop.
:param has_operation_completed: the method to retry and it needs to return
a boolean to indicate success or failure.
:param retries: number of times the operation to be (re)tried, default 10
:param delay_bw_retries: delay in seconds before attempting after
each failure, default 5.
:param delay_before_attempts: delay in seconds before beginning any
operation attempt, default 10.
:param failover_exc: the exception which gets raised in case of failure
upon exhausting all the attempts, default IloError.
:param failover_msg: the msg with which the exception gets raised in case
of failure upon exhausting all the attempts.
:param is_silent_loop_exit: decides if exception has to be raised (in case
of failure upon exhausting all the attempts)
or not, default False (will be raised).
:raises: failover_exc, if failure happens even after all the attempts,
default IloError. |
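
A hypothetical usage: poll until a server reports powered-off, retrying every 2 seconds up to 30 times (`get_power_state` is an assumed helper, not part of this module):

def has_server_powered_off():
    return get_power_state() == "off"  # get_power_state is assumed

wait_for_operation_to_complete(
    has_server_powered_off,
    retries=30,
    delay_bw_retries=2,
    failover_msg="Server did not power off in time.",
)
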
def metadata(dataset, node, entityids, extended=False, api_key=None):
api_key = _get_api_key(api_key)
    url = '{}/metadata'.format(USGS_API)
payload = {
"jsonRequest": payloads.metadata(dataset, node, entityids, api_key=api_key)
}
r = requests.post(url, payload)
response = r.json()
_check_for_usgs_error(response)
if extended:
        metadata_urls = map(_get_metadata_url, response['data'])
        results = _async_requests(metadata_urls)
        data = map(lambda idx: _get_extended(response['data'][idx], results[idx]), range(len(response['data'])))
return response | Request metadata for a given scene in a USGS dataset.
:param dataset:
:param node:
:param entityids:
:param extended:
Send a second request to the metadata url to get extended metadata on the scene.
:param api_key: |
def WriteOutput(self, output_file, feed_merger,
old_feed_path, new_feed_path, merged_feed_path):
if merged_feed_path is None:
html_merged_feed_path =
else:
html_merged_feed_path = % (
merged_feed_path)
html_header = % locals()
html_stats = self._GenerateStatsTable(feed_merger)
html_summary = self._GenerateSummary()
html_notices = self._GenerateNotices()
html_errors = self._GenerateSection(transitfeed.TYPE_ERROR)
html_warnings = self._GenerateSection(transitfeed.TYPE_WARNING)
html_footer = % (transitfeed.__version__,
time.strftime())
output_file.write(transitfeed.EncodeUnicode(html_header))
output_file.write(transitfeed.EncodeUnicode(html_stats))
output_file.write(transitfeed.EncodeUnicode(html_summary))
output_file.write(transitfeed.EncodeUnicode(html_notices))
output_file.write(transitfeed.EncodeUnicode(html_errors))
output_file.write(transitfeed.EncodeUnicode(html_warnings))
output_file.write(transitfeed.EncodeUnicode(html_footer)) | Write the HTML output to a file.
Args:
output_file: The file object that the HTML output will be written to.
feed_merger: The FeedMerger instance.
old_feed_path: The path to the old feed file as a string.
new_feed_path: The path to the new feed file as a string
merged_feed_path: The path to the merged feed file as a string. This
may be None if no merged feed was written. |
def is_valid_line(self, line):
adjusted_line = line.strip().lower()
return any([
adjusted_line.startswith(directive)
for directive in directives_by_section[self.section_name]
]) | Validates a given line against the associated "section" (e.g. 'global'
or 'frontend', etc.) of a stanza.
If a line represents a directive that shouldn't be within the stanza
it is rejected. See the `directives.json` file for a condensed look
at valid directives based on section. |
def addMethod(self, m):
if m.nargs == -1:
m.nargs = len([a for a in marshal.genCompleteTypes(m.sigIn)])
m.nret = len([a for a in marshal.genCompleteTypes(m.sigOut)])
self.methods[m.name] = m
self._xml = None | Adds a L{Method} to the interface |
def _callRestartAgent(self, ev_data: RestartLogData, failTimeout) -> None:
logger.info("{}'s restart calling agent for restart".format(self))
self._actionLog.append_started(ev_data)
self._action_start_callback()
self.scheduledAction = None
asyncio.ensure_future(
self._sendUpdateRequest(ev_data, failTimeout)) | Callback which is called when restart time come.
Writes restart record to restart log and asks
node control service to perform restart
:param ev_data: restart event data |
def on_api_socket_reconnected(self):
resub_count = 0
subtype_list = []
code_list = []
resub_dict = copy(self._ctx_subscribe)
subtype_all_cnt = len(resub_dict.keys())
subtype_cur_cnt = 0
ret_code = RET_OK
        ret_msg = ''
for subtype in resub_dict.keys():
subtype_cur_cnt += 1
code_set = resub_dict[subtype]
code_list_new = [code for code in code_set]
if len(code_list_new) == 0:
continue
if len(code_list) == 0:
code_list = code_list_new
subtype_list = [subtype]
is_need_sub = False
if code_list == code_list_new:
if subtype not in subtype_list:
subtype_list.append(subtype)
else:
ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)
logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format(
len(code_list), ret_code, ret_msg, subtype_list, code_list))
if ret_code != RET_OK:
break
resub_count += len(code_list)
code_list = code_list_new
subtype_list = [subtype]
if subtype_cur_cnt == subtype_all_cnt and len(code_list):
ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)
logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format(len(code_list), ret_code, ret_msg, subtype_list, code_list))
if ret_code != RET_OK:
break
resub_count += len(code_list)
code_list = []
subtype_list = []
logger.debug("reconnect subscribe all code_count={} ret_code={} ret_msg={}".format(resub_count, ret_code, ret_msg))
if ret_code != RET_OK:
logger.error("reconnect subscribe error, close connect and retry!!")
self._status = ContextStatus.Start
self._wait_reconnect()
return ret_code, ret_msg | for API socket reconnected |
def _execute(self,
native,
command,
data=None,
returning=True,
mapper=dict):
if data is None:
data = {}
with native.cursor() as cursor:
            log.debug('------------------------------------------------')
            log.debug(command % data)
            log.debug('------------------------------------------------')
try:
rowcount = 0
                for cmd in command.split(';'):
                    cmd = cmd.strip()
                    if cmd:
                        cursor.execute(cmd + ';', data)
rowcount += cursor.rowcount
except pymysql.InterfaceError:
raise orb.errors.ConnectionLost()
except (pymysql.IntegrityError, pymysql.OperationalError) as err:
native.rollback()
if err[0] == 1062:
raise orb.errors.DuplicateEntryFound(err[1])
reference_error = re.search(, nstr(err))
if reference_error:
msg =
raise orb.errors.CannotDelete(msg)
log.debug(traceback.print_exc())
raise orb.errors.QueryFailed(command, data, nstr(err))
except pymysql.Error as err:
native.rollback()
log.error(traceback.print_exc())
raise orb.errors.QueryFailed(command, data, nstr(err))
try:
raw = cursor.fetchall()
results = [mapper(record) for record in raw]
except pymysql.ProgrammingError:
results = []
return results, rowcount | Executes the inputted command into the current \
connection cursor.
:param command | <str>
data | <dict> || None
:return [{<str> key: <variant>, ..}, ..], <int> count |
def zeroing(dev):
| zeroing last few blocks of device |
def get(self, frame_to, frame_from=None):
if frame_from is None:
frame_from = self.base_frame
        cache_key = str(frame_from) + ':' + str(frame_to)
cached = self._cache[cache_key]
if cached is not None:
return cached
transform = np.eye(4)
path = self._get_path(frame_from, frame_to)
for i in range(len(path) - 1):
data, direction = self.transforms.get_edge_data_direction(
path[i], path[i + 1])
            matrix = data['matrix']
if direction < 0:
matrix = np.linalg.inv(matrix)
transform = np.dot(transform, matrix)
geometry = None
        if 'geometry' in self.transforms.node[frame_to]:
            geometry = self.transforms.node[frame_to]['geometry']
self._cache[cache_key] = (transform, geometry)
return transform, geometry | Get the transform from one frame to another, assuming they are connected
in the transform tree.
If the frames are not connected a NetworkXNoPath error will be raised.
Parameters
---------
frame_from: hashable object, usually a string (eg 'world').
If left as None it will be set to self.base_frame
frame_to: hashable object, usually a string (eg 'mesh_0')
Returns
---------
transform: (4,4) homogenous transformation matrix |
def docker_pull(image):
    args = ['docker', 'pull', image]
    ret = call(args)
    if ret != 0:
        raise DockerError('Failed to pull image {}'.format(image)) | Pulls an image |
def run_cli_options(args):
if _interactive_mode(args.interactive):
os.environ[] =
if in_ipython():
return
    exclusive_choices = [[None, args.command], ['-c', args.string], ['-m', args.module]]
for flag_choice in exclusive_choices:
try:
a = sys.argv.index(flag_choice[0] or flag_choice[1])
except ValueError:
a = 1000
flag_choice.append(a)
exclusive_choices.sort(key=lambda v: v[2])
for i, (flag, choice, _) in enumerate(exclusive_choices):
if not choice:
continue
sys.argv = [choice] + sys.argv[sys.argv.index(choice)+1:]
        if not flag:
            if choice == 'ipython':
                launch_ipython(argv=sys.argv[1:])
            elif choice == 'notebook':
                launch_notebook()
            else:
                globals().update(runpy.run_path(choice, run_name="__main__"))
        elif flag == '-m':
            if '-m' in sys.argv[1:2]:
                sys.argv.pop(1)
            globals().update(runpy.run_module(choice, run_name="__main__"))
        elif flag == '-c':
            exec choice in globals(), locals()
else:
continue
break | Quick implementation of Python interpreter's -m, -c and file execution.
The resulting dictionary is imported into global namespace, just in case
someone is using interactive mode.
We try to keep argument order as to pass them correctly to the subcommands. |
def on_augassign(self, node):
return self.on_assign(ast.Assign(targets=[node.target],
value=ast.BinOp(left=node.target,
op=node.op,
right=node.value))) | Augmented assign. |
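
The desugaring performed above, shown directly with the `ast` module: `x += 1` becomes `x = x + 1` by reusing the target, op, and value of the `AugAssign` node:

import ast

aug = ast.parse("x += 1").body[0]  # an ast.AugAssign
plain = ast.Assign(
    targets=[aug.target],
    value=ast.BinOp(left=aug.target, op=aug.op, right=aug.value),
)
assert isinstance(plain.value.op, ast.Add)
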
def cls_build(inst, state):
setstate = getattr(inst, "__setstate__", None)
if setstate:
setstate(state)
return inst
slotstate = None
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
    if state:
        try:
            d = inst.__dict__
            try:
                for k, v in six.iteritems(state):
                    d[six.moves.intern(k)] = v
            except TypeError:
                # keys may not all be strings, so intern() can fail
                for k, v in six.iteritems(state):
                    d[k] = v
        except RuntimeError:
            # __dict__ not writable; fall back to setattr
            for k, v in state.items():
                setattr(inst, k, v)
    if slotstate:
        for k, v in slotstate.items():
            setattr(inst, k, v)
return inst | Apply the setstate protocol to initialize `inst` from `state`.
INPUT:
- ``inst`` -- a raw instance of a class
- ``state`` -- the state to restore; typically a dictionary mapping attribute names to their values
EXAMPLES::
>>> from openmath.convert_pickle import cls_build
>>> class A(object): pass
>>> inst = A.__new__(A)
>>> state = {"foo": 1, "bar": 4}
>>> inst2 = cls_build(inst,state)
>>> inst is inst2
True
>>> inst.foo
1
>>> inst.bar
4 |
def rowsWithin(self, bbox):
ret = {}
for y in range(bbox.ymin, bbox.ymax+1):
for x in range(bbox.xmin, bbox.xmax+1):
for attr, rows in self.pixels[y][x].items():
if attr not in self.hiddenAttrs:
for r in rows:
ret[id(r)] = r
return list(ret.values()) | return list of deduped rows within bbox |
def get_credentials():
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir,
                                   'stored-credentials.json')  # filename is app-specific; placeholder
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else:
credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
return credentials | Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential. |
def _set_mark(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("dscp_in_values",mark.mark, yang_name="mark", rest_name="mark", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: None, u: u, u: None, u: None}}), is_container=, yang_name="mark", rest_name="mark", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: u, u: None, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "list",
: ,
})
self.__mark = t
if hasattr(self, ):
self._set() | Setter method for mark, mapped from YANG variable /qos/map/dscp_mutation/mark (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mark is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mark() directly. |
def hash_reference_links(text, hashes, markdown_obj):
def sub(match):
        is_img = match.group(1) != ''
        content = match.group(2)
        ref = match.group(3).strip().lower()
        if not ref:
            ref = content.strip().lower()
        ref = ref.replace('\n', ' ')
        if ref not in markdown_obj.references:
            link, title = '', ''
        else:
            link, title = markdown_obj.references[ref]
        if title:
            title = ' title="{0}"'.format(title)
        if is_img:
            result = '<img src="{0}" alt="{1}"{2}>'.format(
                link, content, title)
        else:
            result = '<a href="{0}"{2}>{1}</a>'.format(link,
                markdown_obj.convert(content).replace('<p>', '').replace('</p>', '').strip(),
                title)
        hashed = hash_text(result, 'link')
        hashes[hashed] = result
        return hashed
return re_reference_link.sub(sub, text) | Hashes an <a> link or an <img> link.
This function only converts reference link styles:
[text here][ref id]
![alt text here][ref id]
For inline style links, see hash_inline_links.
Reference ids can be defined anywhere in the Markdown text.
Reference ids can also be omitted, in which case te text in the
first box is used as the reference id:
[ref id][]
This is known as an "implicit link" reference. |
def nodes_to_check(self, docs):
nodes_to_check = []
for doc in docs:
            for tag in ['p', 'pre', 'td']:
items = self.parser.getElementsByTag(doc, tag=tag)
nodes_to_check += items
return nodes_to_check | \
returns a list of nodes we want to search
on like paragraphs and tables |
def prepare_method_call(self, method, args):
if self._method_requires_handler_ref(method):
if isinstance(args, list):
args = [self] + args
elif isinstance(args, dict):
args["handler"] = self
if isinstance(args, list):
to_call = partial(method, *args)
elif isinstance(args, dict):
to_call = partial(method, **args)
else:
raise TypeError(
"args must be list or dict but got {} instead".format(type(args).__name__))
return to_call | Wraps a method so that method() will call ``method(*args)`` or ``method(**args)``,
depending of args type
:param method: a callable object (method)
:param args: dict or list with the parameters for the function
:return: a 'patched' callable |
def jacobi( a, n ):
assert n >= 3
assert n%2 == 1
a = a % n
if a == 0: return 0
if a == 1: return 1
a1, e = a, 0
while a1%2 == 0:
a1, e = a1//2, e+1
if e%2 == 0 or n%8 == 1 or n%8 == 7: s = 1
else: s = -1
if a1 == 1: return s
if n%4 == 3 and a1%4 == 3: s = -s
return s * jacobi( n % a1, a1 ) | Jacobi symbol |
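
A quick sanity check via Euler's criterion: for an odd prime p the Jacobi symbol equals the Legendre symbol, so `jacobi(a, p)` must agree with `a**((p-1)//2) mod p` (with -1 represented as p-1):

p = 101
for a in range(1, p):
    euler = pow(a, (p - 1) // 2, p)
    assert jacobi(a, p) == (1 if euler == 1 else -1)
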
def get_value_for_datastore(self, model_instance):
value = super(JsonProperty, self).get_value_for_datastore(model_instance)
if not value:
return None
json_value = value
if not isinstance(value, dict):
json_value = value.to_json()
if not json_value:
return None
return datastore_types.Text(json.dumps(
json_value, sort_keys=True, cls=JsonEncoder)) | Gets value for datastore.
Args:
model_instance: instance of the model class.
Returns:
datastore-compatible value. |
def get_conn(self):
if self.session and not self.session.is_shutdown:
return self.session
self.session = self.cluster.connect(self.keyspace)
return self.session | Returns a cassandra Session object |
def get_form_field_dict(self, model_dict):
return_dict = OrderedDict()
return return_dict | Takes a model dictionary representation and creates a dictionary
keyed by form field. Each value is a keyed 4 tuple of:
(widget, mode_field_instance, model_field_type, field_key) |
def add_my_api_key_to_groups(self, body, **kwargs):
kwargs[] = True
if kwargs.get():
return self.add_my_api_key_to_groups_with_http_info(body, **kwargs)
else:
(data) = self.add_my_api_key_to_groups_with_http_info(body, **kwargs)
return data | Add API key to a list of groups. # noqa: E501
An endpoint for adding API key to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/api-keys/me/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.add_my_api_key_to_groups(body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread. |
def read_from_file(path, file_type='text', exception=ScriptWorkerException):
    FILE_TYPE_MAP = {'text': 'r', 'binary': 'rb'}
if file_type not in FILE_TYPE_MAP:
raise exception("Unknown file_type {} not in {}!".format(file_type, FILE_TYPE_MAP))
try:
with open(path, FILE_TYPE_MAP[file_type]) as fh:
return fh.read()
except (OSError, FileNotFoundError) as exc:
raise exception("Can't read_from_file {}: {}".format(path, str(exc))) | Read from ``path``.
Small helper function to read from ``file``.
Args:
path (str): the path to read from.
file_type (str, optional): the type of file. Currently accepts
``text`` or ``binary``. Defaults to ``text``.
exception (Exception, optional): the exception to raise
if unable to read from the file. Defaults to ``ScriptWorkerException``.
Returns:
None: if unable to read from ``path`` and ``exception`` is ``None``
str or bytes: the contents of ``path``
Raises:
Exception: if ``exception`` is set. |
def method(self, value):
keys = self._methods.keys()
if value not in keys:
raise AttributeError("Method value not in " + str(keys))
else:
self._method = value | Before assigning the value validate that is in one of the
HTTP methods we implement |
def createNotification(self, ulOverlayHandle, ulUserValue, type_, pchText, style):
fn = self.function_table.createNotification
pImage = NotificationBitmap_t()
pNotificationId = VRNotificationId()
result = fn(ulOverlayHandle, ulUserValue, type_, pchText, style, byref(pImage), byref(pNotificationId))
return result, pImage, pNotificationId | Create a notification and enqueue it to be shown to the user.
An overlay handle is required to create a notification, as otherwise it would be impossible for a user to act on it.
To create a two-line notification, use a line break ('\n') to split the text into two lines.
The pImage argument may be NULL, in which case the specified overlay's icon will be used instead. |
def _populate_lp(self, dataset, **kwargs):
logger.debug("{}._populate_lp(dataset={})".format(self.component, dataset))
        profile_rest = kwargs.get('profile_rest', self.lp_profile_rest.get(dataset))
rv_cols = self._populate_rv(dataset, **kwargs)
cols = rv_cols
return cols | Populate columns necessary for an LP dataset
This should not be called directly, but rather via :meth:`Body.populate_observable`
or :meth:`System.populate_observables` |
def pick_scalar_condition(pred, true_value, false_value, name=None):
with tf.name_scope(name or "pick_scalar_condition"):
pred = tf.convert_to_tensor(
value=pred, dtype_hint=tf.bool, name="pred")
true_value = tf.convert_to_tensor(value=true_value, name="true_value")
false_value = tf.convert_to_tensor(value=false_value, name="false_value")
pred_ = tf.get_static_value(pred)
if pred_ is None:
return tf.where(pred, true_value, false_value)
return true_value if pred_ else false_value | Convenience function that chooses one of two values based on the predicate.
This utility is equivalent to a version of `tf.where` that accepts only a
scalar predicate and computes its result statically when possible. It may also
be used in place of `tf.cond` when both branches yield a `Tensor` of the same
shape; the operational difference is that `tf.cond` uses control flow to
evaluate only the branch that's needed, while `tf.where` (and thus
this method) may evaluate both branches before the predicate's truth is known.
This means that `tf.cond` is preferred when one of the branches is expensive
to evaluate (like performing a large matmul), while this method is preferred
when both branches are cheap, e.g., constants. In the latter case, we expect
this method to be substantially faster than `tf.cond` on GPU and to give
similar performance on CPU.
Args:
pred: Scalar `bool` `Tensor` predicate.
true_value: `Tensor` to return if `pred` is `True`.
false_value: `Tensor` to return if `pred` is `False`. Must have the same
shape as `true_value`.
name: Python `str` name given to ops managed by this object.
Returns:
result: a `Tensor` (or `Tensor`-convertible Python value) equal to
`true_value` if `pred` evaluates to `True` and `false_value` otherwise.
If the condition can be evaluated statically, the result returned is one
of the input Python values, with no graph side effects. |
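
A usage sketch: with a Python predicate the choice is made statically with no graph ops; inside a `tf.function`, a data-dependent predicate has no static value, so the `tf.where` branch is taken:

import tensorflow as tf

x = pick_scalar_condition(True, 1., 0.)  # resolved statically to 1.

@tf.function
def f(t):
    # t > 0 is unknown at trace time, so tf.where is used.
    return pick_scalar_condition(t > 0, tf.ones([2]), tf.zeros([2]))
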
def get_day_and_year():
pattern_year = r"201[5-9]|202[0-9]"
pattern_day = r"2[0-5]|1[0-9]|[1-9]"
stack = [f[0] for f in traceback.extract_stack()]
for name in stack:
basename = os.path.basename(name)
reasons_to_skip_frame = [
not re.search(pattern_day, basename),
name == __file__,
"importlib" in name,
"/IPython/" in name,
name.startswith("<"),
name.endswith("ython3"),
]
if not any(reasons_to_skip_frame):
abspath = os.path.abspath(name)
break
log.debug("skipping frame %s", name)
else:
import __main__
try:
__main__.__file__
except AttributeError:
log.debug("running within REPL")
day = current_day()
year = most_recent_year()
return day, year
else:
log.debug("non-interactive")
raise AocdError("Failed introspection of filename")
years = {int(year) for year in re.findall(pattern_year, abspath)}
if len(years) > 1:
raise AocdError("Failed introspection of year")
year = years.pop() if years else None
basename_no_years = re.sub(pattern_year, "", basename)
try:
[day] = set(re.findall(pattern_day, basename_no_years))
except ValueError:
pass
else:
assert not day.startswith("0"), "regex pattern_day must prevent any leading 0"
day = int(day)
assert 1 <= day <= 25, "regex pattern_day must only match numbers in range 1-25"
log.debug("year=%d day=%d", year, day)
return day, year
log.debug("giving up introspection for %s", abspath)
raise AocdError("Failed introspection of day") | Returns tuple (day, year).
Here be dragons!
The correct date is determined with introspection of the call stack, first
finding the filename of the module from which ``aocd`` was imported.
This means your filenames should be something sensible, which identify the
day and year unambiguously. The examples below should all parse correctly,
because they have unique digits in the file path that are recognisable as
AoC years (2015+) or days (1-25).
A filename like ``problem_one.py`` will not work, so don't do that. If you
don't like weird frame hacks, just use the ``aocd.get_data()`` function
directly instead and have a nice day! |
def search(cls,
query_string,
options=None,
enable_facet_discovery=False,
return_facets=None,
facet_options=None,
facet_refinements=None,
deadline=None,
**kwargs):
search_class = cls.search_get_class_names()[-1]
query_string += + % (search_class,)
q = search.Query(
query_string=query_string,
options=options,
enable_facet_discovery=enable_facet_discovery,
return_facets=return_facets,
facet_options=facet_options,
facet_refinements=facet_refinements
)
index = cls.search_get_index()
return index.search(q, deadline=deadline, **kwargs) | Searches the index. Conveniently searches only for documents that belong to instances of this class.
:param query_string: The query to match against documents in the index. See search.Query() for details.
:param options: A QueryOptions describing post-processing of search results.
:param enable_facet_discovery: discovery top relevent facets to this search query and return them.
:param return_facets: An iterable of FacetRequest or basestring as facet name to
return specific facet with the result.
:param facet_options: A FacetOption describing processing of facets.
:param facet_refinements: An iterable of FacetRefinement objects or refinement
token strings used to filter out search results based on a facet value.
refinements for different facets will be conjunction and refinements for
the same facet will be disjunction.
:param deadline: Deadline for RPC call in seconds; if None use the default.
:param kwargs: A SearchResults containing a list of documents matched, number returned
and number matched by the query.
:return: A SearchResults containing a list of documents matched, number returned
and number matched by the query.
:raises: QueryError: If the query string is not parseable.
TypeError: If any of the parameters have invalid types, or an unknown
attribute is passed.
ValueError: If any of the parameters have invalid values (e.g., a
negative deadline). |
def _fetch_file(url, file_name, resume=True,
hash_=None, timeout=10., progressbar=True, verbose=True):
if hash_ is not None and (not isinstance(hash_, string_types) or
len(hash_) != 32):
        raise ValueError('Bad hash value given, should be a 32-character '
                         'string: %s' % (hash_,))
temp_file_name = file_name + ".part"
try:
        if 'dropbox.com' in url:
            # Dropbox links need requests to follow redirects properly.
            try:
                import requests
            except ModuleNotFoundError:
                raise ValueError('To download Dropbox links, you need to '
                                 'install the `requests` module.')
            resp = requests.get(url)
            chunk_size = 8192
            with open(temp_file_name, 'wb') as ff:
                for chunk in resp.iter_content(chunk_size=chunk_size):
                    if chunk:
                        ff.write(chunk)
        else:
            u = urllib.request.urlopen(url, timeout=timeout)
            u.close()
            # this is necessary to follow any redirects
            url = u.geturl()
            u = urllib.request.urlopen(url, timeout=timeout)
            try:
                file_size = int(u.headers.get('Content-Length', '1').strip())
            finally:
                u.close()
                del u
            if verbose:
                tqdm.write('Downloading data from %s (%s)'
                           % (url, sizeof_fmt(file_size)))
            if not os.path.exists(temp_file_name):
                resume = False
            if resume:
                with open(temp_file_name, 'rb', buffering=0) as local_file:
                    local_file.seek(0, 2)
                    initial_size = local_file.tell()
                del local_file
            else:
                initial_size = 0
            if initial_size > file_size:
                raise RuntimeError('Local file (%s) is larger than remote '
                                   'file (%s), cancelling resume'
                                   % (sizeof_fmt(initial_size),
                                      sizeof_fmt(file_size)))
            scheme = urllib.parse.urlparse(url).scheme
            fun = _get_http if scheme in ('http', 'https') else _get_ftp
            fun(url, temp_file_name, initial_size, file_size, verbose,
                progressbar, ncols=80)
        if hash_ is not None:
            if verbose:
                tqdm.write('Verifying download hash.')
            md5 = md5sum(temp_file_name)
            if hash_ != md5:
                raise RuntimeError('Hash mismatch for downloaded file %s, '
                                   'expected %s but got %s'
                                   % (temp_file_name, hash_, md5))
        shutil.move(temp_file_name, file_name)
    except Exception as ee:
        raise RuntimeError('Error while fetching file %s.'
                           ' Dataset fetching aborted. Error: %s'
                           % (url, ee)) | Load requested file, downloading it if needed or requested.
Parameters
----------
url: string
The url of file to be downloaded.
file_name: string
Name, along with the path, of where downloaded file will be saved.
resume: bool, optional
If true, try to resume partially downloaded files.
hash_ : str | None
The hash of the file to check. If None, no checking is
performed.
timeout : float
The URL open timeout.
verbose : bool
Whether to print download status. |
def list_locked(**kwargs):
    return ['{0}-{1}'.format(pkgname, version(pkgname, **kwargs))
            for pkgname in _lockcmd('lock', name=None, **kwargs)] | Query the package database for those packages which are
locked against reinstallation, modification or deletion.
Returns a list of package names with version strings.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked
jail
List locked packages within the specified jail
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked jail=<jail name or id>
chroot
List locked packages within the specified chroot (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked chroot=/path/to/chroot
root
List locked packages within the specified root (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked root=/path/to/chroot |
def create(self, repo_name, scm='git', private=True, **kwargs):
    url = self.bitbucket.url('CREATE_REPO')
    return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, name=repo_name, scm=scm, is_private=private, **kwargs) | Creates a new repository on own Bitbucket account and return it. |
def _init_usrgos(self, goids):
usrgos = set()
goids_missing = set()
_go2obj = self.gosubdag.go2obj
for goid in goids:
if goid in _go2obj:
usrgos.add(goid)
else:
goids_missing.add(goid)
if goids_missing:
print("MISSING GO IDs: {GOs}".format(GOs=goids_missing))
print("{N} of {M} GO IDs ARE MISSING".format(N=len(goids_missing), M=len(goids)))
return usrgos | Return user GO IDs which have GO Terms. |
def finite_pixels(self):
finite_px = np.where(np.isfinite(self.data))
finite_px = np.c_[finite_px[0], finite_px[1]]
return finite_px | Return an array of the finite pixels.
Returns
-------
:obj:`numpy.ndarray`
Nx2 array of the finite pixels |
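A minimal standalone sketch of the same NumPy idiom, independent of the class above:

import numpy as np

data = np.array([[1.0, np.nan],
                 [np.inf, 4.0]])
rows, cols = np.where(np.isfinite(data))  # row and column indices of finite entries
print(np.c_[rows, cols])                  # [[0 0] [1 1]] -- the Nx2 pixel array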
def get_strip_metadata(self, catID):
    self.logger.debug('Retrieving strip catalog metadata')
    # endpoint path and log messages reconstructed; original literals were elided
    url = '%(base_url)s/record/%(catID)s' % {
        'base_url': self.base_url, 'catID': catID
    }
    r = self.gbdx_connection.get(url)
    if r.status_code == 200:
        return r.json()['properties']
    elif r.status_code == 404:
        self.logger.debug('Strip not found: %s' % catID)
        r.raise_for_status()
    else:
        self.logger.debug('There was a problem retrieving catid: %s' % catID)
        r.raise_for_status() | Retrieves the strip catalog metadata given a cat ID.
Args:
catID (str): The source catalog ID from the platform catalog.
Returns:
metadata (dict): A metadata dictionary.
TODO: have this return a class object with interesting information exposed. |
def max_pathlen(self):
pathlen = self.pathlen
if self.parent is None:
return pathlen
max_parent = self.parent.max_pathlen
if max_parent is None:
return pathlen
elif pathlen is None:
return max_parent - 1
else:
return min(self.pathlen, max_parent - 1) | The maximum pathlen for any intermediate CAs signed by this CA.
This value is either ``None``, if this and all parent CAs don't have a ``pathlen`` attribute, or an
``int`` if any parent CA has the attribute. |
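A self-contained toy model of the recursion (the CA class here is illustrative, not the library's):

class CA:
    def __init__(self, pathlen=None, parent=None):
        self.pathlen, self.parent = pathlen, parent

    @property
    def max_pathlen(self):
        if self.parent is None:
            return self.pathlen
        max_parent = self.parent.max_pathlen
        if max_parent is None:
            return self.pathlen
        if self.pathlen is None:
            return max_parent - 1        # constraint shrinks by one per level
        return min(self.pathlen, max_parent - 1)

root = CA(pathlen=2)
child = CA(parent=root)
print(child.max_pathlen)             # 1
print(CA(parent=child).max_pathlen)  # 0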
def add(self, rid, data, raise_on_error=True):
        cache_data = {'cache-date': self._dt_to_epoch(datetime.now()), 'cache-data': data}  # key names are assumed; originals elided
return self.ds.post(rid, cache_data, raise_on_error) | Write cache data to the data store.
Args:
rid (str): The record identifier.
data (dict): The record data.
raise_on_error (bool): If True and not r.ok this method will raise a RuntimeError.
Returns:
object : Python request response. |
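A hypothetical call, assuming `cache` is an instance of this class wired to a data store (names are illustrative):

response = cache.add(
    rid="user-42",                       # record identifier
    data={"name": "pytest", "ttl": 60},  # payload stored with the write timestamp
    raise_on_error=False,                # hand back the response instead of raising
)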
def GET_close_server(self) -> None:
def _close_server():
self.server.shutdown()
self.server.server_close()
shutter = threading.Thread(target=_close_server)
    shutter.daemon = True
shutter.start() | Stop and close the *HydPy* server. |
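The helper thread is the point here: shutdown() blocks until serve_forever() exits, so calling it directly from the thread serving the current request would deadlock. A self-contained sketch of the same pattern with the standard library:

import http.server
import threading

server = http.server.HTTPServer(("127.0.0.1", 0), http.server.BaseHTTPRequestHandler)
threading.Thread(target=server.serve_forever, daemon=True).start()

def _close_server():
    server.shutdown()      # blocks until serve_forever() returns
    server.server_close()  # then release the socket

shutter = threading.Thread(target=_close_server)
shutter.daemon = True
shutter.start()
shutter.join()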
def info(vm, info_type='all', key='uuid'):
    ret = {}
    if info_type not in ['all', 'block', 'blockstats', 'chardev', 'cpus',
                         'kvm', 'pci', 'spice', 'version', 'vnc']:
        ret['Error'] = 'Requested info_type is not available'
        return ret
    if key not in ['uuid', 'alias', 'hostname']:
        ret['Error'] = 'Key must be either uuid, alias or hostname'
        return ret
    vm = lookup('{0}={1}'.format(key, vm), one=True)
    if 'Error' in vm:
        return vm
    cmd = 'vmadm info {uuid} {type}'.format(
        uuid=vm,
        type=info_type
    )
    res = __salt__['cmd.run_all'](cmd)
    retcode = res['retcode']
    if retcode != 0:
        ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
        return ret
    return salt.utils.json.loads(res['stdout']) | Lookup info on running kvm
vm : string
vm to be targeted
info_type : string [all|block|blockstats|chardev|cpus|kvm|pci|spice|version|vnc]
info type to return
key : string [uuid|alias|hostname]
value type of 'vm' parameter
CLI Example:
.. code-block:: bash
salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543
salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 vnc
salt '*' vmadm.info nacl key=alias
salt '*' vmadm.info nacl vnc key=alias |
def get_ts_stats_significance(self, x, ts, stat_ts_func, null_ts_func, B=1000, permute_fast=False, label_ts=''):  # label_ts default assumed; original elided
stats_ts, pvals, nums = ts_stats_significance(
ts, stat_ts_func, null_ts_func, B=B, permute_fast=permute_fast)
return stats_ts, pvals, nums | Returns the statistics, pvalues and the actual number of bootstrap
samples. |
def synchelp(f):
def wrap(*args, **kwargs):
coro = f(*args, **kwargs)
if not iAmLoop():
return sync(coro)
return coro
return wrap | The synchelp decorator allows the transparent execution of
a coroutine using the global loop from a thread other than
the event loop. In both use cases, the actual work is done
by the global event loop.
Examples:
Use as a decorator::
@s_glob.synchelp
async def stuff(x, y):
await dostuff()
Calling the stuff function as regular async code using the standard await syntax::
valu = await stuff(x, y)
Calling the stuff function as regular sync code outside of the event loop thread::
valu = stuff(x, y) |
def on_message(self, fragment):
try:
message = yield self._receive(fragment)
except Exception as e:
log.error("Unhandled exception receiving a message: %r: %r", e, fragment, exc_info=True)
self._internal_error("server failed to parse a message")
try:
if message:
if _message_test_port is not None:
_message_test_port.received.append(message)
work = yield self._handle(message)
if work:
yield self._schedule(work)
except Exception as e:
log.error("Handler or its work threw an exception: %r: %r", e, message, exc_info=True)
self._internal_error("server failed to handle a message")
raise gen.Return(None) | Process an individual wire protocol fragment.
The websocket RFC specifies opcodes for distinguishing text frames
from binary frames. Tornado passes us either a text or binary string
depending on that opcode, we have to look at the type of the fragment
to see what we got.
Args:
fragment (unicode or bytes) : wire fragment to process |
def weld_combine_scalars(scalars, weld_type):
weld_obj = create_empty_weld_object()
obj_ids = (get_weld_obj_id(weld_obj, scalar) for scalar in scalars)
    # Weld template reconstructed ('res' as the builder name is an
    # assumption; the original literals were elided).
    merges = '\n'.join('let res = merge(res, {});'.format(obj_id) for obj_id in obj_ids)
    weld_template = 'let res = appender[{type}];\n{merges}\nresult(res)'
    weld_obj.weld_code = weld_template.format(type=weld_type,
                                              merges=merges)
    return weld_obj | Combine column-wise aggregations (i.e., the resulting scalars) into a single array.
Parameters
----------
scalars : tuple of WeldObjects
WeldObjects to combine.
weld_type : WeldType
The Weld type of the result. Currently expecting scalars to be of the same type.
Returns
-------
WeldObject
Representation of this computation. |
def send_response(self, transaction):
if transaction.cacheHit is False:
logger.debug("handling response")
self._handle_response(transaction)
return transaction | updates the cache with the response if there was a cache miss
:param transaction:
:return: |
def arg(self, state, index, stack_base=None):
session = self.arg_session
if self.args is None:
arg_loc = [session.next_arg(False) for _ in range(index + 1)][-1]
else:
arg_loc = self.args[index]
return arg_loc.get_value(state, stack_base=stack_base) | Returns a bitvector expression representing the nth argument of a function.
`stack_base` is an optional pointer to the top of the stack at the function start. If it is not
specified, use the current stack pointer.
WARNING: this assumes that none of the arguments are floating-point and they're all single-word-sized, unless
you've customized this CC. |
def UpsertStoredProcedure(self, collection_link, sproc, options=None):
if options is None:
options = {}
collection_id, path, sproc = self._GetContainerIdWithPathForSproc(collection_link, sproc)
        return self.Upsert(sproc,
                           path,
                           'sprocs',
                           collection_id,
                           None,
                           options) | Upserts a stored procedure in a collection.
:param str collection_link:
The link to the document collection.
:param str sproc:
    The stored procedure to upsert.
:param dict options:
The request options for the request.
:return:
The upserted Stored Procedure.
:rtype:
dict |
def GetOobResult(self, param, user_ip, gitkit_token=None):
    # param keys ('action', 'email', 'oldEmail', 'newEmail') and response
    # fields follow the Identity Toolkit client's conventions; the original
    # string literals were elided.
    if 'action' in param:
        try:
            if param['action'] == GitkitClient.RESET_PASSWORD_ACTION:
                request = self._PasswordResetRequest(param, user_ip)
                oob_code, oob_link = self._BuildOobLink(request,
                                                        param['action'])
                return {
                    'action': GitkitClient.RESET_PASSWORD_ACTION,
                    'email': param['email'],
                    'oob_link': oob_link,
                    'oob_code': oob_code,
                    'response_body': simplejson.dumps({'success': True})
                }
            elif param['action'] == GitkitClient.CHANGE_EMAIL_ACTION:
                if not gitkit_token:
                    return self._FailureOobResponse('login is required')
                request = self._ChangeEmailRequest(param, user_ip, gitkit_token)
                oob_code, oob_link = self._BuildOobLink(request,
                                                        param['action'])
                return {
                    'action': GitkitClient.CHANGE_EMAIL_ACTION,
                    'email': param['oldEmail'],
                    'new_email': param['newEmail'],
                    'oob_link': oob_link,
                    'oob_code': oob_code,
                    'response_body': simplejson.dumps({'success': True})
                }
        except errors.GitkitClientError as error:
            return self._FailureOobResponse(error.value)
    return self._FailureOobResponse('unknown request type') | Gets out-of-band code for ResetPassword/ChangeEmail request.
Args:
param: dict of HTTP POST params
user_ip: string, end user's IP address
gitkit_token: string, the gitkit token if user logged in
Returns:
A dict of {
email: user email who initializes the request
new_email: the requested new email, for ChangeEmail action only
oob_link: the generated link to be send to user's email
oob_code: the one time out-of-band code
action: OobAction
response_body: the http body to be returned to Gitkit widget
} |
def reset_rammbock(self):
for client in self._clients:
client.close()
for server in self._servers:
server.close()
self._init_caches() | Closes all connections, deletes all servers, clients, and protocols.
You should call this method before exiting your test run. This will
close all the connections and the ports will therefore be available for
reuse faster. |
def _run_vardict_paired(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
config = items[0]["config"]
if out_file is None:
out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
vrs = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(vrs, region,
out_file, items=items, do_merge=True)
paired = vcfutils.get_paired_bams(align_bams, items)
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_out_file, config,
samples=[x for x in [paired.tumor_name, paired.normal_name] if x])
else:
if not paired.normal_bam:
ann_file = _run_vardict_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return ann_file
vardict = get_vardict_command(items[0])
vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
freq = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
opts, var2vcf_opts = _vardict_options_from_config(items, config, out_file, target)
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
remove_dup = vcfutils.remove_dup_cl()
if any("vardict_somatic_filter" in tz.get_in(("config", "algorithm", "tools_off"), data, [])
for data in items):
somatic_filter = ""
freq_filter = ""
else:
                var2vcf_opts += " -M "  # only keep calls VarDict marks as somatic
                # Reconstructed sketch, not verbatim: the original sed
                # expressions and inline Python filters were elided in the
                # source; the shapes below follow bcbio's VarDict pipeline.
                somatic_filter = ("| sed 's/\\\\.*Somatic\\\\/Somatic/' "
                                  "| sed 's/REJECT,Description=\".*\">/"
                                  "REJECT,Description=\"Not Somatic via VarDict\">/' "
                                  "| %s -c 'import sys; "
                                  "from bcbio.variation import freebayes; "
                                  "sys.stdout.writelines(freebayes.call_somatic(\"%s\", \"%s\")(l) for l in sys.stdin)'"
                                  % (sys.executable, paired.tumor_name, paired.normal_name))
                freq_filter = ("| bcftools filter -m '+' -s 'REJECT' -e 'STATUS !~ \".*Somatic\"' 2> /dev/null "
                               "| %s -x 'bcbio.variation.vardict.add_db_germline_flag(x)' "
                               "| %s "
                               "| %s -x 'bcbio.variation.vardict.depth_freq_filter(x, %s, \"%s\")'" %
                               (os.path.join(os.path.dirname(sys.executable), "py"),
                                _lowfreq_linear_filter(0, True),
                                os.path.join(os.path.dirname(sys.executable), "py"),
                                0, bam.aligner_from_header(paired.tumor_bam)))
jvm_opts = _get_jvm_opts(items[0], tx_out_file)
py_cl = os.path.join(utils.get_bcbio_bin(), "py")
setup = ("%s && unset JAVA_HOME &&" % utils.get_R_exports())
contig_cl = vcfutils.add_contig_to_header_cl(ref_file, tx_out_file)
            # awk/bcftools expressions reconstructed ('NF>=48', 'QUAL >= 0');
            # the original literals were elided.
            cmd = ("{setup}{jvm_opts}{vardict} -G {ref_file} -f {freq} "
                   "-N {paired.tumor_name} -b \"{paired.tumor_bam}|{paired.normal_bam}\" {opts} "
                   "| awk 'NF>=48' | testsomatic.R "
                   "| var2vcf_paired.pl -P 0.9 -m 4.25 -f {freq} {var2vcf_opts} "
                   "-N \"{paired.tumor_name}|{paired.normal_name}\" "
                   "| {contig_cl} {freq_filter} "
                   "| bcftools filter -i 'QUAL >= 0' "
                   "{somatic_filter} | {fix_ambig_ref} | {fix_ambig_alt} | {remove_dup} | {vcfstreamsort} "
                   "{compress_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
return out_file | Detect variants with Vardict.
This is used for paired tumor / normal samples. |
def locked_execute(self, sql, parameters = None, cursorClass = DictCursor, quiet = False):
return self.execute(sql, parameters, cursorClass, quiet = quiet, locked = True) | We are lock-happy here but SQL performance is not currently an issue daemon-side. |
def _handle_args(self, cmd, args):
    # error/deprecation message texts are approximate; the original
    # string literals were elided.
    if cmd == 'install':
        if args.upgrade:
            if args.initdb or args.upgradedb:
                raise Stop(10, (
                    '--initdb and --upgradedb are incompatible with '
                    '--upgrade'))
            newinstall = None
        else:
            newinstall = True
        if args.managedb:
            if args.initdb or args.upgradedb:
                raise Stop(10, (
                    '--initdb and --upgradedb are incompatible with '
                    '--managedb'))
            args.initdb = True
            args.upgradedb = True
        else:
            if args.initdb or args.upgradedb:
                log.warn(
                    '--initdb and --upgradedb are deprecated, use --managedb')
    elif cmd == 'upgrade':
        log.warn('"upgrade" is deprecated, use "install --upgrade"')
        cmd = 'install'
        args.upgrade = True
        newinstall = False
    else:
        raise Exception('Unexpected command: %s' % cmd)
    return args, newinstall | We need to support deprecated behaviour for now, which makes this
quite complicated.
Current behaviour:
- install: Installs a new server, existing server causes an error
- install --upgrade: Installs or upgrades a server
- install --managedb: Automatically initialise or upgrade the db
Deprecated:
- install --upgradedb --initdb: Replaced by install --managedb
- install --upgradedb: upgrade the db, must exist
- install --initdb: initialise the db
- upgrade: Upgrades a server, must already exist
- upgrade --upgradedb: Automatically upgrade the db
returns:
- Modified args object, flag to indicate new/existing/auto install |
def log_message(self, format, *args):
    code = args[1][0]
    levels = {
        '4': 'warning',
        '5': 'error'
    }
    log_handler = getattr(logger, levels.get(code, 'info'))
log_handler(format % args) | overrides the ``log_message`` method from the wsgiref server so that
normal logging works with whatever configuration the application has
been set to.
Levels are inferred from the HTTP status code, 4XX codes are treated as
warnings, 5XX as errors and everything else as INFO level. |
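A standalone sketch of the same level inference using only the standard logging module:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("wsgi.access")

def log_by_status(status, message):
    code = status[0]                          # first digit of the HTTP status
    levels = {"4": "warning", "5": "error"}
    getattr(logger, levels.get(code, "info"))(message)

log_by_status("404", "GET /missing -> 404")   # WARNING
log_by_status("500", "GET /boom -> 500")      # ERROR
log_by_status("200", "GET / -> 200")          # INFO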
def get_transaction_result(
self,
transaction: BaseOrSpoofTransaction,
at_header: BlockHeader) -> bytes:
with self.get_vm(at_header).state_in_temp_block() as state:
computation = state.costless_execute_transaction(transaction)
computation.raise_if_error()
return computation.output | Return the result of running the given transaction.
This is referred to as a `call()` in web3. |
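A hypothetical call pattern (assuming `chain` is the containing chain object and `tx` a prepared transaction); because execution happens in a temporary block state, nothing is persisted:

result = chain.get_transaction_result(tx, chain.get_canonical_head())  # read-only, like eth_call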
def _ScanFileSystem(self, scan_node, base_path_specs):
if not scan_node or not scan_node.path_spec:
        raise errors.SourceScannerError('Invalid or missing file system scan node.')
base_path_specs.append(scan_node.path_spec) | Scans a file system scan node for file systems.
Args:
scan_node (SourceScanNode): file system scan node.
base_path_specs (list[PathSpec]): file system base path specifications.
Raises:
SourceScannerError: if the scan node is invalid. |
def decode(data):
dialect = None
try:
dialect = csv.Sniffer().sniff(data)
except Exception:
pass
handler = None
    try:
        data = data.splitlines()
        handler = csv.reader(data, dialect)
    except Exception as e:
        raise MetaParsingException("Can't parse your CSV data: %s" % e)
    decoded = []
    for cnt, line in enumerate(handler):
        # materialize as lists (not lazy filter/map objects) so the
        # emptiness and length checks behave correctly under Python 3
        usable_data = [x for x in line if x.strip()]
        if not usable_data:
            continue
        if len(usable_data) != 2:
            raise MetaParsingException(
                "Bad number of elements - line %d:\n\t%s\n" % (cnt, data[cnt])
            )
        usable_data = [_remove_quotes(x.strip()) for x in usable_data]
        decoded.append(usable_data)
decoded = validator.check_structure(decoded)
return decoded | Handles decoding of the CSV `data`.
Args:
data (str): Data which will be decoded.
Returns:
dict: Dictionary with decoded data. |
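A self-contained sketch of the sniff-then-parse flow using only the standard csv module (Sniffer may raise on ambiguous input, which is why the code above wraps it in try/except):

import csv

raw = 'title;"Some book"\nauthor;"J. Doe"\n\n'
dialect = csv.Sniffer().sniff(raw)            # guesses the ';' delimiter
pairs = []
for line in csv.reader(raw.splitlines(), dialect):
    fields = [x.strip() for x in line if x.strip()]
    if len(fields) == 2:                      # skip blank/malformed lines
        pairs.append(fields)
print(pairs)  # [['title', 'Some book'], ['author', 'J. Doe']]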
def dump(self):
report_as_json_string = utils.dict_to_json(self.report)
if self.out_file:
utils.string_to_file(self.out_file, report_as_json_string)
else:
        print(report_as_json_string) | Dump the output to json.
def get_siblings_score(self, top_node):
base = 100000
paragraphs_number = 0
paragraphs_score = 0
    nodes_to_check = self.parser.getElementsByTag(top_node, tag='p')
for node in nodes_to_check:
text_node = self.parser.getText(node)
word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text_node)
high_link_density = self.is_highlink_density(node)
if word_stats.get_stopword_count() > 2 and not high_link_density:
paragraphs_number += 1
paragraphs_score += word_stats.get_stopword_count()
if paragraphs_number > 0:
base = paragraphs_score / paragraphs_number
return base | \
we could have long articles that have tons of paragraphs
so if we tried to calculate the base score against
the total text score of those paragraphs it would be unfair.
So we need to normalize the score based on the average scoring
of the paragraphs within the top node.
For example if our total score of 10 paragraphs was 1000
but each had an average value of 100 then 100 should be our base. |
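The normalization arithmetic in isolation:

paragraph_scores = [100] * 10  # ten paragraphs totalling 1000
base = sum(paragraph_scores) / len(paragraph_scores) if paragraph_scores else 100000
print(base)                    # 100.0 is the base, not the raw total of 1000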
def _next_slide_partname(self):
sldIdLst = self._element.get_or_add_sldIdLst()
    partname_str = '/ppt/slides/slide%d.xml' % (len(sldIdLst)+1)
return PackURI(partname_str) | Return |PackURI| instance containing the partname for a slide to be
appended to this slide collection, e.g. ``/ppt/slides/slide9.xml``
for a slide collection containing 8 slides. |
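The partname computation in isolation, matching the docstring's example:

existing_slides = 8
partname_str = '/ppt/slides/slide%d.xml' % (existing_slides + 1)
print(partname_str)  # /ppt/slides/slide9.xml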