| code | docstring |
|---|---|
def literal_to_dict(value):
if isinstance(value, Literal):
if value.language is not None:
return {"@value": str(value), "@language": value.language}
return value.toPython()
elif isinstance(value, URIRef):
return {"@id": str(value)}
elif value is None:
return None
return str(value) | Transform an object value into a dict-readable value
:param value: Object of a triple which is not a BNode
:type value: Literal or URIRef
:return: dict or str or list |
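A minimal usage sketch (assuming `rdflib` is installed and `literal_to_dict` is in scope; expected values shown in comments):

```python
from rdflib import Literal, URIRef

print(literal_to_dict(Literal("chat", lang="fr")))      # {'@value': 'chat', '@language': 'fr'}
print(literal_to_dict(Literal(42)))                     # 42 (native Python value)
print(literal_to_dict(URIRef("http://example.org/x")))  # {'@id': 'http://example.org/x'}
print(literal_to_dict(None))                            # None
```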
def commit_transaction(self):
self._check_ended()
retry = False
state = self._transaction.state
if state is _TxnState.NONE:
raise InvalidOperation("No transaction started")
elif state in (_TxnState.STARTING, _TxnState.COMMITTED_EMPTY):
self._transaction.state = _TxnState.COMMITTED_EMPTY
return
elif state is _TxnState.ABORTED:
raise InvalidOperation(
"Cannot call commitTransaction after calling abortTransaction")
elif state is _TxnState.COMMITTED:
self._transaction.state = _TxnState.IN_PROGRESS
retry = True
try:
self._finish_transaction_with_retry("commitTransaction", retry)
except ConnectionFailure as exc:
exc._remove_error_label("TransientTransactionError")
_reraise_with_unknown_commit(exc)
except WTimeoutError as exc:
_reraise_with_unknown_commit(exc)
except OperationFailure as exc:
if exc.code not in _UNKNOWN_COMMIT_ERROR_CODES:
raise
_reraise_with_unknown_commit(exc)
finally:
self._transaction.state = _TxnState.COMMITTED | Commit a multi-statement transaction.
.. versionadded:: 3.7 |
def subjects(self):
for sms in SetMemberSubject.where(subject_set_id=self.id):
yield sms.links.subject | A generator which yields :py:class:`.Subject` objects which are in this
subject set.
Examples::
for subject in subject_set.subjects:
print(subject.id) |
def gps_velocity_body(GPS_RAW_INT, ATTITUDE):
r = rotation(ATTITUDE)
return r.transposed() * Vector3(GPS_RAW_INT.vel*0.01*cos(radians(GPS_RAW_INT.cog*0.01)),
GPS_RAW_INT.vel*0.01*sin(radians(GPS_RAW_INT.cog*0.01)),
-tan(ATTITUDE.pitch)*GPS_RAW_INT.vel*0.01) | return GPS velocity vector in body frame |
async def handle_job_closing(self, container_id, retval):
try:
self._logger.debug("Closing %s", container_id)
try:
message, container_path, future_results = self._containers_running[container_id]
del self._containers_running[container_id]
except asyncio.CancelledError:
raise
except:
self._logger.warning("Container %s that has finished(p1) was not launched by this agent", str(container_id), exc_info=True)
return
for student_container_id_loop in self._student_containers_for_job[message.job_id]:
async def close_and_delete(student_container_id=student_container_id_loop):
try:
await self._docker.kill_container(student_container_id)
await self._docker.remove_container(student_container_id)
except asyncio.CancelledError:
raise
except:
pass
self._create_safe_task(close_and_delete(student_container_id_loop))
del self._student_containers_for_job[message.job_id]
if container_id in self._assigned_external_ports:
for p in self._assigned_external_ports[container_id]:
self._external_ports.add(p)
del self._assigned_external_ports[container_id]
killed = await self._timeout_watcher.was_killed(container_id)
if container_id in self._containers_killed:
killed = self._containers_killed[container_id]
del self._containers_killed[container_id]
stdout = ""
stderr = ""
result = "crash" if retval == -1 else None
error_msg = None
grade = None
problems = {}
custom = {}
tests = {}
archive = None
state = ""
if killed is not None:
result = killed
if result is None:
try:
return_value = await future_results
accepted_types = {"stdout": str, "stderr": str, "result": str, "text": str, "grade": float,
"problems": dict, "custom": dict, "tests": dict, "state": str, "archive": str}
keys_fct = {"problems": id_checker, "custom": id_checker, "tests": id_checker_tests}
for key, item in return_value.items():
if not isinstance(item, accepted_types[key]):
raise Exception("Feedback file is badly formatted.")
elif accepted_types[key] == dict and key != "custom":
for sub_key, sub_item in item.items():
if not keys_fct[key](sub_key) or isinstance(sub_item, dict):
raise Exception("Feedback file is badly formatted.")
stdout = return_value.get("stdout", "")
stderr = return_value.get("stderr", "")
result = return_value.get("result", "error")
error_msg = return_value.get("text", "")
grade = return_value.get("grade", None)
problems = return_value.get("problems", {})
custom = return_value.get("custom", {})
tests = return_value.get("tests", {})
state = return_value.get("state", "")
archive = return_value.get("archive", None)
if archive is not None:
archive = base64.b64decode(archive)
except Exception as e:
self._logger.exception("Cannot get back output of container %s! (%s)", container_id, str(e))
result = "crash"
                error_msg = "Cannot get back output of the container. ({})".format(str(e))  # message assumed; literal elided in source
if error_msg is None:
error_msg = ""
if grade is None:
if result == "success":
grade = 100.0
else:
grade = 0.0
try:
await self._docker.remove_container(container_id)
except asyncio.CancelledError:
raise
except:
pass
try:
await self._ashutil.rmtree(container_path)
except PermissionError:
self._logger.debug("Cannot remove old container path!")
pass
await self.send_job_result(message.job_id, result, error_msg, grade, problems, tests, custom, state, archive, stdout, stderr)
del self._container_for_job[message.job_id]
except asyncio.CancelledError:
raise
except:
self._logger.exception("Exception in handle_job_closing") | Handle a closing student container. Do some cleaning, verify memory limits, timeouts, ... and returns data to the backend |
def resize(image, width=None, height=None, crop=False):
width, height, crop = _normalize_params(image, width, height, crop)
try:
is_closed = image.closed
if is_closed:
image.open()
resized_image = _resize(image, width, height, crop)
finally:
if is_closed:
image.close()
return ImageFile(resized_image) | Resize an image and return the resized file. |
def set(self, client_id, code, request, *args, **kwargs):
expires = datetime.utcnow() + timedelta(seconds=100)
grant = self.model(
client_id=request.client.client_id,
        code=code['code'],
redirect_uri=request.redirect_uri,
        scope=' '.join(request.scopes),
user=self.current_user(),
expires=expires
)
self.session.add(grant)
self.session.commit() | Creates Grant object with the given params
:param client_id: ID of the client
:param code:
:param request: OAuthlib request object |
def find_asm_blocks(asm_lines):
blocks = []
last_labels = OrderedDict()
packed_ctr = 0
avx_ctr = 0
xmm_references = []
ymm_references = []
zmm_references = []
gp_references = []
mem_references = []
increments = {}
for i, line in enumerate(asm_lines):
        zmm_references += re.findall(r'%zmm[0-9]+', line)
        ymm_references += re.findall(r'%ymm[0-9]+', line)
        xmm_references += re.findall(r'%xmm[0-9]+', line)
        gp_references += re.findall(r'%r[a-z0-9]+', line)
        # memory operand: offset(%base,%index,scale); the original regex
        # literals were lost in extraction, these are reconstructions
        if re.search(r'\(%\w+(,%\w+)?(,[0-9]+)?\)', line):
            m = re.search(
                r'(?P<off>[-0-9]*)\(%(?P<basep>\w+)'
                r'(,%(?P<idx>\w+))?(,(?P<scale>[0-9]+))?\)(?P<eol>\s*$)?',
                line)
            mem_references.append((
                int(m.group('off')) if m.group('off') else 0,
                m.group('basep'),
                m.group('idx'),
                int(m.group('scale')) if m.group('scale') else 1,
                'load' if m.group('eol') is None else 'store'))
if re.match(r"^[v]?(mul|add|sub|div|fmadd(132|213|231)?)[h]?p[ds]", line):
            if line.startswith('v'):
avx_ctr += 1
packed_ctr += 1
        elif re.match(r'^\S+:', line):
            last_labels[line[0:line.find(':')]] = i
packed_ctr = 0
avx_ctr = 0
xmm_references = []
ymm_references = []
zmm_references = []
gp_references = []
mem_references = []
increments = {}
        elif re.match(r'^inc[bwlq]?\s+%[a-z0-9]+', line):
            reg_start = line.find('%') + 1
            increments[line[reg_start:]] = 1
        elif re.match(r'^add[bwlq]?\s+\$[0-9]+,\s*%[a-z0-9]+', line):
            const_start = line.find('$') + 1
            const_end = line[const_start + 1:].find(',') + const_start + 1
            reg_start = line.find('%') + 1
            increments[line[reg_start:]] = int(line[const_start:const_end])
        elif re.match(r'^dec[bwlq]?\s+%[a-z0-9]+', line):
            reg_start = line.find('%') + 1
            increments[line[reg_start:]] = -1
        elif re.match(r'^sub[bwlq]?\s+\$[0-9]+,\s*%[a-z0-9]+', line):
            const_start = line.find('$') + 1
            const_end = line[const_start + 1:].find(',') + const_start + 1
            reg_start = line.find('%') + 1
            increments[line[reg_start:]] = -int(line[const_start:const_end])
        elif last_labels and re.match(r'^j[a-z]+\s+\S+\s*$', line):  # jump pattern assumed; literal elided in source
last_label = None
last_label_line = -1
for label_name, label_line in last_labels.items():
                if re.match(r'^j[a-z]+\s+' + re.escape(label_name) + r'\s*$', line):
last_label = label_name
last_label_line = label_line
labels = list(last_labels.keys())
if last_label:
pointer_increment = None
possible_idx_regs = None
if mem_references:
                    store_references = [mref for mref in mem_references
                                        if mref[4] == 'store']
refs = store_references or mem_references
possible_idx_regs = list(set(increments.keys()).intersection(
set([r[1] for r in refs if r[1] is not None] +
[r[2] for r in refs if r[2] is not None])))
for mref in refs:
for reg in list(possible_idx_regs):
if None not in mref[1:3]:
if not (reg == mref[1] or reg == mref[2]):
possible_idx_regs.remove(reg)
idx_reg = None
if len(possible_idx_regs) == 1:
idx_reg = possible_idx_regs[0]
elif possible_idx_regs and itemsEqual([increments[pidxreg]
for pidxreg in possible_idx_regs]):
idx_reg = possible_idx_regs[0]
if idx_reg:
mem_scales = [mref[3] for mref in refs
if idx_reg == mref[2] or idx_reg == mref[1]]
if itemsEqual(mem_scales):
try:
pointer_increment = mem_scales[0] * increments[idx_reg]
except:
print("labels", pformat(labels[labels.index(last_label):]))
print("lines", pformat(asm_lines[last_label_line:i + 1]))
print("increments", increments)
print("mem_references", pformat(mem_references))
print("idx_reg", idx_reg)
print("mem_scales", mem_scales)
raise
                blocks.append({'first_line': last_label_line,
                               'last_line': i,
                               'ops': i - last_label_line,
                               'labels': labels[labels.index(last_label):],
                               'packed_instr': packed_ctr,
                               'avx_instr': avx_ctr,
                               'XMM': (len(xmm_references), len(set(xmm_references))),
                               'YMM': (len(ymm_references), len(set(ymm_references))),
                               'ZMM': (len(zmm_references), len(set(zmm_references))),
                               'GP': (len(gp_references), len(set(gp_references))),
                               'regs': (len(xmm_references) + len(ymm_references) +
                                        len(zmm_references) + len(gp_references),
                                        len(set(xmm_references)) + len(set(ymm_references)) +
                                        len(set(zmm_references)) +
                                        len(set(gp_references))),
                               'pointer_increment': pointer_increment,
                               'lines': asm_lines[last_label_line:i + 1],
                               'possible_idx_regs': possible_idx_regs,
                               'mem_references': mem_references,
                               'increments': increments, })
packed_ctr = 0
avx_ctr = 0
xmm_references = []
ymm_references = []
zmm_references = []
gp_references = []
mem_references = []
increments = {}
last_labels = OrderedDict()
return list(enumerate(blocks)) | Find blocks probably corresponding to loops in assembly. |
def get_multiple_data():
all_labs = {}
all_labs["diybio_org"] = diybio_org.get_labs(format="dict")
all_labs["fablabs_io"] = fablabs_io.get_labs(format="dict")
all_labs["makeinitaly_foundation"] = makeinitaly_foundation.get_labs(
format="dict")
all_labs["hackaday_io"] = hackaday_io.get_labs(format="dict")
all_labs["hackerspaces_org"] = hackerspaces_org.get_labs(format="dict")
all_labs["makery_info"] = makery_info.get_labs(format="dict")
all_labs["nesta"] = nesta.get_labs(format="dict")
return all_labs | Get data from all the platforms listed in makerlabs. |
def check_physical(self, line):
self.physical_line = line
for name, check, argument_names in self._physical_checks:
self.init_checker_state(name, argument_names)
result = self.run_check(check, argument_names)
if result is not None:
(offset, text) = result
self.report_error(self.line_number, offset, text, check)
                if text[:4] == 'E101':
self.indent_char = line[0] | Run all physical checks on a raw input line. |
def sign_key(self, keyid, default_key=None, passphrase=None):
args = []
input_command = ""
if passphrase:
passphrase_arg = "--passphrase-fd 0"
input_command = "%s\n" % passphrase
args.append(passphrase_arg)
if default_key:
args.append(str("--default-key %s" % default_key))
args.extend(["--command-fd 0", "--sign-key %s" % keyid])
p = self._open_subprocess(args)
        result = self._result_map['sign'](self)  # result-map key assumed; literal elided in source
confirm_command = "%sy\n" % input_command
p.stdin.write(b(confirm_command))
self._collect_output(p, result, stdin=p.stdin)
return result | sign (an imported) public key - keyid, with default secret key
>>> import gnupg
>>> gpg = gnupg.GPG(homedir="doctests")
>>> key_input = gpg.gen_key_input()
>>> key = gpg.gen_key(key_input)
>>> gpg.sign_key(key['fingerprint'])
>>> gpg.list_sigs(key['fingerprint'])
:param str keyid: key shortID, longID, fingerprint or email_address
:param str passphrase: passphrase used when creating the key, leave None otherwise
:returns: The result giving status of the key signing...
success can be verified by gpg.list_sigs(keyid) |
def steal_docstring_from(obj):
def deco(fn):
docs = [obj.__doc__]
if fn.__doc__:
docs.append(fn.__doc__)
        fn.__doc__ = '\n\n'.join(docs)
return fn
return deco | Decorator that lets you steal a docstring from another object
Example
-------
::
@steal_docstring_from(superclass.meth)
def meth(self, arg):
"Extra subclass documentation"
pass
In this case the docstring of the new 'meth' will be copied from superclass.meth, and
if an additional docstring was defined for meth it will be appended to the superclass
docstring with a two newlines inbetween. |
def add_manager(model):
if model._meta.abstract:
return
model._meta.local_managers = model._meta.managers
for current_manager in model._meta.local_managers:
prev_class = current_manager.__class__
patch_manager_class(current_manager)
if model._default_manager.__class__ is prev_class:
model._meta._expire_cache() | Monkey patches the original model to use MultilingualManager instead of
default managers (not only ``objects``, but also every manager defined and inherited).
Custom managers are merged with MultilingualManager. |
def count_relations(graph) -> Counter:
return Counter(
data[RELATION]
for _, _, data in graph.edges(data=True)
) | Return a histogram over all relationships in a graph.
:param pybel.BELGraph graph: A BEL graph
:return: A Counter from {relation type: frequency} |
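A runnable sketch of the same histogram on a plain `networkx.MultiDiGraph` (a `pybel.BELGraph` is one); here `RELATION` is an assumed stand-in for the `pybel.constants.RELATION` edge-data key:

```python
from collections import Counter
import networkx as nx

RELATION = 'relation'  # assumed stand-in for pybel.constants.RELATION

graph = nx.MultiDiGraph()
graph.add_edge('a', 'b', relation='increases')
graph.add_edge('b', 'c', relation='increases')
graph.add_edge('a', 'c', relation='decreases')

# same expression as count_relations above
print(Counter(data[RELATION] for _, _, data in graph.edges(data=True)))
# Counter({'increases': 2, 'decreases': 1})
```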
def _get_query_argument(args, cell, env):
  sql_arg = args.get('query', None)
if sql_arg is None:
if not isinstance(cell, basestring):
      raise Exception('Expected a query argument or inline SQL')  # message assumed; literal elided in source
return bigquery.Query(cell, env=env)
item = google.datalab.utils.commands.get_notebook_item(sql_arg)
if isinstance(item, bigquery.Query):
return item
else:
    raise Exception('%s is not a query' % type(item)) | Get a query argument to a cell magic.
The query is specified with args['query']. We look that up and if it is a BQ query
object, just return it. If it is a string, build a query object out of it and return
that
Args:
args: the dictionary of magic arguments.
cell: the cell contents which can be variable value overrides (if args has a 'query'
value) or inline SQL otherwise.
env: a dictionary that is used for looking up variable values.
Returns:
A Query object. |
def freqz_cas(sos,w):
Ns,Mcol = sos.shape
w,Hcas = signal.freqz(sos[0,:3],sos[0,3:],w)
for k in range(1,Ns):
w,Htemp = signal.freqz(sos[k,:3],sos[k,3:],w)
Hcas *= Htemp
return w, Hcas | Cascade frequency response
Mark Wickert October 2016 |
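A usage sketch, assuming `freqz_cas` and SciPy are available: design a 6th-order Butterworth filter as three second-order sections and check the cascade response against the flattened transfer function:

```python
import numpy as np
from scipy import signal

sos = signal.butter(6, 0.3, output='sos')   # shape (3, 6): three biquads
w = np.linspace(0, np.pi, 512)
w, H_cas = freqz_cas(sos, w)                # product of per-section responses

b, a = signal.butter(6, 0.3)                # same filter, single section
_, H_ref = signal.freqz(b, a, w)
print(np.allclose(H_cas, H_ref))            # True (up to round-off)
```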
def select_labels(self, labels=None):
self._resize_if_required()
segmentation = self._select_labels(self.resized_segmentation, labels)
self.resized_binar_segmentation = segmentation | Prepare binar segmentation based on input segmentation and labels.
:param labels:
:return: |
def family_name(self):
def find_first(dict_, keys, default=None):
for key in keys:
value = dict_.get(key)
if value is not None:
return value
return default
return find_first(self._names, ((0, 1), (1, 1), (3, 1))) | The name of the typeface family for this font, e.g. 'Arial'. |
async def play(self, ctx, *, query):
source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(query))
        ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)
        await ctx.send('Now playing: {}'.format(query)) | Plays a file from the local filesystem |
def _find_reader_dataset(self, dataset_key, **dfilter):
too_many = False
for reader_name, reader_instance in self.readers.items():
try:
ds_id = reader_instance.get_dataset_key(dataset_key, **dfilter)
except TooManyResults:
LOG.trace("Too many datasets matching key {} in reader {}".format(dataset_key, reader_name))
too_many = True
continue
except KeyError:
LOG.trace("Cant created a node yet, create it now
return Node(ds_id, {: reader_name})
if too_many:
raise TooManyResults("Too many keys matching: {}".format(dataset_key)) | Attempt to find a `DatasetID` in the available readers.
Args:
dataset_key (str, float, DatasetID):
Dataset name, wavelength, or a combination of `DatasetID`
parameters to use in searching for the dataset from the
available readers.
**dfilter (list or str): `DatasetID` parameters besides `name`
and `wavelength` to use to filter the
available datasets. Passed directly to
`get_dataset_key` of the readers, see
that method for more information. |
def fetch_points_of_sales(self, ticket=None):
ticket = ticket or self.get_or_create_ticket()
        client = clients.get_client('wsfe', self.is_sandboxed)
response = client.service.FEParamGetPtosVenta(
serializers.serialize_ticket(ticket),
)
check_response(response)
results = []
for pos_data in response.ResultGet.PtoVenta:
results.append(PointOfSales.objects.update_or_create(
number=pos_data.Nro,
issuance_type=pos_data.EmisionTipo,
owner=self,
defaults={
                    'blocked': pos_data.Bloqueado == 'S',
                    'drop_date': parsers.parse_date(pos_data.FchBaja),
}
))
return results | Fetch all point of sales objects.
Fetch all point of sales from the WS and store (or update) them
locally.
Returns a list of tuples with the format (pos, created,). |
def check_basic_auth(self, username, password):
valid = self.users.check_password(
username, password
)
if not valid:
            log.warning('Invalid login from %s', username)
valid = False
return (
valid,
username
) | This function is called to check if a username /
password combination is valid via the htpasswd file. |
def clear_all(self):
"Remove all items and column headings"
self.clear()
for ch in reversed(self.columns):
del self[ch.name] | Remove all items and column headings |
def ensure_int64_or_float64(arr, copy=False):
try:
        return arr.astype('int64', copy=copy, casting='safe')
except TypeError:
        return arr.astype('float64', copy=copy) | Ensure that an array of some integer dtype
has an int64 dtype if possible
If it's not possible, potentially because of overflow,
convert the array to float64 instead.
Parameters
----------
arr : array-like
The array whose data type we want to enforce.
copy: boolean
Whether to copy the original array or reuse
it in place, if possible.
Returns
-------
out_arr : The input array cast as int64 if
possible without overflow.
Otherwise the input array cast to float64. |
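A quick demonstration with NumPy (assuming the helper above is in scope): a small integer array upcasts safely, while `uint64` input trips the `'safe'` casting rule (its full range does not fit in `int64`) and falls back to `float64`:

```python
import numpy as np

small = np.arange(5, dtype=np.int32)
print(ensure_int64_or_float64(small).dtype)  # int64

big = np.array([2 ** 63], dtype=np.uint64)   # not representable as int64
print(ensure_int64_or_float64(big).dtype)    # float64
```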
def _create_with_scope(body, kwargs):
return ast.With(
items=[
ast.withitem(
                context_expr=_a.Call(
                    _a.Name('ContextScope'),
                    [_a.Name('context')],
                    keywords=kwargs,
                ),
                optional_vars=_a.Name('context', ctx=ast.Store())
),
],
body=body,
) | Helper function to wrap a block in a scope stack:
with ContextScope(context, **kwargs) as context:
... body ... |
def create_reader(name, *args, format=None, registry=default_registry, **kwargs):
return registry.get_reader_factory_for(name, format=format)(name, *args, **kwargs) | Create a reader instance, guessing its factory using filename (and eventually format).
:param name:
:param args:
:param format:
:param registry:
:param kwargs:
:return: mixed |
def init(FILE):
try:
cfg.read(FILE)
global _loaded
_loaded = True
except:
file_not_found_message(FILE) | Read config file
:param FILE: Absolute path to config file (incl. filename)
:type FILE: str |
def after_third_friday(day=None):
day = day if day is not None else datetime.datetime.now()
now = day.replace(day=1, hour=16, minute=0, second=0, microsecond=0)
now += relativedelta.relativedelta(weeks=2, weekday=relativedelta.FR)
return day > now | check if day is after month's 3rd friday |
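For example (assuming `datetime` and `dateutil.relativedelta` are imported as in the function's module), the third Friday of June 2023 was June 16, so:

```python
import datetime

print(after_third_friday(datetime.datetime(2023, 6, 20)))  # True
print(after_third_friday(datetime.datetime(2023, 6, 10)))  # False
```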
def get_weights(self):
self._check_sess()
return {
k: v.eval(session=self.sess)
for k, v in self.variables.items()
} | Returns a dictionary containing the weights of the network.
Returns:
Dictionary mapping variable names to their weights. |
def join_keys(x, y, by=None):
if by is None:
by = slice(None, None, None)
if isinstance(by, tuple):
by = list(by)
joint = x[by].append(y[by], ignore_index=True)
keys = ninteraction(joint, drop=True)
keys = np.asarray(keys)
nx, ny = len(x), len(y)
    return {'x': keys[np.arange(nx)],
            'y': keys[nx + np.arange(ny)]} | Join keys.
Given two data frames, create a unique key for each row.
Parameters
-----------
x : dataframe
y : dataframe
by : list-like
Column names to join by
Returns
-------
out : dict
Dictionary with keys x and y. The values of both keys
are arrays with integer elements. Identical rows in
x and y dataframes would have the same key in the
output. The key elements start at 1. |
def datagram_received(self, data, addr):
response = unpack_lifx_message(data)
response.ip_addr = addr[0]
mac_addr = response.target_addr
if mac_addr == BROADCAST_MAC:
return
if type(response) == StateService and response.service == 1:
remote_port = response.port
elif type(response) == LightState:
remote_port = UDP_BROADCAST_PORT
else:
return
if self.ipv6prefix:
family = socket.AF_INET6
remote_ip = mac_to_ipv6_linklocal(mac_addr, self.ipv6prefix)
else:
family = socket.AF_INET
remote_ip = response.ip_addr
if mac_addr in self.lights:
light = self.lights[mac_addr]
if light.registered:
return
light.cleanup()
light.ip_addr = remote_ip
light.port = remote_port
else:
light = Light(self.loop, mac_addr, remote_ip, remote_port, parent=self)
self.lights[mac_addr] = light
coro = self.loop.create_datagram_endpoint(
lambda: light, family=family, remote_addr=(remote_ip, remote_port))
light.task = self.loop.create_task(coro) | Method run when data is received from the devices
This method will unpack the data according to the LIFX protocol.
If a new device is found, the Light device will be created and started as
a DatagramProtocol and will be registered with the parent.
:param data: raw data
:type data: bytestring
:param addr: sender IP address 2-tuple for IPv4, 4-tuple for IPv6
:type addr: tuple |
def _get_canonical_map(self):
cacheattr = "_token2canonical"
if not hasattr(self, cacheattr):
token2canonical = {}
cmd2funcname = {}
for attr in self.get_names():
if attr.startswith("do_"): cmdname = attr[3:]
elif attr.startswith("_do_"): cmdname = attr[4:]
else:
continue
cmd2funcname[cmdname] = attr
token2canonical[cmdname] = cmdname
for cmdname, funcname in cmd2funcname.items():
func = getattr(self, funcname)
aliases = getattr(func, "aliases", [])
for alias in aliases:
if alias in cmd2funcname:
import warnings
warnings.warn(" alias for command conflicts "
"with handler" %
(alias, cmdname, cmd2funcname[alias]))
continue
token2canonical[alias] = cmdname
setattr(self, cacheattr, token2canonical)
return getattr(self, cacheattr) | Return a mapping of available command names and aliases to
their canonical command name. |
def iter_symbols(code):
for name in code.co_names:
yield name
for const in code.co_consts:
if isinstance(const, six.string_types):
yield const
elif isinstance(const, CodeType):
for name in iter_symbols(const):
yield name | Yield names and strings used by `code` and its nested code objects |
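A small demonstration (assuming `six` is installed, as the function requires): compile a snippet and list the names and string constants it references:

```python
code = compile("import json\nDATA = json.loads('{}')", "<sample>", "exec")
print(sorted(set(iter_symbols(code))))
# ['DATA', 'json', 'loads', '{}']
```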
def get_port_chann_detail_request(last_aggregator_id):
    port_channel_ns = 'urn:brocade.com:mgmt:brocade-lag'  # namespace assumed; literal elided in source
    request_port_channel = ET.Element('get-port-channel-detail',
                                      xmlns=port_channel_ns)
    if last_aggregator_id != '':
last_received_port_chann_el = ET.SubElement(request_port_channel,
"last-aggregator-id")
last_received_port_chann_el.text = last_aggregator_id
return request_port_channel | Creates a new Netconf request based on the last received
aggregator id when the hasMore flag is true |
def thermal_conductivity(self, temperature, volume):
gamma = self.gruneisen_parameter(temperature, volume)
theta_d = self.debye_temperature(volume)
theta_a = theta_d * self.natoms**(-1./3.)
prefactor = (0.849 * 3 * 4**(1./3.)) / (20. * np.pi**3)
prefactor = prefactor * (self.kb/self.hbar)**3 * self.avg_mass
kappa = prefactor / (gamma**2 - 0.514 * gamma + 0.228)
kappa = kappa * theta_a**2 * volume**(1./3.) * 1e-10
return kappa | Eq(17) in 10.1103/PhysRevB.90.174107
Args:
temperature (float): temperature in K
volume (float): in Ang^3
Returns:
float: thermal conductivity in W/K/m |
def _UnpackGdbVal(self, gdb_value):
val_type = gdb_value.type.code
if val_type == gdb.TYPE_CODE_INT or val_type == gdb.TYPE_CODE_ENUM:
return int(gdb_value)
if val_type == gdb.TYPE_CODE_VOID:
return None
if val_type == gdb.TYPE_CODE_PTR:
return long(gdb_value)
if val_type == gdb.TYPE_CODE_ARRAY:
return str(gdb_value)
return str(gdb_value) | Unpacks gdb.Value objects and returns the best-matched python object. |
def pygal_parser(preprocessor, tag, markup):
data = loads(markup)
    if tag == 'pygal' and data is not None:
        output = run_pygal(data)
        return '%s' % output  # wrapper markup assumed; literal elided in source
    else:
        raise ValueError('Error processing input. Expected syntax: {0}'.format(SYNTAX)) | Simple pygal parser |
def remove(self, node, dirty=True):
if node.id in self._children:
self._children[node.id].parent = None
del self._children[node.id]
if dirty:
self.touch() | Remove the given child node.
Args:
node (gkeepapi.Node): Node to remove.
dirty (bool): Whether this node should be marked dirty. |
def compile_template_str(template, renderers, default, blacklist, whitelist):
fn_ = salt.utils.files.mkstemp()
    with salt.utils.files.fopen(fn_, 'wb') as ofile:
ofile.write(SLS_ENCODER(template)[0])
return compile_template(fn_, renderers, default, blacklist, whitelist) | Take template as a string and return the high data structure
derived from the template. |
def _auto_commit(self):
if not self.auto_commit or self.auto_commit_every_n is None:
return
if self.count_since_commit >= self.auto_commit_every_n:
self.commit() | Check if we have to commit based on number of messages and commit |
def delete_process_behavior(self, process_id, behavior_ref_name):
route_values = {}
if process_id is not None:
            route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
        if behavior_ref_name is not None:
            route_values['behaviorRefName'] = self._serialize.url('behavior_ref_name', behavior_ref_name, 'str')
        self._send(http_method='DELETE',
                   location_id='00000000-0000-0000-0000-000000000000',  # endpoint GUID elided in source
                   version='5.0-preview',  # exact API version elided in source
                   route_values=route_values) | DeleteProcessBehavior.
route_values=route_values) | DeleteProcessBehavior.
[Preview API] Removes a behavior in the process.
:param str process_id: The ID of the process
:param str behavior_ref_name: The reference name of the behavior |
def match(line, keyword):
line = line.lstrip()
length = len(keyword)
if line[:length] == keyword:
return line[length:]
else:
if keyword in line:
return line[line.index(keyword):]
else:
return None | If the first part of line (modulo blanks) matches keyword,
returns the end of that line. Otherwise checks if keyword is
anywhere in the line and returns that section, else returns None |
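Behaviour at a glance:

```python
print(match("   version: 1.2", "version:"))   # ' 1.2'  (prefix match after lstrip)
print(match("see version: 1.2", "version:"))  # 'version: 1.2'  (keyword mid-line)
print(match("nothing here", "version:"))      # None
```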
def auth_access(self, auth_code):
data = {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'grant_type': 'authorization_code',
            'code': auth_code,
            'redirect_uri': self.redirect_url
}
return self.request("post", "access_token", data=data) | verify the fist authorization response url code
response data
返回值字段 字段类型 字段说明
access_token string 用户授权的唯一票据,用于调用微博的开放接口,同时也是第三方应用验证微博用户登录的唯一票据,
第三方应用应该用该票据和自己应用内的用户建立唯一影射关系,来识别登录状态,不能使用本返回值里的UID
字段来做登录识别。
expires_in string access_token的生命周期,单位是秒数。
remind_in string access_token的生命周期(该参数即将废弃,开发者请使用expires_in)。
uid string 授权用户的UID,本字段只是为了方便开发者,减少一次user/show接口调用而返回的,第三方应用不能用此字段作为用户
登录状态的识别,只有access_token才是用户授权的唯一票据。
:param auth_code: authorize_url response code
:return:
normal:
{
"access_token": "ACCESS_TOKEN",
"expires_in": 1234,
"remind_in":"798114",
"uid":"12341234"
}
mobile:
{
"access_token": "SlAV32hkKG",
"remind_in": 3600,
"expires_in": 3600
"refresh_token": "QXBK19xm62"
} |
def f_get_parent(self):
if self.v_is_root:
            raise TypeError('Root node does not have a parent')
        elif self.v_location == '':
return self.v_root
else:
return self.v_root.f_get(self.v_location, fast_access=False, shortcuts=False) | Returns the parent of the node.
Raises a TypeError if current node is root. |
def merge_ticket(self, ticket_id, into_id):
        msg = self.__request('ticket/{}/merge/{}'.format(str(ticket_id),
                                                         str(into_id)))
        state = msg.split('\n')[2]
        return self.RE_PATTERNS['merge_successful_pattern'].match(state) is not None | Merge ticket into another (undocumented API feature).
:param ticket_id: ID of ticket to be merged
:param into: ID of destination ticket
:returns: ``True``
Operation was successful
``False``
Either origin or destination ticket does not
exist or user does not have ModifyTicket permission. |
def clear_created_date(self):
if (self.get_created_date_metadata().is_read_only() or
self.get_created_date_metadata().is_required()):
raise errors.NoAccess()
        self._my_map['createdDate'] = self._created_date_default | Removes the created date.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* |
def fake_chars_or_choice(self, field_name):
return self.djipsum_fields().randomCharField(
self.model_class(),
field_name=field_name
) | Return fake chars or choice it if the `field_name` has choices.
Then, returning random value from it.
This specially for `CharField`.
Usage:
faker.fake_chars_or_choice('field_name')
Example for field:
TYPE_CHOICES = (
('project', 'I wanna to talk about project'),
('feedback', 'I want to report a bugs or give feedback'),
('hello', 'I just want to say hello')
)
type = models.CharField(max_length=200, choices=TYPE_CHOICES) |
def owned_pre_save(sender, document, **kwargs):
if not isinstance(document, Owned):
return
    changed_fields = getattr(document, '_changed_fields', [])
    if 'organization' in changed_fields:
        if document.owner:
            document._previous_owner = document.owner
            document.owner = None
        else:
            original = sender.objects.only('organization').get(pk=document.pk)
            document._previous_owner = original.organization
    elif 'owner' in changed_fields:
        if document.organization:
            document._previous_owner = document.organization
            document.organization = None
        else:
            original = sender.objects.only('owner').get(pk=document.pk)
document._previous_owner = original.owner | Owned mongoengine.pre_save signal handler
Need to fetch original owner before the new one erase it. |
def list_app(self):
        kwd = {
            'pager': '',
            'title': 'Apps',
        }
        self.render('app/list.html', kwd=kwd,  # template path assumed; literal elided in source
userinfo=self.userinfo) | List the apps. |
def scan_index(index, model):
query = {"query": {"type": {"value": model._meta.model_name}}}
client = get_client()
for hit in helpers.scan(client, index=index, query=query):
yield hit | Yield all documents of model type in an index.
This function calls the elasticsearch.helpers.scan function,
and yields all the documents in the index that match the doc_type
produced by a specific Django model.
Args:
index: string, the name of the index to scan, must be a configured
index as returned from settings.get_index_names.
model: a Django model type, used to filter the documents that
are scanned.
Yields each document of type model in index, one at a time. |
def shared_atts(self):
atts = {}
first = self.chunks[0]
for att in sorted(first.atts):
            if all(fs.atts.get(att, '') == first.atts[att] for fs in self.chunks if len(fs) > 0):
atts[att] = first.atts[att]
return atts | Gets atts shared among all nonzero length component Chunk |
def build_srcdict(gta, prop):
o = {}
for s in gta.roi.sources:
o[s.name] = s[prop]
return o | Build a dictionary that maps from source name to the value of a source property
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
prop : str
The name of the property we are mapping
Returns
-------
odict : dict
Dictionary that maps from source name to the value of the specified property |
def parse_san(self, board: chess.Board, san: str) -> chess.Move:
if san == "0-0":
san = "O-O"
elif san == "0-0-0":
san = "O-O-O"
return board.parse_san(san) | When the visitor is used by a parser, this is called to parse a move
in standard algebraic notation.
You can override the default implementation to work around specific
quirks of your input format. |
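Inlining the same normalization shows why it matters; a minimal sketch with `python-chess`, using a position where castling is legal:

```python
import chess

san = "0-0"          # zero-style castling, common in older PGNs
if san == "0-0":     # the visitor's rewrite, inlined
    san = "O-O"

board = chess.Board("r3k2r/8/8/8/8/8/8/R3K2R w KQkq - 0 1")
print(board.parse_san(san))  # e1g1
```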
def visit_ifexp(self, node):
return "%s if %s else %s" % (
self._precedence_parens(node, node.body, is_left=True),
self._precedence_parens(node, node.test, is_left=True),
self._precedence_parens(node, node.orelse, is_left=False),
) | return an astroid.IfExp node as string |
def p_if_then_part(p):
if is_number(p[2]):
api.errmsg.warning_condition_is_always(p.lineno(1), bool(p[2].value))
p[0] = p[2] | if_then_part : IF expr then |
def validate_model_parameters(self, algo, training_frame, parameters, timeoutSecs=60, **kwargs):
        assert algo is not None, '"algo" is required'
        assert parameters is not None, '"parameters" is required'
        model_builders = self.model_builders(timeoutSecs=timeoutSecs)
        assert model_builders is not None, "/ModelBuilders REST call failed"
        assert algo in model_builders['model_builders']
        builder = model_builders['model_builders'][algo]
        # request line reconstructed; the endpoint path was lost in extraction
        result = self.do_json_request('/3/ModelBuilders/%s/parameters' % algo, cmd='post',
                                      timeout=timeoutSecs, postData=parameters, ignoreH2oError=True, noExtraErrorCheck=True)
verboseprint("model parameters validation: " + repr(result))
return result | Check a dictionary of model builder parameters on the h2o cluster
using the given algorithm and model parameters. |
def is_present(conf, atom):
if conf in SUPPORTED_CONFS:
if not isinstance(atom, portage.dep.Atom):
atom = portage.dep.Atom(atom, allow_wildcard=True)
        has_wildcard = '*' in atom
package_file = _get_config_file(conf, six.text_type(atom))
if has_wildcard:
match_list = set(atom)
else:
match_list = set(_porttree().dbapi.xmatch("match-all", atom))
try:
with salt.utils.files.fopen(package_file) as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line).strip()
line_package = line.split()[0]
if has_wildcard:
if line_package == six.text_type(atom):
return True
else:
line_list = _porttree().dbapi.xmatch("match-all", line_package)
if match_list.issubset(line_list):
return True
except IOError:
pass
return False | Tell if a given package or DEPEND atom is present in the configuration
files tree.
Warning: This only works if the configuration files tree is in the correct
format (the one enforced by enforce_nice_config)
CLI Example:
.. code-block:: bash
salt '*' portage_config.is_present unmask salt |
def _split_comment(lineno, comment):
return [(lineno + index, line) for index, line in
enumerate(comment.splitlines())] | Return the multiline comment at lineno split into a list of
comment line numbers and the accompanying comment line |
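For example:

```python
print(_split_comment(10, "first line\nsecond line"))
# [(10, 'first line'), (11, 'second line')]
```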
def get_colorscheme(self, scheme_file):
scheme = get_yaml_dict(scheme_file)
scheme_slug = builder.slugify(scheme_file)
builder.format_scheme(scheme, scheme_slug)
try:
            temp_base, temp_sub = self.temp.split('##')
        except ValueError:
            temp_base, temp_sub = (self.temp.strip(), 'default')
        temp_path = rel_to_cwd('templates', temp_base)
temp_group = builder.TemplateGroup(temp_path)
try:
single_temp = temp_group.templates[temp_sub]
except KeyError:
            raise FileNotFoundError(None,
                                    None,
                                    self.path + ' (sub-template not found)')
        colorscheme = pystache.render(single_temp['parsed'], scheme)
return colorscheme | Return a string object with the colorscheme that is to be
inserted. |
def in_network(scope, prefixes, destination, default_pfxlen=[24]):
needle = ipv4.ip2int(destination[0])
for prefix in prefixes:
network, pfxlen = ipv4.parse_prefix(prefix, default_pfxlen[0])
mask = ipv4.pfxlen2mask_int(pfxlen)
if needle & mask == ipv4.ip2int(network) & mask:
return [True]
return [False] | Returns True if the given destination is in the network range that is
defined by the given prefix (e.g. 10.0.0.1/22). If the given prefix
does not have a prefix length specified, the given default prefix length
is applied. If no such prefix length is given, the default length is
/24.
If a list of prefixes is passed, this function returns True only if
the given destination is in ANY of the given prefixes.
:type prefixes: string
:param prefixes: A prefix, or a list of IP prefixes.
:type destination: string
:param destination: An IP address.
:type default_pfxlen: int
:param default_pfxlen: The default prefix length.
:rtype: True
:return: Whether the given destination is in the given network. |
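The same check can be expressed with only the standard library's `ipaddress` module; a hedged equivalent sketch (not the `ipv4` helpers used above):

```python
import ipaddress

def in_network_stdlib(prefixes, destination, default_pfxlen=24):
    dest = ipaddress.ip_address(destination)
    for prefix in prefixes:
        if '/' not in prefix:
            prefix = '{}/{}'.format(prefix, default_pfxlen)
        if dest in ipaddress.ip_network(prefix, strict=False):
            return True
    return False

print(in_network_stdlib(['10.0.0.0/22'], '10.0.0.1'))  # True
print(in_network_stdlib(['10.0.4.0'], '10.0.4.200'))   # True, via the default /24
```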
def post_dissection(self, m):
s = self.tls_session
if s.client_kx_ffdh_params:
y = pkcs_os2ip(self.dh_Yc)
param_numbers = s.client_kx_ffdh_params.parameter_numbers()
public_numbers = dh.DHPublicNumbers(y, param_numbers)
s.client_kx_pubkey = public_numbers.public_key(default_backend())
if s.server_kx_privkey and s.client_kx_pubkey:
ZZ = s.server_kx_privkey.exchange(s.client_kx_pubkey)
s.pre_master_secret = ZZ
s.compute_ms_and_derive_keys() | First we update the client DHParams. Then, we try to update the server
DHParams generated during Server*DHParams building, with the shared
secret. Finally, we derive the session keys and update the context. |
def I(self):
    return list(set(range(self.nstates)) - set(self._A) - set(self._B)) | Returns the set of intermediate states |
def time_to_hhmmssmmm(time_value, decimal_separator="."):
if time_value is None:
time_value = 0
tmp = time_value
hours = int(math.floor(tmp / 3600))
tmp -= (hours * 3600)
minutes = int(math.floor(tmp / 60))
tmp -= minutes * 60
seconds = int(math.floor(tmp))
tmp -= seconds
milliseconds = int(math.floor(tmp * 1000))
return "%02d:%02d:%02d%s%03d" % (
hours,
minutes,
seconds,
decimal_separator,
milliseconds
) | Format the given time value into a ``HH:MM:SS.mmm`` string.
Examples: ::
12 => 00:00:12.000
12.345 => 00:00:12.345
12.345432 => 00:00:12.345
12.345678 => 00:00:12.346
83 => 00:01:23.000
83.456 => 00:01:23.456
83.456789 => 00:01:23.456
3600 => 01:00:00.000
3612.345 => 01:00:12.345
:param float time_value: a time value, in seconds
:param string decimal_separator: the decimal separator, default ``.``
:rtype: string |
def main(demo=False, aschild=False, targets=[]):
if aschild:
print("Starting pyblish-qml")
compat.main()
app = Application(APP_PATH, targets)
app.listen()
print("Done, don't forget to call `show()`")
return app.exec_()
else:
print("Starting pyblish-qml server..")
service = ipc.service.MockService() if demo else ipc.service.Service()
server = ipc.server.Server(service, targets=targets)
proxy = ipc.server.Proxy(server)
proxy.show(settings.to_dict())
server.listen()
server.wait() | Start the Qt-runtime and show the window
Arguments:
aschild (bool, optional): Run as child of parent process |
def list_servers(self, datacenter_id, depth=1):
        response = self._perform_request(
            '/datacenters/%s/servers?depth=%s' % (datacenter_id, str(depth)))
return response | Retrieves a list of all servers bound to the specified data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int`` |
def conv_ast_to_sym(self, math_ast):
if type(math_ast) is c_ast.ID:
return symbol_pos_int(math_ast.name)
elif type(math_ast) is c_ast.Constant:
return sympy.Integer(math_ast.value)
else:
            op = {
                '*': operator.mul,
                '+': operator.add,
                '-': operator.sub
            }
return op[math_ast.op](
self.conv_ast_to_sym(math_ast.left),
self.conv_ast_to_sym(math_ast.right)) | Convert mathematical expressions to a sympy representation.
May only contain paranthesis, addition, subtraction and multiplication from AST. |
def createSpatialAnchorFromDescriptor(self, pchDescriptor):
fn = self.function_table.createSpatialAnchorFromDescriptor
pHandleOut = SpatialAnchorHandle_t()
result = fn(pchDescriptor, byref(pHandleOut))
return result, pHandleOut | Returns a handle for an spatial anchor described by "descriptor". On success, pHandle
will contain a handle valid for this session. Caller can wait for an event or occasionally
poll GetSpatialAnchorPose() to find the virtual coordinate associated with this anchor. |
def setStyles(self, styleUpdatesDict):
setStyleMethod = self.setStyle
for newName, newValue in styleUpdatesDict.items():
setStyleMethod(newName, newValue)
return self.style | setStyles - Sets one or more style params.
This all happens in one shot, so it is much much faster than calling setStyle for every value.
To remove a style, set its value to empty string.
When all styles are removed, the "style" attribute will be nullified.
@param styleUpdatesDict - Dictionary of attribute : value styles.
@return - String of current value of "style" after change is made. |
def HardwareInput(uMsg: int, param: int = 0) -> INPUT:
return _CreateInput(HARDWAREINPUT(uMsg, param & 0xFFFF, param >> 16 & 0xFFFF)) | Create Win32 struct `HARDWAREINPUT` for `SendInput`. |
def dlafns(handle, descr):
assert isinstance(descr, stypes.SpiceDLADescr)
handle = ctypes.c_int(handle)
nxtdsc = stypes.SpiceDLADescr()
found = ctypes.c_int()
libspice.dlafns_c(handle, ctypes.byref(descr), ctypes.byref(nxtdsc), ctypes.byref(found))
return nxtdsc, bool(found.value) | Find the segment following a specified segment in a DLA file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dlafns_c.html
:param handle: Handle of open DLA file.
:type handle: c_int
:param descr: Descriptor of a DLA segment.
:type descr: spiceypy.utils.support_types.SpiceDLADescr
:return: Descriptor of next segment in DLA file
:rtype: spiceypy.utils.support_types.SpiceDLADescr |
def _get_modules_map(self, path=None):
paths = {}
root = ansible.modules.__path__[0]
if not path:
path = root
for p_el in os.listdir(path):
p_el_path = os.path.join(path, p_el)
if os.path.islink(p_el_path):
continue
if os.path.isdir(p_el_path):
paths.update(self._get_modules_map(p_el_path))
else:
                if (any(p_el.startswith(elm) for elm in ['_', '.']) or
                        not p_el.endswith('.py') or
                        p_el in ansible.constants.IGNORE_FILES):
                    continue
                p_el_path = p_el_path.replace(root, '').split('.')[0]
                als_name = p_el_path.replace('.', '').replace('/', '', 1).replace('/', '.')
paths[als_name] = p_el_path
return paths | Get installed Ansible modules
:return: |
def authorized_connect_apps(self):
if self._authorized_connect_apps is None:
self._authorized_connect_apps = AuthorizedConnectAppList(
self._version,
                account_sid=self._solution['account_sid'],
)
return self._authorized_connect_apps | Access the authorized_connect_apps
:returns: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppList
:rtype: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppList |
def get_time_slice(time, z, zdot=None, timeStart=None, timeEnd=None):
    if timeStart is None:
        timeStart = time[0]
    if timeEnd is None:
        timeEnd = time[-1]
StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]
time_sliced = time[StartIndex:EndIndex]
z_sliced = z[StartIndex:EndIndex]
    if zdot is not None:  # "!= None" would compare elementwise on an ndarray
zdot_sliced = zdot[StartIndex:EndIndex]
else:
zdot_sliced = None
return time_sliced, z_sliced, zdot_sliced | Get slice of time, z and (if provided) zdot from timeStart to timeEnd.
Parameters
----------
time : ndarray
array of time values
z : ndarray
array of z values
zdot : ndarray, optional
array of zdot (velocity) values.
timeStart : float, optional
time at which to start the slice.
Defaults to beginnging of time trace
timeEnd : float, optional
time at which to end the slide.
Defaults to end of time trace
Returns
-------
time_sliced : ndarray
array of time values from timeStart to timeEnd
z_sliced : ndarray
array of z values from timeStart to timeEnd
zdot_sliced : ndarray
array of zdot values from timeStart to timeEnd.
None if zdot not provided |
def parse_seq(tokens, options):
result = []
    while tokens.current() not in [None, ']', ')', '|']:
atom = parse_atom(tokens, options)
        if tokens.current() == '...':
atom = [OneOrMore(*atom)]
tokens.move()
result += atom
return result | seq ::= ( atom [ '...' ] )* ; |
def load_raw_arrays(self, columns, start_date, end_date, assets):
rolls_by_asset = {}
tc = self.trading_calendar
start_session = tc.minute_to_session_label(start_date)
end_session = tc.minute_to_session_label(end_date)
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol,
start_session,
end_session, asset.offset)
sessions = tc.sessions_in_range(start_date, end_date)
minutes = tc.minutes_in_range(start_date, end_date)
num_minutes = len(minutes)
shape = num_minutes, len(assets)
results = []
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = minutes.searchsorted(start)
if roll_date is not None:
_, end = tc.open_and_close_for_session(
roll_date - sessions.freq)
end_loc = minutes.searchsorted(end)
else:
end = end_date
end_loc = len(minutes) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll[-1] is not None:
start, _ = tc.open_and_close_for_session(
tc.minute_to_session_label(minutes[end_loc + 1]))
for column in columns:
            if column != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
                    if column != 'sid':
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid])[0][:, 0]
else:
result = int(sid)
out[start_loc:end_loc + 1, i] = result
results.append(out)
return results | Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range. |
def process_metric(self, message, **kwargs):
self.store_labels(message)
if message.name in self.ignore_metrics:
return
self.join_labels(message)
        send_histograms_buckets = kwargs.get('send_histograms_buckets', True)
        send_monotonic_counter = kwargs.get('send_monotonic_counter', False)
        custom_tags = kwargs.get('custom_tags')
        ignore_unmapped = kwargs.get('ignore_unmapped', False)
try:
if not self._dry_run:
try:
self._submit(
self.metrics_mapper[message.name],
message,
send_histograms_buckets,
send_monotonic_counter,
custom_tags,
)
except KeyError:
if not ignore_unmapped:
handler = getattr(self, message.name)
try:
handler(message, **kwargs)
except Exception as err:
self.log.warning("Error handling metric: {} - error: {}".format(message.name, err))
else:
if self._metrics_wildcards is None:
                    self._metrics_wildcards = [x for x in self.metrics_mapper.keys() if '*' in x]
for wildcard in self._metrics_wildcards:
if fnmatchcase(message.name, wildcard):
self._submit(
message.name, message, send_histograms_buckets, send_monotonic_counter, custom_tags
)
except AttributeError as err:
self.log.debug("Unable to handle metric: {} - error: {}".format(message.name, err)) | Handle a prometheus metric message according to the following flow:
- search self.metrics_mapper for a prometheus.metric <--> datadog.metric mapping
- call check method with the same name as the metric
- log some info if none of the above worked
`send_histograms_buckets` is used to specify whether you want to send
the buckets as tagged values when dealing with histograms. |
def toggle(self, *args):
self._stop_blink()
if args:
for index in args:
self[index].toggle()
else:
super(LEDBoard, self).toggle() | If no arguments are specified, toggle the state of all LEDs. If
arguments are specified, they must be the indexes of the LEDs you wish
to toggle. For example::
from gpiozero import LEDBoard
leds = LEDBoard(2, 3, 4, 5)
leds.toggle(0) # turn on the first LED (pin 2)
leds.toggle(-1) # turn on the last LED (pin 5)
leds.toggle() # turn the first and last LED off, and the
# middle pair on
If :meth:`blink` is currently active, it will be stopped first.
:param int args:
The index(es) of the LED(s) to toggle. If no indexes are specified
toggle the state of all LEDs. |
def get_volume_by_name(self, name):
for vol in self.conn.volumes:
if vol.name == name:
return vol
raise KeyError("Volume with NAME " + name + " not found") | Get ScaleIO Volume object by its Name
:param name: Name of volume
:return: ScaleIO Volume object
:raise KeyError: No Volume with specified name found
:rtype: ScaleIO Volume object |
def get_item(identifier,
config=None,
config_file=None,
archive_session=None,
debug=None,
http_adapter_kwargs=None,
request_kwargs=None):
if not archive_session:
archive_session = get_session(config, config_file, debug, http_adapter_kwargs)
return archive_session.get_item(identifier, request_kwargs=request_kwargs) | Get an :class:`Item` object.
:type identifier: str
:param identifier: The globally unique Archive.org item identifier.
:type config: dict
:param config: (optional) A dictionary used to configure your session.
:type config_file: str
:param config_file: (optional) A path to a config file used to configure your session.
:type archive_session: :class:`ArchiveSession`
:param archive_session: (optional) An :class:`ArchiveSession` object can be provided
via the ``archive_session`` parameter.
:type http_adapter_kwargs: dict
:param http_adapter_kwargs: (optional) Keyword arguments that
:py:class:`requests.adapters.HTTPAdapter` takes.
:type request_kwargs: dict
:param request_kwargs: (optional) Keyword arguments that
:py:class:`requests.Request` takes.
Usage:
>>> from internetarchive import get_item
>>> item = get_item('nasa')
>>> item.item_size
121084 |
def _parse(self, str):
str = replace_entities(str)
str = strip_tags(str)
str = collapse_spaces(str)
return str | Parses the text data from an XML element defined by tag. |
def build_instruction_coverage_plugin() -> LaserPlugin:
from mythril.laser.ethereum.plugins.implementations.coverage import (
InstructionCoveragePlugin,
)
return InstructionCoveragePlugin() | Creates an instance of the instruction coverage plugin |
def import_entries(self, items):
        self.write_out(self.style.STEP('- Importing entries\n'))
        for item_node in items:
            title = (item_node.find('title').text or '')[:255]
            post_type = item_node.find('{%s}post_type' % WP_NS).text
            content = item_node.find(
                '{http://purl.org/rss/1.0/modules/content/}encoded').text
            if post_type == 'post' and content and title:
                self.write_out('> %s... ' % title)
                entry, created = self.import_entry(title, content, item_node)
                if created:
                    self.write_out(self.style.ITEM('OK\n'))
                    image_id = self.find_image_id(
                        item_node.findall('{%s}postmeta' % WP_NS))
                    if image_id:
                        self.import_image(entry, items, image_id)
                    self.import_comments(entry, item_node.findall(
                        '{%s}comment' % WP_NS))
                else:
                    self.write_out(self.style.NOTICE(
                        'SKIPPED (already imported)\n'))
            else:
                self.write_out('> %s... ' % title, 2)
                self.write_out(self.style.NOTICE('SKIPPED (not a post)\n'), 2) | Loops over items and finds entries to import;
an entry needs to have 'post_type' set to 'post' and
have content. |
    def mobile_sign(self, id_code, country, phone_nr, language=None, signing_profile='LT_TM'):
        if not (self.container and isinstance(self.container, PreviouslyCreatedContainer)):
            assert self.data_files, 'Must have at least one data file in the session'
        # parameter names follow the DigiDocService MobileSign spec; the
        # original string literals were lost in extraction
        response = self.__invoke('MobileSign', {
            'SignerIDCode': id_code,
            'SignersCountry': country,
            'SignerPhoneNo': phone_nr,
            'Language': self.parse_language(language),
            'Role': SkipValue,
            'City': SkipValue,
            'StateOrProvince': SkipValue,
            'PostalCode': SkipValue,
            'CountryName': SkipValue,
            'ServiceName': self.service_name,
            'AdditionalDataToBeDisplayed': self.mobile_message,
            'SigningProfile': signing_profile,
            'MessagingMode': 'asynchClientServer',
            'AsyncConfiguration': SkipValue,
            'ReturnDocInfo': SkipValue,
            'ReturnDocData': SkipValue,
        }) | This can be used to add a signature to existing data files
return response | This can be used to add a signature to existing data files
WARNING: Must have at least one datafile in the session |
def __display_header(self, stat_display):
self.new_line()
self.space_between_column = 0
l_uptime = (self.get_stats_display_width(stat_display["system"]) +
self.get_stats_display_width(stat_display["ip"]) +
self.get_stats_display_width(stat_display["uptime"]) + 1)
self.display_plugin(
stat_display["system"],
display_optional=(self.screen.getmaxyx()[1] >= l_uptime))
self.space_between_column = 3
self.new_column()
self.display_plugin(stat_display["ip"])
self.new_column()
self.display_plugin(
stat_display["uptime"],
add_space=-(self.get_stats_display_width(stat_display["cloud"]) != 0))
self.init_column()
self.new_line()
self.display_plugin(stat_display["cloud"]) | Display the firsts lines (header) in the Curses interface.
system + ip + uptime
(cloud) |
def _step4func(self, samples, force, ipyclient):
if self._headers:
print("\n Step 4: Joint estimation of error rate and heterozygosity")
samples = _get_samples(self, samples)
if not self._samples_precheck(samples, 4, force):
raise IPyradError(FIRST_RUN_3)
elif not force:
if all([i.stats.state >= 4 for i in samples]):
print(JOINTS_EXIST.format(len(samples)))
return
assemble.jointestimate.run(self, samples, force, ipyclient) | hidden wrapped function to start step 4 |
def _ppf(self, q, left, right, cache):
left = evaluation.get_inverse_cache(left, cache)
right = evaluation.get_inverse_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise StochasticallyDependentError(
"under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
return left**right
else:
out = evaluation.evaluate_inverse(right, q, cache=cache)
out = numpy.where(left < 0, 1-out, out)
out = left**out
return out
right = right + numpy.zeros(q.shape)
q = numpy.where(right < 0, 1-q, q)
out = evaluation.evaluate_inverse(left, q, cache=cache)**right
return out | Point percentile function.
Example:
>>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9]))
[0.1 0.2 0.9]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9]))
[0.01 0.04 0.81]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).inv([0.1, 0.2, 0.9]))
[0.52631579 0.55555556 0.90909091]
>>> print(chaospy.Pow(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9]))
[1.07177346 1.14869835 1.86606598]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).inv([0.1, 0.2, 0.9]))
[0.53588673 0.57434918 0.93303299]
>>> print(chaospy.Pow(2, 3).inv([0.1, 0.2, 0.9]))
[8. 8. 8.] |
def decode(self, data):
return Zchunk(lib.zarmour_decode(self._as_parameter_, data), True) | Decode an armoured string into a chunk. The decoded output is
null-terminated, so it may be treated as a string, if that's what
it was prior to encoding. |
def inject_basic_program(self, ascii_listing):
program_start = self.cpu.memory.read_word(
self.machine_api.PROGRAM_START_ADDR
)
tokens = self.machine_api.ascii_listing2program_dump(ascii_listing)
self.cpu.memory.load(program_start, tokens)
log.critical("BASIC program injected into Memory.")
program_end = program_start + len(tokens)
self.cpu.memory.write_word(self.machine_api.VARIABLES_START_ADDR, program_end)
self.cpu.memory.write_word(self.machine_api.ARRAY_START_ADDR, program_end)
self.cpu.memory.write_word(self.machine_api.FREE_SPACE_START_ADDR, program_end)
log.critical("BASIC addresses updated.") | save the given ASCII BASIC program listing into the emulator RAM. |
def createmergerequest(self, project_id, sourcebranch, targetbranch,
title, target_project_id=None, assignee_id=None):
data = {
            'source_branch': sourcebranch,
            'target_branch': targetbranch,
            'title': title,
            'assignee_id': assignee_id,
            'target_project_id': target_project_id
        }
        request = requests.post(
            '{0}/{1}/merge_requests'.format(self.projects_url, project_id),
data=data, headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 201:
return request.json()
else:
return False | Create a new merge request.
:param project_id: ID of the project originating the merge request
:param sourcebranch: name of the branch to merge from
:param targetbranch: name of the branch to merge to
:param title: Title of the merge request
:param assignee_id: Assignee user ID
:return: dict of the new merge request |
def pre_save(cls, sender, instance, *args, **kwargs):
        instance.constant_contact_id = str(instance.data['id']) | Pull constant_contact_id out of data. |
def func_frame(function_index, function_name=None):
frm = inspect.currentframe()
if function_name is not None:
function_name = function_name.split()[0]
for i in range(1000):
if frm.f_code.co_name.startswith(function_name):
break
frm = frm.f_back
else:
for i in range(function_index):
frm = frm.f_back
try:
        if frm.f_code.co_name == 'func_frame':  # skip this helper's own frame; name assumed, literal elided in source
frm = frm.f_back
except:
pass
return frm | This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return frame: this will return the frame of the calling function |
def _add_indent(self, val, indent_count):
if isinstance(val, Value):
val = val.string_value()
return String(val).indent(indent_count) | add whitespace to the beginning of each line of val
link -- http://code.activestate.com/recipes/66055-changing-the-indentation-of-a-multi-line-string/
val -- string
indent -- integer -- how much whitespace we want in front of each line of val
return -- string -- val with more whitespace |
async def _notify_event_internal(self, conn_string, name, event):
try:
self._currently_notifying = True
conn_id = self._get_conn_id(conn_string)
event_maps = self._monitors.get(conn_string, {})
wildcard_maps = self._monitors.get(None, {})
wildcard_handlers = wildcard_maps.get(name, {})
event_handlers = event_maps.get(name, {})
for handler, func in itertools.chain(event_handlers.items(), wildcard_handlers.items()):
try:
result = func(conn_string, conn_id, name, event)
if inspect.isawaitable(result):
await result
except:
self._logger.warning("Error calling notification callback id=%s, func=%s", handler, func, exc_info=True)
finally:
for action in self._deferred_adjustments:
self._adjust_monitor_internal(*action)
self._deferred_adjustments = []
self._currently_notifying = False | Notify that an event has occured.
This method will send a notification and ensure that all callbacks
registered for it have completed by the time it returns. In
particular, if the callbacks are awaitable, this method will await
them before returning. The order in which the callbacks are called
is undefined.
This is a low level method that is not intended to be called directly.
You should use the high level public notify_* methods for each of the
types of events to ensure consistency in how the event objects are
created.
Args:
conn_string (str): The connection string for the device that the
event is associated with.
name (str): The name of the event. Must be in SUPPORTED_EVENTS.
event (object): The event object. The type of this object will
depend on what is being notified. |
def parse_pdb(self):
if self.as_string:
            fil = self.pdbpath.rstrip().split('\n')
else:
f = read(self.pdbpath)
fil = f.readlines()
f.close()
corrected_lines = []
i, j = 0, 0
d = {}
modres = set()
covalent = []
alt = []
previous_ter = False
if not config.NOFIX:
if not config.PLUGIN_MODE:
lastnum = 0
other_models = False
for line in fil:
if not other_models:
corrected_line, newnum = self.fix_pdbline(line, lastnum)
if corrected_line is not None:
                        if corrected_line.startswith('MODEL'):
try:
model_num = int(corrected_line[10:14])
if model_num > 1:
other_models = True
except ValueError:
write_message("Ignoring invalid MODEL entry: %s\n" % corrected_line, mtype=)
corrected_lines.append(corrected_line)
lastnum = newnum
                corrected_pdb = ''.join(corrected_lines)
else:
corrected_pdb = self.pdbpath
corrected_lines = fil
else:
corrected_pdb = self.pdbpath
corrected_lines = fil
for line in corrected_lines:
if line.startswith(("ATOM", "HETATM")):
atomid, location = int(line[6:11]), line[16]
                location = 'A' if location == ' ' else location
                if location != 'A':
alt.append(atomid)
if not previous_ter:
i += 1
j += 1
else:
i += 1
j += 2
d[i] = j
previous_ter = False
if line.startswith("TER"):
previous_ter = True
if line.startswith("MODRES"):
modres.add(line[12:15].strip())
if line.startswith("LINK"):
covalent.append(self.get_linkage(line))
return d, modres, covalent, alt, corrected_pdb | Extracts additional information from PDB files.
I. When reading in a PDB file, OpenBabel numbers ATOMS and HETATOMS continuously.
In PDB files, TER records are also counted, leading to a different numbering system.
This functions reads in a PDB file and provides a mapping as a dictionary.
II. Additionally, it returns a list of modified residues.
III. Furthermore, covalent linkages between ligands and protein residues/other ligands are identified
IV. Alternative conformations |
def set_data(self, data_np, metadata=None, order=None, astype=None):
if astype:
data = data_np.astype(astype, copy=False)
else:
data = data_np
self._data = data
self._calc_order(order)
if metadata:
self.update_metadata(metadata)
self._set_minmax()
self.make_callback() | Use this method to SHARE (not copy) the incoming array. |
def sfs(dac, n=None):
dac, n = _check_dac_n(dac, n)
dac = dac.astype(int, copy=False)
x = n + 1
s = np.bincount(dac, minlength=x)
return s | Compute the site frequency spectrum given derived allele counts at
a set of biallelic variants.
Parameters
----------
dac : array_like, int, shape (n_variants,)
Array of derived allele counts.
n : int, optional
The total number of chromosomes called.
Returns
-------
sfs : ndarray, int, shape (n_chromosomes,)
Array where the kth element is the number of variant sites with k
derived alleles. |
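A small worked example (assuming the module's `_check_dac_n` validation helper is available): with four chromosomes sampled, six variants with derived counts `[1, 1, 2, 0, 3, 1]` give one monomorphic site, three singletons, one doubleton, and one tripleton:

```python
import numpy as np

dac = np.array([1, 1, 2, 0, 3, 1])
print(sfs(dac, n=4))  # [1 3 1 1 0]
```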
def load_metadata_from_desc_file(self, desc_file, partition='train',
max_duration=16.0,):
logger = logUtil.getlogger()
        logger.info('Reading description file: {} for partition: {}'
                    .format(desc_file, partition))
audio_paths, durations, texts = [], [], []
with open(desc_file) as json_line_file:
for line_num, json_line in enumerate(json_line_file):
try:
spec = json.loads(json_line)
                    if float(spec['duration']) > max_duration:
                        continue
                    audio_paths.append(spec['key'])
                    durations.append(float(spec['duration']))
                    texts.append(spec['text'])
                except Exception as e:
                    logger.warn('Error reading line #{}: {}'
                                .format(line_num, json_line))
logger.warn(str(e))
        if partition == 'train':
self.count = len(audio_paths)
self.train_audio_paths = audio_paths
self.train_durations = durations
self.train_texts = texts
        elif partition == 'validation':
self.val_audio_paths = audio_paths
self.val_durations = durations
self.val_texts = texts
self.val_count = len(audio_paths)
        elif partition == 'test':
self.test_audio_paths = audio_paths
self.test_durations = durations
self.test_texts = texts
else:
raise Exception("Invalid partition to load metadata. "
"Must be train/validation/test") | Read metadata from the description file
(possibly takes long, depending on the filesize)
Params:
desc_file (str): Path to a JSON-line file that contains labels and
paths to the audio files
partition (str): One of 'train', 'validation' or 'test'
max_duration (float): In seconds, the maximum duration of
utterances to train or test on |
def translate(self, body, params=None):
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument .")
return self.transport.perform_request(
"POST", "/_sql/translate", params=params, body=body
) | `<Translate SQL into Elasticsearch queries>`_
:arg body: Specify the query in the `query` element. |
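A usage sketch with the high-level client (assuming an `elasticsearch-py` version where this method is exposed as `client.sql.translate` and a reachable cluster):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")
print(client.sql.translate(body={"query": "SELECT name FROM my_index LIMIT 5"}))
# -> the equivalent Elasticsearch query DSL as a dict
```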
def copy(self):
templates = self.prepare_templates()
if self.params.interactive:
keys = list(self.parser.default)
for key in keys:
                if key.startswith('_'):
continue
prompt = "{0} (default is \"{1}\")? ".format(
key, self.parser.default[key])
if _compat.PY2:
                    value = raw_input(prompt.encode('utf-8')).decode('utf-8')
                else:
                    value = input(prompt)
value = value.strip()
if value:
self.parser.default[key] = value
        self.parser.default['template'] = tt = ' '.join(  # key assumed; literal elided in source
            t.name for t in templates)
logging.warning("Paste templates: {0}".format(tt))
self.make_directory(self.params.TARGET)
logging.debug("\nDefault context:\n----------------")
        logging.debug(
            '\n'.join('{0}: {1}'.format(*v)
                      for v in self.parser.default.items())
        )
return [t.paste(
**dict(self.parser.default.items())) for t in templates] | Prepare and paste self templates. |
def server_call(method, server, timeout=DEFAULT_TIMEOUT, verify_ssl=True, **parameters):
if method is None:
raise Exception("A method name must be specified")
if server is None:
raise Exception("A server (eg. my3.geotab.com) must be specified")
parameters = process_parameters(parameters)
return _query(server, method, parameters, timeout=timeout, verify_ssl=verify_ssl) | Makes a call to an un-authenticated method on a server
:param method: The method name.
:type method: str
:param server: The MyGeotab server.
:type server: str
:param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes).
:type timeout: float
:param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this.
:type verify_ssl: bool
:param parameters: Additional parameters to send (for example, search=dict(id='b123') ).
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
:return: The result from the server. |