code stringlengths 26 79.6k | docstring stringlengths 1 46.9k |
|---|---|
def accumulate(self, buf):
    """Add in some more bytes: fold each byte of *buf* into the running CRC.

    Implements the X.25-style bytewise CRC-16 update (as used by MAVLink):
    each byte is XORed into the low byte of the accumulator and mixed.

    :param buf: iterable of ints in 0..255 (e.g. a ``bytes`` object)
    """
    accum = self.crc
    for b in buf:
        tmp = b ^ (accum & 0xFF)
        tmp = (tmp ^ (tmp << 4)) & 0xFF
        # Standard CRC-16/X.25 mixing step; the result stays within 16 bits.
        accum = (accum >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)
    self.crc = accum
def get_instance(self, payload):
    """Build an instance of SyncMapItemInstance.

    :param dict payload: Payload response from the API
    :returns: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemInstance
    :rtype: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemInstance
    """
    # NOTE(review): the solution keys were lost in extraction; 'service_sid'
    # and 'map_sid' follow the standard Twilio Page pattern -- confirm upstream.
    return SyncMapItemInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
        map_sid=self._solution['map_sid'],
    )
def _finalize_arguments(self, args):
    """Derive standard args from our weird ones.

    Widens ``args.search`` so it always covers twice the largest plot
    window plus padding, then delegates to the parent class while
    preserving the caller-supplied x-limits.

    :type args: Namespace with command line arguments
    """
    max_plot = max(args.plot)
    # The search window must cover the largest plot span plus padding.
    args.search = max(args.search, max_plot * 2 + 8)
    self.log(3, "Search window: {0:.0f} sec, max plot window {1:.0f}".
             format(args.search, max_plot))
    # The parent implementation may clobber the x-limits; save and restore.
    xmin = args.xmin
    xmax = args.xmax
    super(Qtransform, self)._finalize_arguments(args)
    args.xmin = xmin
    args.xmax = xmax
def find_prime_polynomials(generator=2, c_exp=8, fast_primes=False, single=False):
    """Compute the list of prime polynomials for the given generator and
    Galois field characteristic exponent.

    NOTE(review): the body is truncated -- the search loop that defines
    ``conflict``, ``prim`` and ``correct_primes`` is missing, so this
    cannot run as-is; restore from the upstream source.
    """
    root_charac = 2  # characteristic of the base field GF(2)
    if not conflict:
        correct_primes.append(prim)
        if single: return prim
    return correct_primes
def get_outputs(self, input_value):
    """Generate a set of output values for a given input.

    Converts *input_value* once and maps every configured output name
    to that converted value.
    """
    converted = self.convert_to_xmlrpc(input_value)
    return {name: converted for name in self.output_names}
def _run_vagrant_command(self, args):
    """Run a vagrant command and return its stdout.

    args: A sequence of arguments to a vagrant command line.
        e.g. ['up', 'my_vm_name', '--no-provision'] or
        ['up', None, '--no-provision'] for a non-Multi-VM environment.
    """
    command = self._make_vagrant_command(args)
    # Capture stderr into the managed error-log file; decode stdout bytes.
    with self.err_cm() as err_fh:
        return compat.decode(subprocess.check_output(command, cwd=self.root,
                                                     env=self.env,
                                                     stderr=err_fh))
def to_coverage(ctx):
    """Produce a .coverage file from a smother file."""
    # NOTE(review): the key into ctx.obj was lost in extraction (likely the
    # smother report path stored by the CLI group) -- restore before use.
    sm = Smother.load(ctx.obj[])
    sm.coverage = coverage.coverage()
    sm.write_coverage()
def not_right(self, num):
    """WITH SLICES BEING FLAT, WE NEED A SIMPLE WAY TO SLICE FROM THE
    LEFT [:-num:] -- return everything except the right-most *num* elements.
    """
    if num is None:
        # Historical behaviour: None drops a single trailing element
        # (note the extra list wrapper, preserved from the original).
        return FlatList([_get_list(self)[:-1:]])
    if num <= 0:
        return FlatList.EMPTY
    return FlatList(_get_list(self)[:-num:])
def _call(self, name, soapheaders):
    """Return the Call to the named remote web service method.

    The closure is used to prevent multiple values for the ``name`` and
    ``soapheaders`` parameters.

    NOTE(review): Python 2 code, and many string literals were lost in
    extraction (bare ``raise TypeError,``, empty ``==``/``[]``/``%``
    operands); it cannot run as-is -- restore from the upstream (ZSI-style)
    source before use.
    """
    def call_closure(*args, **kwargs):
        # Positional and keyword arguments are mutually exclusive here.
        if len(args) and len(kwargs):
            raise TypeError,
        if len(args) > 0:
            raise TypeError,
        if len(kwargs):
            args = kwargs
        callinfo = getattr(self, name).callinfo
        # Pick the overload whose input arity matches the supplied kwargs.
        for method in self._methods[name]:
            if len(method.callinfo.inparams) == len(kwargs):
                callinfo = method.callinfo
        binding = _Binding(url=self._url or callinfo.location,
                           soapaction=callinfo.soapAction,
                           **self._kw)
        kw = dict(unique=True)
        if callinfo.use == :
            kw[] = False
        if callinfo.style == :
            # rpc-style: build typecode Structs for request and response.
            request = TC.Struct(None, ofwhat=[],
                                pname=(callinfo.namespace, name), **kw)
            response = TC.Struct(None, ofwhat=[],
                                 pname=(callinfo.namespace, name+"Response"), **kw)
            if len(callinfo.getInParameters()) != len(args):
                raise RuntimeError( %(
                    str(callinfo.getInParameters(), str(args))))
            for msg,pms in ((request,callinfo.getInParameters()),
                            (response,callinfo.getOutParameters())):
                msg.ofwhat = []
                for part in pms:
                    klass = GTD(*part.type)
                    if klass is None:
                        if part.type:
                            # Fall back to matching against known typecodes.
                            klass = filter(lambda gt: part.type==gt.type,TC.TYPES)
                            if len(klass) == 0:
                                klass = filter(lambda gt: part.type[1]==gt.type[1],TC.TYPES)
                                if not len(klass):klass = [TC.Any]
                            if len(klass) > 1:
                                klass = filter(lambda i: i.__dict__.has_key(), klass)
                            klass = klass[0]
                        else:
                            klass = TC.Any
                    msg.ofwhat.append(klass(part.name))
                msg.ofwhat = tuple(msg.ofwhat)
            if not args: args = {}
        else:
            # document-style: exactly one element-typed input part expected.
            ipart,opart = callinfo.getInParameters(),callinfo.getOutParameters()
            if ( len(ipart) != 1 or not ipart[0].element_type or
                 ipart[0].type is None ):
                raise RuntimeError, %callinfo.name
            if ( len(opart) not in (0,1) or not opart[0].element_type or
                 opart[0].type is None ):
                raise RuntimeError, %callinfo.name
            ipart = ipart[0]
            request,response = GED(*ipart.type),None
            if opart: response = GED(*opart[0].type)
            msg = args
            if self._asdict:
                if not msg: msg = dict()
                self._nullpyclass(request)
            elif request.pyclass is not None:
                if type(args) is dict:
                    msg = request.pyclass()
                    msg.__dict__.update(args)
                elif type(args) is list and len(args) == 1:
                    msg = request.pyclass(args[0])
                else:
                    msg = request.pyclass()
        binding.Send(None, None, msg,
                     requesttypecode=request,
                     soapheaders=soapheaders,
                     encodingStyle=callinfo.encodingStyle)
        if response is None:
            return None
        if self._asdict: self._nullpyclass(response)
        return binding.Receive(replytype=response,
                               encodingStyle=callinfo.encodingStyle)
    return call_closure
def Arrow(startPoint, endPoint, s=None, c="r", alpha=1, res=12):
    """Build a 3D arrow from `startPoint` to `endPoint` of section size `s`,
    expressed as the fraction of the window size.

    .. note:: If ``s=None`` the arrow is scaled proportionally to its
              length, otherwise it represents the fraction of the window
              size.

    |OrientedArrow|
    """
    axis = np.array(endPoint) - np.array(startPoint)
    length = np.linalg.norm(axis)
    if length:
        axis = axis / length  # normalize the direction vector
    # Spherical angles of the axis, used to orient the unit VTK arrow.
    theta = np.arccos(axis[2])
    phi = np.arctan2(axis[1], axis[0])
    arr = vtk.vtkArrowSource()
    arr.SetShaftResolution(res)
    arr.SetTipResolution(res)
    if s:
        sz = 0.02
        arr.SetTipRadius(sz)
        arr.SetShaftRadius(sz / 1.75)
        arr.SetTipLength(sz * 15)
    arr.Update()
    t = vtk.vtkTransform()
    # 57.3 ~ degrees per radian; VTK rotations take degrees.
    t.RotateZ(phi * 57.3)
    t.RotateY(theta * 57.3)
    t.RotateY(-90)
    if s:
        sz = 800.0 * s
        t.Scale(length, sz, sz)
    else:
        t.Scale(length, length, length)
    tf = vtk.vtkTransformPolyDataFilter()
    tf.SetInputData(arr.GetOutput())
    tf.SetTransform(t)
    tf.Update()
    actor = Actor(tf.GetOutput(), c, alpha)
    actor.GetProperty().SetInterpolationToPhong()
    actor.SetPosition(startPoint)
    actor.DragableOff()
    actor.PickableOff()
    # Remember the endpoints for downstream orientation queries.
    actor.base = np.array(startPoint)
    actor.top = np.array(endPoint)
    settings.collectable_actors.append(actor)
    return actor
def send_content(
    self,
    http_code,
    content,
    mime_type="text/html",
    http_message=None,
    content_length=-1,
):
    """Utility method to send the given content as an answer.

    You can still use get_wfile or write afterwards, if you forced the
    content length.

    If content_length is negative (default), it is computed as the length
    of the content; if positive, the given value is used; if None, the
    content-length header is not sent at all.

    :param http_code: HTTP result code
    :param content: Data to be sent (must be a string)
    :param mime_type: Content MIME type (content-type)
    :param http_message: HTTP code description
    :param content_length: Forced content length
    """
    self.set_response(http_code, http_message)
    if mime_type and not self.is_header_set("content-type"):
        self.set_header("content-type", mime_type)
    raw_content = to_bytes(content)
    # content_length=None means "do not emit the header at all".
    if content_length is not None and not self.is_header_set("content-length"):
        length = content_length if content_length >= 0 else len(raw_content)
        self.set_header("content-length", length)
    self.end_headers()
    self.write(raw_content)
def QA_util_get_real_date(date, trade_list=trade_date_sse, towards=-1):
    """Walk *date* to the nearest real trading day.

    towards=1 steps forward (later) to the next trading day;
    towards=-1 steps backward (earlier) to the previous trading day.

    :param date: date-like; only the first 10 chars ('YYYY-MM-DD') are used
    :param trade_list: list of trading-day strings (defaults to the SSE
        calendar bound at definition time)
    :param towards: +1 search forward, -1 search backward
    :returns: str 'YYYY-MM-DD' of the nearest trading day
    """
    date = str(date)[0:10]
    if towards == 1:
        while date not in trade_list:
            # Step one calendar day forward until we land on a trading day.
            date = str(
                datetime.datetime.strptime(str(date)[0:10], '%Y-%m-%d') +
                datetime.timedelta(days=1)
            )[0:10]
        else:
            # while/else: runs once the loop exits normally.
            return str(date)[0:10]
    elif towards == -1:
        while date not in trade_list:
            date = str(
                datetime.datetime.strptime(str(date)[0:10], '%Y-%m-%d') -
                datetime.timedelta(days=1)
            )[0:10]
        else:
            return str(date)[0:10]
def geom_check_axis(g, atwts, ax,
                    nmax=_DEF.SYMM_MATCH_NMAX,
                    tol=_DEF.SYMM_MATCH_TOL):
    """Get the max proper rotation order and reflection flag for an axis.

    .. todo:: Complete geom_check_axis docstring
    """
    import numpy as np
    # Highest proper rotational symmetry order found about ``ax``.
    order = geom_find_rotsymm(g, atwts, ax, False, nmax, tol)[0]
    # A reflection exists when the mirrored geometry matches within ``tol``.
    has_refl = geom_symm_match(g, atwts, ax, 0, True) < tol
    return order, has_refl
def _get_fwl_port_speed(self, server_id, is_virt=True):
    """Determines the appropriate speed for a firewall.

    :param int server_id: The ID of server the firewall is for
    :param bool is_virt: True if the server_id is for a virtual server
    :returns: a integer representing the Mbps speed of a firewall

    NOTE(review): object masks, service names and dict keys were lost in
    extraction (empty ``()``/``[]`` operands and ``if in`` tests);
    restore from the SoftLayer API before use.
    """
    fwl_port_speed = 0
    if is_virt:
        mask = ()
        svc = self.client[]
        primary = svc.getObject(mask=mask, id=server_id)
        fwl_port_speed = primary[][]
    else:
        mask = ()
        svc = self.client[]
        network_components = svc.getFrontendNetworkComponents(
            mask=mask, id=server_id)
        grouped = [interface[][]
                   for interface in network_components
                   if in interface]
        ungrouped = [interface
                     for interface in network_components
                     if not in interface]
        # Aggregate speed per port group; the firewall gets the max of
        # the best group and the best ungrouped interface.
        group_speeds = []
        for group in grouped:
            group_speed = 0
            for interface in group:
                group_speed += interface[]
            group_speeds.append(group_speed)
        max_grouped_speed = max(group_speeds)
        max_ungrouped = 0
        for interface in ungrouped:
            max_ungrouped = max(max_ungrouped, interface[])
        fwl_port_speed = max(max_grouped_speed, max_ungrouped)
    return fwl_port_speed
def parse_domains(self, domain, params):
    """Parse a single Route53Domains domain.

    NOTE(review): the dictionary keys were lost in extraction (likely the
    AWS 'DomainName' field being renamed) -- restore before use.
    """
    domain_id = self.get_non_aws_id(domain[])
    domain[] = domain.pop()
    self.domains[domain_id] = domain
def _get_field_type(self, key, value):
    """Helper to create a field object based on the runtime type of *value*.

    The isinstance checks are ordered (bool before int, mappings before
    BaseModel) so subclasses resolve to the most specific field type.
    Returns None for empty containers and for None values; raises
    TypeError for unsupported types.
    """
    if isinstance(value, bool):
        return BooleanField(name=key)
    if isinstance(value, int):
        return IntegerField(name=key)
    if isinstance(value, float):
        return FloatField(name=key)
    if isinstance(value, str):
        return StringField(name=key)
    if isinstance(value, time):
        return TimeField(name=key)
    if isinstance(value, datetime):
        return DateTimeField(name=key)
    if isinstance(value, date):
        return DateField(name=key)
    if isinstance(value, timedelta):
        return TimedeltaField(name=key)
    if isinstance(value, Enum):
        return EnumField(name=key, enum_class=type(value))
    if isinstance(value, (dict, BaseDynamicModel, Mapping)):
        return ModelField(name=key, model_class=self.__dynamic_model__ or self.__class__)
    if isinstance(value, BaseModel):
        return ModelField(name=key, model_class=value.__class__)
    if isinstance(value, (list, set, ListModel)):
        if not len(value):
            return None
        # Infer the element field type from the first element.
        item_field = self._get_field_type(None, value[0])
        return ArrayField(name=key, field_type=item_field)
    if value is None:
        return None
    raise TypeError("Invalid parameter: %s. Type not supported." % (key,))
def get_members(self, **query_params):
    """Get all members attached to this organisation.

    Returns:
        list(Member): The members attached to this organisation
    """
    raw_members = self.get_members_json(self.base_uri,
                                        query_params=query_params)
    # Wrap each raw JSON entry in a Member object.
    return [self.create_member(member_json) for member_json in raw_members]
def nlp(self, inputString, sourceTime=None, version=None):
    """Utilizes parse() after making judgements about what datetime
    information belongs together.

    It makes logical groupings based on proximity and returns a parsed
    datetime for each matched grouping of datetime text, along with
    location info within the given inputString.

    @type  inputString: string
    @param inputString: natural language text to evaluate
    @type  sourceTime:  struct_time
    @param sourceTime:  C{struct_time} value to use as the base
    @type  version:     integer
    @param version:     style version, default will use L{Calendar}
                        parameter version value
    @rtype:  tuple or None
    @return: tuple of tuples in the format (parsed_datetime as
             datetime.datetime, flags as int, start_pos as int,
             end_pos as int, matched_text as string) or None if there
             were no matches

    NOTE(review): this body is both corrupted (the two re.sub patterns and
    several literals were lost in extraction) and truncated (the match
    classification loop after the CRE_DATE3 search is missing); it cannot
    run as-is -- restore from upstream parsedatetime before use.
    """
    orig_inputstring = inputString
    inputString = re.sub(r, r, inputString).lower()
    inputString = re.sub(r|")(\s|$)\1 \3(\s|^)(\, r, inputString)
    startpos = 0
    matches = []
    while startpos < len(inputString):
        # leftmost_match: [start, end, matched_text, match_type, label]
        leftmost_match = [0, 0, None, 0, None]
        m = self.ptc.CRE_MODIFIER.search(inputString[startpos:])
        if m is not None:
            if leftmost_match[1] == 0 or \
               leftmost_match[0] > m.start() + startpos:
                leftmost_match[0] = m.start() + startpos
                leftmost_match[1] = m.end() + startpos
                leftmost_match[2] = m.group()
                leftmost_match[3] = 0
                leftmost_match[4] =
        m = self.ptc.CRE_UNITS.search(inputString[startpos:])
        if m is not None:
            debug and log.debug()
            if self._UnitsTrapped(inputString[startpos:], m, ):
                debug and log.debug()
            else:
                if leftmost_match[1] == 0 or \
                   leftmost_match[0] > m.start() + startpos:
                    leftmost_match[0] = m.start() + startpos
                    leftmost_match[1] = m.end() + startpos
                    leftmost_match[2] = m.group()
                    leftmost_match[3] = 3
                    leftmost_match[4] =
                    if m.start() > 0 and \
                       inputString[m.start() - 1] == :
                        leftmost_match[0] = leftmost_match[0] - 1
                        leftmost_match[2] = + leftmost_match[2]
        m = self.ptc.CRE_QUNITS.search(inputString[startpos:])
        if m is not None:
            debug and log.debug()
            if self._UnitsTrapped(inputString[startpos:], m, ):
                debug and log.debug()
            else:
                if leftmost_match[1] == 0 or \
                   leftmost_match[0] > m.start() + startpos:
                    leftmost_match[0] = m.start() + startpos
                    leftmost_match[1] = m.end() + startpos
                    leftmost_match[2] = m.group()
                    leftmost_match[3] = 3
                    leftmost_match[4] =
                    if m.start() > 0 and \
                       inputString[m.start() - 1] == :
                        leftmost_match[0] = leftmost_match[0] - 1
                        leftmost_match[2] = + leftmost_match[2]
        m = self.ptc.CRE_DATE3.search(inputString[startpos:])
        # --- truncated: classification of this match and the proximity
        #     grouping loop (defining i, from_match_index, date, time,
        #     units, proximity_matches) begins here in the original ---
        if date or time or units:
            combined = orig_inputstring[matches[from_match_index]
                                        [0]:matches[i - 1][1]]
            parsed_datetime, flags = self.parse(combined,
                                                sourceTime,
                                                version)
            proximity_matches.append((
                datetime.datetime(*parsed_datetime[:6]),
                flags,
                matches[from_match_index][0],
                matches[i - 1][1],
                combined))
            from_match_index = i
            date = matches[i][3] == 1
            time = matches[i][3] == 2
            units = matches[i][3] == 3
            continue
        else:
            if matches[i][3] == 1:
                date = True
            if matches[i][3] == 2:
                time = True
            if matches[i][3] == 3:
                units = True
    # Flush any trailing grouping that was still being accumulated.
    if date or time or units:
        combined = orig_inputstring[matches[from_match_index][0]:
                                    matches[len(matches) - 1][1]]
        parsed_datetime, flags = self.parse(combined, sourceTime,
                                            version)
        proximity_matches.append((
            datetime.datetime(*parsed_datetime[:6]),
            flags,
            matches[from_match_index][0],
            matches[len(matches) - 1][1],
            combined))
    elif len(matches) == 0:
        return None
    else:
        if matches[0][3] == 0:
            # A lone modifier with no datetime text is not a match.
            return None
        else:
            combined = orig_inputstring[matches[0][0]:matches[0][1]]
            parsed_datetime, flags = self.parse(matches[0][2], sourceTime,
                                                version)
            proximity_matches.append((
                datetime.datetime(*parsed_datetime[:6]),
                flags,
                matches[0][0],
                matches[0][1],
                combined))
    return tuple(proximity_matches)
def example_delta_alter_configs(a, args):
    """Modify only the supplied configuration entries via AlterConfigs.

    The AlterConfigs Kafka API requires all configuration to be passed;
    any left-out configuration properties revert to their defaults. This
    example reads the broker configuration first, merges in the supplied
    entries, and writes everything back; the async futures make it more
    complex than the synchronous equivalent.

    NOTE(review): the body is truncated -- ``fs`` and ``wait_zero`` are
    never defined here, and the ``conf.split()`` calls lost their
    separator literals ('=' and ',') in extraction.
    """
    resources = []
    # args come in triplets: (resource-type, resource-name, "k=v,k=v").
    for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
        resource = ConfigResource(restype, resname)
        resources.append(resource)
        for k, v in [conf.split() for conf in configs.split()]:
            resource.set_config(k, v)
    for res, f in fs.items():
        f.add_done_callback(lambda fut, resource=res: delta_alter_configs(resource, fut.result()))
    print("Waiting for {} resource updates to finish".format(len(wait_zero)))
    wait_zero.wait()
def _set_edgeport(self, v, load=False):
    """Setter method for edgeport, mapped from YANG variable
    /interface/port_channel/spanning_tree/edgeport (container).

    If this variable is read-only (config: false) in the source YANG
    file, then _set_edgeport is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_edgeport() directly.

    NOTE(review): the string literals in the YANGDynClass call, the
    ValueError payload, and the trailing hasattr/_set names were lost in
    extraction; restore from the generated pyangbind source.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=edgeport.edgeport, is_container=, presence=False, yang_name="edgeport", rest_name="edgeport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            : ,
            : "container",
            : ,
        })
    self.__edgeport = t
    if hasattr(self, ):
        self._set()
def get_time(self) -> float:
    """Get the current position in the music in seconds."""
    # While paused, report the position frozen at pause time; otherwise
    # convert the mixer position from milliseconds to seconds.
    return self.pause_time if self.paused else mixer.music.get_pos() / 1000.0
def update(self, *, name=None, show_headers=None, show_totals=None, style=None):
    """Updates this table.

    :param str name: the name of the table
    :param bool show_headers: whether or not to show the headers
    :param bool show_totals: whether or not to show the totals
    :param str style: the style of the table
    :return: Success or Failure

    NOTE(review): the request-payload keys and response keys were lost in
    extraction (empty ``data[]`` / ``data.get(,`` operands), as was the
    ValueError message; restore before use.
    """
    if name is None and show_headers is None and show_totals is None and style is None:
        raise ValueError()
    data = {}
    if name:
        data[] = name
    if show_headers:
        data[] = show_headers
    if show_totals:
        data[] = show_totals
    if style:
        data[] = style
    response = self.session.patch(self.build_url(), data=data)
    if not response:
        return False
    # Refresh local attributes from the server's echoed values.
    data = response.json()
    self.name = data.get(, self.name)
    self.show_headers = data.get(, self.show_headers)
    self.show_totals = data.get(, self.show_totals)
    self.style = data.get(, self.style)
    return True
def neighbor_add(self, address, remote_as,
                 remote_port=DEFAULT_BGP_PORT,
                 enable_ipv4=DEFAULT_CAP_MBGP_IPV4,
                 enable_ipv6=DEFAULT_CAP_MBGP_IPV6,
                 enable_vpnv4=DEFAULT_CAP_MBGP_VPNV4,
                 enable_vpnv6=DEFAULT_CAP_MBGP_VPNV6,
                 enable_evpn=DEFAULT_CAP_MBGP_EVPN,
                 enable_ipv4fs=DEFAULT_CAP_MBGP_IPV4FS,
                 enable_ipv6fs=DEFAULT_CAP_MBGP_IPV6FS,
                 enable_vpnv4fs=DEFAULT_CAP_MBGP_VPNV4FS,
                 enable_vpnv6fs=DEFAULT_CAP_MBGP_VPNV6FS,
                 enable_l2vpnfs=DEFAULT_CAP_MBGP_L2VPNFS,
                 enable_enhanced_refresh=DEFAULT_CAP_ENHANCED_REFRESH,
                 enable_four_octet_as_number=DEFAULT_CAP_FOUR_OCTET_AS_NUMBER,
                 next_hop=None, password=None, multi_exit_disc=None,
                 site_of_origins=None,
                 is_route_server_client=DEFAULT_IS_ROUTE_SERVER_CLIENT,
                 is_route_reflector_client=DEFAULT_IS_ROUTE_REFLECTOR_CLIENT,
                 is_next_hop_self=DEFAULT_IS_NEXT_HOP_SELF,
                 local_address=None,
                 local_port=None, local_as=None,
                 connect_mode=DEFAULT_CONNECT_MODE):
    """Register a new BGP neighbor and try to establish a session with it
    (accepts a connection from the peer and also tries to connect to it).

    ``address``: IP address string of the peer (IPv4 only).
    ``remote_as``: AS number of the peer (1..65535).
    ``remote_port``: TCP port number of the peer.
    ``enable_ipv4``/``enable_ipv6``/``enable_vpnv4``/``enable_vpnv6``/
    ``enable_evpn``/``enable_ipv4fs``/``enable_ipv6fs``/``enable_vpnv4fs``/
    ``enable_vpnv6fs``/``enable_l2vpnfs``: enable the corresponding
    address family for this neighbor.
    ``enable_enhanced_refresh``: enable Enhanced Route Refresh.
    ``enable_four_octet_as_number``: enable Four-Octet AS Number capability.
    ``next_hop``: next-hop IP address (defaults to the host's address).
    ``password``: enables MD5 authentication when given.
    ``multi_exit_disc``: int MED value; omitted when None.
    ``site_of_origins``: list of site-of-origin strings.
    ``is_route_server_client``/``is_route_reflector_client``/
    ``is_next_hop_self``: per-peer behavior flags.
    ``local_address``/``local_port``/``local_as``: iBGP peering source
    address, source TCP port and per-peer local AS number.
    ``connect_mode``: one of CONNECT_MODE_ACTIVE ('active'),
    CONNECT_MODE_PASSIVE ('passive') or CONNECT_MODE_BOTH ('both').
    """
    bgp_neighbor = {
        neighbors.IP_ADDRESS: address,
        neighbors.REMOTE_AS: remote_as,
        REMOTE_PORT: remote_port,
        PEER_NEXT_HOP: next_hop,
        PASSWORD: password,
        IS_ROUTE_SERVER_CLIENT: is_route_server_client,
        IS_ROUTE_REFLECTOR_CLIENT: is_route_reflector_client,
        IS_NEXT_HOP_SELF: is_next_hop_self,
        CONNECT_MODE: connect_mode,
        CAP_ENHANCED_REFRESH: enable_enhanced_refresh,
        CAP_FOUR_OCTET_AS_NUMBER: enable_four_octet_as_number,
        CAP_MBGP_IPV4: enable_ipv4,
        CAP_MBGP_IPV6: enable_ipv6,
        CAP_MBGP_VPNV4: enable_vpnv4,
        CAP_MBGP_VPNV6: enable_vpnv6,
        CAP_MBGP_EVPN: enable_evpn,
        CAP_MBGP_IPV4FS: enable_ipv4fs,
        CAP_MBGP_IPV6FS: enable_ipv6fs,
        CAP_MBGP_VPNV4FS: enable_vpnv4fs,
        CAP_MBGP_VPNV6FS: enable_vpnv6fs,
        CAP_MBGP_L2VPNFS: enable_l2vpnfs,
    }
    # Optional settings are only included when explicitly provided.
    if multi_exit_disc:
        bgp_neighbor[MULTI_EXIT_DISC] = multi_exit_disc
    if site_of_origins:
        bgp_neighbor[SITE_OF_ORIGINS] = site_of_origins
    if local_address:
        bgp_neighbor[LOCAL_ADDRESS] = local_address
    if local_port:
        bgp_neighbor[LOCAL_PORT] = local_port
    if local_as:
        bgp_neighbor[LOCAL_AS] = local_as
    # NOTE(review): the RPC name was lost in extraction; 'neighbor.create'
    # is the Ryu BGPSpeaker operator API name -- confirm upstream.
    call('neighbor.create', **bgp_neighbor)
def get_ids(a):
    """Make a copy of sequences with short identifiers.

    NOTE(review): format strings, file modes and split separators were
    lost in extraction (bare ``%`` operators and empty call parens);
    restore before use.
    """
    a_id = % (a.rsplit(, 1)[0])
    a_id_lookup = % (a.rsplit(, 1)[0])
    # Re-use previously generated id files when present.
    if check(a_id) is True:
        return a_id, a_id_lookup
    a_id_f = open(a_id, )
    a_id_lookup_f = open(a_id_lookup, )
    ids = []
    for seq in parse_fasta(open(a)):
        id = id_generator()
        # Re-draw until the generated short id is unique in this file.
        while id in ids:
            id = id_generator()
        ids.append(id)
        header = seq[0].split()[1]
        name = remove_bad(header)
        seq[0] = % (id, header)
        print(.join(seq), file=a_id_f)
        print( % (id, name, header), file=a_id_lookup_f)
    return a_id, a_id_lookup
def SetRange(self, range_offset, range_size):
    """Sets the data range (offset and size).

    The data range is used to map a range of data within one file
    (e.g. a single partition within a full disk image) as a file-like
    object.

    Args:
        range_offset (int): start offset of the data range.
        range_size (int): size of the data range.

    Raises:
        IOError: if the file-like object is already open.
        OSError: if the file-like object is already open.
        ValueError: if the range offset or range size is invalid.
    """
    if self._is_open:
        raise IOError('Already open.')
    # Error-message text was lost in extraction; reconstructed here.
    if range_offset < 0:
        raise ValueError(
            'Invalid range offset: {0:d} value out of bounds.'.format(
                range_offset))
    if range_size < 0:
        raise ValueError(
            'Invalid range size: {0:d} value out of bounds.'.format(
                range_size))
    self._range_offset = range_offset
    self._range_size = range_size
    # Reset the read cursor relative to the new range.
    self._current_offset = 0
def checkbox_check(self, force_check=False):
    """Wrapper to check a checkbox: clicks only when not already checked."""
    # NOTE(review): the attribute name queried here was lost in extraction
    # (presumably 'checked') -- restore before use.
    if not self.get_attribute():
        self.click(force_click=force_check)
def release_port(self, port):
    """Release *port*: forget it was closed and mark it available again."""
    closed = self.__closed
    if port in closed:
        closed.remove(port)
    # Return the port to the pool of available ports.
    self.__ports.add(port)
def get_message_state_scope(self, msgid, line=None, confidence=UNDEFINED):
    """Returns the scope at which a message was enabled/disabled."""
    # Confidence filtering: when configured and this confidence level is
    # excluded, the state comes from the confidence setting itself.
    if self.config.confidence and confidence.name not in self.config.confidence:
        return MSG_STATE_CONFIDENCE
    try:
        # A module-level pragma recorded an explicit state for this line.
        if line in self.file_state._module_msgs_state[msgid]:
            return MSG_STATE_SCOPE_MODULE
    except (KeyError, TypeError):
        # No module record (or line is unhashable): fall back to config scope.
        return MSG_STATE_SCOPE_CONFIG
    # Message id known at module scope but this line has no record.
    return None
def load_retaildata():
    """Monthly retail trade data from census.gov.

    Downloads the advance monthly sales text files, keeps the
    not-seasonally-adjusted block, and returns ``(sales, yoy)``:
    wide DataFrames of monthly levels and their year-over-year
    percentage change, indexed by month-end dates.
    """
    db = {
        "Auto, other Motor Vehicle": "https://www.census.gov/retail/marts/www/adv441x0.txt",
        "Building Material and Garden Equipment and Supplies Dealers": "https://www.census.gov/retail/marts/www/adv44400.txt",
        "Clothing and Clothing Accessories Stores": "https://www.census.gov/retail/marts/www/adv44800.txt",
        "Dept. Stores (ex. leased depts)": "https://www.census.gov/retail/marts/www/adv45210.txt",
        "Electronics and Appliance Stores": "https://www.census.gov/retail/marts/www/adv44300.txt",
        "Food Services and Drinking Places": "https://www.census.gov/retail/marts/www/adv72200.txt",
        "Food and Beverage Stores": "https://www.census.gov/retail/marts/www/adv44500.txt",
        "Furniture and Home Furnishings Stores": "https://www.census.gov/retail/marts/www/adv44200.txt",
        "Gasoline Stations": "https://www.census.gov/retail/marts/www/adv44700.txt",
        "General Merchandise Stores": "https://www.census.gov/retail/marts/www/adv45200.txt",
        "Grocery Stores": "https://www.census.gov/retail/marts/www/adv44510.txt",
        "Health and Personal Care Stores": "https://www.census.gov/retail/marts/www/adv44600.txt",
        "Miscellaneous Store Retailers": "https://www.census.gov/retail/marts/www/adv45300.txt",
        "Motor Vehicle and Parts Dealers": "https://www.census.gov/retail/marts/www/adv44100.txt",
        "Nonstore Retailers": "https://www.census.gov/retail/marts/www/adv45400.txt",
        "Retail and Food Services, total": "https://www.census.gov/retail/marts/www/adv44x72.txt",
        "Retail, total": "https://www.census.gov/retail/marts/www/adv44000.txt",
        "Sporting Goods, Hobby, Book, and Music Stores": "https://www.census.gov/retail/marts/www/adv45100.txt",
        "Total (excl. Motor Vehicle)": "https://www.census.gov/retail/marts/www/adv44y72.txt",
        "Retail (excl. Motor Vehicle and Parts Dealers)": "https://www.census.gov/retail/marts/www/adv4400a.txt",
    }
    series_map = {}
    for series_name, url in db.items():
        tbl = pd.read_csv(
            url,
            skiprows=5,
            skip_blank_lines=True,
            header=None,
            sep="\s+",
            index_col=0,
        )
        # Keep only the not-seasonally-adjusted block at the top of the file.
        try:
            stop = tbl.index.get_loc("SEASONAL")
        except KeyError:
            stop = tbl.index.get_loc("NO")
        tbl = tbl.iloc[:stop]
        tbl = tbl.apply(lambda col: pd.to_numeric(col, downcast="float"))
        # Rows are years, columns are months; stack to one monthly series.
        stacked = tbl.stack()
        month_end = pd.to_datetime({
            "year": stacked.index.get_level_values(0),
            "month": stacked.index.get_level_values(1),
            "day": 1,
        }) + offsets.MonthEnd(1)
        stacked.index = month_end
        stacked.name = series_name
        series_map[series_name] = stacked
    sales = pd.DataFrame(series_map)
    # Force a regular month-end frequency so gaps show up as NaN.
    sales = sales.reindex(
        pd.date_range(sales.index[0], sales.index[-1], freq="M")
    )
    yoy = sales.pct_change(periods=12)
    return sales, yoy
def _report_container_spec_metrics(self, pod_list, instance_tags):
    """Reports pod requests & limits by looking at pod specs.

    NOTE(review): dictionary keys and metric format strings were lost in
    extraction (empty ``[]``/``get(``/``%``/``.format`` operands);
    restore before use.
    """
    for pod in pod_list[]:
        pod_name = pod.get(, {}).get()
        pod_phase = pod.get(, {}).get()
        if self._should_ignore_pod(pod_name, pod_phase):
            continue
        for ctr in pod[][]:
            if not ctr.get():
                continue
            c_name = ctr.get(, )
            cid = None
            # Resolve the container id from the matching status entry.
            for ctr_status in pod[].get(, []):
                if ctr_status.get() == c_name:
                    cid = ctr_status.get()
                    break
            if not cid:
                continue
            pod_uid = pod.get(, {}).get()
            if self.pod_list_utils.is_excluded(cid, pod_uid):
                continue
            tags = tagger.tag( % cid, tagger.HIGH) + instance_tags
            # Emit one gauge per requested resource quantity.
            try:
                for resource, value_str in iteritems(ctr.get(, {}).get(, {})):
                    value = self.parse_quantity(value_str)
                    self.gauge(.format(self.NAMESPACE, resource), value, tags)
            except (KeyError, AttributeError) as e:
                self.log.debug("Unable to retrieve container requests for %s: %s", c_name, e)
            # Emit one gauge per resource limit quantity.
            try:
                for resource, value_str in iteritems(ctr.get(, {}).get(, {})):
                    value = self.parse_quantity(value_str)
                    self.gauge(.format(self.NAMESPACE, resource), value, tags)
            except (KeyError, AttributeError) as e:
                self.log.debug("Unable to retrieve container limits for %s: %s", c_name, e)
def fix2real(uval, conv):
    """Convert a 32-bit unsigned register value into the real number it
    represents under the fixed-point format described by *conv*.

    @param uval: the numeric unsigned value in simulink representation
    @param conv: conv structure with conversion specs as generated by
        I{get_conv}
    @return: the real number represented by the fixed-point format
        defined in conv
    """
    integer_part = (uval & conv["int_mask"]) >> conv["bin_point"]
    fractional_part = conv["dec_step"] * (uval & conv["dec_mask"])
    if conv["signed"] and (uval & conv["sign_mask"] > 0):
        # Negative value: offset from the most negative representable int.
        value = conv["int_min"] + integer_part + fractional_part
    else:
        value = integer_part + fractional_part
    return value / conv["scaling"]
def drawDisplay(self, painter, option, rect, text):
    """Handles the display drawing for this delegate.

    :param painter: <QPainter>
    :param option: <QStyleOption>
    :param rect: <QRect>
    :param text: <str>
    """
    painter.setBrush(Qt.NoBrush)
    # Inset the text 3 px from the left edge of the cell rectangle.
    left = rect.left() + 3
    width = rect.width() - 3
    painter.drawText(left, rect.top(), width, rect.height(),
                     option.displayAlignment, text)
def composition_prediction(self, composition, to_this_composition=True):
    """Returns charge-balanced substitutions from a starting or ending
    composition.

    Args:
        composition: starting or ending composition
        to_this_composition: if True, substitutions with this as a final
            composition will be found; if False, substitutions with this
            as a starting composition will be found (slightly different).

    Returns:
        List of predictions in the form of dictionaries.

    NOTE(review): the substitution-dict key and the logging format string
    were lost in extraction (empty ``p[]`` / ``.format`` operands).
    """
    preds = self.list_prediction(list(composition.keys()),
                                 to_this_composition)
    output = []
    for p in preds:
        # Orient the substitution mapping depending on direction.
        if to_this_composition:
            subs = {v: k for k, v in p[].items()}
        else:
            subs = p[]
        # Keep only charge-neutral substitutions.
        charge = 0
        for k, v in composition.items():
            charge += subs[k].oxi_state * v
        if abs(charge) < 1e-8:
            output.append(p)
    logging.info(
        .format(len(output)))
    return output
def add_patch(self, *args, **kwargs):
    """Shortcut for :meth:`add_route` with the HTTP PATCH method."""
    return self.add_route(hdrs.METH_PATCH, *args, **kwargs)
def streaming_to_client():
    """Puts the client logger into streaming mode, which sends unbuffered
    input through to the socket one character at a time. Propagation is
    disabled so the root logger does not receive many one-byte emissions.
    Originally created for streaming Compose up's terminal output to the
    client; use only for similarly complex circumstances.

    NOTE(review): the attribute name checked by ``hasattr`` was lost in
    extraction (presumably 'append_newlines'); the function yields, so it
    is presumably decorated with @contextmanager upstream -- confirm.
    """
    # Find a handler that supports toggling newline appending.
    for handler in client_logger.handlers:
        if hasattr(handler, ):
            break
    else:
        handler = None
    old_propagate = client_logger.propagate
    # Silence propagation for the duration of the stream.
    client_logger.propagate = False
    if handler is not None:
        old_append = handler.append_newlines
        handler.append_newlines = False
    yield
    # Restore the previous logger state once streaming finishes.
    client_logger.propagate = old_propagate
    if handler is not None:
        handler.append_newlines = old_append
def set_default_prediction_value(self, values):
    """Set the default prediction value(s).

    The values given here form the base prediction value that the values
    at activated leaves are added to. If values is a scalar, then the
    output of the tree must also be 1 dimensional; otherwise, values must
    be a list with length matching the dimension of values in the tree.

    Parameters
    ----------
    values: [int | double | list[double]]
        Default values for predictions.
    """
    # Promote a scalar to a one-element float list (exact-type check kept
    # so list subclasses pass through unchanged, as before).
    if type(values) is not list:
        values = [float(values)]
    self.tree_parameters.numPredictionDimensions = len(values)
    for base_value in values:
        self.tree_parameters.basePredictionValue.append(base_value)
def format(self):
    """Return the format attribute of the BFD file being processed.

    Raises:
        BfdException: if the underlying BFD pointer is not initialized.
    """
    if self._ptr:
        return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FORMAT)
    raise BfdException("BFD not initialized")
def process_get(self):
    """Analyse the GET request.

    :return:
        * :attr:`USER_NOT_AUTHENTICATED` if the user is not authenticated
          or is requesting authentication renewal
        * :attr:`USER_AUTHENTICATED` if the user is authenticated and is
          not requesting authentication renewal
    :rtype: int
    """
    self.gen_lt()
    # Authenticated users skip the form unless they asked for renewal.
    if self.request.session.get("authenticated") and not self.renew:
        return self.USER_AUTHENTICATED
    self.init_form()
    return self.USER_NOT_AUTHENTICATED
def add_status_message(self, message, severity="info"):
    """Queue a Plone portal status message with the given severity."""
    self.context.plone_utils.addPortalMessage(message, severity) | Set a portal message
def get_stp_mst_detail_output_cist_cist_reg_root_id(self, **kwargs):
    """Auto Generated Code: NETCONF getter for output/cist/cist-reg-root-id.

    Builds the <get_stp_mst_detail> request, fills in the
    cist-reg-root-id text from kwargs and hands the element tree to the
    callback (defaults to ``self._callback``).

    NOTE(review): the popped kwarg keys were missing in the original
    ("kwargs.pop()") — restored here from the element names; the dead
    `config = ET.Element("config")` assignment was removed.
    """
    get_stp_mst_detail = ET.Element("get_stp_mst_detail")
    config = get_stp_mst_detail
    output = ET.SubElement(get_stp_mst_detail, "output")
    cist = ET.SubElement(output, "cist")
    cist_reg_root_id = ET.SubElement(cist, "cist-reg-root-id")
    cist_reg_root_id.text = kwargs.pop('cist_reg_root_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def ip_unnumbered(self, **kwargs):
    """Configure (or get/delete) an unnumbered interface.

    NOTE(review): every string literal in this block was lost in
    extraction (empty subscripts, bare `.pop()` calls, empty list
    entries, empty ValueError template) — the kwarg keys, the
    valid-interface-type names and the error message must be restored
    from the original source before this can run.
    """
    # Donor kwargs are re-keyed for the XML-builder helpers below.
    kwargs[] = kwargs.pop()
    kwargs[] = kwargs.pop()
    kwargs[] = kwargs.pop(, False)
    callback = kwargs.pop(, self._callback)
    valid_int_types = [, ,
                       , ]
    if kwargs[] not in valid_int_types:
        raise ValueError( %
                         repr(valid_int_types))
    unnumbered_type = self._ip_unnumbered_type(**kwargs)
    unnumbered_name = self._ip_unnumbered_name(**kwargs)
    # `get=True` short-circuits into a config read instead of an edit.
    if kwargs.pop(, False):
        return self._get_ip_unnumbered(unnumbered_type, unnumbered_name)
    config = pynos.utilities.merge_xml(unnumbered_type, unnumbered_name)
    return callback(config) | Configure an unnumbered interface.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet etc).
name (str): Name of interface id.
(For interface: 1/0/5, 1/0/10 etc).
delete (bool): True is the IP address is added and False if its to
be deleted (True, False). Default value will be False if not
specified.
donor_type (str): Interface type of the donor interface.
donor_name (str): Interface name of the donor interface.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, `donor_type`, or `donor_name` is
not passed.
ValueError: if `int_type`, `name`, `donor_type`, or `donor_name`
are invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.230']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.ip_address(int_type='loopback',
... name='1', ip_addr='4.4.4.4/32', rbridge_id='230')
... int_type = 'tengigabitethernet'
... name = '230/0/20'
... donor_type = 'loopback'
... donor_name = '1'
... output = dev.interface.disable_switchport(inter_type=
... int_type, inter=name)
... output = dev.interface.ip_unnumbered(int_type=int_type,
... name=name, donor_type=donor_type, donor_name=donor_name)
... output = dev.interface.ip_unnumbered(int_type=int_type,
... name=name, donor_type=donor_type, donor_name=donor_name,
... get=True)
... output = dev.interface.ip_unnumbered(int_type=int_type,
... name=name, donor_type=donor_type, donor_name=donor_name,
... delete=True)
... output = dev.interface.ip_address(int_type='loopback',
... name='1', ip_addr='4.4.4.4/32', rbridge_id='230',
... delete=True)
... output = dev.interface.ip_unnumbered(int_type='hodor',
... donor_name=donor_name, donor_type=donor_type, name=name)
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError |
def plot_site(fignum, SiteRec, data, key):
    """Deprecated (used in ipmag): print site/specimen stats and plot them.

    NOTE(review): all string literals (dict keys, print formats, the
    's'/'a' choices) were lost in extraction, and `EQ`, `site` and
    `fmt` are free names not defined in this block — restore from the
    original pmagpy source before use.
    """
    print()
    print()
    print(SiteRec[], SiteRec[], SiteRec[], SiteRec[], SiteRec[],
          SiteRec[], SiteRec[], SiteRec[], SiteRec[])
    print()
    for i in range(len(data)):
        print( % (data[i][ + key + ], data[i][key + ], data[i]
              [key + ], data[i][key + ], data[i][key + ], data[i][]))
    plot_slnp(fignum, SiteRec, data, key)
    # Interactive prompt: quit, save, or fall through to continue.
    plot = input("s[a]ve plot, [q]uit or <return> to continue: ")
    if plot == :
        print("CUL8R")
        sys.exit()
    if plot == :
        files = {}
        for key in list(EQ.keys()):
            files[key] = site + + key + + fmt
        save_plots(EQ, files) | deprecated (used in ipmag)
def start(io_loop=None, check_time=2):
    """Begin watching source files for changes.

    Idempotent per event loop: calling it again for a loop that is
    already being watched is a no-op. ``check_time`` is the poll
    interval in seconds.
    """
    loop = io_loop or asyncio.get_event_loop()
    if loop in _io_loops:
        return
    _io_loops[loop] = True
    if len(_io_loops) > 1:
        logger.warning("aiohttp_autoreload started more than once in the same process")
    logger.debug("Starting periodic checks for code changes")
    mtimes = {}
    call_periodic(check_time, functools.partial(_reload_on_update, mtimes), loop=loop)
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated. |
def get_spec(self):
    """Return the Core ML spec for the VGGish model.

    On macOS >= 10.14 the in-memory model's spec is used; otherwise the
    spec is loaded from the model file on disk.
    NOTE(review): the `format=` keyword's string argument was lost in
    extraction (likely 'coreml').
    """
    if _mac_ver() >= (10, 14):
        return self.vggish_model.get_spec()
    else:
        vggish_model_file = VGGish()
        coreml_model_path = vggish_model_file.get_model_path(format=)
        return MLModel(coreml_model_path).get_spec() | Return the Core ML spec
def _split_string_to_tokens(text):
    """Splits text to a list of string tokens.

    A token boundary is inserted wherever membership in
    ``_ALPHANUMERIC_CHAR_SET`` flips. A lone space between two tokens
    is dropped unless it is the leading token.
    """
    if not text:
        return []
    ret = []
    token_start = 0
    # Classify every character once up front.
    is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]
    # BUG FIX: `xrange` is Python 2 only (NameError on Python 3 — this
    # file uses f-strings and `async def` elsewhere); use `range`.
    for pos in range(1, len(text)):
        if is_alnum[pos] != is_alnum[pos - 1]:
            token = text[token_start:pos]
            if token != u" " or token_start == 0:
                ret.append(token)
            token_start = pos
    final_token = text[token_start:]
    ret.append(final_token)
    return ret
def get_creation_date(
        self,
        bucket: str,
        key: str,
) -> datetime.datetime:
    """Retrieve the creation date for a given key in a given bucket.

    :param bucket: the bucket the object resides in.
    :param key: the key of the object whose creation date is wanted.
    :return: the creation date
    """
    return self._get_blob_obj(bucket, key).time_created
:param bucket: the bucket the object resides in.
:param key: the key of the object for which the creation date is being retrieved.
:return: the creation date |
def get_rendered_objects(self):
    """Render objects.

    ``self.objects`` may be an iterable of objects, or the name of a
    related manager on ``self.object`` (resolved via ``.all()``).
    """
    items = self.objects
    if isinstance(items, str):
        items = getattr(self.object, items).all()
    rendered = []
    for item in items:
        rendered.append(self.get_rendered_object(item))
    return rendered
def mutex(self, mutex, **kwargs):
    """Add Mutex indicator data to this Batch object.

    ``mutex`` is the indicator value; remaining kwargs (confidence,
    rating, xid, ...) are forwarded to the Mutex constructor. Returns
    whatever ``self._indicator`` returns for the new Mutex instance.
    """
    indicator_obj = Mutex(mutex, **kwargs)
    return self._indicator(indicator_obj) | Add Mutex data to Batch object.
Args:
mutex (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of Mutex. |
def dlogpdf_dlink_dvar(self, inv_link_f, y, Y_metadata=None):
    """Derivative of dlogpdf_dlink w.r.t. the variance parameter (t_noise).

    ``Y_metadata`` is accepted for interface compatibility and unused.
    """
    residual = y - inv_link_f
    numer = self.v * (self.v + 1) * (-residual)
    denom = (self.sigma2 * self.v + residual ** 2) ** 2
    return numer / denom
.. math::
\\frac{d}{d\\sigma^{2}}(\\frac{d \\ln p(y_{i}|\lambda(f_{i}))}{df}) = \\frac{-2\\sigma v(v + 1)(y_{i}-\lambda(f_{i}))}{(y_{i}-\lambda(f_{i}))^2 + \\sigma^2 v)^2}
:param inv_link_f: latent variables inv_link_f
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: derivative of likelihood evaluated at points f w.r.t variance parameter
:rtype: Nx1 array |
def get_port_profile_for_intf_output_has_more(self, **kwargs):
    """Auto Generated Code: NETCONF getter for output/has-more.

    NOTE(review): the popped kwarg keys were missing in the original
    ("kwargs.pop()") — restored here from the element names; the dead
    `config = ET.Element("config")` assignment was removed.
    """
    get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
    config = get_port_profile_for_intf
    output = ET.SubElement(get_port_profile_for_intf, "output")
    has_more = ET.SubElement(output, "has-more")
    has_more.text = kwargs.pop('has_more')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_cs_archs(self):
    """Return the Capstone (arch, mode) constant pairs for each
    supported architecture.

    NOTE(review): the dict keys (architecture-name strings) were lost
    in extraction — as written every entry shares one empty key.
    Restore names (e.g. 'x86-16'/'x86'/'x86-64'/'arm'/'thumb'/
    'arm64'/'mips32'/'mips64') from the original source.
    """
    cs_archs = {
        : (CS_ARCH_X86, CS_MODE_16),
        : (CS_ARCH_X86, CS_MODE_32),
        : (CS_ARCH_X86, CS_MODE_64),
        : (CS_ARCH_ARM, CS_MODE_ARM),
        : (CS_ARCH_ARM, CS_MODE_THUMB),
        : (CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN),
        : (CS_ARCH_MIPS, CS_MODE_MIPS32),
        : (CS_ARCH_MIPS, CS_MODE_MIPS64),
    }
    return cs_archs | capstone disassembler
def _all_tables_present(self, txn):
    """Checks if any tables are present in the current assets database.

    Parameters
    ----------
    txn : Transaction
        The open transaction to check in.

    Returns
    -------
    has_tables : bool
        True if any tables are present, otherwise False.
        (Note: despite the name, this is an *any*-table check.)
    """
    conn = txn.connect()
    return any(
        txn.dialect.has_table(conn, table_name)
        for table_name in asset_db_table_names
    )
Parameters
----------
txn : Transaction
The open transaction to check in.
Returns
-------
has_tables : bool
True if any tables are present, otherwise False. |
def get_string(self):
    """A string representation of the junction.

    NOTE(review): the separator string literals between the fields were
    lost in extraction, leaving bare `+` chains (likely ':' and '-'
    separators); restore from the original source.
    """
    return self.left.chr++str(self.left.end)++self.right.chr++str(self.right.start) | A string representation of the junction
:return: string representation
:rtype: string |
def display(surface):
    """Blit ``surface`` onto the window's screen surface at (0, 0) and
    flip the display so it becomes visible.

    :param surface: the pygame.Surface to display
    """
    screen = pygame.display.get_surface()
    screen.blit(surface, (0, 0))
    pygame.display.flip() | Displays a pygame.Surface in the window.
in pygame the window is represented through a surface, on which you can draw
as on any other pygame.Surface. A reference to the screen can be obtained
via the :py:func:`pygame.display.get_surface` function. To display the
contents of the screen surface in the window :py:func:`pygame.display.flip`
needs to be called.
:py:func:`display` draws the surface onto the screen surface at the postion
(0, 0), and then calls :py:func:`flip`.
:param surface: the pygame.Surface to display
:type surface: pygame.Surface |
def get_seqstr(config, metadata):
    """Build pretty strings for imaging sequence(s) and variant(s).

    NOTE(review): several string literals (metadata keys, split
    separators, config-section keys and a format template) were lost in
    extraction; this cannot run as written.
    """
    seq_abbrs = metadata.get(, ).split()
    seqs = [config[].get(seq, seq) for seq in seq_abbrs]
    variants = [config[].get(var, var) for var in \
        metadata.get(, ).split()]
    seqs = list_to_str(seqs)
    if seq_abbrs[0]:
        seqs += .format(os.path.sep.join(seq_abbrs))
    variants = list_to_str(variants)
    return seqs, variants | Extract and reformat imaging sequence(s) and variant(s) into pretty
strings.
Parameters
----------
config : :obj:`dict`
A dictionary with relevant information regarding sequences, sequence
variants, phase encoding directions, and task names.
metadata : :obj:`dict`
The metadata for the scan.
Returns
-------
seqs : :obj:`str`
Sequence names.
variants : :obj:`str`
Sequence variant names. |
def _default_read_frame(self, *, frame=None, mpkit=None):
from pcapkit.toolkit.default import (ipv4_reassembly, ipv6_reassembly,
tcp_reassembly, tcp_traceflow)
if not self._flag_m:
frame = Frame(self._ifile, num=self._frnum+1, proto=self._dlink,
layer=self._exlyr, protocol=self._exptl, nanosecond=self._nnsec)
self._frnum += 1
if self._flag_v:
print(f)
frnum = f
if not self._flag_q:
if self._flag_f:
ofile = self._ofile(f)
ofile(frame.info, name=frnum)
else:
self._ofile(frame.info, name=frnum)
if self._ipv4:
flag, data = ipv4_reassembly(frame)
if flag:
self._reasm[0](data)
if self._ipv6:
flag, data = ipv6_reassembly(frame)
if flag:
self._reasm[1](data)
if self._tcp:
flag, data = tcp_reassembly(frame)
if flag:
self._reasm[2](data)
if self._flag_t:
flag, data = tcp_traceflow(frame, data_link=self._dlink)
if flag:
self._trace(data)
if self._exeng == :
if self._flag_d:
mpkit.frames[self._frnum] = frame
mpkit.current += 1
elif self._exeng == :
if self._flag_d:
self._frame.append(frame)
self._frnum += 1
else:
if self._flag_d:
self._frame.append(frame)
self._proto = frame.protochain.chain
return frame | Read frames with default engine.
- Extract frames and each layer of packets.
- Make Info object out of frame properties.
- Append Info.
- Write plist & append Info. |
async def profile(self, ctx, tag):
    """Discord command: fetch a Clash Royale profile by player tag and
    send it to the channel as an embed.

    NOTE(review): the invalid-tag error message and the
    ``attr.replace(...)`` arguments were lost in extraction.
    """
    if not self.check_valid_tag(tag):
        return await ctx.send()
    profile = await self.cr.get_profile(tag)
    # NOTE(review): 0x00FFFFF has seven hex digits — probably meant
    # 0x00FFFF (cyan); confirm against the original.
    em = discord.Embed(color=0x00FFFFF)
    em.set_author(name=str(profile), icon_url=profile.clan_badge_url)
    em.set_thumbnail(url=profile.arena.badge_url)
    # Reflect every non-callable attribute of the profile into a field.
    for attr in self.cdir(profile):
        value = getattr(profile, attr)
        if not callable(value):
            em.add_field(
                name=attr.replace().title(),
                value=str(value)
            )
    await ctx.send(embed=em) | Example command for use inside a discord bot cog.
def get_group_details(group):
    """Collect ``group`` details from every configured datastore,
    tagging each result with its datastore's identity.

    NOTE(review): the subscript keys on the value/config dicts were
    lost in extraction (likely a datastore-name key).
    """
    result = []
    for datastore in _get_datastores():
        value = datastore.get_group_details(group)
        value[] = datastore.config[]
        result.append(value)
    return result | Get group details.
def remove(self, resource):
    """Remove ``resource`` from this context.

    Arguments that are not ``Resource`` instances are silently ignored.
    """
    if isinstance(resource, Resource):
        self._resources.remove(resource) | Removes a resource from the context
def _factln(num):
if num < 20:
log_factorial = log(factorial(num))
else:
log_factorial = num * log(num) - num + log(num * (1 + 4 * num * (
1 + 2 * num))) / 6.0 + log(pi) / 2
return log_factorial | Computes logfactorial regularly for tractable numbers, uses Ramanujans approximation otherwise. |
def keyword(
    name: str,
    ns: Optional[str] = None,
    kw_cache: atom.Atom["PMap[int, Keyword]"] = __INTERN,
) -> Keyword:
    """Return the interned Keyword for (name, ns), creating it on
    first use.

    The cache is keyed by hash((name, ns)); ``swap`` atomically inserts
    the keyword if absent and returns the map, which is then indexed.
    """
    h = hash((name, ns))
    return kw_cache.swap(__get_or_create, h, name, ns)[h] | Create a new keyword.
def insert_all(db, schema_name, table_name, columns, items):
    """INSERT every item into schema_name.table_name, one parameterized
    execute per item; column values are read as item attributes.

    NOTE(review): the format/join string literals were lost in
    extraction (the '{}.{}' table template, the ', ' separators, the
    placeholder token and the INSERT statement template); restore
    before use.
    """
    table = .format(schema_name, table_name) if schema_name else table_name
    columns_list = .join(columns)
    values_list = .join([] * len(columns))
    query = .format(
        table=table, columns=columns_list, values=values_list)
    for item in items:
        values = [getattr(item, col) for col in columns]
        db.execute(query, values) | Insert all item in given items list into the specified table, schema_name.table_name.
def train(cls, new_data, old=None):
    """Train a continuous scale: fold ``new_data`` into the running
    (min, max) limits carried in ``old``.

    Raises TypeError when the data's dtype kind is not continuous.
    NOTE(review): the ``hasattr`` attribute name was lost in extraction
    (presumably 'dtype', given the access below — confirm).
    """
    if not len(new_data):
        return old
    if not hasattr(new_data, ):
        new_data = np.asarray(new_data)
    if new_data.dtype.kind not in CONTINUOUS_KINDS:
        raise TypeError(
            "Discrete value supplied to continuous scale")
    if old is not None:
        new_data = np.hstack([new_data, old])
    return min_max(new_data, na_rm=True, finite=True) | Train a continuous scale
Parameters
----------
new_data : array_like
New values
old : array_like
Old range. Most likely a tuple of length 2.
Returns
-------
out : tuple
Limits(range) of the scale |
def resample(self, seed=None):
    """Resample the dataset into ``self.perm``.

    With ``self.replacement`` the draw is i.i.d. uniform over dataset
    indices; otherwise it is a prefix of a random permutation. Passing
    ``seed`` makes the draw deterministic.
    """
    generator = torch.default_generator if seed is None else torch.manual_seed(seed)
    n_total = len(self.dataset)
    n_draw = len(self)
    if self.replacement:
        self.perm = torch.LongTensor(n_draw).random_(n_total, generator=generator)
    else:
        self.perm = torch.randperm(n_total, generator=generator).narrow(0, 0, n_draw)
Args:
seed (int, optional): Seed for resampling. By default no seed is
used. |
def get_activities_by_query(self, activity_query=None):
    """Return an ActivityList matching the given activity query.

    NOTE(review): the construct_url arguments and the query-string
    separators were lost in extraction; additionally the final
    ActivityList is constructed but never returned — a `return` is
    almost certainly missing on the last line.
    """
    url_path = construct_url(,
                             bank_id=self._catalog_idstr)
    query_terms = [v for k, v in activity_query._query_terms.items()]
    url_path += + .join(query_terms)
    objects.ActivityList(self._get_request(url_path)) | Gets a list of Activities matching the given activity query.
arg: activityQuery (osid.learning.ActivityQuery): the
activity query
return: (osid.learning.ActivityList) - the returned ActivityList
raise: NullArgument - activityQuery is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - activityQuery is not of this service
compliance: mandatory - This method must be implemented. |
def read_config(config):
    """Return the first non-empty, non-comment line of ``config``
    (left-stripped), or "" when none exists.

    NOTE(review): the comment-prefix literal in startswith() was
    truncated in extraction (likely "#").
    """
    for line in config.splitlines():
        line = line.lstrip()
        if line and not line.startswith("
            return line
    return "" | Read config file and return uncomment line
def get_item_lookup_session(self):
    """Gets the ``OsidSession`` associated with the item lookup service.

    return: (osid.assessment.ItemLookupSession)
    raise:  Unimplemented - ``supports_item_lookup()`` is ``false``
    """
    if self.supports_item_lookup():
        return sessions.ItemLookupSession(runtime=self._runtime)
    raise errors.Unimplemented()
return: (osid.assessment.ItemLookupSession) - an
``ItemLookupSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_item_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_item_lookup()`` is ``true``.* |
def Region2_cp0(Tr, Pr):
    """Ideal properties for Region 2 (IAPWS-IF97 ideal-gas Gibbs part).

    Parameters
    ----------
    Tr : float
        Reduced temperature, [-]
    Pr : float
        Reduced pressure, [-]

    Returns
    -------
    (go, gop, gopp, got, gott, gopt): reduced ideal Gibbs energy and
    its P/T partial derivatives; the mixed derivative gopt is
    identically zero for the ideal part.
    """
    coeffs = (
        (0, -0.96927686500217E+01), (1, 0.10086655968018E+02),
        (-5, -0.56087911283020E-02), (-4, 0.71452738081455E-01),
        (-3, -0.40710498223928E+00), (-2, 0.14240819171444E+01),
        (-1, -0.43839511319450E+01), (2, -0.28408632460772E+00),
        (3, 0.21268463753307E-01),
    )
    go = log(Pr)
    got = 0
    gott = 0
    for j, ni in coeffs:
        go += ni * Tr**j
        got += ni*j * Tr**(j-1)
        gott += ni*j*(j-1) * Tr**(j-2)
    return go, Pr**-1, -Pr**-2, got, gott, 0
Parameters
----------
Tr : float
Reduced temperature, [-]
Pr : float
Reduced pressure, [-]
Returns
-------
prop : array
Array with ideal Gibbs energy partial derivatives:
* g: Ideal Specific Gibbs energy [kJ/kg]
* gp: ∂g/∂P|T
* gpp: ∂²g/∂P²|T
* gt: ∂g/∂T|P
* gtt: ∂²g/∂T²|P
* gpt: ∂²g/∂T∂P
References
----------
IAPWS, Revised Release on the IAPWS Industrial Formulation 1997 for the
Thermodynamic Properties of Water and Steam August 2007,
http://www.iapws.org/relguide/IF97-Rev.html, Eq 16 |
def scheduleServices(self, jobGraph):
    """Schedule the services of a job asynchronously.

    Registers the graph as having services starting, bumps the
    issued-jobs counter by (total service jobs + 1 for the parent) and
    queues the graph for the service-starter thread.
    """
    serviceJobCount = sum(len(level) for level in jobGraph.services) + 1
    self.jobGraphsWithServicesBeingStarted.add(jobGraph)
    self.jobsIssuedToServiceManager += serviceJobCount
    self._jobGraphsWithServicesToStart.put(jobGraph)
When the job's services are running the jobGraph for the job will
be returned by toil.leader.ServiceManager.getJobGraphsWhoseServicesAreRunning.
:param toil.jobGraph.JobGraph jobGraph: wrapper of job with services to schedule. |
def get_search_score(query, choice, ignore_case=True, apply_regex=True,
template=):
original_choice = choice
result = (original_choice, NOT_FOUND_SCORE)
if not query:
return result
if ignore_case:
query = query.lower()
choice = choice.lower()
if apply_regex:
pattern = get_search_regex(query, ignore_case=ignore_case)
r = re.search(pattern, choice)
if r is None:
return result
else:
sep = u
let = u
score = 0
exact_words = [query == word for word in choice.split(u)]
partial_words = [query in word for word in choice.split(u)]
if any(exact_words) or any(partial_words):
pos_start = choice.find(query)
pos_end = pos_start + len(query)
score += pos_start
text = choice.replace(query, sep*len(query), 1)
enriched_text = original_choice[:pos_start] +\
template.format(original_choice[pos_start:pos_end]) +\
original_choice[pos_end:]
if any(exact_words):
score += 1
elif any(partial_words):
score += 100
else:
text = [l for l in original_choice]
if ignore_case:
temp_text = [l.lower() for l in original_choice]
else:
temp_text = text[:]
score += temp_text.index(query[0])
enriched_text = text[:]
for char in query:
if char != u and char in temp_text:
index = temp_text.index(char)
enriched_text[index] = template.format(text[index])
text[index] = sep
temp_text = [u]*(index + 1) + temp_text[index+1:]
enriched_text = u.join(enriched_text)
patterns_text = []
for i, char in enumerate(text):
if char != u and char != sep:
new_char = let
else:
new_char = char
patterns_text.append(new_char)
patterns_text = u.join(patterns_text)
for i in reversed(range(1, len(query) + 1)):
score += (len(query) - patterns_text.count(sep*i))*100000
temp = patterns_text.split(sep)
while u in temp:
temp.remove(u)
if not patterns_text.startswith(sep):
temp = temp[1:]
if not patterns_text.endswith(sep):
temp = temp[:-1]
for pat in temp:
score += pat.count(u)*10000
score += pat.count(let)*100
return original_choice, enriched_text, score | Returns a tuple with the enriched text (if a template is provided) and
a score for the match.
Parameters
----------
query : str
String with letters to search in choice (in order of appearance).
choice : str
Sentence/words in which to search for the 'query' letters.
ignore_case : bool, optional
Optional value perform a case insensitive search (True by default).
apply_regex : bool, optional
Optional value (True by default) to perform a regex search. Useful
when this function is called directly.
template : str, optional
Optional template string to surround letters found in choices. This is
useful when using a rich text editor ('{}' by default).
Examples: '<b>{}</b>', '<code>{}</code>', '<i>{}</i>'
Returns
-------
results : tuple
Tuples where the first item is the text (enriched if a template was
used) and the second item is a search score.
Notes
-----
The score is given according the following precedence (high to low):
- Letters in one word and no spaces with exact match.
Example: 'up' in 'up stroke'
- Letters in one word and no spaces with partial match.
Example: 'up' in 'upstream stroke'
- Letters in one word but with skip letters.
Example: 'cls' in 'close up'
- Letters in two or more words
Example: 'cls' in 'car lost' |
def scale_edges(self, multiplier):
    """Multiply all edges in this ``Tree`` by ``multiplier``.

    Raises TypeError unless ``multiplier`` is an int or float; nodes
    whose edge length is None are left untouched.
    """
    if not isinstance(multiplier, (int, float)):
        raise TypeError("multiplier must be an int or float")
    for node in self.traverse_preorder():
        length = node.edge_length
        if length is not None:
            node.edge_length = length * multiplier
def purity(rho: Density) -> bk.BKTensor:
    """Calculate the purity, tr(rho^2), of a mixed quantum state.

    Purity is 1 for a pure state and lower-bounded by 1/D (D the
    Hilbert-space dimension, 2**qubit_nb here) for the maximally
    mixed state.
    """
    tensor = rho.tensor
    N = rho.qubit_nb
    # Flatten the 2N-index tensor into a D x D matrix before tracing.
    matrix = bk.reshape(tensor, [2**N, 2**N])
    return bk.trace(bk.matmul(matrix, matrix)) | Calculate the purity of a mixed quantum state.
Purity, defined as tr(rho^2), has an upper bound of 1 for a pure state,
and a lower bound of 1/D (where D is the Hilbert space dimension) for a
competently mixed state.
Two closely related measures are the linear entropy, 1- purity, and the
participation ratio, 1/purity. |
def set_attributes(self, obj, **attributes):
    """PATCH ``obj``'s attributes ({name: value} kwargs) through the
    REST session.

    NOTE(review): the URL format template, the per-attribute JSON keys
    (likely u'name'/u'value') and the request-header literal were lost
    in extraction.
    """
    attributes_url = .format(self.session_url, obj.ref)
    attributes_list = [{u: str(name), u: str(value)} for name, value in attributes.items()]
    self._request(RestMethod.patch, attributes_url, headers={: },
                  data=json.dumps(attributes_list)) | Set attributes.
:param obj: requested object.
:param attributes: dictionary of {attribute: value} to set |
def from_dict(cls, data):
    """Build an AuthenticationInformation object from its dict form.

    :type data: dict[str, str]
    :param data: dict representation of an AuthenticationInformation
    :return: an instance of ``cls``; missing keys default to None.
    """
    field_names = ("auth_class_ref", "timestamp", "issuer")
    return cls(**{name: data.get(name) for name in field_names})
:rtype: satosa.internal.AuthenticationInformation
:param data: A dict representation of an AuthenticationInformation object
:return: An AuthenticationInformation object |
def save(self):
    """Saves the current patches list in the series file, one encoded
    patch line per file line."""
    with open(self.series_file, "wb") as series:
        for patchline in self.patchlines:
            series.write(_encode_str(str(patchline)))
            series.write(b"\n")
def delete_entitlement(owner, repo, identifier):
    """Delete the entitlement ``identifier`` from ``owner``/``repo``.

    API errors are translated by ``catch_raise_api_exception``;
    client-side rate limiting is applied from the response headers.
    """
    client = get_entitlements_api()
    with catch_raise_api_exception():
        _, _, headers = client.entitlements_delete_with_http_info(
            owner=owner, repo=repo, identifier=identifier
        )
    ratelimits.maybe_rate_limit(client, headers) | Delete an entitlement from a repository.
def _conv_general_permutations(self, dimension_numbers):
    """Utility for convolution dimension permutations relative to the
    Conv HLO: validate the (lhs, rhs, out) dimension spec strings and
    return the permutation tuples that bring each into
    (special0, special1, *spatial) order.

    NOTE(review): the special-character pair literals (e.g. 'N','C' /
    'O','I') and all three TypeError message templates were lost in
    extraction; restore from the original source.
    """
    lhs_spec, rhs_spec, out_spec = dimension_numbers
    lhs_char, rhs_char, out_char = (, ), (, ), (, )
    charpairs = (lhs_char, rhs_char, out_char)
    # Each spec must contain each of its two special characters exactly
    # once, and no duplicate characters at all.
    for i, (a, b) in enumerate(charpairs):
        if not (dimension_numbers[i].count(a) == 1 and
                dimension_numbers[i].count(b) == 1):
            msg = (
            )
            raise TypeError(msg.format(i, a, b, dimension_numbers[i]))
        if len(dimension_numbers[i]) != len(set(dimension_numbers[i])):
            msg = (
            )
            raise TypeError(msg.format(i, dimension_numbers[i]))
    # All three specs must agree on the set of spatial characters.
    if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) ==
            set(out_spec) - set(out_char)):
        msg = (
        )
        raise TypeError(msg.format(dimension_numbers))
    def getperm(spec, charpair):
        # Spatial dims for lhs/out are ordered to match rhs's order.
        spatial = (i for i, c in enumerate(spec) if c not in charpair)
        if spec is not rhs_spec:
            spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))
        return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)
    lhs_perm, rhs_perm, out_perm = map(getperm, dimension_numbers, charpairs)
    return lhs_perm, rhs_perm, out_perm | Utility for convolution dimension permutations relative to Conv HLO.
def get_namespaced_custom_object_status(self, group, version, namespace, plural, name, **kwargs):
    """Read the status subresource of the specified namespaced custom
    object, synchronously by default (async when the async-request
    kwarg is set).

    NOTE(review): the two kwarg keys were lost in extraction (likely
    '_return_http_data_only' and 'async_req' per the generated
    Kubernetes client).
    """
    kwargs[] = True
    if kwargs.get():
        return self.get_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, **kwargs)
    else:
        (data) = self.get_namespaced_custom_object_status_with_http_info(group, version, namespace, plural, name, **kwargs)
        return data | get_namespaced_custom_object_status # noqa: E501
read status of the specified namespace scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_namespaced_custom_object_status(group, version, namespace, plural, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:return: object
If the method is called asynchronously,
returns the request thread. |
def load(self, file, **options):
section = options.pop(, self.__class__.__name__)
parser = ConfigParser()
parser.read(file)
if parser.has_section(section):
verbose(options, "[{0}]".format(section))
for field_path, field in self.field_items(**options):
if field_path.startswith():
option = + field_path
else:
option = field_path
if parser.has_option(section, option):
if field.is_bool():
field.value = parser.getboolean(section, option)
elif field.is_float():
field.value = parser.getfloat(section, option)
elif field.is_string():
field.value = parser.get(section, option)
elif field.is_stream():
value = parser.get(section, option)
stream = bytes.fromhex(value.replace("['):
verbose(options,
"{0}{1} = {2}".format(section,
field_path,
field.value))
else:
verbose(options,
"{0}.{1} = {2}".format(section,
field_path,
field.value))
else:
verbose(options, "No section [{0}] found.".format(section)) | Loads the field *value* for each :class:`Field` *nested* in the
`Container` from an ``.ini`` *file*.
:param str file: name and location of the ``.ini`` *file*.
:keyword str section: section in the ``.ini`` *file* to lookup the
value for each :class:`Field` in the `Container`.
If no *section* is specified the class name of the instance is used.
:keyword bool nested: if ``True`` all :class:`Pointer` fields in the
`Container` load their referenced :attr:`~Pointer.data` object
field valus as well (chained method call).
:keyword bool verbose: if ``True`` the loading is executed in verbose
mode.
File `foo.ini`:
.. code-block:: ini
[Foo]
stream =
float = 0.0
structure.decimal = 0
array[0] = 0x0
array[1] = 0x0
array[2] = 0x0
pointer = 0x0
Example:
>>> class Foo(Structure):
... def __init__(self):
... super().__init__()
... self.stream = Stream()
... self.float = Float()
... self.structure = Structure()
... self.structure.decimal = Decimal(8)
... self.array = Array(Byte, 3)
... self.pointer = Pointer()
>>> foo = Foo()
>>> foo.load('foo.ini')
[Foo]
Foo.stream =
Foo.float = 0.0
Foo.structure.decimal = 0
Foo.array[0] = 0x0
Foo.array[1] = 0x0
Foo.array[2] = 0x0
Foo.pointer = 0x0
>>> foo.to_list(nested=True)
[('Foo.stream', ''),
('Foo.float', 0.0),
('Foo.structure.decimal', 0),
('Foo.array[0]', '0x0'),
('Foo.array[1]', '0x0'),
('Foo.array[2]', '0x0'),
('Foo.pointer', '0x0')]
>>> foo.to_json(nested=True)
'{"stream": "",
"float": 0.0,
"structure": {"decimal": 0},
"array": ["0x0", "0x0", "0x0"],
"pointer": {"value": "0x0",
"data": null}}' |
def parse(self, configManager, config):
    """Parse configuration options out of an .ini configuration file.

    Only sections listed in ``self.sections`` (or all sections when it
    is None) contribute options. Returns a dict of the options found.
    """
    parser = ConfigParser.RawConfigParser()
    options = {}
    configFile = self._getConfigFile(config)
    if configFile:
        parser.readfp(configFile)
        for section in parser.sections():
            if self.sections is None or section in self.sections:
                options.update(parser.items(section))
    return options
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object. |
def _get_loftee(data):
    """Build VEP plugin arguments for LOFTEE (loss-of-function
    annotation).

    Falls back to the literal "false" when no ancestral-sequence file
    is configured; loftee_path is derived from the directory of the
    resolved vep executable.
    """
    ancestral_file = tz.get_in(("genome_resources", "variation", "ancestral"), data)
    if not ancestral_file or not os.path.exists(ancestral_file):
        ancestral_file = "false"
    vep = config_utils.get_program("vep", data["config"])
    args = ["--plugin", "LoF,human_ancestor_fa:%s,loftee_path:%s" %
            (ancestral_file, os.path.dirname(os.path.realpath(vep)))]
    return args | Retrieve loss of function plugin parameters for LOFTEE.
https://github.com/konradjk/loftee |
def _is_not_pickle_safe_gl_class(obj_class):
    """Return True when ``obj_class`` is a GLC data structure
    (SFrame/SArray/SGraph) or a GLC model class — i.e. not safe to
    pickle directly."""
    gl_ds = [_SFrame, _SArray, _SGraph]
    return (obj_class in gl_ds) or _is_not_pickle_safe_gl_model_class(obj_class) | Check if class is a Turi create model.
The function does it by checking the method resolution order (MRO) of the
class and verifies that _Model is the base class.
Parameters
----------
obj_class : Class to be checked.
Returns
----------
True if the class is a GLC Model. |
def joint_distances(self):
    """Get the current joint separations for the skeleton.

    Returns one value per joint: the *squared* distance between its two
    anchor points; 0 means that joint's constraint is perfectly
    satisfied.
    """
    separations = []
    for joint in self.joints:
        delta = np.array(joint.anchor) - joint.anchor2
        separations.append((delta ** 2).sum())
    return separations
Returns
-------
distances : list of float
A list expressing the distance between the two joint anchor points,
for each joint in the skeleton. These quantities describe how
"exploded" the bodies in the skeleton are; a value of 0 indicates
that the constraints are perfectly satisfied for that joint. |
def find_uuid(es_url, index):
    """Find the unique-identifier field for an Elasticsearch index, or
    exit(1) when none can be determined.

    NOTE(review): the URL format template and the response-dict keys
    used to walk the search result were lost in extraction; the bare
    `%` formatting and empty subscripts cannot run as written.
    """
    uid_field = None
    res = requests.get( % (es_url, index))
    first_item = res.json()[][][0][]
    fields = first_item.keys()
    if in fields:
        uid_field =
    else:
        # Fall back to locating the field whose value equals the doc's id.
        uuid_value = res.json()[][][0][]
        logging.debug("Finding unique id for %s with value %s", index, uuid_value)
        for field in fields:
            if first_item[field] == uuid_value:
                logging.debug("Found unique id for %s: %s", index, field)
                uid_field = field
                break
    if not uid_field:
        logging.error("Can not find uid field for %s. Can not copy the index.", index)
        logging.error("Try to copy it directly with elasticdump or similar.")
        sys.exit(1)
    return uid_field | Find the unique identifier field for a given index
def p_class_constant_declaration(p):
    # PLY parser action. The grammar rule normally lives in this
    # function's DOCSTRING (which PLY reads at build time); it was
    # moved to the dataset's docstring column:
    #   class_constant_declaration : class_constant_declaration COMMA STRING EQUALS static_scalar
    #                              | CONST STRING EQUALS static_scalar
    # Deliberately documented with comments only — adding a docstring
    # here would change the grammar PLY sees.
    if len(p) == 6:
        p[0] = p[1] + [ast.ClassConstant(p[3], p[5], lineno=p.lineno(2))]
    else:
        p[0] = [ast.ClassConstant(p[2], p[4], lineno=p.lineno(1))] | class_constant_declaration : class_constant_declaration COMMA STRING EQUALS static_scalar
def _add_cat_dict(self,
cat_dict_class,
key_in_self,
check_for_dupes=True,
compare_to_existing=True,
**kwargs):
if cat_dict_class != Error:
try:
source = self._check_cat_dict_source(cat_dict_class,
key_in_self, **kwargs)
except CatDictError as err:
if err.warn:
self._log.info(" Not adding : ".format(self[
self._KEYS.NAME], key_in_self, str(err)))
return False
if source is None:
return False
new_entry = self._init_cat_dict(cat_dict_class, key_in_self, **kwargs)
if new_entry is None:
return False
if compare_to_existing and cat_dict_class != Error:
for item in self.get(key_in_self, []):
if new_entry.is_duplicate_of(item):
item.append_sources_from(new_entry)
return new_entry
self.catalog.aliases[new_entry[QUANTITY.VALUE]] = self[
self._KEYS.NAME]
self.setdefault(key_in_self, []).append(new_entry)
if (key_in_self == self._KEYS.ALIAS and check_for_dupes and
self.dupe_of):
self.merge_dupes()
return True | Add a `CatDict` to this `Entry`.
CatDict only added if initialization succeeds and it
doesn't already exist within the Entry. |
def _is_empty_observation_data(
    feature_ndims, observation_index_points, observations):
    """Returns `True` if the given observation data is empty.

    Empty means either both inputs are None, or the
    "number of observations" axis of ``observation_index_points`` —
    the axis just left of the ``feature_ndims`` trailing feature
    axes — has static size 0.
    """
    if observation_index_points is None and observations is None:
        return True
    num_obs = tf.compat.dimension_value(
        observation_index_points.shape[-(feature_ndims + 1)])
    return num_obs is not None and num_obs == 0
Emptiness means either
1. Both `observation_index_points` and `observations` are `None`, or
2. the "number of observations" shape is 0. The shape of
`observation_index_points` is `[..., N, f1, ..., fF]`, where `N` is the
number of observations and the `f`s are feature dims. Thus, we look at the
shape element just to the left of the leftmost feature dim. If that shape is
zero, we consider the data empty.
We don't check the shape of observations; validations are checked elsewhere in
the calling code, to ensure these shapes are consistent.
Args:
feature_ndims: the number of feature dims, as reported by the GP kernel.
observation_index_points: the observation data locations in the index set.
observations: the observation data.
Returns:
is_empty: True if the data were deemed to be empty. |
def parse_minionqc_report(self, s_name, f):
    # NOTE(review): this block was destroyed in extraction — the body
    # has collapsed into fragments of its own docstring and string
    # literals ("All reads", "q_filt", "gigabases", ...). Restore the
    # implementation from the original MultiQC minionqc module; only
    # the final q_threshold_list.add() call appears to survive intact.
    s report file for results.
    Uses only the "All reads" stats. Ignores "Q>=x" part.
    Q>=allAll readsq_filtallq_filtreadsgigabases{} {}allq_filt']
    self.q_threshold_list.add(q_threshold) | Parses minionqc's 'summary.yaml' report file for results.
Uses only the "All reads" stats. Ignores "Q>=x" part. |
def isSane(self):
if self.host == :
return True
host_parts = self.host.split()
if self.wildcard:
assert host_parts[0] == , host_parts
del host_parts[0]
return len(host_parts) > 2
return True | This method checks the to see if a trust root represents a
reasonable (sane) set of URLs. 'http://*.com/', for example
is not a reasonable pattern, as it cannot meaningfully specify
the site claiming it. This function attempts to find many
related examples, but it can only work via heuristics.
Negative responses from this method should be treated as
advisory, used only to alert the user to examine the trust
root carefully.
@return: Whether the trust root is sane
@rtype: C{bool} |
def invoice(request, invoice_id, access_code=None):
current_invoice = InvoiceController.for_id_or_404(invoice_id)
if not current_invoice.can_view(
user=request.user,
access_code=access_code,
):
raise Http404()
data = {
"invoice": current_invoice.invoice,
}
return render(request, "registrasion/invoice.html", data) | Displays an invoice.
This view is not authenticated, but it will only allow access to either:
the user the invoice belongs to; staff; or a request made with the correct
access code.
Arguments:
invoice_id (castable to int): The invoice_id for the invoice you want
to view.
access_code (Optional[str]): The access code for the user who owns
this invoice.
Returns:
render:
Renders ``registrasion/invoice.html``, with the following
data::
{
"invoice": models.commerce.Invoice(),
}
Raises:
Http404: if the current user cannot view this invoice and the correct
access_code is not provided. |
def get_cache_item(self):
t been set.Caching disabled in DEBUG modetemplate_cache_key']) | Gets the cached item. Raises AttributeError if it hasn't been set. |
def removeUnreferencedElements(doc, keepDefs):
global _num_elements_removed
num = 0
removeTags = [, , ]
identifiedElements = findElementsWithId(doc.documentElement)
referencedIDs = findReferencedElements(doc.documentElement)
for id in identifiedElements:
if id not in referencedIDs:
goner = identifiedElements[id]
if (goner is not None and goner.nodeName in removeTags
and goner.parentNode is not None
and goner.parentNode.tagName != ):
goner.parentNode.removeChild(goner)
num += 1
_num_elements_removed += 1
if not keepDefs:
defs = doc.documentElement.getElementsByTagName()
for aDef in defs:
elemsToRemove = removeUnusedDefs(doc, aDef)
for elem in elemsToRemove:
elem.parentNode.removeChild(elem)
_num_elements_removed += 1
num += 1
return num | Removes all unreferenced elements except for <svg>, <font>, <metadata>, <title>, and <desc>.
Also vacuums the defs of any non-referenced renderable elements.
Returns the number of unreferenced elements removed from the document. |
def power(self, n):
if n > 0:
return super().power(n)
return Kraus(SuperOp(self).power(n)) | The matrix power of the channel.
Args:
n (int): compute the matrix power of the superoperator matrix.
Returns:
Kraus: the matrix power of the SuperOp converted to a Kraus channel.
Raises:
QiskitError: if the input and output dimensions of the
QuantumChannel are not equal, or the power is not an integer. |
def get_filterbanks(nfilt=20,nfft=512,samplerate=16000,lowfreq=0,highfreq=None):
highfreq= highfreq or samplerate/2
assert highfreq <= samplerate/2, "highfreq is greater than samplerate/2"
lowmel = hz2mel(lowfreq)
highmel = hz2mel(highfreq)
melpoints = numpy.linspace(lowmel,highmel,nfilt+2)
bin = numpy.floor((nfft+1)*mel2hz(melpoints)/samplerate)
fbank = numpy.zeros([nfilt,nfft//2+1])
for j in range(0,nfilt):
for i in range(int(bin[j]), int(bin[j+1])):
fbank[j,i] = (i - bin[j]) / (bin[j+1]-bin[j])
for i in range(int(bin[j+1]), int(bin[j+2])):
fbank[j,i] = (bin[j+2]-i) / (bin[j+2]-bin[j+1])
return fbank | Compute a Mel-filterbank. The filters are stored in the rows, the columns correspond
to fft bins. The filters are returned as an array of size nfilt * (nfft/2 + 1)
:param nfilt: the number of filters in the filterbank, default 20.
:param nfft: the FFT size. Default is 512.
:param samplerate: the sample rate of the signal we are working with, in Hz. Affects mel spacing.
:param lowfreq: lowest band edge of mel filters, default 0 Hz
:param highfreq: highest band edge of mel filters, default samplerate/2
:returns: A numpy array of size nfilt * (nfft/2 + 1) containing filterbank. Each row holds 1 filter. |
def _can_connect(host, port=22):
try:
logger.debug(, host)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host,
port=port)
client.close()
logger.info(, host)
return True
except Exception as e:
logger.info(, host)
logger.info(, str(e))
return False | Checks if the connection to provided ``host`` and ``port`` is possible or not.
Args:
host (str): Hostname for the host to check connection.
port (int): Port name of the host to check connection on. |
def auto_constraints(self, component=None):
if not component:
for table in self.tables:
self.auto_constraints(table)
return
if not component.tableSchema.primaryKey:
idcol = component.get_column(term_uri())
if idcol:
component.tableSchema.primaryKey = [idcol.name]
self._auto_foreign_keys(component)
try:
table_type = self.get_tabletype(component)
except ValueError:
return
for table in self.tables:
self._auto_foreign_keys(table, component=component, table_type=table_type) | Use CLDF reference properties to implicitely create foreign key constraints.
:param component: A Table object or `None`. |
def value_compare(left, right, ordering=1):
try:
ltype = left.__class__
rtype = right.__class__
if ltype in list_types or rtype in list_types:
if left == None:
return ordering
elif right == None:
return - ordering
left = listwrap(left)
right = listwrap(right)
for a, b in zip(left, right):
c = value_compare(a, b) * ordering
if c != 0:
return c
if len(left) < len(right):
return - ordering
elif len(left) > len(right):
return ordering
else:
return 0
if ltype is float and isnan(left):
left = None
ltype = none_type
if rtype is float and isnan(right):
right = None
rtype = none_type
null_order = ordering*10
ltype_num = TYPE_ORDER.get(ltype, null_order)
rtype_num = TYPE_ORDER.get(rtype, null_order)
type_diff = ltype_num - rtype_num
if type_diff != 0:
return ordering if type_diff > 0 else -ordering
if ltype_num == null_order:
return 0
elif ltype is builtin_tuple:
for a, b in zip(left, right):
c = value_compare(a, b)
if c != 0:
return c * ordering
return 0
elif ltype in data_types:
for k in sorted(set(left.keys()) | set(right.keys())):
c = value_compare(left.get(k), right.get(k)) * ordering
if c != 0:
return c
return 0
elif left > right:
return ordering
elif left < right:
return -ordering
else:
return 0
except Exception as e:
Log.error("Can not compare values {{left}} to {{right}}", left=left, right=right, cause=e) | SORT VALUES, NULL IS THE LEAST VALUE
:param left: LHS
:param right: RHS
:param ordering: (-1, 0, 1) TO AFFECT SORT ORDER
:return: The return value is negative if x < y, zero if x == y and strictly positive if x > y. |
def _get_audio_channels(self, audio_abs_path):
channel_num = int(
subprocess.check_output(
(
).format(audio_abs_path, "Channels"),
shell=True, universal_newlines=True).rstrip())
return channel_num | Parameters
----------
audio_abs_path : str
Returns
-------
channel_num : int |
def ctx() -> moderngl.Context:
win = window()
if not win.ctx:
raise RuntimeError("Attempting to get context before creation")
return win.ctx | ModernGL context |
def get_qpimage(self, idx):
if self._bgdata:
qpi = super(SeriesHdf5Qpimage, self).get_qpimage(idx)
else:
with self._qpseries() as qps:
qpi = qps.get_qpimage(index=idx).copy()
for key in self.meta_data:
qpi[key] = self.meta_data[key]
qpi["identifier"] = self.get_identifier(idx)
return qpi | Return background-corrected QPImage of data at index `idx` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.