code stringlengths 26 79.6k | docstring stringlengths 1 46.9k |
|---|---|
def download_fastqs(self,dest_dir,barcode,overwrite=False):
fastq_props = self.get_fastq_files_props(barcode=barcode)
res = {}
for f in fastq_props:
props = fastq_props[f]
read_num = int(props["read"])
barcode = props["barcode"]
if barcode not in res:
res[barcode] = {}
name = props["fastq_file_name"]
filedest = os.path.abspath(os.path.join(dest_dir,name))
res[barcode][read_num] = filedest
if os.path.exists(filedest) and not overwrite:
continue
debug_logger.debug("Downloading FASTQ file {name} from DNAnexus project {project} to {path}.".format(name=name,project=self.dx_project_name,path=filedest))
dxpy.download_dxfile(f,filedest)
return res | Downloads all FASTQ files in the project that match the specified barcode, or if a barcode
isn't given, all FASTQ files as in this case it is assumed that this is not a multiplexed
experiment. Files are downloaded to the directory specified by dest_dir.
Args:
barcode: `str`. The barcode sequence used.
dest_dir: `str`. The local directory in which the FASTQs will be downloaded.
overwrite: `bool`. If True, then if the file to download already exists in dest_dir, the
file will be downloaded again, overwriting it. If False, the file will not be
downloaded again from DNAnexus.
Returns:
`dict`: The key is the barcode, and the value is a dict with integer keys of 1 for the
forward reads file, and 2 for the reverse reads file. If not paired-end,
Raises:
`Exception`: The barcode is specified and less than or greater than 2 FASTQ files are found. |
def wait(sec):
while sec > 0:
sys.stdout.write( + str(sec//60).zfill(1) + ":" +
str(sec % 60).zfill(2) + )
sec -= 1
time.sleep(1)
sys.stdout.write( + + ) | Prints a timer with the format 0:00 to the console,
and then clears the line when the timer is done |
def abu_profiles(p,ifig=1,xlm=xlm,ylm=(-8,0),show=False,abunds=,xaxis=xaxis_type, figsize1=(8,8)):
Allcommonly used
matplotlib.rc(,facecolor=,figsize=figsize1)
f, ([ax1,ax2],[ax3,ax4]) = pl.subplots(2, 2, sharex=False, sharey=True, figsize=figsize1)
all_isos=[[,,,,,,,,,,,,],[,,,,,,,,,,,,], [, ,, ,,,,, ,, , ,],
[,,,,,,,,,,,,]]
if abunds == :
abus=[[],[],[],[]]
j=0
for i, row in enumerate(all_isos):
for iso in row:
if iso in p.cols:
abus[i].append(iso)
j+=1
abus1=[]
abus2 =[[],[],[],[]]
for l in range(len(abus)):
for k in range(len(abus[l])):
abus1.append(abus[l][k])
is_small_isos = False
for i in range(len(abus)):
if len(abus[i]) < 5:
is_small_isos = True
print("Missing isotopes from the default list. Distributing the ones you have over the panels.")
if is_small_isos:
n=4
quo, rem = divmod(len(abus1), n)
for i in range(len(abus2)):
for k in range(i*quo,(i+1)*quo+rem):
abus2[i].append(abus1[k])
abus = abus2
else:
abus = abus
ax = [ax1,ax2,ax3,ax4]
xxx = p.get() if xaxis is "Eulerian" else p.get()
mass = p.get()
radius = p.get()*ast.rsun_cm/1.e8
if xaxis is "Eulerian":
xxx = radius
if xlm[0] == 0 and xlm[1] == 0:
indtop = 0
indbot = len(mass)-1
else:
indbot = np.where(radius>=xlm[0])[0][-1]
indtop = np.where(radius<xlm[1])[0][0]
xll = (radius[indbot],radius[indtop])
xxlabel = "Radius (Mm)"
elif xaxis is "Lagrangian":
xxx = mass
xll = xlm
xxlabel = "$M / \mathrm{M_{sun}}$"
else:
print("Error: donlog Xequalt =star_age dt =time_stepmodel number = model_numberabuprofmodel_number.png') | Four panels of abundance plots
Parameters
----------
p : instance
mesa_profile instance
xlm : tuple
xlimits: mass_min, mass_max
abus : 'All' plots many 'commonly used' isotopes up to Fe if they are in your mesa output.
otherwise provide a list of lists of desired abus
show : Boolean
False for batch use
True for interactive use
xaxis : character
Lagrangian mass is radial mass coordinate
Eulerian radius is radial coordinate, in Mm |
def prepare_venn_axes(ax, centers, radii):
ax.set_aspect()
ax.set_xticks([])
ax.set_yticks([])
min_x = min([centers[i][0] - radii[i] for i in range(len(radii))])
max_x = max([centers[i][0] + radii[i] for i in range(len(radii))])
min_y = min([centers[i][1] - radii[i] for i in range(len(radii))])
max_y = max([centers[i][1] + radii[i] for i in range(len(radii))])
ax.set_xlim([min_x - 0.1, max_x + 0.1])
ax.set_ylim([min_y - 0.1, max_y + 0.1])
ax.set_axis_off() | Sets properties of the axis object to suit venn plotting. I.e. hides ticks, makes proper xlim/ylim. |
def authenticate(self, provider, creds=None, cookies=None):
cookies = cookies or Cookies.global_instance()
if not provider:
raise BasicAuthException()
provider_config = self.get_options().providers.get(provider)
if not provider_config:
raise BasicAuthException(.format(provider))
url = provider_config.get()
if not url:
raise BasicAuthException(.format(provider))
if not self.get_options().allow_insecure_urls and not url.startswith():
raise BasicAuthException(.format(provider, url))
if creds:
auth = requests.auth.HTTPBasicAuth(creds.username, creds.password)
else:
auth = None
response = requests.get(url, auth=auth)
if response.status_code != requests.codes.ok:
if response.status_code == requests.codes.unauthorized:
parsed = www_authenticate.parse(response.headers.get(, ))
if in parsed:
raise Challenged(url, response.status_code, response.reason, parsed[][])
raise BasicAuthException(url, response.status_code, response.reason)
cookies.update(response.cookies) | Authenticate against the specified provider.
:param str provider: Authorize against this provider.
:param pants.auth.basic_auth.BasicAuthCreds creds: The creds to use.
If unspecified, assumes that creds are set in the netrc file.
:param pants.auth.cookies.Cookies cookies: Store the auth cookies in this instance.
If unspecified, uses the global instance.
:raises pants.auth.basic_auth.BasicAuthException: If auth fails due to misconfiguration or
rejection by the server. |
def p_type_list(self, p):
if p[1] is None:
p[0] = []
else:
p[1].append(p[2])
p[0] = p[1] | type_list : type_list type_def
| empty |
def strip_ansi_escape_codes(self, string_buffer):
log.debug("In strip_ansi_escape_codes")
log.debug("repr = {}".format(repr(string_buffer)))
code_position_cursor = chr(27) + r"\[\d+;\d+H"
code_show_cursor = chr(27) + r"\[\?25h"
code_next_line = chr(27) + r"E"
code_erase_line_end = chr(27) + r"\[K"
code_erase_line = chr(27) + r"\[2K"
code_erase_start_line = chr(27) + r"\[K"
code_enable_scroll = chr(27) + r"\[\d+;\d+r"
code_form_feed = chr(27) + r"\[1L"
code_carriage_return = chr(27) + r"\[1M"
code_disable_line_wrapping = chr(27) + r"\[\?7l"
code_reset_mode_screen_options = chr(27) + r"\[\?\d+l"
code_reset_graphics_mode = chr(27) + r"\[00m"
code_erase_display = chr(27) + r"\[2J"
code_graphics_mode = chr(27) + r"\[\d\d;\d\dm"
code_graphics_mode2 = chr(27) + r"\[\d\d;\d\d;\d\dm"
code_get_cursor_position = chr(27) + r"\[6n"
code_cursor_position = chr(27) + r"\[m"
code_erase_display = chr(27) + r"\[J"
code_attrs_off = chr(27) + r"[0m"
code_reverse = chr(27) + r"[7m"
code_set = [
code_position_cursor,
code_show_cursor,
code_erase_line,
code_enable_scroll,
code_erase_start_line,
code_form_feed,
code_carriage_return,
code_disable_line_wrapping,
code_erase_line_end,
code_reset_mode_screen_options,
code_reset_graphics_mode,
code_erase_display,
code_graphics_mode,
code_graphics_mode2,
code_get_cursor_position,
code_cursor_position,
code_erase_display,
code_attrs_off,
code_reverse,
]
output = string_buffer
for ansi_esc_code in code_set:
output = re.sub(ansi_esc_code, "", output)
output = re.sub(code_next_line, self.RETURN, output)
log.debug("new_output = {0}".format(output))
log.debug("repr = {0}".format(repr(output)))
return output | Remove any ANSI (VT100) ESC codes from the output
http://en.wikipedia.org/wiki/ANSI_escape_code
Note: this does not capture ALL possible ANSI Escape Codes only the ones
I have encountered
Current codes that are filtered:
ESC = '\x1b' or chr(27)
ESC = is the escape character [^ in hex ('\x1b')
ESC[24;27H Position cursor
ESC[?25h Show the cursor
ESC[E Next line (HP does ESC-E)
ESC[K Erase line from cursor to the end of line
ESC[2K Erase entire line
ESC[1;24r Enable scrolling from start to row end
ESC[?6l Reset mode screen with options 640 x 200 monochrome (graphics)
ESC[?7l Disable line wrapping
ESC[2J Code erase display
ESC[00;32m Color Green (30 to 37 are different colors) more general pattern is
ESC[\d\d;\d\dm and ESC[\d\d;\d\d;\d\dm
ESC[6n Get cursor position
HP ProCurve and Cisco SG300 require this (possible others).
:param string_buffer: The string to be processed to remove ANSI escape codes
:type string_buffer: str |
def getImportFromObjects(node):
somenames = [x.asname for x in node.names if x.asname]
othernames = [x.name for x in node.names if not x.asname]
return somenames+othernames | Returns a list of objects referenced by import from node |
def bind_key_name(self, function, object_name):
for funcname, name in self.name_map.items():
if funcname == function:
self.name_map[
funcname] = object_name | Bind a key to an object name |
def consume(callback, bindings=None, queues=None):
if isinstance(bindings, dict):
bindings = [bindings]
if bindings is None:
bindings = config.conf["bindings"]
else:
try:
config.validate_bindings(bindings)
except exceptions.ConfigurationException as e:
raise ValueError(e.message)
if queues is None:
queues = config.conf["queues"]
else:
try:
config.validate_queues(queues)
except exceptions.ConfigurationException as e:
raise ValueError(e.message)
session = _session.ConsumerSession()
session.consume(callback, bindings=bindings, queues=queues) | Start a message consumer that executes the provided callback when messages are
received.
This API is blocking and will not return until the process receives a signal
from the operating system.
.. warning:: This API is runs the callback in the IO loop thread. This means
if your callback could run for a length of time near the heartbeat interval,
which is likely on the order of 60 seconds, the broker will kill the TCP
connection and the message will be re-delivered on start-up.
For now, use the :func:`twisted_consume` API which runs the
callback in a thread and continues to handle AMQP events while the
callback runs if you have a long-running callback.
The callback receives a single positional argument, the message:
>>> from fedora_messaging import api
>>> def my_callback(message):
... print(message)
>>> bindings = [{'exchange': 'amq.topic', 'queue': 'demo', 'routing_keys': ['#']}]
>>> queues = {
... "demo": {"durable": False, "auto_delete": True, "exclusive": True, "arguments": {}}
... }
>>> api.consume(my_callback, bindings=bindings, queues=queues)
If the bindings and queue arguments are not provided, they will be loaded from
the configuration.
For complete documentation on writing consumers, see the :ref:`consumers`
documentation.
Args:
callback (callable): A callable object that accepts one positional argument,
a :class:`Message` or a class object that implements the ``__call__``
method. The class will be instantiated before use.
bindings (dict or list of dict): Bindings to declare before consuming. This
should be the same format as the :ref:`conf-bindings` configuration.
queues (dict): The queue or queues to declare and consume from. This should be
in the same format as the :ref:`conf-queues` configuration dictionary where
each key is a queue name and each value is a dictionary of settings for that
queue.
Raises:
fedora_messaging.exceptions.HaltConsumer: If the consumer requests that
it be stopped.
ValueError: If the consumer provide callback that is not a class that
implements __call__ and is not a function, if the bindings argument
is not a dict or list of dicts with the proper keys, or if the queues
argument isn't a dict with the proper keys. |
def __on_publish(self, client, userdata, mid):
try:
self.__in_flight[mid].set()
except KeyError:
pass | A message has been published by a server
:param client: Client that received the message
:param userdata: User data (unused)
:param mid: Message ID |
def _connect(self):
self.conn = self._create_connection()
spawn(self.conn.connect)
self.set_nick(self.nick)
self.cmd(u, u.format(self.nick, self.realname)) | Connects the bot to the server and identifies itself. |
def fmtVersion(*vsnparts):
if len(vsnparts) < 1:
raise s_exc.BadTypeValu(valu=repr(vsnparts), name=,
mesg=,)
ret = .join([str(part).lower() for part in vsnparts])
return ret | Join a string of parts together with a . separator.
Args:
*vsnparts:
Returns: |
def ranker(self, X, meta):
total_score = X.sum(axis=1).transpose()
total_score = np.squeeze(np.asarray(total_score))
ranks = total_score.argsort()
ranks = ranks[::-1]
sorted_meta = [meta[r] for r in ranks]
sorted_X = X[ranks]
return (sorted_X, sorted_meta) | Sort the place features list by the score of its relevance. |
def MRCA(list_of_taxids):
from ete2 import Tree
t = TaxIDTree(list_of_taxids)
t = Tree(str(t), format=8)
ancestor = t.get_common_ancestor(*t.get_leaves())
return ancestor.name | This gets the most recent common ancester (MRCA) for a list of taxids
>>> mylist = [3702, 3649, 3694, 3880]
>>> MRCA(mylist)
'rosids' |
def blacklistClient(self, clientName: str,
reason: str = None, code: int = None):
msg = "{} blacklisting client {}".format(self, clientName)
if reason:
msg += " for reason {}".format(reason)
logger.display(msg)
self.clientBlacklister.blacklist(clientName) | Add the client specified by `clientName` to this node's blacklist |
def reporter(self):
make_path(self.reportpath)
header =
data =
with open(os.path.join(self.reportpath, self.analysistype + ), ) as report:
for sample in self.runmetadata.samples:
data += sample.name +
if sample[self.analysistype].results:
if not sample[self.analysistype].multiple:
for name, identity in sample[self.analysistype].results.items():
if name == sample[self.analysistype].besthit[0]:
data += .format(name, identity, sample[self.analysistype].genera[name],
sample[self.analysistype].avgdepth[name])
else:
data += .format(, , .join(sample[self.analysistype]
.classification), )
else:
data +=
report.write(header)
report.write(data) | Creates a report of the results |
def get_label_scale(self, min_val, max_val, size):
if size < self.SCALE_DENSITY:
label_cnt = 1
else:
label_cnt = int(size / self.SCALE_DENSITY)
try:
if max_val >= 100:
label = [int((min_val + i * (max_val - min_val) / label_cnt))
for i in range(label_cnt + 1)]
else:
label = [round((min_val + i *
(max_val - min_val) / label_cnt), 1)
for i in range(label_cnt + 1)]
return label
except ZeroDivisionError:
logging.debug("Side label creation divided by 0")
return "" | Dynamically change the scale of the graph (y lable) |
def get_kwargs(self, **kwargs):
version = kwargs.pop("version", "v1")
if version == "v1":
base = kwargs.pop("base", "/api")
elif "/" in version:
base = kwargs.pop("base", "/apis")
else:
if "base" not in kwargs:
raise TypeError("unknown API version; base kwarg must be specified.")
base = kwargs.pop("base")
bits = [base, version]
if "namespace" in kwargs:
n = kwargs.pop("namespace")
if n is not None:
if n:
namespace = n
else:
namespace = self.config.namespace
if namespace:
bits.extend([
"namespaces",
namespace,
])
url = kwargs.get("url", "")
if url.startswith("/"):
url = url[1:]
bits.append(url)
kwargs["url"] = self.url + posixpath.join(*bits)
return kwargs | Creates a full URL to request based on arguments.
:Parametes:
- `kwargs`: All keyword arguments to build a kubernetes API endpoint |
def get_from_string(cls, string_phase):
if string_phase == :
return cls.PENDING
elif string_phase == :
return cls.RUNNING
elif string_phase == :
return cls.SUCCEEDED
elif string_phase == :
return cls.FAILED
elif string_phase == :
return cls.UNKNOWN
return cls.UNKNOWN | Convert string value obtained from k8s API to PodPhase enum value
:param string_phase: str, phase value from Kubernetes API
:return: PodPhase |
def period(A,M1,M2):
A *= rsun_cm
print(A)
velocity = np.sqrt(grav_const*msun_g*(M1+M2)/A)
print(old_div(velocity,1.e5))
p = 2.*np.pi * A / velocity
p /= (60*60*24.)
return p | calculate binary period from separation.
Parameters
----------
A : float
separation A Rsun.
M1, M2 : float
M in Msun.
Returns
-------
p
period in days. |
def proximal_translation(prox_factory, y):
r
def translation_prox_factory(sigma):
return (ConstantOperator(y) + prox_factory(sigma) *
(IdentityOperator(y.space) - ConstantOperator(y)))
return translation_prox_factory | r"""Calculate the proximal of the translated function F(x - y).
Parameters
----------
prox_factory : callable
A factory function that, when called with a step size, returns the
proximal operator of ``F``.
y : Element in domain of ``F``.
Returns
-------
prox_factory : function
Factory for the proximal operator to be initialized
Notes
-----
Given a functional :math:`F`, this is calculated according to the rule
.. math::
\mathrm{prox}_{\sigma F( \cdot - y)}(x) =
y + \mathrm{prox}_{\sigma F}(x - y)
where :math:`y` is the translation, and :math:`\sigma` is the step size.
For reference on the identity used, see [CP2011c].
References
----------
[CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting
methods in signal processing.* In: Bauschke, H H, Burachik, R S,
Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point
algorithms for inverse problems in science and engineering, Springer,
2011. |
def merge(cls, *args, **kwargs):
newkeys = bool(kwargs.get(, False))
ignore = kwargs.get(, list())
if len(args) < 1:
raise ValueError()
elif not all(isinstance(s, Ent) for s in args):
raise ValueError(
)
ent = args[0]
data = cls.load(ent)
for ent in args[1:]:
for key, value in ent.__dict__.items():
if key in ignore:
continue
if key in data.__dict__:
v1 = data.__dict__[key]
if type(value) == type(v1):
if isinstance(v1, Ent):
data.__dict__[key] = cls.merge(v1, value, **kwargs)
else:
data.__dict__[key] = cls.load(value)
elif newkeys:
data.__dict__[key] = value
return data | Create a new Ent from one or more existing Ents. Keys in the
later Ent objects will overwrite the keys of the previous Ents.
Later keys of different type than in earlier Ents will be bravely
ignored.
The following keyword arguments are recognized:
newkeys: boolean value to determine whether keys from later Ents
should be included if they do not exist in earlier Ents.
ignore: list of strings of key names that should not be overridden by
later Ent keys. |
def _deserialize(
self, data, fields_dict, error_store, many=False, partial=False,
unknown=RAISE, dict_class=dict, index_errors=True, index=None,
):
index = index if index_errors else None
if many:
if not is_collection(data):
error_store.store_error([self.error_messages[]], index=index)
ret = []
else:
self._pending = True
ret = [
self._deserialize(
d, fields_dict, error_store, many=False,
partial=partial, unknown=unknown,
dict_class=dict_class, index=idx,
index_errors=index_errors,
)
for idx, d in enumerate(data)
]
self._pending = False
return ret
ret = dict_class()
if not isinstance(data, Mapping):
error_store.store_error([self.error_messages[]], index=index)
else:
partial_is_collection = is_collection(partial)
for attr_name, field_obj in iteritems(fields_dict):
if field_obj.dump_only:
continue
field_name = attr_name
if field_obj.data_key:
field_name = field_obj.data_key
raw_value = data.get(field_name, missing)
if raw_value is missing:
key,
(index if index_errors else None),
)
return ret | Deserialize ``data`` based on the schema defined by ``fields_dict``.
:param dict data: The data to deserialize.
:param dict fields_dict: Mapping of field names to :class:`Field` objects.
:param ErrorStore error_store: Structure to store errors.
:param bool many: Set to `True` if ``data`` should be deserialized as
a collection.
:param bool|tuple partial: Whether to ignore missing fields and not require
any fields declared. Propagates down to ``Nested`` fields as well. If
its value is an iterable, only missing fields listed in that iterable
will be ignored. Use dot delimiters to specify nested fields.
:param unknown: Whether to exclude, include, or raise an error for unknown
fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
:param type dict_class: Dictionary class used to construct the output.
:param bool index_errors: Whether to store the index of invalid items in
``self.errors`` when ``many=True``.
:param int index: Index of the item being serialized (for storing errors) if
serializing a collection, otherwise `None`.
:return: A dictionary of the deserialized data. |
def validate_attr(resource_attr_id, scenario_id, template_id=None):
rs = db.DBSession.query(ResourceScenario).\
filter(ResourceScenario.resource_attr_id==resource_attr_id,
ResourceScenario.scenario_id==scenario_id).options(
joinedload_all("resourceattr")).options(
joinedload_all("dataset")
).one()
error = None
try:
_do_validate_resourcescenario(rs, template_id)
except HydraError as e:
error = JSONObject(dict(
ref_key = rs.resourceattr.ref_key,
ref_id = rs.resourceattr.get_resource_id(),
ref_name = rs.resourceattr.get_resource().get_name(),
resource_attr_id = rs.resource_attr_id,
attr_id = rs.resourceattr.attr.id,
attr_name = rs.resourceattr.attr.name,
dataset_id = rs.dataset_id,
scenario_id=scenario_id,
template_id=template_id,
error_text=e.args[0]))
return error | Check that a resource attribute satisfies the requirements of all the types of the
resource. |
def python_2_format_compatible(method):
if six.PY3:
return method
def wrapper(self, format_spec):
formatted = method(self, format_spec)
if isinstance(format_spec, str):
return formatted.encode()
return formatted
return wrapper | Handles bytestring and unicode inputs for the `__format__()` method in
Python 2. This function has no effect in Python 3.
:param method: The `__format__()` method to wrap.
:return: The wrapped method. |
def add_self_loops(matrix, loop_value):
shape = matrix.shape
assert shape[0] == shape[1], "Error, matrix is not square"
if isspmatrix(matrix):
new_matrix = matrix.todok()
else:
new_matrix = matrix.copy()
for i in range(shape[0]):
new_matrix[i, i] = loop_value
if isspmatrix(matrix):
return new_matrix.tocsc()
return new_matrix | Add self-loops to the matrix by setting the diagonal
to loop_value
:param matrix: The matrix to add loops to
:param loop_value: Value to use for self-loops
:returns: The matrix with self-loops |
async def popen_uci(command: Union[str, List[str]], *, setpgrp: bool = False, loop=None, **popen_args: Any) -> Tuple[asyncio.SubprocessTransport, UciProtocol]:
transport, protocol = await UciProtocol.popen(command, setpgrp=setpgrp, loop=loop, **popen_args)
try:
await protocol.initialize()
except:
transport.close()
raise
return transport, protocol | Spawns and initializes an UCI engine.
:param command: Path of the engine executable, or a list including the
path and arguments.
:param setpgrp: Open the engine process in a new process group. This will
stop signals (such as keyboard interrupts) from propagating from the
parent process. Defaults to ``False``.
:param popen_args: Additional arguments for
`popen <https://docs.python.org/3/library/subprocess.html#popen-constructor>`_.
Do not set ``stdin``, ``stdout``, ``bufsize`` or
``universal_newlines``.
Returns a subprocess transport and engine protocol pair. |
def get_default_field_names(self, declared_fields, model_info):
return (
[self.url_field_name] +
list(declared_fields.keys()) +
list(model_info.fields.keys()) +
list(model_info.forward_relations.keys())
) | Return the default list of field names that will be used if the
`Meta.fields` option is not specified. |
def _reformat_policy(policy):
policy_name = policy[]
ret = {}
ret[] = policy[]
attrs = policy[]
if ret[] != :
return policy_name, ret
attributes = dict()
for attr in attrs:
attributes[attr[]] = attr[]
ret[] = dict()
ret[][] = bool(attributes.get())
ret[][] = bool(attributes.get())
ret[][] = bool(attributes.get())
ret[][] = bool(attributes.get())
ret[][] = bool(attributes.get())
ret[] = bool(attributes.get())
ret[] = attributes.get(, None)
non_ciphers = [
,
,
,
,
,
,
]
ciphers = []
for cipher in attributes:
if attributes[cipher] == and cipher not in non_ciphers:
ciphers.append(cipher)
ciphers.sort()
ret[] = ciphers
return policy_name, ret | Policies returned from boto3 are massive, ugly, and difficult to read.
This method flattens and reformats the policy.
:param policy: Result from invoking describe_load_balancer_policies(...)
:return: Returns a tuple containing policy_name and the reformatted policy dict. |
def get_rich_events(self, item):
if "version_downloads_data" not in item[]:
return []
eitem = self.get_rich_item(item)
for sample in item[]["version_downloads_data"]["version_downloads"]:
event = deepcopy(eitem)
event[] = sample[]
event[] = sample[]
sample_date = parser.parse(event[])
event[] = sample[]
event[] = sample[]
event.update(self.get_grimoire_fields(sample_date.isoformat(), "downloads_event"))
yield event | In the events there are some common fields with the crate. The name
of the field must be the same in the create and in the downloads event
so we can filer using it in crate and event at the same time.
* Fields that don't change: the field does not change with the events
in a create so the value is always the same in the events of a create.
* Fields that change: the value of the field changes with events |
def is_user_valid(self, userID):
cur = self.conn.cursor()
cur.execute(, [userID])
results = cur.fetchall()
cur.close()
return len(results) > 0 | Check if this User ID is valid. |
def splitByConnectivity(actor, maxdepth=100):
actor.addIDs()
pd = actor.polydata()
cf = vtk.vtkConnectivityFilter()
cf.SetInputData(pd)
cf.SetExtractionModeToAllRegions()
cf.ColorRegionsOn()
cf.Update()
cpd = cf.GetOutput()
a = Actor(cpd)
alist = []
for t in range(max(a.scalars("RegionId")) - 1):
if t == maxdepth:
break
suba = a.clone().threshold("RegionId", t - 0.1, t + 0.1)
area = suba.area()
alist.append([suba, area])
alist.sort(key=lambda x: x[1])
alist.reverse()
blist = []
for i, l in enumerate(alist):
l[0].color(i + 1)
l[0].mapper.ScalarVisibilityOff()
blist.append(l[0])
return blist | Split a mesh by connectivity and order the pieces by increasing area.
:param int maxdepth: only consider this number of mesh parts.
.. hint:: |splitmesh| |splitmesh.py|_ |
def filter(pred: Callable, xs: Iterable):
generator = (x for x in xs if pred(x))
return gather(*generator) | Applied a predicate to a list returning a :py:class:`PromisedObject`
containing the values satisfying the predicate.
:param pred: predicate function.
:param xs: iterable object.
:returns: :py:class:`PromisedObject` |
def initialize(self, client, initial_response, deserialization_callback):
self._client = client
self._response = initial_response
self._operation = LongRunningOperation(initial_response, deserialization_callback, self._lro_options)
try:
self._operation.set_initial_status(initial_response)
except BadStatus:
self._operation.status =
raise CloudError(initial_response)
except BadResponse as err:
self._operation.status =
raise CloudError(initial_response, str(err))
except OperationFailed:
raise CloudError(initial_response) | Set the initial status of this LRO.
:param initial_response: The initial response of the poller
:raises: CloudError if initial status is incorrect LRO state |
def _map_condition(self, wire_map, condition):
if condition is None:
new_condition = None
else:
bit0 = (condition[0], 0)
new_condition = (wire_map.get(bit0, bit0)[0], condition[1])
return new_condition | Use the wire_map dict to change the condition tuple's creg name.
Args:
wire_map (dict): a map from wires to wires
condition (tuple): (ClassicalRegister,int)
Returns:
tuple(ClassicalRegister,int): new condition |
def _strand_flag(data):
strand_flag = {"unstranded": "0",
"firststrand": "2",
"secondstrand": "1"}
stranded = dd.get_strandedness(data)
assert stranded in strand_flag, ("%s is not a valid strandedness value. "
"Valid values are , , "
"and 'unstranded")
return strand_flag[stranded] | 0: unstranded 1: stranded 2: reverse stranded |
def cublasZherk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc):
status = _libcublas.cublasZherk_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
n, k, ctypes.byref(ctypes.c_double(alpha)),
int(A), lda,
ctypes.byref(ctypes.c_double(beta)),
int(C), ldc)
cublasCheckStatus(status) | Rank-k operation on Hermitian matrix. |
def add_progress(self, count, symbol=,
color=None, on_color=None, attrs=None):
self._progress.add_progress(count, symbol, color, on_color, attrs) | Add a section of progress to the progressbar.
The progress is captured by "count" and displayed as a fraction
of the statusbar width proportional to this count over the total
progress displayed. The progress will be displayed using the "symbol"
character and the foreground and background colours and display style
determined by the the "fg", "bg" and "style" parameters. For these,
use the colorama package to set up the formatting. |
def simxPackInts(intList):
if sys.version_info[0] == 3:
s=bytes()
for i in range(len(intList)):
s=s+struct.pack(,intList[i])
s=bytearray(s)
else:
s=
for i in range(len(intList)):
s+=struct.pack(,intList[i])
return s | Please have a look at the function description/documentation in the V-REP user manual |
def get_cutoff_indices(flow, fhigh, df, N):
if flow:
kmin = int(flow / df)
if kmin < 0:
err_msg = "Start frequency cannot be negative. "
err_msg += "Supplied value and kmin {} and {}".format(flow, kmin)
raise ValueError(err_msg)
else:
kmin = 1
if fhigh:
kmax = int(fhigh / df )
if kmax > int((N + 1)/2.):
kmax = int((N + 1)/2.)
else:
kmax = int((N + 1)/2.)
if kmax <= kmin:
err_msg = "Kmax cannot be less than or equal to kmin. "
err_msg += "Provided values of freqencies (min,max) were "
err_msg += "{} and {} ".format(flow, fhigh)
err_msg += "corresponding to (kmin, kmax) of "
err_msg += "{} and {}.".format(kmin, kmax)
raise ValueError(err_msg)
return kmin,kmax | Gets the indices of a frequency series at which to stop an overlap
calculation.
Parameters
----------
flow: float
The frequency (in Hz) of the lower index.
fhigh: float
The frequency (in Hz) of the upper index.
df: float
The frequency step (in Hz) of the frequency series.
N: int
The number of points in the **time** series. Can be odd
or even.
Returns
-------
kmin: int
kmax: int |
def rename_directory(db, user_id, old_api_path, new_api_path):
old_db_path = from_api_dirname(old_api_path)
new_db_path = from_api_dirname(new_api_path)
if old_db_path == :
raise RenameRoot()
if _dir_exists(db, user_id, new_db_path):
raise DirectoryExists(new_api_path)
db.execute(
directories.update().where(
and_(
directories.c.user_id == user_id,
directories.c.name == old_db_path,
)
).values(
name=new_db_path,
)
)
db.execute(
directories.update().where(
and_(
directories.c.user_id == user_id,
directories.c.name.startswith(old_db_path),
directories.c.parent_name.startswith(old_db_path),
)
).values(
name=func.concat(
new_db_path,
func.right(directories.c.name, -func.length(old_db_path))
),
parent_name=func.concat(
new_db_path,
func.right(
directories.c.parent_name,
-func.length(old_db_path)
)
),
)
) | Rename a directory. |
def decoder(self, response: bytes):
response = response[:-(len(self.SEPARATOR))]
if self.compreser is not None:
response = self.compreser.decompress(response)
if self.debug is True:
response = json.loads(response.decode())
else:
response = msgpack.unpackb(response, encoding=)
version = response.get("MPRPC")
if version and version == self.VERSION:
return response
else:
raise ProtocolException("Wrong Protocol") | 编码请求为bytes.
检查是否使用debug模式和是否对数据进行压缩.之后根据状态将python字典形式的请求编码为字节串.
Parameters:
response (bytes): - 响应的字节串编码
Return:
(Dict[str, Any]): - python字典形式的响应 |
def distutils_old_autosemver_case(metadata, attr, value):
metadata = distutils_default_case(metadata, attr, value)
create_changelog(bugtracker_url=getattr(metadata, , ))
return metadata | DEPRECATED |
def convert_flux(wavelengths, fluxes, out_flux_unit, **kwargs):
if not isinstance(fluxes, u.Quantity):
fluxes = fluxes * PHOTLAM
out_flux_unit = validate_unit(out_flux_unit)
out_flux_unit_name = out_flux_unit.to_string()
in_flux_unit_name = fluxes.unit.to_string()
if in_flux_unit_name == out_flux_unit_name:
return fluxes
in_flux_type = fluxes.unit.physical_type
out_flux_type = out_flux_unit.physical_type
if not isinstance(wavelengths, u.Quantity):
wavelengths = wavelengths * u.AA
eqv = u.spectral_density(wavelengths)
try:
out_flux = fluxes.to(out_flux_unit, eqv)
except u.UnitConversionError:
if fluxes.unit == PHOTLAM:
flux_photlam = fluxes
elif in_flux_type != :
flux_photlam = fluxes.to(PHOTLAM, eqv)
else:
flux_photlam = _convert_flux(
wavelengths, fluxes, PHOTLAM, **kwargs)
if out_flux_unit == PHOTLAM:
out_flux = flux_photlam
elif out_flux_type != :
out_flux = flux_photlam.to(out_flux_unit, eqv)
else:
out_flux = _convert_flux(
wavelengths, flux_photlam, out_flux_unit, **kwargs)
return out_flux | Perform conversion for :ref:`supported flux units <synphot-flux-units>`.
Parameters
----------
wavelengths : array-like or `~astropy.units.quantity.Quantity`
Wavelength values. If not a Quantity, assumed to be in
Angstrom.
fluxes : array-like or `~astropy.units.quantity.Quantity`
Flux values. If not a Quantity, assumed to be in PHOTLAM.
out_flux_unit : str or `~astropy.units.core.Unit`
Output flux unit.
area : float or `~astropy.units.quantity.Quantity`
Area that fluxes cover. If not a Quantity, assumed to be in
:math:`cm^{2}`. This value *must* be provided for conversions involving
OBMAG and count, otherwise it is not needed.
vegaspec : `~synphot.spectrum.SourceSpectrum`
Vega spectrum from :func:`~synphot.spectrum.SourceSpectrum.from_vega`.
This is *only* used for conversions involving VEGAMAG.
Returns
-------
out_flux : `~astropy.units.quantity.Quantity`
Converted flux values.
Raises
------
astropy.units.core.UnitsError
Conversion failed.
synphot.exceptions.SynphotError
Area or Vega spectrum is not given when needed. |
def init_logging(debug=False, logfile=None):
loglevel = logging.DEBUG if debug else logging.INFO
logformat =
formatter = logging.Formatter(logformat)
stderr = logging.StreamHandler()
stderr.setFormatter(formatter)
root = logging.getLogger()
root.setLevel(loglevel)
root.handlers = [stderr]
if logfile:
fhandler = logging.FileHandler(logfile)
fhandler.setFormatter(formatter)
root.addHandler(fhandler) | Initialize logging. |
def is_ipynb():
try:
shell = get_ipython().__class__.__name__
if shell == :
return True
elif shell == :
return False
else:
return False
except NameError:
return False | Return True if the module is running in IPython kernel,
False if in IPython shell or other Python shell.
Copied from: http://stackoverflow.com/a/37661854/1592810
There are other methods there too
>>> is_ipynb()
False |
def createExpenseItemsForEvents(request=None, datetimeTuple=None, rule=None, event=None):
submissionUser = getattr(request, , None)
generate_count = 0
rule_filters = Q(disabled=False) & Q(rentalRate__gt=0) & \
Q(Q(staffmemberwageinfo__isnull=False) | Q(staffdefaultwage__isnull=False))
if rule:
rule_filters = rule_filters & Q(id=rule.id)
rulesToCheck = RepeatedExpenseRule.objects.filter(
rule_filters).distinct().order_by(
,
)
event_timefilters = Q()
if datetimeTuple and len(datetimeTuple) == 2:
timelist = list(datetimeTuple)
timelist.sort()
event_timefilters = event_timefilters & (
Q(event__startTime__gte=timelist[0]) & Q(event__startTime__lte=timelist[1])
)
if event:
event_timefilters = event_timefilters & Q(event__id=event.id)
for rule in rulesToCheck:
staffMember = getattr(rule, , None)
staffCategory = getattr(rule, , None)
if (
(not staffMember and not staffCategory) or
(
not staffMember and not
getConstant()
)
):
continue
replacements = {
: _(),
: _(),
: _(),
}
expense_category = getConstant()
if staffCategory:
if staffMember:
eventstaff_filter = Q(staffMember=staffMember) & Q(category=staffCategory)
elif getConstant():
eventstaff_filter = (
Q(category=staffCategory) &
~Q(staffMember__expenserules__category=staffCategory)
)
replacements[] = staffCategory.name
if staffCategory == getConstant():
expense_category = getConstant()
elif staffCategory in [
getConstant(),
getConstant()
]:
expense_category = getConstant()
else:
}
ExpenseItem.objects.create(**params)
generate_count += 1
rulesToCheck.update(lastRun=timezone.now())
return generate_count | For each StaffMember-related Repeated Expense Rule, look for EventStaffMember
instances in the designated time window that do not already have expenses associated
with them. For hourly rental expenses, then generate new expenses that are
associated with this rule. For non-hourly expenses, generate new expenses
based on the non-overlapping intervals of days, weeks or months for which
there is not already an ExpenseItem associated with the rule in question. |
def add_highlights_docs(docs):
if not isinstance(docs, list):
docs = [docs]
for doc in docs:
if in doc[]:
matched_sentences = doc[][]
for sentence in matched_sentences:
doc[][][] = [{: sentence, : sentence}]
paragraph = SimilarityScoreRerank.get_description(doc)
if paragraph:
high_para = SimilarityScoreRerank.create_highlighted_sentences(matched_sentences, paragraph)
if high_para:
if not in doc:
doc[] = dict()
doc[][] = [high_para]
return docs | "highlight": {
"knowledge_graph.title.value": [
"Before 1 January 2018, will <em>South</em> <em>Korea</em> file a World Trade Organization dispute against the United States related to solar panels?"
]
} |
def resolve_self_references(self, rules):
with suppress(KeyError):
rule = rules.pop()
rules[self.app_name] = rule
return rules | Resolves `$self` references to actual application name in security group rules. |
def fold(self, predicate):
    """Apply *predicate* bottom-up over the attribute tree.

    Child attribute holders are folded first; the dict of their results is
    then passed to *predicate* together with this node, so the return value
    propagates from the leaves upwards.
    """
    folded_children = {}
    for name, child in self._attributes.items():
        if isinstance(child, SerializableTypedAttributesHolder):
            folded_children[name] = child.fold(predicate)
    return predicate(self, folded_children)
leaves and making the return value propagate. |
def dbg_repr(self, max_display=10):
    """Debugging output of this slice.

    :param max_display: The maximum number of SimRun slices to show, or
        None to show all of them.
    :return: A string representation.
    """
    s = repr(self) + "\n"
    num_runs = len(self.chosen_statements)
    # BUG FIX: the original compared `num_runs > max_display` before the
    # None check, which raises TypeError on Python 3 when max_display=None.
    if max_display is not None and num_runs > max_display:
        s += "%d SimRuns in program slice, displaying %d.\n" % (num_runs, max_display)
    else:
        s += "%d SimRuns in program slice.\n" % num_runs
    run_addrs = sorted(self.chosen_statements.keys())
    if max_display is not None:
        run_addrs = run_addrs[:max_display]
    for run_addr in run_addrs:
        s += self.dbg_repr_run(run_addr) + "\n"
    return s
:param max_display: The maximum number of SimRun slices to show.
:return: A string representation. |
def exists(self, doc_id, rev_id=):
title = % self.__class__.__name__
input_fields = {
: doc_id,
: rev_id
}
for key, value in input_fields.items():
if value:
object_title = % (title, key, str(value))
self.fields.validate(value, % key, object_title)
url = self.bucket_url + % doc_id
params = None
if rev_id:
params = { : rev_id }
response = requests.get(url, params=params)
if not in response.json():
return True
return False | a method to determine if document exists
:param doc_id: string with id of document in bucket
:param rev_id: [optional] string with revision id of document in bucket
:return: boolean indicating existence of document |
def inner(self, x1, x2):
if self.exponent != 2.0:
raise NotImplementedError(
.format(self.exponent))
else:
inner = self.const * _inner_default(x1, x2)
if x1.space.field is None:
return inner
else:
return x1.space.field.element(inner) | Return the weighted inner product of ``x1`` and ``x2``.
Parameters
----------
x1, x2 : `NumpyTensor`
Tensors whose inner product is calculated.
Returns
-------
inner : float or complex
The inner product of the two provided tensors. |
def _is_target_a_directory(link, rel_target):
target = os.path.join(os.path.dirname(link), rel_target)
return os.path.isdir(target) | If creating a symlink from link to a target, determine if target
is a directory (relative to dirname(link)). |
def _process_response(self, response):
assert self._state == self._STATE_RUNNING, "Should be running if processing response"
cols = None
data = []
for r in response:
if not cols:
cols = [(f, r._fields[f].db_type) for f in r._fields]
data.append([getattr(r, f) for f in r._fields])
self._data = data
self._columns = cols
self._state = self._STATE_FINISHED | Update the internal state with the data from the response |
def if_stmt(self, if_loc, test, if_colon_loc, body, elifs, else_opt):
    """if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]"""
    # NOTE(review): `ast` here appears to be the parser's own AST module
    # (nodes carry loc/else_loc attributes), not the stdlib ast -- confirm.
    # Build the chain inside-out: start with the innermost node, which is
    # the one that owns the else-clause, if any.
    stmt = ast.If(orelse=[],
                  else_loc=None, else_colon_loc=None)
    if else_opt:
        stmt.else_loc, stmt.else_colon_loc, stmt.orelse = else_opt
    # Each elif (walked innermost-first) fills the current node, then a new
    # enclosing ast.If is created with it as the sole orelse entry.
    for elif_ in reversed(elifs):
        stmt.keyword_loc, stmt.test, stmt.if_colon_loc, stmt.body = elif_
        stmt.loc = stmt.keyword_loc.join(stmt.body[-1].loc)
        if stmt.orelse:
            stmt.loc = stmt.loc.join(stmt.orelse[-1].loc)
        stmt = ast.If(orelse=[stmt],
                      else_loc=None, else_colon_loc=None)
    # Finally fill the outermost 'if' itself and extend its source span.
    stmt.keyword_loc, stmt.test, stmt.if_colon_loc, stmt.body = \
        if_loc, test, if_colon_loc, body
    stmt.loc = stmt.keyword_loc.join(stmt.body[-1].loc)
    if stmt.orelse:
        stmt.loc = stmt.loc.join(stmt.orelse[-1].loc)
    return stmt
def maybe_start_recording(tokens, index):
    """Return a new _RSTCommentBlockRecorder when it is time to record,
    otherwise None."""
    token = tokens[index]
    if token.type != TokenType.BeginRSTComment:
        return None
    return _RSTCommentBlockRecorder(index, token.line)
def find_one_raw(self, resource, _id):
    """Find a single document in *resource* by its id."""
    return self._find_by_id(resource=resource, _id=_id)
def fetch_model(self, source: str, file: Union[str, BinaryIO],
                chunk_size: int=DEFAULT_DOWNLOAD_CHUNK_SIZE) -> None:
    """Download the model from GCS at *source* into *file*, streaming in
    chunks of *chunk_size* bytes."""
    download_http(source, file, self._log, chunk_size)
def _norm_perm_list_from_perm_dict(self, perm_dict):
    """Return a minimal, ordered, hashable list of [permission, subjects]
    pairs, with subjects sorted and empty permissions omitted."""
    highest = self._highest_perm_dict_from_perm_dict(perm_dict)
    normalized = []
    for perm in ORDERED_PERM_LIST:
        subjects = highest.get(perm, False)
        if subjects:
            normalized.append([perm, sorted(subjects)])
    return normalized
def _calculate_sun_vector(self):
    """Calculate and cache the sun vector for this sun position."""
    axis_z = Vector3(0., 0., -1.)
    axis_x = Vector3(1., 0., 0.)
    north = Vector3(0., 1., 0.)
    # Rotate the north vector by altitude about the x-axis, then by the
    # azimuth and the scene's north-angle correction about the z-axis.
    sun_vector = north \
        .rotate_around(axis_x, self.altitude_in_radians) \
        .rotate_around(axis_z, self.azimuth_in_radians) \
        .rotate_around(axis_z, math.radians(-1 * self.north_angle))
    sun_vector.normalize()
    try:
        sun_vector.flip()
    except AttributeError:
        # Fallback for Vector3 implementations without flip(): negate manually.
        sun_vector = Vector3(-1 * sun_vector.x,
                             -1 * sun_vector.y,
                             -1 * sun_vector.z)
    self._sun_vector = sun_vector
def open(cls, filename, crs=None):
with fiona.Env():
with fiona.open(filename, ) as source:
original_crs = CRS(source.crs)
schema = source.schema
length = len(source)
crs = crs or original_crs
ret_val = cls(filename, crs, schema, length)
return ret_val | Creates a FileCollection from a file in disk.
Parameters
----------
filename : str
Path of the file to read.
crs : CRS
overrides the crs of the collection, this funtion will not reprojects |
def payout(address):
qry = DbCursor().execute_and_fetchall(.format(address))
Transaction = namedtuple(
,
)
named_transactions = []
for i in qry:
tx_id = Transaction(
id=i[0],
amount=i[1],
timestamp=i[2],
recipientId=i[3],
senderId=i[4],
rawasset=i[5],
type=i[6],
fee=i[7],
)
named_transactions.append(tx_id)
return named_transactions | returns all received transactions between the address and registered delegate accounts
ORDER by timestamp ASC. |
def unpack_2to8(data):
    """Promote 2-bit unsigned data into 8-bit signed values.

    Each input byte holds four 2-bit samples (bits AB CD EF GH).  The bit
    shuffling below spreads the four samples into the four bytes of a
    32-bit word, which is then reinterpreted as four uint8 values and
    mapped onto the analogue levels [40, 12, -12, -40].

    Args:
        data: numpy array with dtype uint8.

    Returns:
        numpy int8 array, four times the length of ``data``.
    """
    two_eight_lookup = {0: 40,
                        1: 12,
                        2: -12,
                        3: -40}
    tmp = data.astype(np.uint32)
    # /0000 0000/0000 ABCD/0000 0000/0000 EFGH/
    tmp = (tmp | (tmp << 12)) & 0xF000F
    # /0000 00AB/0000 00CD/0000 00EF/0000 00GH/
    tmp = (tmp | (tmp << 6)) & 0x3030303
    # Reorder bytes so the first sample comes out first when viewed bytewise.
    tmp = tmp.byteswap()
    # BUG FIX: the view argument had been lost, leaving the data as uint32;
    # each 32-bit word must be viewed as four separate 8-bit samples.
    tmp = tmp.view('uint8')
    mapped = np.array(tmp, dtype=np.int8)
    for k, v in two_eight_lookup.items():
        mapped[tmp == k] = v
    return mapped
Args:
data: Numpy array with dtype == uint8
Notes:
DATA MUST BE LOADED as np.array() with dtype='uint8'.
This works with some clever shifting and AND / OR operations.
Data is LOADED as 8-bit, then promoted to 32-bits:
/ABCD EFGH/ (8 bits of data)
/0000 0000/0000 0000/0000 0000/ABCD EFGH/ (8 bits of data as a 32-bit word)
Once promoted, we can do some shifting, AND and OR operations:
/0000 0000/0000 ABCD/EFGH 0000/0000 0000/ (shifted << 12)
/0000 0000/0000 ABCD/EFGH 0000/ABCD EFGH/ (bitwise OR of previous two lines)
/0000 0000/0000 ABCD/0000 0000/0000 EFGH/ (bitwise AND with mask 0xF000F)
/0000 00AB/CD00 0000/0000 00EF/GH00 0000/ (prev. line shifted << 6)
/0000 00AB/CD00 ABCD/0000 00EF/GH00 EFGH/ (bitwise OR of previous two lines)
/0000 00AB/0000 00CD/0000 00EF/0000 00GH/ (bitwise AND with 0x3030303)
Then we change the view of the data to interpret it as 4x8 bit:
[000000AB, 000000CD, 000000EF, 000000GH] (change view from 32-bit to 4x8-bit)
The converted bits are then mapped to values in the range [-40, 40] according to a lookup chart.
The mapping is based on specifications in the breakthough docs:
https://github.com/UCBerkeleySETI/breakthrough/blob/master/doc/RAW-File-Format.md |
def transform(self, m):
    """Replace this point by its transformation with matrix-like *m*
    (a sequence of 6 numbers); return self for chaining."""
    if len(m) == 6:
        self.x, self.y = TOOLS._transform_point(self, m)
        return self
    raise ValueError("bad sequ. length")
def generate_rpn_proposals(boxes, scores, img_shape,
pre_nms_topk, post_nms_topk=None):
assert boxes.shape.ndims == 2, boxes.shape
if post_nms_topk is None:
post_nms_topk = pre_nms_topk
topk = tf.minimum(pre_nms_topk, tf.size(scores))
topk_scores, topk_indices = tf.nn.top_k(scores, k=topk, sorted=False)
topk_boxes = tf.gather(boxes, topk_indices)
topk_boxes = clip_boxes(topk_boxes, img_shape)
topk_boxes_x1y1x2y2 = tf.reshape(topk_boxes, (-1, 2, 2))
topk_boxes_x1y1, topk_boxes_x2y2 = tf.split(topk_boxes_x1y1x2y2, 2, axis=1)
wbhb = tf.squeeze(topk_boxes_x2y2 - topk_boxes_x1y1, axis=1)
valid = tf.reduce_all(wbhb > cfg.RPN.MIN_SIZE, axis=1)
topk_valid_boxes_x1y1x2y2 = tf.boolean_mask(topk_boxes_x1y1x2y2, valid)
topk_valid_scores = tf.boolean_mask(topk_scores, valid)
topk_valid_boxes_y1x1y2x2 = tf.reshape(
tf.reverse(topk_valid_boxes_x1y1x2y2, axis=[2]),
(-1, 4), name=)
nms_indices = tf.image.non_max_suppression(
topk_valid_boxes_y1x1y2x2,
topk_valid_scores,
max_output_size=post_nms_topk,
iou_threshold=cfg.RPN.PROPOSAL_NMS_THRESH)
topk_valid_boxes = tf.reshape(topk_valid_boxes_x1y1x2y2, (-1, 4))
proposal_boxes = tf.gather(topk_valid_boxes, nms_indices)
proposal_scores = tf.gather(topk_valid_scores, nms_indices)
tf.sigmoid(proposal_scores, name=)
return tf.stop_gradient(proposal_boxes, name=), tf.stop_gradient(proposal_scores, name=) | Sample RPN proposals by the following steps:
1. Pick top k1 by scores
2. NMS them
3. Pick top k2 by scores. Default k2 == k1, i.e. does not filter the NMS output.
Args:
boxes: nx4 float dtype, the proposal boxes. Decoded to floatbox already
scores: n float, the logits
img_shape: [h, w]
pre_nms_topk, post_nms_topk (int): See above.
Returns:
boxes: kx4 float
scores: k logits |
def run_program(program, *args):
real_args = [program]
real_args.extend(args)
logging.debug(_(), real_args)
check_output(real_args, universal_newlines=True) | Wrap subprocess.check_output to make life easier. |
def prepend(self, frame_p):
    """Push frame to the front of the message, i.e. before all other frames.

    Message takes ownership of the frame and will destroy it when the
    message is sent.  Returns 0 on success, -1 on error.
    """
    frame_ref = byref(zframe_p.from_param(frame_p))
    return lib.zmsg_prepend(self._as_parameter_, frame_ref)
Message takes ownership of frame, will destroy it when message is sent.
Returns 0 on success, -1 on error. Deprecates zmsg_push, which did not
nullify the caller's frame reference. |
def uninstall_task(self, name):
if name in self._task_type_by_name:
self._task_type_by_name[name].options_scope = None
del self._task_type_by_name[name]
self._ordered_task_names = [x for x in self._ordered_task_names if x != name]
else:
raise GoalError(.format(name)) | Removes the named task from this goal.
Allows external plugins to modify the execution plan. Use with caution.
Note: Does not relax a serialization requirement that originated
from the uninstalled task's install() call.
:API: public |
def set_model_domain(model, domain):
    """Set the domain on an ONNX model.

    :param model: instance of an ONNX model
    :param domain: string containing the domain name of the model
    """
    valid_model = model is not None and isinstance(model, onnx_proto.ModelProto)
    if not valid_model:
        raise ValueError("Model is not a valid ONNX model.")
    if not convert_utils.is_string_type(domain):
        raise ValueError("Domain must be a string type.")
    model.domain = domain
:param model: instance of an ONNX model
:param domain: string containing the domain name of the model
Example:
::
from onnxmltools.utils import set_model_domain
onnx_model = load_model("SqueezeNet.onnx")
set_model_domain(onnx_model, "com.acme") |
def set_nbytes(self, key, nbytes=None):
obj = super().__getitem__(key)
if nbytes is not None:
obj.attrs[] = nbytes
else:
obj.attrs[] = nbytes = ByteCounter.get_nbytes(obj)
return nbytes | Set the `nbytes` attribute on the HDF5 object identified by `key`. |
def run_ec2_import(self, config_file_location, description, region=):
import_cmd = "aws ec2 import-image --description --profile --region --output " \
" --disk-containers file://{}"\
.format(description, self.aws_project, region, config_file_location)
try:
res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print "Error importing to ec2"
print "output: {}".format(e.output)
sys.exit(5)
print "got res: {}".format(res)
res_json = json.loads(res)
task_running, import_id = self.check_task_status_and_id(res_json)
return import_id | Runs the command to import an uploaded vmdk to aws ec2
:param config_file_location: config file of import param location
:param description: description to attach to the import task
:return: the import task id for the given ami |
def append(self, val):
val.resultset = self
val.dataset = self.dataset
if val.dataset:
dataset_dimensions = self.dataset.dimensions
for k, v in val.raw_dimensions.items():
if k not in dataset_dimensions:
d = Dimension(k)
else:
d = dataset_dimensions[k]
normalized_value = unicode(v)
if d.dialect and d.datatype:
if d.dialect in d.datatype.dialects:
for av in d.allowed_values:
if unicode(v) in av.dialects.get(d.dialect, []):
normalized_value = av.value
break
if isinstance(v, DimensionValue):
dim = v
v.value = normalized_value
else:
if k in dataset_dimensions:
dim = DimensionValue(normalized_value, d)
else:
dim = DimensionValue(normalized_value, Dimension())
val.dimensionvalues.append(dim)
self.dimensionvalues = val.dimensionvalues
super(ResultSet, self).append(val) | Connect any new results to the resultset.
This is where all the heavy lifting is done for creating results:
- We add a datatype here, so that each result can handle
validation etc independently. This is so that scraper authors
don't need to worry about creating and passing around datatype objects.
- As the scraper author yields result objects, we append them to
a resultset.
- This is also where we normalize dialects. |
def report_comment_abuse(context, obj):
context.update({
: obj,
: -1,
: "-".join((obj._meta.app_label, obj._meta.module_name)),
})
return context | Checks whether a user can report abuse (has not liked comment previously)
or has reported abuse previously and renders appropriate response.
If requesting user is part of the 'Moderators' group a vote equal to
ABUSE_CUTOFF setting will be made, thereby immediately marking the comment
as abusive. |
def sync_repo_hook(self, repo_id):
gh_repo = self.api.repository_with_id(repo_id)
hooks = (hook.id for hook in gh_repo.hooks()
if hook.config.get(, ) == self.webhook_url)
hook_id = next(hooks, None)
if hook_id:
Repository.enable(user_id=self.user_id,
github_id=gh_repo.id,
name=gh_repo.full_name,
hook=hook_id)
else:
Repository.disable(user_id=self.user_id,
github_id=gh_repo.id,
name=gh_repo.full_name) | Sync a GitHub repo's hook with the locally stored repo. |
def alphabeta(game, alpha_beta=(-float('inf'), float('inf')),
              player=dominoes.players.identity):
    """Run minimax search with alpha-beta pruning on the provided game.

    :param Game game: game to search
    :param tuple alpha_beta: initial (alpha, beta) values; default (-inf, inf)
    :param callable player: player used to order the moves to be explored
    :return: (best_moves, best_value) for the player to move
    """
    # Terminal position: the game's point result is the node value.
    if game.result is not None:
        return [], game.result.points
    # BUG FIX: the infinity literals had been lost (`float()` is 0.0),
    # which broke the search bounds; docstring default is (-inf, inf).
    if game.turn % 2:
        # Minimizing player: tighten beta as smaller values are found.
        best_value = float('inf')
        op = operator.lt
        update = lambda ab, v: (ab[0], min(ab[1], v))
    else:
        # Maximizing player: raise alpha as larger values are found.
        best_value = -float('inf')
        op = operator.gt
        update = lambda ab, v: (max(ab[0], v), ab[1])
    for move, new_game in make_moves(game, player):
        moves, value = alphabeta(new_game, alpha_beta, player)
        if op(value, best_value):
            best_value = value
            best_moves = moves
            best_moves.insert(0, move)
        alpha_beta = update(alpha_beta, best_value)
        # Prune: the opponent already has a better alternative elsewhere.
        if alpha_beta[1] <= alpha_beta[0]:
            break
    return best_moves, best_value
:param Game game: game to search
:param tuple alpha_beta: a tuple of two floats that indicate
the initial values of alpha and beta,
respectively. The default is (-inf, inf).
:param callable player: player used to sort moves to be explored.
Ordering better moves first may significantly
reduce the amount of moves that need to be
explored. The identity player is the default. |
def _parse_docline(self, line, container):
match = self.RE_DECOR.match(line)
if match is not None:
return "{}.{}".format(container.name, match.group("name"))
else:
return container.name | Parses a single line of code following a docblock to see if
it as a valid code element that can be decorated. If so, return
the name of the code element. |
def team_events(self, team, year=None, simple=False, keys=False):
if year:
if keys:
return self._get( % (self.team_key(team), year))
else:
return [Event(raw) for raw in self._get( % (self.team_key(team), year, if simple else ))]
else:
if keys:
return self._get( % self.team_key(team))
else:
return [Event(raw) for raw in self._get( % (self.team_key(team), if simple else ))] | Get team events a team has participated in.
:param team: Team to get events for.
:param year: Year to get events from.
:param simple: Get only vital data.
:param keys: Get just the keys of the events. Set to True if you only need the keys of each event and not their full data.
:return: List of strings or Teams |
def lookup(self):
    """Prints name, author, size and age."""
    # NOTE(review): Python 2 print statement -- this block targets py2.
    print "%s by %s, size: %s, uploaded %s ago" % (self.name, self.author,
                                                   self.size, self.age)
def pdf_Gates_Gaudin_Schuhman_basis_integral(d, d_characteristic, m, n):
    r"""Calculate the integral of ``d^n`` multiplied by the Gates, Gaudin
    and Schuhman (GGS) model pdf.

    .. math::
        \int d^n\cdot q(d)\; dd =\frac{m}{m+n} d^n \left(\frac{d}
        {d_{characteristic}}\right)^m

    Parameters
    ----------
    d : float
        Specified particle diameter, [m]
    d_characteristic : float
        Characteristic (largest) particle diameter in the distribution, [m]
    m : float
        Particle size distribution exponent, [-]
    n : int
        Exponent of the multiplied d^n, [-]

    Returns
    -------
    pdf_basis_integral : float
        Integral of the GGS pdf multiplied by d^n, [-]

    Examples
    --------
    >>> pdf_Gates_Gaudin_Schuhman_basis_integral(d=2E-4, d_characteristic=1E-3, m=2.3, n=-3)
    -10136984887.543015
    """
    # BUG FIX: a stray bare `r` (residue of the raw-docstring marker) stood
    # before the return and raised NameError at call time.
    return m/(m + n)*d**n*(d/d_characteristic)**m
Gaudin and Schuhman (GGS) model given a particle diameter `d`,
characteristic (maximum) particle diameter `d_characteristic`, and exponent
`m`.
.. math::
\int d^n\cdot q(d)\; dd =\frac{m}{m+n} d^n \left(\frac{d}
{d_{characteristic}}\right)^m
Parameters
----------
d : float
Specified particle diameter, [m]
d_characteristic : float
Characteristic particle diameter; in this model, it is the largest
particle size diameter in the distribution, [m]
m : float
Particle size distribution exponent, [-]
n : int
Exponent of the multiplied n, [-]
Returns
-------
pdf_basis_integral : float
Integral of Rosin Rammler pdf multiplied by d^n, [-]
Notes
-----
This integral does not have any numerical issues as `d` approaches 0.
Examples
--------
>>> pdf_Gates_Gaudin_Schuhman_basis_integral(d=2E-4, d_characteristic=1E-3, m=2.3, n=-3)
-10136984887.543015 |
def delete_submission(self, submission_id):
LOG.info("Deleting clinvar submission %s", submission_id)
submission_obj = self.clinvar_submission_collection.find_one({ : ObjectId(submission_id)})
submission_variants = submission_obj.get()
submission_casedata = submission_obj.get()
submission_objects = []
if submission_variants and submission_casedata:
submission_objects = submission_variants + submission_casedata
elif submission_variants:
submission_objects = submission_variants
elif submission_casedata:
submission_objects = submission_casedata
result = self.clinvar_collection.delete_many({: { "$in": submission_objects} })
deleted_objects = result.deleted_count
result = self.clinvar_submission_collection.delete_one({: ObjectId(submission_id)})
deleted_submissions = result.deleted_count
return deleted_objects,deleted_submissions | Deletes a Clinvar submission object, along with all associated clinvar objects (variants and casedata)
Args:
submission_id(str): the ID of the submission to be deleted
Returns:
deleted_objects(int): the number of associated objects removed (variants and/or casedata)
deleted_submissions(int): 1 if it's deleted, 0 if something went wrong |
def percent_bandwidth(data, period, std=2.0):
    """Percent Bandwidth.

    Formula:
        %_bw = (data - lower_bollinger_band) / bb_range
    """
    catch_errors.check_for_period_error(data, period)
    period = int(period)
    lower = lower_bollinger_band(data, period, std)
    band_range = bb_range(data, period, std)
    return (np.array(data) - lower) / band_range
Formula:
%_bw = data() - l_bb() / bb_range() |
def next(self):
    """Return the next point in the iteration.

    Raises:
        StopIteration: once every reservoir entry has been returned.
    """
    if self.count >= len(self.reservoir):
        raise StopIteration("Reservoir exhausted")
    value = self.reservoir[self.count]
    self.count += 1
    return value
def init(cls, repo_dir=None, temp=False, initial_commit=False):
if temp:
suffix =
repo_dir = create_tempdir(suffix=suffix, delete=True)
else:
repo_dir = repo_dir or os.getcwd()
git_init(repo_dir)
instance = cls(repo_dir)
return instance | Run `git init` in the repo_dir.
Defaults to current working directory if repo_dir is not supplied.
If 'temp' is True, a temporary directory will be created for you
and the repository will be initialized. The tempdir is scheduled
for deletion (when the process exits) through an exit function
registered with the atexit module. If 'temp' is True, repo_dir is
ignored. |
def get_asset_composition_design_session(self):
    """Gets the session for creating asset compositions.

    return: (osid.repository.AssetCompositionDesignSession) - an
            AssetCompositionDesignSession
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_asset_composition_design() is false
    compliance: optional - This method must be implemented if
            supports_asset_composition_design() is true.
    """
    if not self.supports_asset_composition_design():
        raise Unimplemented()
    # Import lazily so the sessions module is only required when used;
    # a missing module is surfaced as OperationFailed.
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    # A sessions module without this session class also maps to OperationFailed.
    try:
        session = sessions.AssetCompositionDesignSession(proxy=self._proxy,
                                                         runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
    return session
return: (osid.repository.AssetCompositionDesignSession) - an
AssetCompositionDesignSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_composition_design() is
false
compliance: optional - This method must be implemented if
supports_asset_composition_design() is true. |
def variables(self):
    """Return a flat list of all variables created by module instantiation.

    Entries of ``variable_map`` may be a single variable or a list of
    variables; they are flattened in key-sorted order.
    """
    flattened = []
    for _, entry in sorted(self.variable_map.items()):
        items = entry if isinstance(entry, list) else [entry]
        flattened.extend(items)
    return flattened
def genExamplePlanet(binaryLetter=):
planetPar = PlanetParameters()
planetPar.addParam(, )
planetPar.addParam(, )
planetPar.addParam(, )
planetPar.addParam(, )
planetPar.addParam(, )
planetPar.addParam(, )
planetPar.addParam(, .format(ac._ExampleSystemCount, binaryLetter))
planetPar.addParam(, )
planetPar.addParam(, )
planetPar.addParam(, )
planetPar.addParam(, )
planetPar.addParam(, )
planetPar.addParam(, , {: })
examplePlanet = Planet(planetPar.params)
examplePlanet.flags.addFlag()
exampleStar = genExampleStar(binaryLetter=binaryLetter)
exampleStar._addChild(examplePlanet)
examplePlanet.parent = exampleStar
return examplePlanet | Creates a fake planet with some defaults
:param `binaryLetter`: host star is part of a binary with letter binaryletter
:return: |
def deserialize(self, apic_frame):
    """Convert an APIC frame into an Image."""
    return Image(
        data=apic_frame.data,
        desc=apic_frame.desc,
        type=apic_frame.type,
    )
def domain_of_validity(self):
domain = self.element.find(GML_NS + )
domain_href = domain.attrib[XLINK_NS + ]
url = .format(prefix=EPSG_IO_URL,
code=domain_href)
xml = requests.get(url).content
gml = ET.fromstring(xml)
def extract_bound(tag):
ns =
xpath = .format(
ns=ns,
tag=tag)
bound = gml.find(xpath)
return float(bound.text)
tags = (, ,
, )
bounds = [extract_bound(tag) for tag in tags]
return bounds | Return the domain of validity for this CRS as:
(west, east, south, north).
For example::
>>> print(get(21781).domain_of_validity())
[5.96, 10.49, 45.82, 47.81] |
def documents(cls, filter=None, **kwargs):
    """Return the Documents built from ``cls.find`` whose ``document``
    attribute is truthy."""
    built = [cls(raw) for raw in cls.find(filter, **kwargs)]
    return [doc for doc in built if doc.document]
def romanize(number):
    """Convert *number* to a Roman numeral.

    Args:
        number: Non-negative integer to convert.

    Returns:
        str: the Roman numeral (empty string for 0).
    """
    # NOTE: relies on module-level NUMERALS, an ordered list of
    # (numeral, value) pairs from largest to smallest value.
    roman = []
    for numeral, value in NUMERALS:
        times, number = divmod(number, value)
        roman.append(times * numeral)
    # BUG FIX: the empty-string receiver of .join had been lost
    # (`return .join(roman)` is a syntax error).
    return "".join(roman)
def add_coverage(self, qname, sname, qcover, scover=None):
    """Add percentage coverage values to ``self.alignment_coverage``.

    Stores *qcover* at [qname, sname]; when *scover* is truthy it is also
    stored at the mirrored position [sname, qname].
    """
    coverage = self.alignment_coverage
    coverage.loc[qname, sname] = qcover
    if scover:
        coverage.loc[sname, qname] = scover
def get_facet_objects_serializer(self, *args, **kwargs):
    """Return the serializer instance used for serializing faceted objects."""
    serializer_cls = self.get_facet_objects_serializer_class()
    # Inject the view's serializer context before instantiation.
    kwargs["context"] = self.get_serializer_context()
    return serializer_cls(*args, **kwargs)
serializing faceted objects. |
def determine_band_channel(kal_out):
    """Return (band, channel, target frequency) parsed from kal output.

    The last line of the form "Using <band> channel <n> (<freq>)" wins;
    band is None when no such line is present.
    """
    band, channel, tgt_freq = "", "", ""
    for line in kal_out.splitlines():
        if "Using " in line and " channel " in line:
            fields = line.split()
            band = str(fields[1])
            channel = str(fields[3])
            tgt_freq = str(fields[4]).replace("(", "").replace(")", "")
    if band == "":
        band = None
    return (band, channel, tgt_freq)
def try_storage(self, identifier, req, resp, resource, uri_kwargs):
if identifier is None:
user = None
elif self.user_storage is not None:
user = self.user_storage.get_user(
self, identifier, req, resp, resource, uri_kwargs
)
elif self.user_storage is None and not self.only_with_storage:
user = {
: self,
: identifier
}
else:
user = None
return user | Try to find user in configured user storage object.
Args:
identifier: User identifier.
Returns:
user object. |
def add_acquisition_source(
self,
method,
submission_number=None,
internal_uid=None,
email=None,
orcid=None,
source=None,
datetime=None,
):
acquisition_source = self._sourced_dict(source)
acquisition_source[] = str(submission_number)
for key in (, , , , ):
if locals()[key] is not None:
acquisition_source[key] = locals()[key]
self.obj[] = acquisition_source | Add acquisition source.
:type submission_number: integer
:type email: integer
:type source: string
:param method: method of acquisition for the suggested document
:type method: string
:param orcid: orcid of the user that is creating the record
:type orcid: string
:param internal_uid: id of the user that is creating the record
:type internal_uid: string
:param datetime: UTC datetime in ISO 8601 format
:type datetime: string |
def last_year(today: datetime=None, tz=None):
    """Return last year's begin (inclusive) and end (exclusive) datetimes.

    :param today: reference datetime (defaults to current UTC time)
    :param tz: timezone passed through to localize_time_range
    :return: localized (begin, end) range
    """
    reference = today if today is not None else datetime.utcnow()
    # Exclusive end: midnight, Jan 1 of the reference year.
    end = datetime(day=1, month=1, year=reference.year)
    # Step one second back to land inside the previous year, then take
    # that year's Jan 1 as the inclusive begin.
    last_instant = end - timedelta(seconds=1)
    begin = datetime(day=1, month=1, year=last_instant.year)
    return localize_time_range(begin, end, tz)
:param today: Some date (defaults current datetime)
:param tz: Timezone (defaults pytz UTC)
:return: begin (inclusive), end (exclusive) |
def to_file_mode(self):
    """Write all the messages out to files, one file per message."""
    for message_no, _ in enumerate(self.messages):
        self.__to_file(message_no)
def p_example_multiline(self, p):
    """example_field : ID EQ NL INDENT ex_map NL DEDENT"""
    # NOTE: the docstring above is the PLY grammar rule for this production
    # and is read by the parser generator -- do not reword it.
    p[0] = AstExampleField(
        self.path, p.lineno(1), p.lexpos(1), p[1], p[5])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.