code (string, 64-7.01k chars) | docstring (string, 2-15.8k chars) |
---|---|
#vtb
def time_in_range(self):
curr = datetime.datetime.now().time()
if self.start_time <= self.end_time:
return self.start_time <= curr <= self.end_time
else:
return self.start_time <= curr or curr <= self.end_time | Return true if current time is in the active range |
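The else branch is what handles active ranges that wrap past midnight: when start_time is later than end_time, the current time only needs to fall on either side of the wrap. A minimal standalone sketch of the same logic, assuming illustrative times (the free function below is for demonstration and is not part of the original class):
import datetime

def time_in_range(start_time, end_time, current=None):
    # Same comparison logic as the method above, written as a free function.
    current = current or datetime.datetime.now().time()
    if start_time <= end_time:
        return start_time <= current <= end_time
    # Range wraps midnight, e.g. 22:00 -> 06:00.
    return start_time <= current or current <= end_time

print(time_in_range(datetime.time(22), datetime.time(6), datetime.time(23, 30)))  # True
print(time_in_range(datetime.time(22), datetime.time(6), datetime.time(12, 0)))   # False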
#vtb
def load_config():
filename =
keyring_cfg = os.path.join(platform.config_root(), filename)
if not os.path.exists(keyring_cfg):
return
config = configparser.RawConfigParser()
config.read(keyring_cfg)
_load_keyring_path(config)
try:
if config.has_section("backend"):
keyring_name = config.get("backend", "default-keyring").strip()
else:
raise configparser.NoOptionError(, )
except (configparser.NoOptionError, ImportError):
logger = logging.getLogger()
logger.warning("Keyring config file contains incorrect values.\n"
+ "Config file: %s" % keyring_cfg)
return
return load_keyring(keyring_name) | Load a keyring using the config file in the config root. |
#vtb
def sayHello(self, name="Not given", message="nothing"):
print(
"Python.sayHello called by: {0} "
"with message: ".format(name, message)
)
return (
"PythonSync says: Howdy {0} "
"that's a nice runtime you got there".format(name)
) | Synchronous implementation of IHello.sayHello synchronous method.
The remote calling thread will be blocked until this is executed and
responds. |
#vtb
def set_connection(host=None, database=None, user=None, password=None):
c.CONNECTION['host'] = host
c.CONNECTION['database'] = database
c.CONNECTION['user'] = user
c.CONNECTION['password'] = password | Set connection parameters. Call set_connection with no arguments to clear. |
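A hedged usage sketch, assuming the key names restored above and a module-level constants object `c` whose `CONNECTION` attribute is a plain dict:
set_connection(host="db.example.org", database="mydb", user="alice", password="secret")
print(c.CONNECTION)
# {'host': 'db.example.org', 'database': 'mydb', 'user': 'alice', 'password': 'secret'}

set_connection()  # called with no arguments, every value is reset to None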
#vtb
def __build_author_name_expr(author_name, author_email_address):
assert author_name or author_email_address
author_name_expr = author_name or author_email_address[:author_email_address.find('@')]
if ',' in author_name_expr:
author_name_expr = '"%s"' % author_name_expr
if author_email_address:
author_name_expr = '%s <%s>' % (author_name_expr, author_email_address)
return author_name_expr | Build the name of the author of a message as described in the Internet
Message Format specification: https://tools.ietf.org/html/rfc5322#section-3.6.2
@param author_name: complete name of the originator of the message.
@param author_email_address: address of the mailbox to which the author
of the message suggests that replies be sent.
@return: a string representing the author of the message, that is, the
mailbox of the person or system responsible for the writing of the
message. This string is intended to be used as the "From:" field
of the message. |
#vtb
def zip_entry_rollup(zipfile):
files = dirs = 0
total_c = total_u = 0
for i in zipfile.infolist():
if i.filename[-1] == '/':
dirs += 1
else:
files += 1
total_c += i.compress_size
total_u += i.file_size
return files, dirs, total_c, total_u | returns a tuple of (files, dirs, size_uncompressed,
size_compressed). files+dirs will equal len(zipfile.infolist) |
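A short usage sketch with the standard library zipfile module; the archive path is hypothetical:
import zipfile

with zipfile.ZipFile("archive.zip") as zf:  # hypothetical archive path
    files, dirs, total_c, total_u = zip_entry_rollup(zf)
    print("%d files, %d dirs, %d bytes compressed, %d bytes uncompressed"
          % (files, dirs, total_c, total_u))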
#vtb
def _get_reference(self):
super()._get_reference()
self.cubeA_body_id = self.sim.model.body_name2id("cubeA")
self.cubeB_body_id = self.sim.model.body_name2id("cubeB")
self.l_finger_geom_ids = [
self.sim.model.geom_name2id(x) for x in self.gripper.left_finger_geoms
]
self.r_finger_geom_ids = [
self.sim.model.geom_name2id(x) for x in self.gripper.right_finger_geoms
]
self.cubeA_geom_id = self.sim.model.geom_name2id("cubeA")
self.cubeB_geom_id = self.sim.model.geom_name2id("cubeB") | Sets up references to important components. A reference is typically an
index or a list of indices that point to the corresponding elements
in a flatten array, which is how MuJoCo stores physical simulation data. |
#vtb
def create_app(config=, override=None,
init_logging=init_logging):
app = UDataApp(APP_NAME)
app.config.from_object(config)
settings = os.environ.get(, join(os.getcwd(), ))
if exists(settings):
app.settings_file = settings
app.config.from_pyfile(settings)
if override:
app.config.from_object(override)
for pkg in entrypoints.get_roots(app):
if pkg == :
continue
module = .format(pkg)
if pkgutil.find_loader(module):
settings = pkgutil.get_loader(module)
for key, default in settings.__dict__.items():
app.config.setdefault(key, default)
app.json_encoder = UDataJsonEncoder
app.debug = app.config[] and not app.config[]
app.wsgi_app = ProxyFix(app.wsgi_app)
init_logging(app)
register_extensions(app)
return app | Factory for a minimal application |
#vtb
def validate_data(self):
warnings = {}
spec_warnings, samp_warnings, site_warnings, loc_warnings = {}, {}, {}, {}
if self.specimens:
spec_warnings = self.validate_items(self.specimens, 'specimens')
if self.samples:
samp_warnings = self.validate_items(self.samples, 'samples')
if self.sites:
site_warnings = self.validate_items(self.sites, 'sites')
if self.locations:
loc_warnings = self.validate_items(self.locations, 'locations')
return spec_warnings, samp_warnings, site_warnings, loc_warnings | Validate specimen, sample, site, and location data. |
#vtb
def get_locations():
arequest = requests.get(LOCATIONS_URL, headers=HEADERS)
status_code = str(arequest.status_code)
if status_code == :
_LOGGER.error("Token expired.")
return False
return arequest.json() | Pull the accounts locations. |
#vtb
def guess_labels(self, doc):
if doc.nb_pages <= 0:
return set()
self.label_guesser.total_nb_documents = len(self._docs_by_id.keys())
label_names = self.label_guesser.guess(doc)
labels = set()
for label_name in label_names:
label = self.labels[label_name]
labels.add(label)
return labels | return a prediction of label names |
#vtb
def _at_extend(self, calculator, rule, scope, block):
from scss.selector import Selector
selectors = calculator.apply_vars(block.argument)
rule.extends_selectors.extend(Selector.parse_many(selectors)) | Implements @extend |
#vtb
def wrap(x):
if isinstance(x, G1Element):
return _wrap(x, serializeG1)
elif isinstance(x, G2Element):
return _wrap(x, serializeG2)
elif isinstance(x, GtElement):
return _wrap(x, serializeGt)
elif isinstance(x, str):
return x
elif isinstance(x, (int, long, BigInt)):
return hex(long(x))
else:
raise NotImplementedError("Cannot unwrap {}; only types {} supported".
format(type(x),
[G1Element, G2Element, GtElement, int, long, BigInt]) ) | Wraps an element or integer type by serializing it and base64 encoding
the resulting bytes. |
#vtb
def set_event_data(self, data, read_attrs):
if self.handle.mode == :
raise Exception()
read_number = read_attrs[]
read_group = .format(self.group_name, read_number)
read_info = self.handle.status.read_info
read_number_map = self.handle.status.read_number_map
index = read_number_map.get(read_number)
if index is None:
raise Exception()
info = read_info[index]
read_attrs.update({: info.read_id,
: info.start_time,
: info.duration,
: info.start_mux,
: info.median_before})
attrs = self.handle.get_analysis_attributes(read_group)
if attrs is None:
self.handle.add_analysis_subgroup(self.group_name, .format(read_number),
attrs=read_attrs)
self.handle.add_analysis_dataset(read_group, , data)
else:
raise Exception() | Set event data with the specified attributes.
:param data: Event data table.
:param read_attrs: Attributes to put on the read group. This must include
the read_number, which must refer to a read present in the object. The
attributes should not include the standard read attributes:
* read_id
* start_time
* duration
* start_mux
Those will be pulled from the read information already present in the
object for the specified read. |
#vtb
def _construct_w(self, inputs):
depthwise_weight_shape = self._kernel_shape + (self._input_channels,
self._channel_multiplier)
pointwise_input_size = self._channel_multiplier * self._input_channels
pointwise_weight_shape = (1, 1, pointwise_input_size, self._output_channels)
if "w_dw" not in self._initializers:
fan_in_shape = depthwise_weight_shape[:2]
self._initializers["w_dw"] = create_weight_initializer(fan_in_shape,
dtype=inputs.dtype)
if "w_pw" not in self._initializers:
fan_in_shape = pointwise_weight_shape[:3]
self._initializers["w_pw"] = create_weight_initializer(fan_in_shape,
dtype=inputs.dtype)
w_dw = tf.get_variable(
"w_dw",
shape=depthwise_weight_shape,
dtype=inputs.dtype,
initializer=self._initializers["w_dw"],
partitioner=self._partitioners.get("w_dw", None),
regularizer=self._regularizers.get("w_dw", None))
w_pw = tf.get_variable(
"w_pw",
shape=pointwise_weight_shape,
dtype=inputs.dtype,
initializer=self._initializers["w_pw"],
partitioner=self._partitioners.get("w_pw", None),
regularizer=self._regularizers.get("w_pw", None))
return w_dw, w_pw | Connects the module into the graph, with input Tensor `inputs`.
Args:
inputs: A 4D Tensor of shape:
[batch_size, input_height, input_width, input_channels]
and of type `tf.float16`, `tf.bfloat16` or `tf.float32`.
Returns:
A tuple of two 4D Tensors, each with the same dtype as `inputs`:
1. w_dw, the depthwise weight matrix, of shape:
[kernel_size, input_channels, channel_multiplier]
2. w_pw, the pointwise weight matrix, of shape:
[1, 1, channel_multiplier * input_channels, output_channels]. |
#vtb
def _parse_geometry(geometry):
if isinstance(geometry, str):
geometry = shapely.wkt.loads(geometry)
elif isinstance(geometry, dict):
geometry = shapely.geometry.shape(geometry)
elif not isinstance(geometry, shapely.geometry.base.BaseGeometry):
raise TypeError()
if not isinstance(geometry, (shapely.geometry.Polygon, shapely.geometry.MultiPolygon)):
raise ValueError(.format(type(geometry)))
return geometry | Parses given geometry into shapely object
:param geometry:
:return: Shapely polygon or multipolygon
:rtype: shapely.geometry.Polygon or shapely.geometry.MultiPolygon
:raises TypeError |
#vtb
def _add_arg_python(self, key, value=None, mask=False):
self._data[key] = value
if not value:
pass
elif value is True:
self._args.append(.format(key))
self._args_quoted.append(.format(key))
self._args_masked.append(.format(key))
else:
self._args.append(.format(key, value))
if mask:
value = * len(str(value))
else:
value = self.quote(value)
self._args_quoted.append(.format(key, value))
self._args_masked.append(.format(key, value)) | Add CLI Arg formatted specifically for Python.
Args:
key (string): The CLI Args key (e.g., --name).
value (string): The CLI Args value (e.g., bob).
mask (boolean, default:False): Indicates whether no mask value. |
#vtb
def _create_page_control(self):
if self.custom_page_control:
control = self.custom_page_control()
elif self.kind == 'plain':
control = QtGui.QPlainTextEdit()
elif self.kind == 'rich':
control = QtGui.QTextEdit()
control.installEventFilter(self)
viewport = control.viewport()
viewport.installEventFilter(self)
control.setReadOnly(True)
control.setUndoRedoEnabled(False)
control.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
return control | Creates and connects the underlying paging widget. |
#vtb
def limit(self, limit_value, key_func=None, per_method=False,
methods=None, error_message=None, exempt_when=None):
return self.__limit_decorator(limit_value, key_func, per_method=per_method,
methods=methods, error_message=error_message,
exempt_when=exempt_when) | decorator to be used for rate limiting individual routes.
:param limit_value: rate limit string or a callable that returns a string.
:ref:`ratelimit-string` for more details.
:param function key_func: function/lambda to extract the unique identifier for
the rate limit. defaults to remote address of the request.
:param bool per_method: whether the limit is sub categorized into the http
method of the request.
:param list methods: if specified, only the methods in this list will be rate
limited (default: None).
:param error_message: string (or callable that returns one) to override the
error message used in the response.
:return: |
#vtb
def ruamel_structure(data, validator=None):
if isinstance(data, dict):
if len(data) == 0:
raise exceptions.CannotBuildDocumentsFromEmptyDictOrList(
"Document must be built with non-empty dicts and lists"
)
return CommentedMap(
[
(ruamel_structure(key), ruamel_structure(value))
for key, value in data.items()
]
)
elif isinstance(data, list):
if len(data) == 0:
raise exceptions.CannotBuildDocumentsFromEmptyDictOrList(
"Document must be built with non-empty dicts and lists"
)
return CommentedSeq([ruamel_structure(item) for item in data])
elif isinstance(data, bool):
return u"yes" if data else u"no"
elif isinstance(data, (int, float)):
return str(data)
else:
if not is_string(data):
raise exceptions.CannotBuildDocumentFromInvalidData(
(
"Document must be built from a combination of:\n"
"string, int, float, bool or nonempty list/dict\n\n"
"Instead, found variable with type : "
).format(type(data).__name__, data)
)
return data | Take dicts and lists and return a ruamel.yaml style
structure of CommentedMaps, CommentedSeqs and
data.
If a validator is presented and the type is unknown,
it is checked against the validator to see if it will
turn it back in to YAML. |
#vtb
def _copy_selection(self, *event):
if react_to_event(self.view, self.view.editor, event):
logger.debug("copy selection")
global_clipboard.copy(self.model.selection)
return True | Copies the current selection to the clipboard. |
#vtb
def getquals(args):
p = OptionParser(getquals.__doc__)
p.add_option("--types", default="gene,mRNA,CDS",
type="str", dest="quals_ftypes",
help="Feature types from which to extract qualifiers")
p.add_option("--ignore", default="locus_tag,product,codon_start,translation",
type="str", dest="quals_ignore",
help="Qualifiers to exclude from parsing")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gbkfile, = args
quals_ftypes = opts.quals_ftypes.split(",")
quals_ignore = opts.quals_ignore.split(",")
locus = dict()
locus_tag = None
for rec in SeqIO.parse(gbkfile, "gb"):
for f in rec.features:
if f.type in quals_ftypes:
locus_tag = f.qualifiers[LT][0]
if locus_tag not in locus:
locus[locus_tag] = dict()
for ftype in quals_ftypes:
if ftype not in locus[locus_tag]:
locus[locus_tag][ftype] = []
if ftype == "CDS":
locus[locus_tag]["protein_id"] = []
quals = []
for qual in f.qualifiers:
if qual in quals_ignore:
continue
for qval in f.qualifiers[qual]:
quals.append((locus_tag, qual, qval))
if qual == "protein_id":
locus[locus_tag]["protein_id"].append(qval)
if len(quals) > 0:
locus[locus_tag][f.type].append(quals)
for locus_tag in locus:
print_locus_quals(locus_tag, locus, quals_ftypes) | %prog getquals [--options] gbkfile > qualsfile
Read GenBank file and extract all qualifiers per feature type
into a tab-delimited file |
#vtb
def SetPlatformArchContext():
_CONFIG.AddContext("Platform:%s" % platform.system().title())
machine = platform.uname()[4]
if machine in ["x86_64", "AMD64", "i686"]:
if platform.architecture()[0] == "32bit":
arch = "i386"
else:
arch = "amd64"
elif machine == "x86":
arch = "i386"
else:
arch = machine
_CONFIG.AddContext("Arch:%s" % arch) | Add the running contexts to the config system. |
#vtb
def logBranch(self, indent=0, level=logging.DEBUG):
if 0:
print(indent * " " + str(self))
else:
logger.log(level, indent * " " + str(self))
for childItems in self.childItems:
childItems.logBranch(indent + 1, level=level) | Logs the item and all descendants, one line per child |
#vtb
def rev_comp(seq):
rev_seq = seq[::-1]
rev_comp_seq = ''.join([base_pairing[s] for s in rev_seq])
return rev_comp_seq | Get reverse complement of sequence.
rev_comp will maintain the case of the sequence.
Parameters
----------
seq : str
nucleotide sequence. valid {a, c, t, g, n}
Returns
-------
rev_comp_seq : str
reverse complement of sequence |
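rev_comp relies on a module-level base_pairing lookup that is not shown; the table below is an assumed, case-preserving mapping for {a, c, t, g, n}, followed by a quick check:
# Assumed case-preserving complement table; not part of the original module.
base_pairing = {'a': 't', 't': 'a', 'c': 'g', 'g': 'c', 'n': 'n',
                'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N'}

print(rev_comp("ACCGTn"))  # -> 'nACGGT'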
#vtb
def local_open(url):
scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
filename = urllib.request.url2pathname(path)
if os.path.isfile(filename):
return urllib.request.urlopen(url)
elif path.endswith() and os.path.isdir(filename):
files = []
for f in os.listdir(filename):
filepath = os.path.join(filename, f)
if f == :
with open(filepath, ) as fp:
body = fp.read()
break
elif os.path.isdir(filepath):
f +=
files.append(.format(name=f))
else:
tmpl = (
"<html><head><title>{url}</title>"
"</head><body>{files}</body></html>")
body = tmpl.format(url=url, files=.join(files))
status, message = 200, "OK"
else:
status, message, body = 404, "Path not found", "Not found"
headers = {: }
body_stream = six.StringIO(body)
return urllib.error.HTTPError(url, status, message, headers, body_stream) | Read a local path, with special support for directories |
#vtb
def cmd_tcpscan(ip, port, iface, flags, sleeptime, timeout, show_all, verbose):
if verbose:
logging.basicConfig(level=logging.INFO, format=)
conf.verb = False
if iface:
conf.iface = iface
port_regex = r
if not re.match(port_regex, port):
logging.critical("Invalid port specification")
return False
ports = []
for p in str(port).split():
if in p:
first, last = p.split()
for n in range(int(first), int(last)+1):
ports.append(n)
else:
ports.append(int(p))
out = "{port} {sflags} -> {rflags}"
pkts = IP(dst=ip)/TCP(flags=flags, dport=ports)
if sleeptime:
res = []
for pkt in pkts:
logging.info(pkt.summary())
_ = sr1(pkt)
if _:
logging.info(_.summary())
res.append((pkt, _))
else:
res, unans = sr(pkts, verbose=verbose)
for s,r in res:
if show_all or in r.sprintf(r"%TCP.flags%"):
print(out.format(
port=s[TCP].dport,
sflags=s.sprintf(r"%TCP.flags%"),
rflags=r.sprintf(r"%TCP.flags%")
)) | TCP Port Scanner.
Print the ports that generated a response with the SYN flag or (if show use -a) all the
ports that generated a response.
It's really basic compared with nmap, but who is comparing?
Example:
\b
# habu.tcpscan -p 22,23,80,443 -s 1 45.77.113.133
22 S -> SA
80 S -> SA
443 S -> SA |
#vtb
def options(argv=[]):
parser = HendrixOptionParser
parsed_args = parser.parse_args(argv)
return vars(parsed_args[0]) | A helper function that returns a dictionary of the default key-value pairs |
#vtb
def ber_code(self):
try:
alt_text = self._ad_page_content.find(
, {: }
).find()[]
if ( in alt_text):
return
else:
alt_arr = alt_text.split()
if in alt_arr[0].lower():
return alt_arr[1].lower()
else:
return None
except Exception as e:
if self._debug:
logging.error(
"Error getting the Ber Code. Error message: " + e.args[0])
return None | This method gets ber code listed in Daft.
:return: |
#vtb
def is_img_id_valid(img_id):
t = re.sub(r, , img_id, re.IGNORECASE)
t = re.sub(r, , t)
if img_id != t or img_id.count() != 1:
return False
profile, base_name = img_id.split(, 1)
if not profile or not base_name:
return False
try:
get_profile_configs(profile)
except ValueError:
return False
return True | Checks if img_id is valid. |
#vtb
def mean_by_panel(self, length):
self._check_panel(length)
func = lambda v: v.reshape(-1, length).mean(axis=0)
newindex = arange(length)
return self.map(func, index=newindex) | Compute the mean across fixed sized panels of each record.
Splits each record into panels of size `length`,
and then computes the mean across panels.
Panel length must subdivide record exactly.
Parameters
----------
length : int
Fixed length with which to subdivide. |
#vtb
def check_str(obj):
if isinstance(obj, str):
return obj
if isinstance(obj, float):
return str(int(obj))
else:
return str(obj) | Returns a string for various input types |
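A quick usage sketch illustrating the float branch, which drops the fractional part before converting:
print(check_str("abc"))  # 'abc'
print(check_str(3.7))    # '3'  (float is truncated to int first)
print(check_str(42))     # '42'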
#vtb
def _parse_wsgi_headers(wsgi_environ):
prefix = 'HTTP_'
p_len = len(prefix)
headers = {
key[p_len:].replace('_', '-').lower():
val for (key, val) in wsgi_environ.items()
if key.startswith(prefix)}
return headers | HTTP headers are presented in WSGI environment with 'HTTP_' prefix.
This method finds those headers, removes the prefix, converts
underscores to dashes, and converts to lower case.
:param wsgi_environ:
:return: returns a dictionary of headers |
#vtb
def consume_changes(self, start, end):
left, right = self._get_changed(start, end)
if left < right:
del self.lines[left:right]
return left < right | Clear the changed status of lines from start till end |
#vtb
async def deregister(self, check):
check_id = extract_attr(check, keys=["CheckID", "ID"])
response = await self._api.get("/v1/agent/check/deregister", check_id)
return response.status == 200 | Deregisters a local check
Parameters:
check (ObjectID): Check ID
Returns:
bool: ``True`` on success
The agent will take care of deregistering the check from the Catalog. |
#vtb
def set(ctx, key, value):
if key == "default_account" and value[0] == "@":
value = value[1:]
ctx.bitshares.config[key] = value | Set configuration parameters |
#vtb
def price_change(self):
try:
if self._data_from_search:
return self._data_from_search.find(, {: }).text
else:
return self._ad_page_content.find(, {: }).text
except Exception as e:
if self._debug:
logging.error(
"Error getting price_change. Error message: " + e.args[0])
return | This method returns any price change.
:return: |
#vtb
def fromJSON(value):
j = json.loads(value)
v = GPString()
if "defaultValue" in j:
v.value = j[]
else:
v.value = j[]
if in j:
v.paramName = j[]
elif in j:
v.paramName = j[]
return v | loads the GP object from a JSON string |
#vtb
def delete_image(self, identifier):
if isinstance(identifier, dict):
identifier = identifier.get('id', '')
j, r = self.request('DELETE', '/images/' + str(identifier))
r.raise_for_status()
return j | ::
DELETE /:login/images/:id
:param identifier: match on the listed image identifier
:type identifier: :py:class:`basestring` or :py:class:`dict`
A string or a dictionary containing an ``id`` key may be
passed in. Will raise an error if the response was an error. |
#vtb
def remove_entry(self, entry):
if not isinstance(entry, Entry):
raise TypeError("entry param must be of type Entry.")
if entry not in self.entries:
raise ValueError("Entry doesn't exist / not bound to this database.")
entry.group.entries.remove(entry)
self.entries.remove(entry) | Remove specified entry.
:param entry: The Entry object to remove.
:type entry: :class:`keepassdb.model.Entry` |
#vtb
def addText(self, text):
self.moveCursor(QtGui.QTextCursor.End)
self.setTextColor(self._currentColor)
self.textCursor().insertText(text) | append text in the chosen color |
#vtb
def robot_files(self):
result = []
for name in os.listdir(self.path):
fullpath = os.path.join(self.path, name)
if os.path.isdir(fullpath):
result.append(RobotFactory(fullpath, parent=self))
else:
if ((name.endswith(".txt") or name.endswith(".robot")) and
(name not in ("__init__.txt", "__init__.robot"))):
result.append(RobotFactory(fullpath, parent=self))
return result | Return a list of all folders, and test suite files (.txt, .robot) |
#vtb
def load_preferences(session, config, valid_paths,
cull_disabled=False, openid=None,
cull_backends=None):
cull_backends = cull_backends or []
query = session.query(fmn.lib.models.Preference)
if openid:
query = query.filter(fmn.lib.models.Preference.openid==openid)
preferences = query.all()
return [
preference.__json__(reify=True)
for preference in preferences
if (
preference.context.name in config[]
and preference.context.name not in cull_backends
and (not cull_disabled or preference.enabled)
)
] | Every rule for every filter for every context for every user.
Any preferences in the DB that are for contexts that are disabled in the
config are omitted here.
If the `openid` argument is None, then this is an expensive query that
loads, practically, the whole database. However, if an openid string is
submitted, then only the preferences of that user are returned (and this is
less expensive). |
#vtb
def GetRowHeaders(self) -> list:
eleArray = self.pattern.GetCurrentRowHeaders()
if eleArray:
controls = []
for i in range(eleArray.Length):
ele = eleArray.GetElement(i)
con = Control.CreateControlFromElement(element=ele)
if con:
controls.append(con)
return controls
return [] | Call IUIAutomationTablePattern::GetCurrentRowHeaders.
Return list, a list of `Control` subclasses, representing all the row headers in a table.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtablepattern-getcurrentrowheaders |
#vtb
def get_instance(self, payload):
return MonthlyInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | Build an instance of MonthlyInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.monthly.MonthlyInstance |
#vtb
def delete_collection_pod_security_policy(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_pod_security_policy_with_http_info(**kwargs)
else:
(data) = self.delete_collection_pod_security_policy_with_http_info(**kwargs)
return data | delete collection of PodSecurityPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_pod_security_policy(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread. |
#vtb
def contains_entry(self, key, value):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._encode_invoke_on_key(multi_map_contains_entry_codec, key_data, key=key_data,
value=value_data, thread_id=thread_id()) | Returns whether the multimap contains an entry with the value.
:param key: (object), the specified key.
:param value: (object), the specified value.
:return: (bool), ``true`` if this multimap contains the key-value tuple. |
#vtb
def parse_and_normalize_url_date(date_str):
if date_str is None:
return None
try:
return d1_common.date_time.dt_from_iso8601_str(date_str)
except d1_common.date_time.iso8601.ParseError as e:
raise d1_common.types.exceptions.InvalidRequest(
0,
.format(
date_str, str(e)
),
) | Parse a ISO 8601 date-time with optional timezone.
- Return as datetime with timezone adjusted to UTC.
- Return naive date-time set to UTC. |
#vtb
def create_layout(self, size = None):
if not self.context:
raise Exception("Can not create layout without existing context!")
layout = pangocairo.create_layout(self.context)
font_desc = pango.FontDescription(_font_desc)
if size: font_desc.set_absolute_size(size * pango.SCALE)
layout.set_font_description(font_desc)
return layout | utility function to create layout with the default font. Size and
alignment parameters are shortcuts to according functions of the
pango.Layout |
#vtb
def merge_code(left_code, right_code):
data = dict()
code_lines = (left_code and left_code.iter_code_by_lines()) or tuple()
for abs_line, rel_line, dis in code_lines:
data[rel_line] = [(abs_line, dis), None]
code_lines = (right_code and right_code.iter_code_by_lines()) or tuple()
for abs_line, rel_line, dis in code_lines:
found = data.get(rel_line, None)
if found is None:
found = [None, (abs_line, dis)]
data[rel_line] = found
else:
found[1] = (abs_line, dis)
return data | { relative_line:
((left_abs_line, ((offset, op, args), ...)),
(right_abs_line, ((offset, op, args), ...))),
... } |
#vtb
def light_to_gl(light, transform, lightN):
gl_color = vector_to_gl(light.color.astype(np.float64) / 255.0)
assert len(gl_color) == 4
gl_position = vector_to_gl(transform[:3, 3])
args = [(lightN, gl.GL_POSITION, gl_position),
(lightN, gl.GL_SPECULAR, gl_color),
(lightN, gl.GL_DIFFUSE, gl_color),
(lightN, gl.GL_AMBIENT, gl_color)]
return args | Convert trimesh.scene.lighting.Light objects into
args for gl.glLightFv calls
Parameters
--------------
light : trimesh.scene.lighting.Light
Light object to be converted to GL
transform : (4, 4) float
Transformation matrix of light
lightN : int
Result of gl.GL_LIGHT0, gl.GL_LIGHT1, etc
Returns
--------------
multiarg : [tuple]
List of args to pass to gl.glLightFv eg:
[gl.glLightfb(*a) for a in multiarg] |
#vtb
async def pause(self, pause: bool = True):
self._paused = pause
await self.node.pause(self.channel.guild.id, pause) | Pauses the current song.
Parameters
----------
pause : bool
Set to ``False`` to resume. |
#vtb
def _rotate_tr(self):
rot, x, y, z = self._quaternion.get_axis_angle()
up, forward, right = self._get_dim_vectors()
self.transform.rotate(180 * rot / np.pi, (x, z, y)) | Rotate the transformation matrix based on camera parameters |
#vtb
def _cbc_decrypt(self, final_key, crypted_content):
aes = AES.new(final_key, AES.MODE_CBC, self._enc_iv)
decrypted_content = aes.decrypt(crypted_content)
padding = decrypted_content[-1]
if sys.version > '3':
padding = decrypted_content[-1]
else:
padding = ord(decrypted_content[-1])
decrypted_content = decrypted_content[:len(decrypted_content)-padding]
return decrypted_content | This method decrypts the database |
#vtb
def add_sent(self, sent_obj):
if sent_obj is None:
raise Exception("Sentence object cannot be None")
elif sent_obj.ID is None:
sent_obj.ID = next(self.__idgen)
elif self.has_id(sent_obj.ID):
raise Exception("Sentence ID {} exists".format(sent_obj.ID))
self.__sent_map[sent_obj.ID] = sent_obj
self.__sents.append(sent_obj)
return sent_obj | Add a ttl.Sentence object to this document |
#vtb
def save(self, set_cookie, **params):
if set(self.store.items()) ^ set(self.items()):
value = dict(self.items())
value = json.dumps(value)
value = self.encrypt(value)
if not isinstance(value, str):
value = value.encode(self.encoding)
set_cookie(self.key, value, **self.params)
return True
return False | Update cookies if the session has been changed. |
#vtb
async def teardown_client(self, client_id):
client_info = self._client_info(client_id)
self.adapter.remove_monitor(client_info[])
conns = client_info[]
for conn_string, conn_id in conns.items():
try:
self._logger.debug("Disconnecting client %s from conn %s at teardown", client_id, conn_string)
await self.adapter.disconnect(conn_id)
except:
self._logger.exception("Error disconnecting device during teardown_client: conn_string=%s", conn_string)
del self._clients[client_id] | Release all resources held by a client.
This method must be called and awaited whenever a client is
disconnected. It ensures that all of the client's resources are
properly released and any devices they have connected to are
disconnected cleanly.
Args:
client_id (str): The client that we should tear down.
Raises:
ArgumentError: The client_id is unknown. |
#vtb
def set_data_length(self, length):
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError()
len_diff = length - self.info_len
if len_diff > 0:
new_len = self.alloc_descs[-1][0] + len_diff
if new_len > 0x3ffff800:
raise pycdlibexception.PyCdlibInvalidInput()
self.alloc_descs[-1][0] = new_len
elif len_diff < 0:
len_left = length
alloc_descs_needed = 0
index = 0
while len_left > 0:
this_len = min(len_left, 0x3ffff800)
alloc_descs_needed += 1
self.alloc_descs[index][0] = this_len
index += 1
len_left -= this_len
self.alloc_descs = self.alloc_descs[:alloc_descs_needed]
self.info_len = length | A method to set the length of the data that this UDF File Entry
points to.
Parameters:
length - The new length for the data.
Returns:
Nothing. |
#vtb
def rsi(self, n, array=False):
result = talib.RSI(self.close, n)
if array:
return result
return result[-1] | Relative Strength Index (RSI) indicator. |
#vtb
def cidr_notation(ip_address, netmask):
try:
inet_aton(ip_address)
except:
raise Exception("Invalid ip address " % ip_address)
try:
inet_aton(netmask)
except:
raise Exception("Invalid netmask " % netmask)
ip_address_split = ip_address.split()
netmask_split = netmask.split()
net_start = [str(int(ip_address_split[x]) & int(netmask_split[x]))
for x in range(0,4)]
return .join(net_start) + + get_net_size(netmask_split) | Retrieve the cidr notation given an ip address and netmask.
For example:
cidr_notation('12.34.56.78', '255.255.255.248')
Would return: 12.34.56.72/29
@see http://terminalmage.net/2012/06/10/how-to-find-out-the-cidr-notation-for-a-subne-given-an-ip-and-netmask/
@see http://www.aelius.com/njh/subnet_sheet.html |
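cidr_notation calls a get_net_size helper that is not shown here; a minimal sketch under the assumption that the netmask is a valid contiguous mask, followed by the example from the docstring:
def get_net_size(netmask_split):
    # Count the set bits across the four octets, e.g. ['255', '255', '255', '248'] -> '29'.
    return str(sum(bin(int(octet)).count('1') for octet in netmask_split))

print(cidr_notation('12.34.56.78', '255.255.255.248'))  # '12.34.56.72/29'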
#vtb
def subscribe_topics(self):
base = self.topic
subscribe = self.mqtt.subscribe
subscribe(b"/".join((base, b"$stats/interval/set")))
subscribe(b"/".join((self.settings.MQTT_BASE_TOPIC, b"$broadcast/
nodes = self.nodes
for node in nodes:
for topic in node.subscribe:
topic = b"/".join((base, topic))
subscribe(topic)
self.topic_callbacks[topic] = node.callback | subscribe to all registered device and node topics |
#vtb
def stop(cls, app_id):
conn = Qubole.agent()
return conn.put(cls.element_path(app_id) + "/stop") | Stops an app by issuing a PUT request to the /apps/ID/stop endpoint. |
#vtb
def playlist_create(
self,
name,
description='',
*,
make_public=False,
songs=None
):
share_state = 'PUBLIC' if make_public else 'PRIVATE'
playlist = self._call(
mc_calls.PlaylistsCreate,
name,
description,
share_state
).body
if songs:
playlist = self.playlist_songs_add(songs, playlist)
return playlist | Create a playlist.
Parameters:
name (str): Name to give the playlist.
description (str): Description to give the playlist.
make_public (bool, Optional): If ``True`` and account has a subscription,
make playlist public.
Default: ``False``
songs (list, Optional): A list of song dicts to add to the playlist.
Returns:
dict: Playlist information. |
#vtb
def loadfn(fn, *args, **kwargs):
if "mpk" in os.path.basename(fn).lower():
if msgpack is None:
raise RuntimeError(
"Loading of message pack files is not "
"possible as msgpack-python is not installed.")
if "object_hook" not in kwargs:
kwargs["object_hook"] = object_hook
with zopen(fn, "rb") as fp:
return msgpack.load(fp, *args, **kwargs)
else:
with zopen(fn) as fp:
if "yaml" in os.path.basename(fn).lower():
if yaml is None:
raise RuntimeError("Loading of YAML files is not "
"possible as ruamel.yaml is not installed.")
if "Loader" not in kwargs:
kwargs["Loader"] = Loader
return yaml.load(fp, *args, **kwargs)
else:
if "cls" not in kwargs:
kwargs["cls"] = MontyDecoder
return json.load(fp, *args, **kwargs) | Loads json/yaml/msgpack directly from a filename instead of a
File-like object. For YAML, ruamel.yaml must be installed. The file type is
automatically detected. YAML is assumed if the filename contains "yaml"
(lower or upper case). Otherwise, json is always assumed.
Args:
fn (str/Path): filename or pathlib.Path.
\*args: Any of the args supported by json/yaml.load.
\*\*kwargs: Any of the kwargs supported by json/yaml.load.
Returns:
(object) Result of json/yaml/msgpack.load. |
#vtb
def Update(self, env, args=None):
values = {}
for option in self.options:
if not option.default is None:
values[option.key] = option.default
for filename in self.files:
if os.path.exists(filename):
dir = os.path.split(os.path.abspath(filename))[0]
if dir:
sys.path.insert(0, dir)
try:
values['__name__'] = filename
with open(filename, 'r') as f:
contents = f.read()
exec(contents, {}, values)
finally:
if dir:
del sys.path[0]
del values['__name__']
if args is None:
args = self.args
for arg, value in args.items():
added = False
for option in self.options:
if arg in list(option.aliases) + [ option.key ]:
values[option.key] = value
added = True
if not added:
self.unknown[arg] = value
| Update an environment with the option variables.
env - the environment to update. |
#vtb
def Sonnad_Goudar_2006(Re, eD):
S = 0.124*eD*Re + log(0.4587*Re)
return (.8686*log(.4587*Re/S**(S/(S+1))))**-2 | r'''Calculates Darcy friction factor using the method in Sonnad and Goudar
(2006) [2]_ as shown in [1]_.
.. math::
\frac{1}{\sqrt{f_d}} = 0.8686\ln\left(\frac{0.4587Re}{S^{S/(S+1)}}\right)
.. math::
S = 0.1240\times\frac{\epsilon}{D}\times Re + \ln(0.4587Re)
Parameters
----------
Re : float
Reynolds number, [-]
eD : float
Relative roughness, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
Range is 4E3 <= Re <= 1E8; 1E-6 <= eD <= 5E-2
Examples
--------
>>> Sonnad_Goudar_2006(1E5, 1E-4)
0.0185971269898162
References
----------
.. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and
Computational Efficiency for Turbulent Flow in Pipes." Flow, Turbulence
and Combustion 90, no. 1 (January 1, 2013): 1-27.
doi:10.1007/s10494-012-9419-7
.. [2] Travis, Quentin B., and Larry W. Mays."Relationship between
Hazen-William and Colebrook-White Roughness Values." Journal of
Hydraulic Engineering 133, no. 11 (November 2007): 1270-73.
doi:10.1061/(ASCE)0733-9429(2007)133:11(1270). |
#vtb
def build_image_from_inherited_image(self, image_name: str, image_tag: str,
repo_path: Path,
requirements_option: RequirementsOptions):
base_name, base_tag = self.get_inherit_image()
if requirements_option == RequirementsOptions.no_requirements:
image = self.get_image(base_name, base_tag)
image.tag(image_name, image_tag)
return image
dockerfile = self.get_install_requirements_dockerfile(base_name, base_tag, repo_path, requirements_option)
self.get_or_build_image(image_name, image_tag, dockerfile, build_context=repo_path.parent, pull=False)
return self.get_image(image_name, image_tag) | Builds a image with installed requirements from the inherited image. (Or just tags the image
if there are no requirements.)
See :meth:`build_image` for parameters descriptions.
:rtype: docker.models.images.Image |
#vtb
def save_aggregate_report_to_elasticsearch(aggregate_report,
index_suffix=None,
monthly_indexes=False):
logger.debug("Saving aggregate report to Elasticsearch")
aggregate_report = aggregate_report.copy()
metadata = aggregate_report["report_metadata"]
org_name = metadata["org_name"]
report_id = metadata["report_id"]
domain = aggregate_report["policy_published"]["domain"]
begin_date = human_timestamp_to_datetime(metadata["begin_date"])
end_date = human_timestamp_to_datetime(metadata["end_date"])
begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%S")
end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%S")
if monthly_indexes:
index_date = begin_date.strftime("%Y-%m")
else:
index_date = begin_date.strftime("%Y-%m-%d")
aggregate_report["begin_date"] = begin_date
aggregate_report["end_date"] = end_date
date_range = [aggregate_report["begin_date"],
aggregate_report["end_date"]]
org_name_query = Q(dict(match=dict(org_name=org_name)))
report_id_query = Q(dict(match=dict(report_id=report_id)))
domain_query = Q(dict(match={"published_policy.domain": domain}))
begin_date_query = Q(dict(match=dict(date_range=begin_date)))
end_date_query = Q(dict(match=dict(date_range=end_date)))
search = Search(index="dmarc_aggregate*")
query = org_name_query & report_id_query & domain_query
query = query & begin_date_query & end_date_query
search.query = query
existing = search.execute()
if len(existing) > 0:
raise AlreadySaved("An aggregate report ID {0} from {1} about {2} "
"with a date range of {3} UTC to {4} UTC already "
"exists in "
"Elasticsearch".format(report_id,
org_name,
domain,
begin_date_human,
end_date_human))
published_policy = _PublishedPolicy(
domain=aggregate_report["policy_published"]["domain"],
adkim=aggregate_report["policy_published"]["adkim"],
aspf=aggregate_report["policy_published"]["aspf"],
p=aggregate_report["policy_published"]["p"],
sp=aggregate_report["policy_published"]["sp"],
pct=aggregate_report["policy_published"]["pct"],
fo=aggregate_report["policy_published"]["fo"]
)
for record in aggregate_report["records"]:
agg_doc = _AggregateReportDoc(
xml_schemea=aggregate_report["xml_schema"],
org_name=metadata["org_name"],
org_email=metadata["org_email"],
org_extra_contact_info=metadata["org_extra_contact_info"],
report_id=metadata["report_id"],
date_range=date_range,
errors=metadata["errors"],
published_policy=published_policy,
source_ip_address=record["source"]["ip_address"],
source_country=record["source"]["country"],
source_reverse_dns=record["source"]["reverse_dns"],
source_base_domain=record["source"]["base_domain"],
message_count=record["count"],
disposition=record["policy_evaluated"]["disposition"],
dkim_aligned=record["policy_evaluated"]["dkim"] == "pass",
spf_aligned=record["policy_evaluated"]["spf"] == "pass",
header_from=record["identifiers"]["header_from"],
envelope_from=record["identifiers"]["envelope_from"],
envelope_to=record["identifiers"]["envelope_to"]
)
for override in record["policy_evaluated"]["policy_override_reasons"]:
agg_doc.add_policy_override(type_=override["type"],
comment=override["comment"])
for dkim_result in record["auth_results"]["dkim"]:
agg_doc.add_dkim_result(domain=dkim_result["domain"],
selector=dkim_result["selector"],
result=dkim_result["result"])
for spf_result in record["auth_results"]["spf"]:
agg_doc.add_spf_result(domain=spf_result["domain"],
scope=spf_result["scope"],
result=spf_result["result"])
index = "dmarc_aggregate"
if index_suffix:
index = "{0}_{1}".format(index, index_suffix)
index = "{0}-{1}".format(index, index_date)
create_indexes([index])
agg_doc.meta.index = index
try:
agg_doc.save()
except Exception as e:
raise ElasticsearchError(
"Elasticsearch error: {0}".format(e.__str__())) | Saves a parsed DMARC aggregate report to ElasticSearch
Args:
aggregate_report (OrderedDict): A parsed forensic report
index_suffix (str): The suffix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
Raises:
AlreadySaved |
#vtb
def dictlist_convert_to_bool(dict_list: Iterable[Dict], key: str) -> None:
for d in dict_list:
d[key] = 1 if d[key] == "Y" else 0 | Process an iterable of dictionaries. For each dictionary ``d``, convert
(in place) ``d[key]`` to a bool. If that fails, convert it to ``None``. |
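A quick usage sketch; note that, as written, the function maps "Y" to 1 and everything else to 0:
rows = [{"consent": "Y"}, {"consent": "N"}]
dictlist_convert_to_bool(rows, "consent")
print(rows)  # [{'consent': 1}, {'consent': 0}]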
#vtb
def _get_new_connection(self, conn_params):
self.__connection_string = conn_params.get(, )
conn = self.Database.connect(**conn_params)
return conn | Opens a connection to the database. |
#vtb
def load(self):
pg = self.usr.getPage("http://www.neopets.com/bank.phtml")
if not "great to see you again" in pg.content:
logging.getLogger("neolib.user").info("Could not load userpg': pg})
raise noBankAcct
self.__loadDetails(pg) | Loads the user's account details and
Raises
parseException |
#vtb
def get_arctic_version(self, symbol, as_of=None):
return self._read_metadata(symbol, as_of=as_of).get(, 0) | Return the numerical representation of the arctic version used to write the last (or as_of) version for
the given symbol.
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or int or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
Returns
-------
arctic_version : int
The numerical representation of Arctic version, used to create the specified symbol version |
#vtb
def space_cluster(catalog, d_thresh, show=True):
dist_mat = dist_mat_km(catalog)
dist_vec = squareform(dist_mat)
Z = linkage(dist_vec, method='single')
indices = fcluster(Z, t=d_thresh, criterion='distance')
group_ids = list(set(indices))
indices = [(indices[i], i) for i in range(len(indices))]
if show:
plt.show()
indices.sort(key=lambda tup: tup[0])
groups = []
for group_id in group_ids:
group = Catalog()
for ind in indices:
if ind[0] == group_id:
group.append(catalog[ind[1]])
elif ind[0] > group_id:
groups.append(group)
break
groups.append(group)
return groups | Cluster a catalog by distance only.
Will compute the matrix of physical distances between events and utilize
the :mod:`scipy.clustering.hierarchy` module to perform the clustering.
:type catalog: obspy.core.event.Catalog
:param catalog: Catalog of events to clustered
:type d_thresh: float
:param d_thresh: Maximum inter-event distance threshold
:returns: list of :class:`obspy.core.event.Catalog` objects
:rtype: list
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("NCEDC")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=2)
>>> groups = space_cluster(catalog=cat, d_thresh=2, show=False)
>>> from eqcorrscan.utils.clustering import space_cluster
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> client = Client("https://earthquake.usgs.gov")
>>> starttime = UTCDateTime("2002-01-01")
>>> endtime = UTCDateTime("2002-02-01")
>>> cat = client.get_events(starttime=starttime, endtime=endtime,
... minmagnitude=6)
>>> groups = space_cluster(catalog=cat, d_thresh=1000, show=False) |
#vtb
def is_valid_number_for_region(numobj, region_code):
country_code = numobj.country_code
if region_code is None:
return False
metadata = PhoneMetadata.metadata_for_region_or_calling_code(country_code, region_code.upper())
if (metadata is None or
(region_code != REGION_CODE_FOR_NON_GEO_ENTITY and
country_code != country_code_for_valid_region(region_code))):
return False
nsn = national_significant_number(numobj)
return (_number_type_helper(nsn, metadata) != PhoneNumberType.UNKNOWN) | Tests whether a phone number is valid for a certain region.
Note this doesn't verify the number is actually in use, which is
impossible to tell by just looking at a number itself. If the country
calling code is not the same as the country calling code for the region,
this immediately exits with false. After this, the specific number pattern
rules for the region are examined. This is useful for determining for
example whether a particular number is valid for Canada, rather than just
a valid NANPA number.
Warning: In most cases, you want to use is_valid_number instead. For
example, this method will mark numbers from British Crown dependencies
such as the Isle of Man as invalid for the region "GB" (United Kingdom),
since it has its own region code, "IM", which may be undesirable.
Arguments:
numobj -- The phone number object that we want to validate.
region_code -- The region that we want to validate the phone number for.
Returns a boolean that indicates whether the number is of a valid pattern. |
#vtb
def result(self):
if self._result.lower() == :
return WIN
if self._result.lower() == and \
self.overtime != 0:
return OVERTIME_LOSS
return LOSS | Returns a ``string`` constant to indicate whether the team lost in
regulation, lost in overtime, or won. |
#vtb
def _trunc(x, minval=None, maxval=None):
x = np.copy(x)
if minval is not None:
x[x < minval] = minval
if maxval is not None:
x[x > maxval] = maxval
return x | Truncate vector values to have values on range [minval, maxval] |
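A quick usage sketch with NumPy showing both bounds applied:
import numpy as np

x = np.array([-2.0, 0.5, 3.7])
print(_trunc(x, minval=0.0, maxval=1.0))  # [0.  0.5 1. ]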
#vtb
def _get_task_with_policy(queue_name, task_id, owner):
now = datetime.datetime.utcnow()
task = (
WorkQueue.query
.filter_by(queue_name=queue_name, task_id=task_id)
.with_lockmode()
.first())
if not task:
raise TaskDoesNotExistError( % task_id)
lease_delta = now - task.eta
if lease_delta > datetime.timedelta(0):
db.session.rollback()
raise LeaseExpiredError( % (
task.queue_name, task_id, lease_delta))
if task.last_owner != owner:
db.session.rollback()
raise NotOwnerError( % (
task.queue_name, task_id, task.last_owner))
return task | Fetches the specified task and enforces ownership policy.
Args:
queue_name: Name of the queue the work item is on.
task_id: ID of the task that is finished.
owner: Who or what has the current lease on the task.
Returns:
The valid WorkQueue task that is currently owned.
Raises:
TaskDoesNotExistError if the task does not exist.
LeaseExpiredError if the lease is no longer active.
NotOwnerError if the specified owner no longer owns the task. |
#vtb
def register_entity_to_group(self, entity, group):
if entity in self._entities:
if group in self._groups:
self._groups[group].append(entity)
else:
self._groups[group] = [entity]
else:
raise UnmanagedEntityError(entity) | Add entity to a group.
If group does not exist, entity will be added as first member
entity is of type Entity
group is a string that is the name of the group |
#vtb
def get_gaf_format(self):
sep = '\t'
return sep.join(
[self.gene, self.db_ref, self.term.id, self.evidence,
'|'.join(self.db_ref), '|'.join(self.with_)]) | Return a GAF 2.0-compatible string representation of the annotation.
Parameters
----------
Returns
-------
str
The formatted string. |
#vtb
def _translate_segment_glob(pattern):
escape = False
regex =
i, end = 0, len(pattern)
while i < end:
char = pattern[i]
i += 1
if escape:
escape = False
regex += re.escape(char)
elif char == :
escape = True
elif char == :
regex +=
elif char == :
regex +=
elif char == :
j = i
if j < end and pattern[j] == :
j += 1
if j < end and pattern[j] == :
j += 1
while j < end and pattern[j] != :
j += 1
if j < end:
j += 1
expr =
if pattern[i] == :
expr +=
i += 1
elif pattern[i] == :
else:
regex += re.escape(char)
return regex | Translates the glob pattern to a regular expression. This is used in
the constructor to translate a path segment glob pattern to its
corresponding regular expression.
*pattern* (:class:`str`) is the glob pattern.
Returns the regular expression (:class:`str`). |
#vtb
def GET_names( self, path_info ):
include_expired = False
qs_values = path_info[]
page = qs_values.get(, None)
if page is None:
log.error("Page required")
return self._reply_json({: }, status_code=400)
try:
page = int(page)
if page < 0:
raise ValueError("Page is negative")
except ValueError:
log.error("Invalid page")
return self._reply_json({: }, status_code=400)
if qs_values.get(, ).lower() in [, ]:
include_expired = True
offset = page * 100
count = 100
blockstackd_url = get_blockstackd_url()
res = blockstackd_client.get_all_names(offset, count, include_expired=include_expired, hostport=blockstackd_url)
if json_is_error(res):
log.error("Failed to list all names (offset={}, count={}): {}".format(offset, count, res[]))
return self._reply_json({: }, status_code=res.get(, 502))
return self._reply_json(res) | Get all names in existence
If `all=true` is set, then include expired names.
Returns the list on success
Returns 400 on invalid arguments
Returns 502 on failure to get names |
#vtb
def _has_population_germline(rec):
for k in population_keys:
if k in rec.header.info:
return True
return False | Check if header defines population annotated germline samples for tumor only. |
#vtb
def printSegmentForCell(tm, cell):
print "Segments for cell", cell, ":"
for seg in tm.basalConnections._cells[cell]._segments:
print " ",
synapses = seg._synapses
for s in synapses:
print "%d:%g" %(s.presynapticCell,s.permanence),
print | Print segment information for this cell |
#vtb
def read(self, want=0):
if self._finished:
if self._finished == 1:
self._finished += 1
return ""
return EOFError("EOF reached")
self._want = want
self._add.set()
self._result.wait()
self._result.clear()
if want:
data = self._data[:want]
self._data = self._data[want:]
else:
data = self._data
self._data = bytes()
return data | Read method, gets data from internal buffer while releasing
:meth:`write` locks when needed.
The lock usage means it must ran on a different thread than
:meth:`fill`, ie. the main thread, otherwise will deadlock.
The combination of both write and this method running on different
threads makes tarfile being streamed on-the-fly, with data chunks being
processed and retrieved on demand.
:param want: number bytes to read, defaults to 0 (all available)
:type want: int
:returns: tarfile data as bytes
:rtype: bytes |
#vtb
def get_serializer(name):
try:
log.debug(, name)
return SERIALIZER_LOOKUP[name]
except KeyError:
msg = .format(name)
log.error(msg, exc_info=True)
raise InvalidSerializerException(msg) | Return the serialize function. |
#vtb
def _shrink_file(dicom_file_in, subsample_factor):
dicom_file_out = dicom_file_in
dicom_in = compressed_dicom.read_file(dicom_file_in)
file_meta = pydicom.dataset.Dataset()
for key, value in dicom_in.file_meta.items():
file_meta.add(value)
dicom_out = pydicom.dataset.FileDataset(dicom_file_out, {}, file_meta=file_meta, preamble=b * 128)
dicom_out.is_little_endian = dicom_in.is_little_endian
dicom_out.is_implicit_VR = dicom_in.is_implicit_VR
rows = 0
columns = 0
for field_key, field_value in dicom_in.items():
logging.info(field_key)
if field_key == (0x7fe0, 0x0010):
pixel_array = dicom_in.pixel_array[::subsample_factor, ::subsample_factor]
dicom_out.PixelData = pixel_array.tostring()
rows = pixel_array.shape[1]
columns = pixel_array.shape[0]
dicom_out[0x7fe0, 0x0010].VR =
else:
dicom_out.add(field_value)
dicom_out.PixelSpacing[0] *= subsample_factor
dicom_out.PixelSpacing[1] *= subsample_factor
dicom_out.Rows = rows
dicom_out.Columns = columns
if not os.path.exists(os.path.dirname(dicom_file_out)):
logging.info()
dicom_out.save_as(dicom_file_out, write_like_original=False) | Anonymize a single dicom file
:param dicom_file_in: filepath for input file
:param dicom_file_out: filepath for output file
:param fields_to_keep: dicom tags to keep |
#vtb
def _build_urlmapping(urls, strict_slashes=False, **kwargs):
rules = _build_rules(urls)
return Map(rules=list(rules), strict_slashes=strict_slashes, **kwargs) | Converts the anillo urlmappings list into
werkzeug Map instance.
:return: a werkzeug Map instance
:rtype: Map |
#vtb
def _deserialize_datetime(self, data):
for key in data:
if isinstance(data[key], dict):
if data[key].get() == :
data[key] = \
datetime.datetime.fromtimestamp(data[key][])
return data | Take any values coming in as a datetime and deserialize them |
#vtb
def login(self, token, use_token=True, mount_point=DEFAULT_MOUNT_POINT):
params = {
'token': token,
}
api_path = '/v1/auth/{mount_point}/login'.format(mount_point=mount_point)
return self._adapter.login(
url=api_path,
use_token=use_token,
json=params,
) | Login using GitHub access token.
Supported methods:
POST: /auth/{mount_point}/login. Produces: 200 application/json
:param token: GitHub personal API token.
:type token: str | unicode
:param use_token: if True, uses the token in the response received from the auth request to set the "token"
attribute on the the :py:meth:`hvac.adapters.Adapter` instance under the _adapater Client attribute.
:type use_token: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the login request.
:rtype: dict |
#vtb
def _on_changed(self):
page = self._get_page()
if not page.flag_autosave:
page.flag_changed = True
self._update_gui_text_tabs() | Slot for changed events |
#vtb
def date_parser(items):
try:
dt = datetime.strptime(items,"%d/%m/%Y %H:%M:%S")
except Exception as e:
try:
dt = datetime.strptime(items,"%m/%d/%Y %H:%M:%S")
except Exception as ee:
raise Exception("error parsing datetime string" +\
" {0}: \n{1}\n{2}".format(str(items),str(e),str(ee)))
return dt | datetime parser to help load smp files
Parameters
----------
items : iterable
something or somethings to try to parse into datetimes
Returns
-------
dt : iterable
the cast datetime things |
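A quick usage sketch showing the day-first parse and the month-first fallback:
print(date_parser("25/12/2020 13:45:00"))  # parsed as %d/%m/%Y -> 2020-12-25 13:45:00
print(date_parser("12/25/2020 13:45:00"))  # %d/%m/%Y fails, falls back to %m/%d/%Y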
#vtb
def _send_content(self, content, connection):
if connection:
if connection.async:
callback = connection.callbacks[]
if callback:
callback(self, self.parent_object, content)
self.current_connection.reset()
self.current_connection = None
else:
return (self, self.parent_object, content) | Send a content array from the connection |
#vtb
def p_new_expr(self, p):
if len(p) == 2:
p[0] = p[1]
else:
p[0] = self.asttypes.NewExpr(p[2])
p[0].setpos(p) | new_expr : member_expr
| NEW new_expr |
#vtb
def map_nested(function, data_struct, dict_only=False, map_tuple=False):
if isinstance(data_struct, dict):
return {
k: map_nested(function, v, dict_only, map_tuple)
for k, v in data_struct.items()
}
elif not dict_only:
types = [list]
if map_tuple:
types.append(tuple)
if isinstance(data_struct, tuple(types)):
mapped = [map_nested(function, v, dict_only, map_tuple)
for v in data_struct]
if isinstance(data_struct, list):
return mapped
else:
return tuple(mapped)
return function(data_struct) | Apply a function recursively to each element of a nested data struct. |
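A quick usage sketch showing recursion through a dict of lists:
data = {"a": [1, 2], "b": {"c": 3}}
print(map_nested(lambda x: x * 10, data))  # {'a': [10, 20], 'b': {'c': 30}}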
#vtb
def setup(self):
self.devman.sort_device()
self.call.setup()
self.model_setup()
self.xy_addr0()
self.dae.setup()
self.to_sysbase()
return self | Set up the power system object by executing the following workflow:
* Sort the loaded models to meet the initialization sequence
* Create call strings for routines
* Call the ``setup`` function of the loaded models
* Assign addresses for the loaded models
* Call ``dae.setup`` to assign memory for the numerical dae structure
* Convert model parameters to the system base
Returns
-------
PowerSystem
The instance of the PowerSystem |
#vtb
def parse_reports(self):
self.infer_exp = dict()
regexes = {
: r"\"1\+\+,1--,2\+-,2-\+\": (\d\.\d+)",
: r"\"1\+-,1-\+,2\+\+,2--\": (\d\.\d+)",
: r"\"\+\+,--\": (\d\.\d+)",
: r"\+-,-\+\": (\d\.\d+)",
: r"Fraction of reads failed to determine: (\d\.\d+)"
}
for f in self.find_log_files():
d = dict()
for k, r in regexes.items():
r_search = re.search(r, f[], re.MULTILINE)
if r_search:
d[k] = float(r_search.group(1))
if len(d) > 0:
if f[] in self.infer_exp:
log.debug("Duplicate sample name found! Overwriting: {}".format(f[]))
self.add_data_source(f, section=)
self.infer_exp[f[]] = d
self.infer_exp = self.ignore_samples(self.infer_exp)
if len(self.infer_exp) > 0:
self.write_data_file(self.infer_exp, )
pdata = dict()
for s_name, vals in self.infer_exp.items():
pdata[s_name] = dict()
for k, v in vals.items():
v *= 100.0
if k[:2] == or k[:2] == :
k = k[3:]
pdata[s_name][k] = v + pdata[s_name].get(k, 0)
keys = OrderedDict()
keys[] = {: "Sense"}
keys[] = {: "Antisense"}
keys[] = {: "Undetermined"}
pconfig = {
: ,
: ,
: ,
: 0,
: 100,
: False,
: ,
: False
}
self.add_section (
name = ,
anchor = ,
description = \
" counts the percentage of reads and read pairs that match the strandedness of overlapping transcripts." \
" It can be used to infer whether RNA-seq library preps are stranded (sense or antisense).",
plot = bargraph.plot(pdata, keys, pconfig)
)
return len(self.infer_exp) | Find RSeQC infer_experiment reports and parse their data |
#vtb
def cert_from_instance(instance):
if instance.signature:
if instance.signature.key_info:
return cert_from_key_info(instance.signature.key_info,
ignore_age=True)
return [] | Find certificates that are part of an instance
:param instance: An instance
:return: possible empty list of certificates |
#vtb
def _decode_image(fobj, session, filename):
buf = fobj.read()
image = tfds.core.lazy_imports.cv2.imdecode(
np.fromstring(buf, dtype=np.uint8), flags=3)
if image is None:
logging.warning(
"Image %s could not be decoded by OpenCV, falling back to TF", filename)
try:
image = tf.image.decode_image(buf, channels=3)
image = session.run(image)
except tf.errors.InvalidArgumentError:
logging.fatal("Image %s could not be decoded by Tensorflow", filename)
if len(image.shape) == 4:
image = image.reshape(image.shape[1:])
return image | Reads and decodes an image from a file object as a Numpy array.
The SUN dataset contains images in several formats (despite the fact that
all of them have .jpg extension). Some of them are:
- BMP (RGB)
- PNG (grayscale, RGBA, RGB interlaced)
- JPEG (RGB)
- GIF (1-frame RGB)
Since TFDS assumes that all images have the same number of channels, we
convert all of them to RGB.
Args:
fobj: File object to read from.
session: TF session used to decode the images.
filename: Filename of the original image in the archive.
Returns:
Numpy array with shape (height, width, channels). |
#vtb
def scopes(self):
if not self.__scopes:
self.__scopes = Scopes(self.__connection)
return self.__scopes | Gets the Scopes API client.
Returns:
Scopes: |
#vtb
def add_default_parameter_values(self, sam_template):
parameter_definition = sam_template.get("Parameters", None)
if not parameter_definition or not isinstance(parameter_definition, dict):
return self.parameter_values
for param_name, value in parameter_definition.items():
if param_name not in self.parameter_values and isinstance(value, dict) and "Default" in value:
self.parameter_values[param_name] = value["Default"] | Method to read default values for template parameters and merge with user supplied values.
Example:
If the template contains the following parameters defined
Parameters:
Param1:
Type: String
Default: default_value
Param2:
Type: String
Default: default_value
And, the user explicitly provided the following parameter values:
{
Param2: "new value"
}
then, this method will grab default value for Param1 and return the following result:
{
Param1: "default_value",
Param2: "new value"
}
:param dict sam_template: SAM template
:param dict parameter_values: Dictionary of parameter values provided by the user
:return dict: Merged parameter values |