code (string, lengths 64 to 7.01k) | docstring (string, lengths 2 to 15.8k)
---|---
#vtb
def same_log10_order_of_magnitude(x, delta=0.1):
dmin = np.log10(np.min(x)*(1-delta))
dmax = np.log10(np.max(x)*(1+delta))
return np.floor(dmin) == np.floor(dmax) | Return True if the range is approximately within the same order of magnitude.
For example these sequences are in the same order of magnitude:
- [1, 8, 5] # [1, 10)
- [35, 20, 80] # [10, 100)
- [232, 730] # [100, 1000)
Parameters
----------
x : array-like
Values in base 10. Must be size 2 and
``rng[0] <= rng[1]``.
delta : float
Fuzz factor for approximation. It is multiplicative. |
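A quick usage sketch of the function above (assuming numpy is imported as np); the sample values are hypothetical:

# All values (with a 10% fuzz) fall inside [10, 100):
same_log10_order_of_magnitude([35, 20, 80])        # -> True
# 7.2 and 13.2 straddle the 10 boundary after applying delta:
same_log10_order_of_magnitude([8, 12])             # -> False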
#vtb
def surrogateescape_handler(exc):
mystring = exc.object[exc.start:exc.end]
try:
if isinstance(exc, UnicodeDecodeError):
decoded = replace_surrogate_decode(mystring)
elif isinstance(exc, UnicodeEncodeError):
decoded = replace_surrogate_encode(mystring, exc)
else:
raise exc
except NotASurrogateError:
raise exc
return (decoded, exc.end) | Pure Python implementation of the PEP 383: the "surrogateescape" error
handler of Python 3. Undecodable bytes will be replaced by a Unicode
character U+DCxx on decoding, and these are translated into the
original bytes on encoding. |
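A minimal sketch of how such a handler would be wired into Python's codecs machinery (the custom handler name is hypothetical and chosen so it does not clash with the built-in Python 3 'surrogateescape'):

import codecs

codecs.register_error('surrogateescape_py', surrogateescape_handler)

# Undecodable byte 0xff becomes the surrogate U+DCFF, 0x61 decodes normally:
b'\xff\x61'.decode('ascii', errors='surrogateescape_py')   # -> '\udcffa'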
#vtb
def index(self, key, default=UNSET):
self._find_lt(key)
node = self._path[0][2]
if node is self._tail or key < node[0]:
if default is self.UNSET:
raise KeyError('{0!r}'.format(key))
return default
return self._distance[0] | Find the first key-value pair with key *key* and return its position.
If the key is not found, return *default*. If default was not provided,
raise a ``KeyError`` |
#vtb
def eval(self, code, mode="single"):
if isinstance(code, string_types):
if PY2 and isinstance(code, text_type):
code = UTF8_COOKIE + code.encode("utf-8")
code = compile(code, "<interactive>", mode)
return eval(code, self.globals, self.locals) | Evaluate code in the context of the frame. |
#vtb
def to_set_field(cls):
class SetConverter(object):
def __init__(self, cls):
self._cls = cls
@property
def cls(self):
return resolve_class(self._cls)
def __call__(self, values):
values = values or set()
args = {to_model(self.cls, value) for value in values}
return TypedSet(cls=self.cls, args=args)
return SetConverter(cls) | Returns a callable instance that will convert a value to a TypedSet.
:param cls: Valid class type of the items in the set.
:return: instance of the SetConverter. |
#vtb
def _close_cursor_now(self, cursor_id, address=None):
if not isinstance(cursor_id, integer_types):
raise TypeError("cursor_id must be an instance of (int, long)")
if self.__cursor_manager is not None:
self.__cursor_manager.close(cursor_id, address)
else:
self._kill_cursors([cursor_id], address, self._get_topology()) | Send a kill cursors message with the given id.
What closing the cursor actually means depends on this client's
cursor manager. If there is none, the cursor is closed synchronously
on the current thread. |
#vtb
def request_verification(self, user, identity):
return UserIdentityRequest(self).put(self.endpoint.request_verification, user, identity) | Sends the user a verification email with a link to verify ownership of the email address.
:param user: User id or object
:param identity: Identity id or object
:return: requests Response object |
#vtb
def free_params(self, value):
value = scipy.asarray(value, dtype=float)
self.K_up_to_date = False
self.k.free_params = value[:self.k.num_free_params]
self.noise_k.free_params = value[self.k.num_free_params:self.k.num_free_params + self.noise_k.num_free_params]
if self.mu is not None:
self.mu.free_params = value[self.k.num_free_params + self.noise_k.num_free_params:] | Set the free parameters. Note that this bypasses enforce_bounds. |
#vtb
def _local_pauli_eig_meas(op, idx):
if op == 'X':
return Program(RY(-pi / 2, idx))
elif op == 'Y':
return Program(RX(pi / 2, idx))
elif op == 'Z':
return Program()
raise ValueError(f'Unknown operation: {op}') | Generate gate sequence to measure in the eigenbasis of a Pauli operator, assuming
we are only able to measure in the Z eigenbasis. (Note: The unitary operations of this
Program are essentially the Hermitian conjugates of those in :py:func:`_one_q_pauli_prep`) |
#vtb
def emit(self, record):
if record.args and isinstance(record.args, collections.Mapping):
extra = dict(self._extra, **record.args)
else:
extra = self._extra
try:
msg = self.format(record)
pri = self.mapPriority(record.levelno)
mid = getattr(record, 'MESSAGE_ID', None)
send(msg,
SOCKET=self.socket,
MESSAGE_ID=mid,
PRIORITY=format(pri),
LOGGER=record.name,
THREAD_NAME=record.threadName,
CODE_FILE=record.pathname,
CODE_LINE=record.lineno,
CODE_FUNC=record.funcName,
**extra)
except Exception:
self.handleError(record) | Write record as journal event.
MESSAGE is taken from the message provided by the
user, and PRIORITY, LOGGER, THREAD_NAME,
CODE_{FILE,LINE,FUNC} fields are appended
automatically. In addition, record.MESSAGE_ID will be
used if present. |
#vtb
def periodic_distance(a, b, periodic):
a = np.array(a)
b = np.array(b)
periodic = np.array(periodic)
delta = np.abs(a - b)
delta = np.where(delta > 0.5 * periodic, periodic - delta, delta)
return np.sqrt((delta ** 2).sum(axis=-1)) | Periodic distance between two arrays. Periodic is a 3
dimensional array containing the 3 box sizes. |
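A short usage sketch (assuming numpy as np): in a 10x10x10 box the naive distance between x=1 and x=9 is 8, but the minimum-image distance through the boundary is 2.

a = [1.0, 5.0, 5.0]
b = [9.0, 5.0, 5.0]
box = [10.0, 10.0, 10.0]
periodic_distance(a, b, box)    # -> 2.0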
#vtb
def unpack_from_dict(fmt, names, data, offset=0):
return CompiledFormatDict(fmt, names).unpack_from(data, offset) | Same as :func:`~bitstruct.unpack_from()`, but returns a
dictionary.
See :func:`~bitstruct.pack_dict()` for details on `names`. |
#vtb
def is_valid_country_abbrev(abbrev, case_sensitive=False):
if case_sensitive:
country_code = abbrev
else:
country_code = abbrev.upper()
for code, full_name in COUNTRY_TUPLES:
if country_code == code:
return True
return False | Given a country code abbreviation, check to see if it matches the
country table.
abbrev: (str) Country code to evaluate.
case_sensitive: (bool) When True, enforce case sensitivity.
Returns True if valid, False if not. |
#vtb
def mline_point_(self, col, x=None, y=None, rsum=None, rmean=None):
line = self._multiseries(col, x, y, "line", rsum, rmean)
point = self._multiseries(col, x, y, "point", rsum, rmean)
return line * point | Splits a column into multiple series based on the column's
unique values, then visualizes these series in a chart.
Parameters: column to split, x axis column, y axis column
Optional: rsum="1D" to resample and sum the data, or rmean="1D"
to resample and average the data |
#vtb
def processStream(self):
print(.format(
, self.width-10))
print(.center(60, ))
self.windowSize = self.verboseRead(WindowSizeAlphabet())
print(.center(60, ))
self.ISLAST = False
self.output = bytearray()
while not self.ISLAST:
self.ISLAST = self.verboseRead(
BoolCode(, description="Last block"))
if self.ISLAST:
if self.verboseRead(
BoolCode(, description="Empty block")): break
if self.metablockLength(): continue
if not self.ISLAST and self.uncompressed(): continue
print(.center(60, ))
self.numberOfBlockTypes = {}
self.currentBlockCounts = {}
self.blockTypeCodes = {}
self.blockCountCodes = {}
for blockType in (L,I,D): self.blockType(blockType)
print(.center(60, ))
self.NPOSTFIX, self.NDIRECT = self.verboseRead(DistanceParamAlphabet())
self.readLiteralContextModes()
print(.center(60, ))
self.cmaps = {}
numberOfTrees = {I: self.numberOfBlockTypes[I]}
for blockType in (L,D):
numberOfTrees[blockType] = self.contextMap(blockType)
print(.center(60, ))
self.prefixCodes = {}
for blockType in (L,I,D):
self.readPrefixArray(blockType, numberOfTrees[blockType])
self.metablock() | Process a brotli stream. |
#vtb
def build_recursive_delocalize_command(source, outputs, file_provider):
command = _LOCALIZE_COMMAND_MAP[file_provider]
filtered_outputs = [
var for var in outputs
if var.recursive and var.file_provider == file_provider
]
return .join([
textwrap.dedent().format(
command=command,
data_mount=source.rstrip(),
docker_path=var.docker_path,
destination_uri=var.uri) for var in filtered_outputs
]) | Return a multi-line string with a shell script to copy recursively.
Arguments:
source: Folder with the data.
For example /mnt/data
outputs: a list of OutputFileParam.
file_provider: file provider string used to filter the output params; the
returned command will only apply outputs whose file provider
matches this file filter.
Returns:
a multi-line string with a shell script that copies the outputs
recursively to GCS. |
#vtb
def get_end_balance(self, after: date) -> Decimal:
datum = Datum()
datum.from_date(after)
datum.end_of_day()
return self.get_balance_on(datum.value) | Calculates the account balance at the end of the given date. |
#vtb
def jenks_breaks(values, nb_class):
if not isinstance(values, Iterable) or isinstance(values, (str, bytes)):
raise TypeError("A sequence of numbers is expected")
if isinstance(nb_class, float) and int(nb_class) == nb_class:
nb_class = int(nb_class)
if not isinstance(nb_class, int):
raise TypeError(
"Number of class have to be a positive integer: "
"expected an instance of but found {}"
.format(type(nb_class)))
nb_values = len(values)
if np and isinstance(values, np.ndarray):
values = values[np.argwhere(np.isfinite(values)).reshape(-1)]
else:
values = [i for i in values if isfinite(i)]
if len(values) != nb_values:
warnings.warn()
nb_values = len(values)
if nb_class >= nb_values or nb_class < 2:
raise ValueError("Number of class have to be an integer "
"greater than 2 and "
"smaller than the number of values to use")
return jenks._jenks_breaks(values, nb_class) | Compute jenks natural breaks on a sequence of `values`, given `nb_class`,
the desired number of classes.
Parameters
----------
values : array-like
The Iterable sequence of numbers (integer/float) to be used.
nb_class : int
The desired number of classes (as some other functions request
a `k` value, `nb_class` is like `k` + 1). It has to be smaller than
the length of `values` and greater than 2.
Returns
-------
breaks : tuple of floats
The computed break values, including minimum and maximum, in order
to have all the bounds for building `nb_class` class,
so the returned tuple has a length of `nb_class` + 1.
Examples
--------
Using nb_class = 3, expecting 4 break values , including min and max :
>>> jenks_breaks(
[1.3, 7.1, 7.3, 2.3, 3.9, 4.1, 7.8, 1.2, 4.3, 7.3, 5.0, 4.3],
nb_class = 3) # Should output (1.2, 2.3, 5.0, 7.8) |
#vtb
def fields(self):
fields = super().fields
return apply_subfield_projection(self, copy.copy(fields)) | Filter fields based on request query parameters. |
#vtb
def _get_apphook_field_names(model):
from .models import AppHookConfig
fields = []
for field in model._meta.fields:
if isinstance(field, ForeignKey) and issubclass(field.remote_field.model, AppHookConfig):
fields.append(field)
return [field.name for field in fields] | Return all foreign key field names for a AppHookConfig based model |
#vtb
def build_includes(include_packages, freezer=None, optional=None):
freezer = resolve_freezer(freezer)
package_references = _import_packages(include_packages, optional=optional)
includes = freezer.build_includes(package_references)
return includes | Iterate the list of packages to build a complete list of those packages as well as all subpackages.
:param include_packages: list of package names
:type include_packages: list of basestr
:param freezer: The freezer to use (See FREEZER constants)
:param optional: Optional package names to include (will only issue a warning if they don't exist)
:return: complete set of package includes |
#vtb
def addchild(self, startip, endip, name, description):
add_child_ip_scope(self.auth, self.url, startip, endip, name, description, self.id) | Method takes input of str startip, str endip, name, and description and adds a child scope.
The startip and endip MUST be in the IP address range of the parent scope.
:param startip: str of ipv4 address of the first address in the child scope
:param endip: str of ipv4 address of the last address in the child scope
:param name: of the owner of the child scope
:param description: description of the child scope
:return: |
#vtb
def add_external_reference(self,term_id, external_ref):
if term_id in self.idx:
term_obj = Cterm(self.idx[term_id],self.type)
term_obj.add_external_reference(external_ref)
else:
print('Term {term_id} not found'.format(**locals())) | Adds an external reference for the given term
@type term_id: string
@param term_id: the term identifier
@type external_ref: L{CexternalReference}
@param external_ref: the external reference object |
#vtb
def ndarray_to_imagedatadict(nparr):
ret = {}
dm_type = None
for k, v in iter(dm_image_dtypes.items()):
if v[1] == nparr.dtype.type:
dm_type = k
break
if dm_type is None and nparr.dtype == numpy.uint8 and nparr.shape[-1] in (3, 4):
ret["DataType"] = 23
ret["PixelDepth"] = 4
if nparr.shape[2] == 4:
rgb_view = nparr.view(numpy.int32).reshape(nparr.shape[:-1])
else:
assert nparr.shape[2] == 3
rgba_image = numpy.empty(nparr.shape[:-1] + (4,), numpy.uint8)
rgba_image[:,:,0:3] = nparr
rgba_image[:,:,3] = 255
rgb_view = rgba_image.view(numpy.int32).reshape(rgba_image.shape[:-1])
ret["Dimensions"] = list(rgb_view.shape[::-1])
ret["Data"] = parse_dm3.array.array(platform_independent_char(rgb_view.dtype), rgb_view.flatten())
else:
ret["DataType"] = dm_type
ret["PixelDepth"] = nparr.dtype.itemsize
ret["Dimensions"] = list(nparr.shape[::-1])
if nparr.dtype.type in np_to_structarray_map:
types = np_to_structarray_map[nparr.dtype.type]
ret["Data"] = parse_dm3.structarray(types)
ret["Data"].raw_data = bytes(numpy.array(nparr, copy=False).data)
else:
ret["Data"] = parse_dm3.array.array(platform_independent_char(nparr.dtype), numpy.array(nparr, copy=False).flatten())
return ret | Convert the numpy array nparr into a suitable ImageList entry dictionary.
Returns a dictionary with the appropriate Data, DataType, PixelDepth
to be inserted into a dm3 tag dictionary and written to a file. |
#vtb
def get(self, name):
if name in self._storage:
return self._storage[name]
elif name in self._providers:
value = self._storage[name] = self._providers[name](self)
return value
elif name.startswith('rollout:'):
rollout_name = name[8:]
value = self._storage[name] = self.rollout.batch_tensor(rollout_name)
return value
else:
raise RuntimeError(f"Key {name} is not provided by this evaluator") | Return a value from this evaluator.
Because calculated tensors are cached, subtle bugs can arise if the same value is used multiple times
with and without a no_grad() context.
In such cases it is advised not to use no_grad() and to stick to .detach() |
#vtb
def fault_barrier(fn):
@functools.wraps(fn)
def process(self, tup):
try:
return fn(self, tup)
except Exception as e:
if isinstance(e, KeyboardInterrupt):
return
print(str(e), file=sys.stderr)
self.fail(tup)
return process | Method decorator to catch and log errors, then send fail message. |
#vtb
def dst(self, dt):
dst_start_date = self.first_sunday(dt.year, 3) + timedelta(days=7) \
+ timedelta(hours=2)
dst_end_date = self.first_sunday(dt.year, 11) + timedelta(hours=2)
if dst_start_date <= dt.replace(tzinfo=None) < dst_end_date:
return timedelta(hours=1)
else:
return timedelta(0) | Calculate delta for daylight saving. |
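The first_sunday helper is not shown in this row; a self-contained sketch of the same US rule (second Sunday of March through first Sunday of November, both at 02:00) might look like:

from datetime import date, datetime, timedelta

def first_sunday(year, month):
    # Date of the first Sunday of the month (Monday == 0 ... Sunday == 6).
    d = date(year, month, 1)
    return d + timedelta(days=(6 - d.weekday()) % 7)

def in_us_dst(dt):
    start = datetime.combine(first_sunday(dt.year, 3) + timedelta(days=7),
                             datetime.min.time()) + timedelta(hours=2)
    end = datetime.combine(first_sunday(dt.year, 11),
                           datetime.min.time()) + timedelta(hours=2)
    return start <= dt < end

in_us_dst(datetime(2021, 7, 1))   # -> True  (DST ran 2021-03-14 .. 2021-11-07)
in_us_dst(datetime(2021, 1, 1))   # -> False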
#vtb
def main():
strWelcome = + __version__
strDec = * len(strWelcome)
print(strDec + + strWelcome + + strDec)
objNspc = get_arg_parse()
if any(item is None for item in [objNspc.strCsvPrf, objNspc.strStmApr]):
print()
print()
print()
else:
lgcTest = False
pyprf_sim(objNspc.strCsvPrf, objNspc.strStmApr, lgcTest=lgcTest,
lgcNoise=objNspc.lgcNoise, lgcRtnNrl=objNspc.lgcRtnNrl,
lstRat=objNspc.supsur) | pyprf_sim entry point. |
#vtb
def from_quad_tree(cls, quad_tree):
assert bool(re.match('^[0-3]*$', quad_tree)), 'QuadTree value can only consist of the digits 0, 1, 2 and 3'
zoom = len(str(quad_tree))
offset = int(math.pow(2, zoom)) - 1
google_x, google_y = [reduce(lambda result, bit: (result << 1) | bit, bits, 0)
for bits in zip(*(reversed(divmod(digit, 2))
for digit in (int(c) for c in str(quad_tree))))]
return cls(tms_x=google_x, tms_y=(offset - google_y), zoom=zoom) | Creates a tile from a Microsoft QuadTree |
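The reduce/zip one-liner interleaves QuadTree digit bits into x and y; a hypothetical, more explicit version of the same conversion (not the library's API) reads:

def quad_tree_to_google_xy(quad_tree):
    # Each digit encodes one y bit (high) and one x bit (low), most significant level first.
    x = y = 0
    for digit in (int(c) for c in quad_tree):
        y_bit, x_bit = divmod(digit, 2)
        x = (x << 1) | x_bit
        y = (y << 1) | y_bit
    return x, y

quad_tree_to_google_xy("213")   # -> (3, 5); at zoom 3, tms_y = (2**3 - 1) - 5 = 2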
#vtb
def _infer_spaces(s):
s = s.lower()
def best_match(i):
candidates = enumerate(reversed(cost[max(0, i - MAXWORD):i]))
return min((c + WORDCOST.get(s[i-k-1: i], 9e999), k + 1)
for k, c in candidates)
cost = [0]
for i in range(1, len(s) + 1):
c, k = best_match(i)
cost.append(c)
out = []
i = len(s)
while i > 0:
c, k = best_match(i)
assert c == cost[i]
out.append(s[i-k:i])
i -= k
return u" ".join(reversed(out)) | Uses dynamic programming to infer the location of spaces in a string
without spaces. |
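WORDCOST and MAXWORD are module globals not shown in this row; they are typically built from a frequency-ranked word list with a Zipf-style cost (the file name below is hypothetical):

from math import log

# words.txt: one word per line, most frequent first (hypothetical file).
words = open("words.txt").read().split()
# Zipf's law: cost grows with rank, so frequent words are cheap to choose.
WORDCOST = {w: log((i + 1) * log(len(words))) for i, w in enumerate(words)}
MAXWORD = max(len(w) for w in words)

_infer_spaces("thequickbrownfox")   # -> 'the quick brown fox', given a good word list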
#vtb
def from_const(cls, value, size, dtype=type(None)):
assert isinstance(size, (int, long)) and size >= 0, "size must be a positive int"
if not isinstance(value, (type(None), int, float, str, array.array, list, dict, datetime.datetime)):
raise TypeError("Cannot create sarray of value type %s" % str(type(value)))
proxy = UnitySArrayProxy()
proxy.load_from_const(value, size, dtype)
return cls(_proxy=proxy) | Constructs an SArray of size with a const value.
Parameters
----------
value : [int | float | str | array.array | list | dict | datetime]
The value to fill the SArray
size : int
The size of the SArray
dtype : type
The type of the SArray. If not specified, is automatically detected
from the value. This should be specified if value=None since the
actual type of the SArray can be anything.
Examples
--------
Construct an SArray consisting of 10 zeroes:
>>> turicreate.SArray.from_const(0, 10)
Construct an SArray consisting of 10 missing string values:
>>> turicreate.SArray.from_const(None, 10, str) |
#vtb
def show_support_save_status_output_show_support_save_status_percentage_of_completion(self, **kwargs):
config = ET.Element("config")
show_support_save_status = ET.Element("show_support_save_status")
config = show_support_save_status
output = ET.SubElement(show_support_save_status, "output")
show_support_save_status = ET.SubElement(output, "show-support-save-status")
percentage_of_completion = ET.SubElement(show_support_save_status, "percentage-of-completion")
percentage_of_completion.text = kwargs.pop('percentage_of_completion')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
#vtb
def to_csv(self, encoding=export.ENCODING, dialect=export.DIALECT,
make_filename=export.MAKE_FILENAME):
for s in self._sheets:
s.to_csv(None, encoding, dialect, make_filename) | Dump all worksheets of the spreadsheet to individual CSV files.
Args:
encoding (str): result string encoding
dialect (str): :mod:`csv` dialect name or object to use
make_filename: template or one-argument callable returning the filename
If ``make_filename`` is a string, it is string-interpolated with an
infos-dictionary with the fields ``id`` (spreadsheet id), ``title``
(spreadsheet title), ``sheet`` (worksheet title), ``gid`` (worksheet
id), ``index`` (worksheet index), and ``dialect`` CSV dialect to
generate the filename: ``filename = make_filename % infos``.
If ``make_filename`` is a callable, it will be called with the
infos-dictionary to generate the filename:
``filename = make_filename(infos)``. |
#vtb
def sanitize_turbo(html, allowed_tags=TURBO_ALLOWED_TAGS, allowed_attrs=TURBO_ALLOWED_ATTRS):
return clean(html, tags=allowed_tags, attributes=allowed_attrs, strip=True) | Sanitizes HTML, removing not allowed tags and attributes.
:param str|unicode html:
:param list allowed_tags: List of allowed tags.
:param dict allowed_attrs: Dictionary with attributes allowed for tags.
:rtype: unicode |
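clean here is presumably bleach.clean; a usage sketch with hypothetical whitelists (exact output depends on the configured defaults):

sanitize_turbo('<p onclick="evil()">Hi <a href="/ok" rel="x">there</a></p>',
               allowed_tags=['a', 'b', 'p'],            # hypothetical whitelist
               allowed_attrs={'a': ['href']})
# -> roughly '<p>Hi <a href="/ok">there</a></p>': disallowed tags are stripped
#    and non-whitelisted attributes (onclick, rel) are dropped.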
#vtb
def patch_runtime_class(self, name, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_runtime_class_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_runtime_class_with_http_info(name, body, **kwargs)
return data | partially update the specified RuntimeClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_runtime_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the RuntimeClass (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1beta1RuntimeClass
If the method is called asynchronously,
returns the request thread. |
#vtb
def star_stats_table(self):
headers = OrderedDict()
headers[] = {
: ,
: ,
: 100,
: 0,
: ,
:
}
headers[] = {
: .format(config.read_count_prefix),
: .format(config.read_count_desc),
: 0,
: ,
: lambda x: x * config.read_count_multiplier,
:
}
self.general_stats_addcols(self.star_data, headers) | Take the parsed stats from the STAR report and add them to the
basic stats table at the top of the report |
#vtb
def get_capabilities_by_ext(self, strict_type_matching: bool = False) -> Dict[str, Dict[Type, Dict[str, Parser]]]:
check_var(strict_type_matching, var_types=bool, var_name='strict_type_matching')
res = dict()
for ext in self.get_all_supported_exts_for_type(type_to_match=JOKER, strict=strict_type_matching):
res[ext] = self.get_capabilities_for_ext(ext, strict_type_matching)
return res | For all extensions that are supported,
lists all types that can be parsed from this extension.
For each type, provide the list of parsers supported. The order is "most pertinent first"
This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine.
That will ensure consistency of the results.
:param strict_type_matching:
:return: |
#vtb
def expand_url(url, protocol):
if protocol == :
ws_part =
elif protocol == :
ws_part =
else:
ws_part =
return url.endswith() and url + ws_part or url + + ws_part | Expands the given URL to a full URL by adding
the magento soap/wsdl parts
:param url: URL to be expanded
:param protocol: 'xmlrpc' or 'soap' |
#vtb
def impute_dataframe_range(df_impute, col_to_max, col_to_min, col_to_median):
columns = df_impute.columns
if not set(columns) <= set(col_to_median.keys()) or \
not set(columns) <= set(col_to_max.keys()) or \
not set(columns) <= set(col_to_min.keys()):
raise ValueError("Some of the dictionaries col_to_median, col_to_max, col_to_min contains more or less keys "
"than the column names in df")
if np.any(~np.isfinite(list(col_to_median.values()))) or \
np.any(~np.isfinite(list(col_to_min.values()))) or \
np.any(~np.isfinite(list(col_to_max.values()))):
raise ValueError("Some of the dictionaries col_to_median, col_to_max, col_to_min contains non finite values "
"to replace")
col_to_max = pd.DataFrame([col_to_max]*len(df_impute), index=df_impute.index)
col_to_min = pd.DataFrame([col_to_min]*len(df_impute), index=df_impute.index)
col_to_median = pd.DataFrame([col_to_median]*len(df_impute), index=df_impute.index)
df_impute.where(df_impute.values != np.PINF, other=col_to_max, inplace=True)
df_impute.where(df_impute.values != np.NINF, other=col_to_min, inplace=True)
df_impute.where(~np.isnan(df_impute.values), other=col_to_median, inplace=True)
df_impute.astype(np.float64, copy=False)
return df_impute | Columnwise replaces all ``NaNs``, ``-inf`` and ``+inf`` from the DataFrame `df_impute` with average/extreme values
from the provided dictionaries.
This is done as follows: Each occurring ``inf`` or ``NaN`` in `df_impute` is replaced by
* ``-inf`` -> by value in col_to_min
* ``+inf`` -> by value in col_to_max
* ``NaN`` -> by value in col_to_median
If a column of df_impute is not found in the one of the dictionaries, this method will raise a ValueError.
Also, if one of the values to replace is not finite, a ValueError is raised.
This function modifies `df_impute` in place. Afterwards df_impute is
guaranteed to not contain any non-finite values.
Also, all columns will be guaranteed to be of type ``np.float64``.
:param df_impute: DataFrame to impute
:type df_impute: pandas.DataFrame
:param col_to_max: Dictionary mapping column names to max values
:type col_to_max: dict
:param col_to_min: Dictionary mapping column names to min values
:type col_to_min: dict
:param col_to_median: Dictionary mapping column names to median values
:type col_to_median: dict
:return df_impute: imputed DataFrame
:rtype df_impute: pandas.DataFrame
:raise ValueError: if a column of df_impute is missing in col_to_max, col_to_min or col_to_median or a value
to replace is non finite |
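A small in-place usage sketch (assuming pandas as pd and numpy as np):

df = pd.DataFrame({"a": [1.0, np.inf, -np.inf, np.nan]})
impute_dataframe_range(df,
                       col_to_max={"a": 10.0},
                       col_to_min={"a": -10.0},
                       col_to_median={"a": 0.5})
# df is modified in place: column "a" becomes [1.0, 10.0, -10.0, 0.5]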
#vtb
def multiple_choice_field_data(field, **kwargs):
if field.choices:
from django_any.functions import valid_choices
l = list(valid_choices(field.choices))
random.shuffle(l)
choices = []
count = xunit.any_int(min_value=1, max_value=len(field.choices))
for i in xrange(0, count):
choices.append(l[i])
return ' '.join(choices)
return | Return random value for MultipleChoiceField
>>> CHOICES = [('YNG', 'Child'), ('MIDDLE', 'Parent') ,('OLD', 'GrandParent')]
>>> result = any_form_field(forms.MultipleChoiceField(choices=CHOICES))
>>> type(result)
<type 'str'> |
#vtb
def get_properties(elt, keys=None, ctx=None):
if isinstance(keys, string_types):
keys = (keys,)
result = _get_properties(elt, keys=keys, local=False, ctx=ctx)
return result | Get elt properties.
:param elt: properties elt. Not None methods or unhashable types.
:param keys: key(s) of properties to get from elt.
If None, get all properties.
:type keys: list or str
:param ctx: elt ctx from where get properties. Equals elt if None. It
allows to get function properties related to a class or instance if
related function is defined in base class.
:return: list of properties by elt and name.
:rtype: list |
#vtb
def perform_experiment(self, engine_list):
result = []
for endine_idx, engine in enumerate(engine_list):
print( % (endine_idx, len(engine_list)))
engine.clean_all_buckets()
avg_recall = 0.0
avg_precision = 0.0
avg_search_time = 0.0
for index, v in enumerate(self.vectors):
engine.store_vector(v, % index)
for index in self.query_indices:
real_nearest = set(self.closest[index])
search_time_start = time.time()
nearest = engine.neighbours(self.vectors[index])
search_time = time.time() - search_time_start
nearest = set([self.__index_of_vector(x[0]) for x in nearest])
nearest.remove(index)
if len(nearest) == 0:
recall = 0.0
precision = 0.0
else:
inter_count = float(len(real_nearest & nearest))
recall = inter_count/float(len(real_nearest))
precision = inter_count/float(len(nearest))
avg_recall += recall
avg_precision += precision
avg_search_time += search_time
avg_recall /= float(len(self.query_indices))
avg_precision /= float(len(self.query_indices))
avg_search_time = avg_search_time / float(len(self.query_indices))
avg_search_time /= self.exact_search_time_per_vector
print( % (avg_recall,
avg_precision,
avg_search_time))
result.append((avg_recall, avg_precision, avg_search_time))
return result | Performs nearest neighbour recall experiments with custom vector data
for all engines in the specified list.
Returns a list of (recall, precision, search_time)
tuples. All values are averaged over all query vectors.
search_time is the average retrieval/search time compared to the
average exact search time. |
#vtb
def build_model_classes(metadata):
i = importlib.import_module(metadata)
env = get_jinja_env()
model_template = env.get_template()
for model in i.models:
with open(model_path(model.name.lower()), ) as t:
t.write(model_template.render(model_md=model)) | Generate a model class for any models contained in the specified spec file. |
#vtb
def dispatch(splits, *funcs, **kwargs):
map_func = kwargs.get(, _map_func)
apply_func = kwargs.get(, _apply_func)
return map_func(partial(apply_func, funcs), splits) | takes multiple iterables (returned by dispatch or broadcast) and delivers
the items to multiple functions
/-----> _INPUT1 --> double(_INPUT1) --> \
/ \
splits ------> _INPUT2 --> triple(_INPUT2) ---> _OUTPUT
\ /
\--> _INPUT3 --> quadruple(_INPUT3) --> /
One way to construct such a flow in code would be::
splits = repeat(('bar', 'baz', 'qux'), 3)
double = lambda word: word * 2
triple = lambda word: word * 3
quadruple = lambda word: word * 4
_OUTPUT = dispatch(splits, double, triple, quadruple)
_OUTPUT == repeat(('barbar', 'bazbazbaz', 'quxquxquxqux'), 3) |
#vtb
def create_prediction_estimator(hyper_params, model, checkpoint_path=None):
if checkpoint_path is None:
chkpts = sorted([name for name in os.listdir(hyper_params.train.checkpoint_path)])
checkpoint_path = hyper_params.train.checkpoint_path + "/" + chkpts[-1]
print("Latest found checkpoint: {}".format(checkpoint_path))
estimator_spec = create_tf_estimator_spec(checkpoint_path, model, create_loss=None)
estimator = tf.estimator.Estimator(estimator_spec,
model_dir=checkpoint_path,
params=hyper_params)
return estimator | Create an estimator for prediction purpose only.
:param hyper_params: The hyper params file.
:param model: The keras model.
:param checkpoint_path: (Optional) Path to the specific checkpoint to use.
:return: |
#vtb
def course_or_program_exist(self, course_id, program_uuid):
course_exists = course_id and CourseApiClient().get_course_details(course_id)
program_exists = program_uuid and CourseCatalogApiServiceClient().program_exists(program_uuid)
return course_exists or program_exists | Return whether the input course or program exists. |
#vtb
def get_item(self, address, state = ):
self._lock.acquire()
try:
item = self._items.get(address)
if not item:
return None
self.update_item(item)
if _state_values[state] >= item.state_value:
return item
return None
finally:
self._lock.release() | Get an item from the cache.
:Parameters:
- `address`: its address.
- `state`: the worst state that is acceptable.
:Types:
- `address`: any hashable
- `state`: `str`
:return: the item or `None` if it was not found.
:returntype: `CacheItem` |
#vtb
def render(self, text, auth=None):
expected = str if sys.version_info[0] >= 3 else unicode
if not isinstance(text, expected):
raise TypeError(
.format(text))
if self.user_content:
url = .format(self.api_url)
data = {: text, : }
if self.context:
data[] = self.context
data = json.dumps(data, ensure_ascii=False).encode()
headers = {: }
else:
url = .format(self.api_url)
data = text.encode()
headers = {: }
r = requests.post(url, headers=headers, data=data, auth=auth)
r.raise_for_status()
r.encoding =
return r.text if self.raw else patch(r.text) | Renders the specified markdown content and embedded styles.
Raises TypeError if text is not a Unicode string.
Raises requests.HTTPError if the request fails. |
#vtb
def __parse_json_file(self, file_path):
if file_path == '' or os.path.splitext(file_path)[1] != '.json':
raise IOError('Invalid JSON file')
with open(file_path) as json_file:
self._raw_data = json.load(json_file)
self._json_data = copy.deepcopy(self._raw_data) | Process Json file data
:@param file_path
:@type file_path: string
:@throws IOError |
#vtb
def next(self):
if self._use_thread:
self._next_thread.join()
if self._current_data is None:
logger.log(99, )
self._next_thread = threading.Thread(target=self._next)
self._next_thread.start()
self._next_thread.join()
self._current_epoch, data = self._current_data
self._next_thread = threading.Thread(target=self._next)
self._next_thread.start()
else:
self._next()
self._current_epoch, data = self._current_data
return data | next
It generates tuple of data.
For example,
if :py:meth:`self._variables == ('x', 'y')`
This method returns :py:meth:` ( [[X] * batch_size], [[Y] * batch_size] )`
Returns:
tuple: tuple of data for mini-batch in numpy.ndarray. |
#vtb
def command(self, *cmd):
assert(len(cmd) <= 32)
try:
self._bus.write_i2c_block_data(self._addr, self._cmd_mode,
list(cmd))
except (IOError, OSError) as e:
if e.errno in [errno.EREMOTEIO, errno.EIO]:
raise luma.core.error.DeviceNotFoundError(
'I2C device not found: {}'.format(self._addr))
else:
raise | Sends a command or sequence of commands through to the I²C address
- maximum allowed is 32 bytes in one go.
:param cmd: A spread of commands.
:type cmd: int
:raises luma.core.error.DeviceNotFoundError: I2C device could not be found. |
#vtb
def markov_network(potentials):
G = nx.Graph()
G.name = 'markov_network({!r})'.format(potentials)
for clique, phis in potentials.items():
num_vars = len(clique)
if not isinstance(phis, abc.Mapping):
raise TypeError("phis should be a dict")
elif not all(config in phis for config in itertools.product((0, 1), repeat=num_vars)):
raise ValueError("not all potentials provided for {!r}".format(clique))
if num_vars == 1:
u, = clique
G.add_node(u, potential=phis)
elif num_vars == 2:
u, v = clique
G.add_edge(u, v, potential=phis, order=(u, v))
else:
raise ValueError("Only supports cliques up to size 2")
return G | Creates a Markov Network from potentials.
A Markov Network is also known as a `Markov Random Field`_
Parameters
----------
potentials : dict[tuple, dict]
A dict where the keys are either nodes or edges and the values are a
dictionary of potentials. The potential dict should map each possible
assignment of the nodes/edges to their energy.
Returns
-------
MN : :obj:`networkx.Graph`
A markov network as a graph where each node/edge stores its potential
dict as above.
Examples
--------
>>> potentials = {('a', 'b'): {(0, 0): -1,
... (0, 1): .5,
... (1, 0): .5,
... (1, 1): 2}}
>>> MN = dnx.markov_network(potentials)
>>> MN['a']['b']['potential'][(0, 0)]
-1
.. _Markov Random Field: https://en.wikipedia.org/wiki/Markov_random_field |
#vtb
def _connect_mv_node(network, node, target_obj):
std_line_type = network.equipment_data[].loc[
network.config[][]]
std_line_kind =
target_obj_result = None
node_shp = transform(proj2equidistant(network), node.geom)
if isinstance(target_obj[], LineString):
adj_node1 = target_obj[][][0]
adj_node2 = target_obj[][][1]
conn_point_shp = target_obj[].interpolate(target_obj[].project(node_shp))
conn_point_shp = transform(proj2conformal(network), conn_point_shp)
line = network.mv_grid.graph.edge[adj_node1][adj_node2]
if not line[] == :
branch_tee = BranchTee(geom=conn_point_shp,
grid=network.mv_grid,
in_building=False)
network.mv_grid.graph.add_node(branch_tee,
type=)
line_kind = line[].kind
line_type = line[].type
network.mv_grid.graph.remove_edge(adj_node1, adj_node2)
_del_cable_from_equipment_changes(network=network,
line=line[])
line_length = calc_geo_dist_vincenty(network=network,
node_source=adj_node1,
node_target=branch_tee)
line = Line(id=random.randint(10 ** 8, 10 ** 9),
length=line_length / 1e3,
quantity=1,
kind=line_kind,
type=line_type,
grid=network.mv_grid)
network.mv_grid.graph.add_edge(adj_node1,
branch_tee,
line=line,
type=)
_add_cable_to_equipment_changes(network=network,
line=line)
line_length = calc_geo_dist_vincenty(network=network,
node_source=adj_node2,
node_target=branch_tee)
line = Line(id=random.randint(10 ** 8, 10 ** 9),
length=line_length / 1e3,
quantity=1,
kind=line_kind,
type=line_type,
grid=network.mv_grid)
network.mv_grid.graph.add_edge(adj_node2,
branch_tee,
line=line,
type=)
_add_cable_to_equipment_changes(network=network,
line=line)
line_length = calc_geo_dist_vincenty(network=network,
node_source=node,
node_target=branch_tee)
line = Line(id=random.randint(10 ** 8, 10 ** 9),
length=line_length / 1e3,
quantity=1,
kind=std_line_kind,
type=std_line_type,
grid=network.mv_grid)
network.mv_grid.graph.add_edge(node,
branch_tee,
line=line,
type=)
_add_cable_to_equipment_changes(network=network,
line=line)
target_obj_result = branch_tee
else:
if isinstance(node, LVStation):
valid_conn_objects = (LVStation, BranchTee)
elif isinstance(node, Generator):
valid_conn_objects = (LVStation, BranchTee, Generator)
else:
raise ValueError()
if isinstance(target_obj[], (Generator, Load)):
target_is_aggregated = any([_ for _ in network.mv_grid.graph.edge[target_obj[]].values()
if _[] == ])
else:
target_is_aggregated = False
if isinstance(target_obj[], valid_conn_objects) and not target_is_aggregated:
line_length = calc_geo_dist_vincenty(network=network,
node_source=node,
node_target=target_obj[])
line = Line(id=random.randint(10 ** 8, 10 ** 9),
type=std_line_type,
kind=std_line_kind,
quantity=1,
length=line_length / 1e3,
grid=network.mv_grid)
network.mv_grid.graph.add_edge(node,
target_obj[],
line=line,
type=)
_add_cable_to_equipment_changes(network=network,
line=line)
target_obj_result = target_obj[]
return target_obj_result | Connects MV node to target object in MV grid
If the target object is a node, a new line is created to it.
If the target object is a line, the node is connected to a newly created branch tee
(using perpendicular projection) on this line.
New lines are created using standard equipment.
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
node : :class:`~.grid.components.Component`
Node to connect (e.g. :class:`~.grid.components.Generator`)
Node must be a member of MV grid's graph (network.mv_grid.graph)
target_obj : :class:`~.grid.components.Component`
Object that node shall be connected to
Returns
-------
:class:`~.grid.components.Component` or None
Node that node was connected to
Notes
-----
Adapted from `Ding0 <https://github.com/openego/ding0/blob/\
21a52048f84ec341fe54e0204ac62228a9e8a32a/\
ding0/grid/mv_grid/mv_connect.py#L311>`_. |
#vtb
def default(self):
try:
return self.__resolved_default
except AttributeError:
resolved_default = super(EnumField, self).default
if isinstance(resolved_default, (six.string_types,
six.integer_types)):
resolved_default = self.type(resolved_default)
self.__resolved_default = resolved_default
return self.__resolved_default | Default for enum field.
Will cause resolution of Enum type and unresolved default value. |
#vtb
def resolve_inputs(self, layers):
resolved = {}
for name, shape in self._input_shapes.items():
if shape is None:
name, shape = self._resolve_shape(name, layers)
resolved[name] = shape
self._input_shapes = resolved | Resolve the names of inputs for this layer into shape tuples.
Parameters
----------
layers : list of :class:`Layer`
A list of the layers that are available for resolving inputs.
Raises
------
theanets.util.ConfigurationError :
If an input cannot be resolved. |
#vtb
def refresh(self):
self._update_id_list()
for _id in self.history[:]:
if _id not in self.id_list:
self.history.remove(_id) | Remove editors that are not longer open. |
#vtb
def get_spark_session(enable_hive=False, app_name=, configs=[]):
import findspark
findspark.init()
from pyspark.sql import SparkSession
spark = SparkSession.builder
spark = spark.appName(app_name)
spark = spark.enableHiveSupport() if enable_hive else spark
for config in configs:
spark = spark.config(config)
return spark.getOrCreate() | Return a Spark Session object |
#vtb
def find_name(self, template_name, search_dirs):
file_name = self.make_file_name(template_name)
return self._find_path_required(search_dirs, file_name) | Return the path to a template with the given name.
Arguments:
template_name: the name of the template.
search_dirs: the list of directories in which to search. |
#vtb
def run(self, conn, tmp, module_name, module_args, inject):
tokens = shlex.split(module_args)
source = tokens[0]
args = " ".join(tokens[1:])
source = utils.template(self.runner.basedir, source, inject)
source = utils.path_dwim(self.runner.basedir, source)
source = source.replace(,)
args = args.replace(,)
tmp_src = os.path.join(tmp, os.path.basename(source))
tmp_src = tmp_src.replace(, )
conn.put_file(source, tmp_src)
if self.runner.sudo and self.runner.sudo_user != :
prepcmd = % tmp_src
else:
prepcmd = % tmp_src
module_args = prepcmd + + tmp_src + + args
handler = utils.plugins.action_loader.get(, self.runner)
result = handler.run(conn, tmp, , module_args, inject)
if tmp.find("tmp") != -1 and C.DEFAULT_KEEP_REMOTE_FILES != :
self.runner._low_level_exec_command(conn, % tmp, tmp)
return result | handler for file transfer operations |
#vtb
def expand_details(df, detailCol=):
df = copy.deepcopy(df)
df[] = df[detailCol]
dicts = [sportsref.nfl.pbp.parse_play_details(detail) for detail in df[].values]
cols = {c for d in dicts if d for c in d.keys()}
blankEntry = {c: np.nan for c in cols}
newDicts = [d if d else blankEntry for d in dicts]
details = pd.DataFrame(newDicts)
df = pd.merge(df, details, left_index=True, right_index=True)
errors = [i for i, d in enumerate(dicts) if d is None]
df[] = False
df.loc[errors, ] = True
)
new_df = df.apply(_clean_features, axis=1)
return new_df | Expands the details column of the given dataframe and returns the
resulting DataFrame.
:df: The input DataFrame.
:detailCol: The detail column name.
:returns: Returns DataFrame with new columns from pbp parsing. |
#vtb
def subtract(
self,
years=0,
months=0,
weeks=0,
days=0,
hours=0,
minutes=0,
seconds=0,
microseconds=0,
):
return self.add(
years=-years,
months=-months,
weeks=-weeks,
days=-days,
hours=-hours,
minutes=-minutes,
seconds=-seconds,
microseconds=-microseconds,
) | Remove duration from the instance.
:param years: The number of years
:type years: int
:param months: The number of months
:type months: int
:param weeks: The number of weeks
:type weeks: int
:param days: The number of days
:type days: int
:param hours: The number of hours
:type hours: int
:param minutes: The number of minutes
:type minutes: int
:param seconds: The number of seconds
:type seconds: int
:param microseconds: The number of microseconds
:type microseconds: int
:rtype: DateTime |
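This reads like pendulum's DateTime.subtract, which negates every argument and delegates to add; a usage sketch (assuming the pendulum package):

import pendulum

dt = pendulum.datetime(2020, 3, 31)
dt.subtract(months=1)           # 2020-02-29: month arithmetic clamps to the last valid day
dt.subtract(weeks=2, days=3)    # 2020-03-14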
#vtb
def get_float(self, key, optional=False):
return self._get_typed_value(key, float, lambda x: float(x), optional) | Tries to fetch a variable from the config and expects it to be strictly a float
:param key: Variable to look for
:param optional: Whether to raise ConfigKeyNotFoundError if key was not found
:return: float |
#vtb
def save_package_contents(self, root, team, owner, pkgname):
assert isinstance(root, RootNode)
instance_hash = hash_contents(root)
pkg_path = self.package_path(team, owner, pkgname)
if not os.path.isdir(pkg_path):
os.makedirs(pkg_path)
os.mkdir(os.path.join(pkg_path, self.CONTENTS_DIR))
os.mkdir(os.path.join(pkg_path, self.TAGS_DIR))
os.mkdir(os.path.join(pkg_path, self.VERSIONS_DIR))
dest = os.path.join(pkg_path, self.CONTENTS_DIR, instance_hash)
with open(dest, 'w') as contents_file:
json.dump(root, contents_file, default=encode_node, indent=2, sort_keys=True)
tag_dir = os.path.join(pkg_path, self.TAGS_DIR)
if not os.path.isdir(tag_dir):
os.mkdir(tag_dir)
latest_tag = os.path.join(pkg_path, self.TAGS_DIR, self.LATEST)
with open(latest_tag, 'w') as tagfile:
tagfile.write("{hsh}".format(hsh=instance_hash)) | Saves the in-memory contents to a file in the local
package repository. |
#vtb
def _gitignore(root):
gitignore_path = os.path.join(root, '.gitignore')
dir_patterns = []
file_patterns = []
if not os.path.exists(gitignore_path):
return (dir_patterns, file_patterns)
with open(gitignore_path, 'r', encoding='utf-8') as f:
for line in f.readlines():
line = line.strip()
if not line:
continue
if line.startswith('#'):
continue
if '**' in line:
raise NotImplementedError()
if line.startswith('!'):
raise NotImplementedError()
if line.startswith('/'):
raise NotImplementedError()
if line.startswith('\\#'):
line = '#' + line[2:]
if line.startswith('\\!'):
line = '!' + line[2:]
if line.endswith('/'):
dir_patterns.append(line[:-1])
else:
file_patterns.append(line)
return (dir_patterns, file_patterns) | Parses a .gitignore file and returns patterns to match dirs and files.
Only basic gitignore patterns are supported. Pattern negation, ** wildcards
and anchored patterns are not currently implemented.
:param root:
A unicode string of the path to the git repository
:return:
A 2-element tuple:
- 0: a list of unicode strings to match against dirs
- 1: a list of unicode strings to match against dirs and files |
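A hedged sketch of how the returned pattern lists might be applied while walking a tree; fnmatch is a simplification of real gitignore matching:

import fnmatch
import os

def iter_unignored_files(root):
    dir_patterns, file_patterns = _gitignore(root)
    for dirpath, dirnames, filenames in os.walk(root):
        # Prune ignored directories in place so os.walk skips them.
        dirnames[:] = [d for d in dirnames
                       if not any(fnmatch.fnmatch(d, p) for p in dir_patterns)]
        for name in filenames:
            if not any(fnmatch.fnmatch(name, p) for p in file_patterns):
                yield os.path.join(dirpath, name)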
#vtb
def _get_default_locs(self, vmin, vmax):
"Returns the default locations of ticks."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
locator = self.plot_obj.date_axis_info
if self.isminor:
return np.compress(locator['min'], locator['val'])
return np.compress(locator['maj'], locator['val']) | Returns the default locations of ticks. |
#vtb
def decode(encoded_histogram, b64_wrap=True):
hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
payload = hdr_payload.payload
histogram = HdrHistogram(payload.lowest_trackable_value,
payload.highest_trackable_value,
payload.significant_figures,
hdr_payload=hdr_payload)
return histogram | Decode an encoded histogram and return a new histogram instance that
has been initialized with the decoded content
Return:
a new histogram instance representing the decoded content
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
zlib.error:
in case of zlib decompression error |
#vtb
def from_shapefile(output, input_shp_files, validate):
input_parser = shapefileparser.ShapefileParser()
source_model = input_parser.read(input_shp_files[0], validate)
for f in input_shp_files[1:]:
source_model.sources.extend(input_parser.read(f, validate).sources)
if not output:
output = os.path.splitext(input_shp_files[0])[0]
shapefileparser.SourceModelParser().write(output + , source_model) | Convert multiple ESRI Shapefile(s) into a single NRML source model file. |
#vtb
def p_InSwitchDefList(p):
if len(p) <= 2:
p[0] = InSwitchDefList(None, p[1])
else:
p[0] = InSwitchDefList(p[1], p[2]) | InSwitchDefList : InSwitchDef
| InSwitchDefList InSwitchDef |
#vtb
def reminders_list(self, **kwargs) -> SlackResponse:
self._validate_xoxp_token()
return self.api_call("reminders.list", http_verb="GET", params=kwargs) | Lists all reminders created by or for a given user. |
#vtb
def _init_externals():
if __version__ == 'git':
sys.path.insert(0, osp.join(osp.dirname(__file__), 'ext', 'gitdb'))
try:
import gitdb
except ImportError:
raise ImportError("'gitdb' could not be found in your PYTHONPATH") | Initialize external projects by putting them into the path |
#vtb
def start(docker_url=,
timeout=CLIENT_TIMEOUT,
tag=,
filters=None):
if __opts__.get() == :
fire_master = salt.utils.event.get_master_event(
__opts__,
__opts__[]).fire_event
else:
fire_master = None
def fire(tag, msg):
if fire_master:
fire_master(msg, tag)
else:
__salt__[](tag, msg)
try:
client = docker.APIClient(base_url=docker_url, timeout=timeout)
except AttributeError:
client = docker.Client(base_url=docker_url, timeout=timeout)
try:
events = client.events(filters=filters)
for event in events:
data = salt.utils.json.loads(event.decode(__salt_system_encoding__, errors=))
if data[]:
fire(.format(tag, data[]), data)
else:
fire(.format(tag, data[]), data)
except Exception:
traceback.print_exc() | Scan for Docker events and fire events
Example Config
.. code-block:: yaml
engines:
- docker_events:
docker_url: unix://var/run/docker.sock
filters:
event:
- start
- stop
- die
- oom
The config above sets up engines to listen
for events from the Docker daemon and publish
them to the Salt event bus.
For filter reference, see https://docs.docker.com/engine/reference/commandline/events/ |
#vtb
def favorite_remove(self, post_id):
return self._get(.format(post_id), method=,
auth=True) | Remove a post from favorites (Requires login).
Parameters:
post_id (int): Where post_id is the post id. |
#vtb
def find_class(self):
if self.value <= 1:
return InstructionsProperty
elif self.value <= 3:
return NextTablesProperty
elif self.value <= 7:
return ActionsProperty
return OxmProperty | Return a class related with this type. |
#vtb
def _ReadFloatingPointDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
return self._ReadFixedSizeDataTypeDefinition(
definitions_registry, definition_values,
data_types.FloatingPointDefinition, definition_name,
self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE,
is_member=is_member, supported_size_values=(4, 8)) | Reads a floating-point data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
FloatingPointDefinition: floating-point data type definition. |
#vtb
def set_rendering_intent(self, rendering_intent):
if rendering_intent not in (None,
PERCEPTUAL,
RELATIVE_COLORIMETRIC,
SATURATION,
ABSOLUTE_COLORIMETRIC):
raise FormatError()
self.rendering_intent = rendering_intent | Set rendering intent variant for sRGB chunk |
#vtb
def load(self, name):
if self.reload:
self._maybe_purge_cache()
template = self.cache.get(name)
if template:
return template
path = self.resolve(name)
if not path:
raise OSError(errno.ENOENT, "File not found: %s" % name)
with codecs.open(path, , encoding=) as f:
contents = f.read()
mtime = os.fstat(f.fileno()).st_mtime
template = self.load_string(contents, filename=path)
template.mtime = mtime
template.path = path
self.cache[name] = template
return template | If not yet in the cache, load the named template and compiles it,
placing it into the cache.
If in cache, return the cached template. |
#vtb
def _handle_comparison(self, truism):
is_lt, is_equal, is_unsigned = self.comparison_info[truism.op]
size = len(truism.args[0])
int_max = 2**size-1 if is_unsigned else 2**(size-1)-1
int_min = -2**(size-1)
left_min = self._min(truism.args[0], signed=not is_unsigned)
left_max = self._max(truism.args[0], signed=not is_unsigned)
right_min = self._min(truism.args[1], signed=not is_unsigned)
right_max = self._max(truism.args[1], signed=not is_unsigned)
bound_max = right_max if is_equal else (right_max-1 if is_lt else right_max+1)
bound_min = right_min if is_equal else (right_min-1 if is_lt else right_min+1)
if is_lt and bound_max < int_min:
raise ClaripyBalancerUnsatError()
elif not is_lt and bound_min > int_max:
raise ClaripyBalancerUnsatError()
current_min = int_min
current_max = int_max
if is_lt:
current_max = min(int_max, left_max, bound_max)
self._add_upper_bound(truism.args[0], current_max)
else:
current_min = max(int_min, left_min, bound_min)
self._add_lower_bound(truism.args[0], current_min) | Handles all comparisons. |
#vtb
def list(self):
return self._post(
request=ApiActions.LIST.value,
uri=ApiUri.HOOKS.value,
).get() | Get all current hooks
:return: All hooks
:rtype: list of dict
:raises: This will raise a
:class:`ServerException<logentries_api.exceptions.ServerException>`
if there is an error from Logentries |
#vtb
def publish_server_closed(self, server_address, topology_id):
event = ServerClosedEvent(server_address, topology_id)
for subscriber in self.__server_listeners:
try:
subscriber.closed(event)
except Exception:
_handle_exception() | Publish a ServerClosedEvent to all server listeners.
:Parameters:
- `server_address`: The address (host/port pair) of the server.
- `topology_id`: A unique identifier for the topology this server
is a part of. |
#vtb
def load(self, read_tuple_name):
self.prefix_width = 0
self.read_tuple_id_width = 0
self.genome_id_width = 0
self.chr_id_width = 0
self.coor_width = 0
parts = read_tuple_name.split("__")
self.prefix_width = len(parts[0])
self.read_tuple_id_width = len(parts[1])
segments = parts[2][1:-1].split("),(")
for segment in segments:
int_widths = list(map(len, segment.split(",")))
self.genome_id_width = max(self.genome_id_width, int_widths[0])
self.chr_id_width = max(self.chr_id_width, int_widths[1])
self.coor_width = max(self.coor_width, int_widths[2], int_widths[3]) | Load RNF values from a read tuple name.
Args:
read_tuple_name (str): Read tuple name which the values are taken from. |
#vtb
def sid(tnet, communities, axis=0, calc='global', decay=0):
tnet, netinfo = utils.process_input(tnet, [, , ])
D = temporal_degree_centrality(
tnet, calc=, communities=communities, decay=decay)
network_ids = np.unique(communities)
communities_size = np.array([sum(communities == n) for n in network_ids])
sid = np.zeros([network_ids.max()+1, network_ids.max()+1, tnet.shape[-1]])
for n in network_ids:
for m in network_ids:
betweenmodulescaling = 1/(communities_size[n]*communities_size[m])
if netinfo['nettype'][1] == 'd':
withinmodulescaling = 1 / \
(communities_size[n]*communities_size[n])
elif netinfo['nettype'][1] == 'u':
withinmodulescaling = 2 / \
(communities_size[n]*(communities_size[n]-1))
if n == m:
betweenmodulescaling = withinmodulescaling
sid[n, m, :] = withinmodulescaling * \
D[n, n, :] - betweenmodulescaling * D[n, m, :]
sid[np.isnan(sid)] = 0
if calc == 'global':
return np.sum(np.sum(sid, axis=1), axis=0)
elif calc == 'community_avg':
return np.sum(sid, axis=axis)
else:
return sid | r"""
Segregation integration difference (SID). An estimation of each community or global difference of within versus between community strength.[sid-1]_
Parameters
----------
tnet: array, dict
Temporal network input (graphlet or contact). Allowed nettype: 'bu', 'bd', 'wu', 'wd'
communities : array
an Nx1 vector or NxT array of community assignment.
axis : int
Dimension that is returned 0 or 1 (default 0).
Note, only relevant for directed networks.
i.e. if 0, node i has Aijt summed over j and t.
and if 1, node j has Aijt summed over i and t.
calc : str
'global' returns temporal degree centrality (a 1xnode vector) (default);
'community_pairs' returns a community x community x time matrix, which is the SID for each community pairing;
'community_avg' (returns a community x time matrix). Which is the normalized average of each community to all other communities.
decay: int
if calc = 'time', then decay is possible where the centrality of
the previous time point is carried over to the next time point but decays
at a value of $e^decay$ such that the temporal centrality measure becomes: $D_d(t+1) = e^{-decay}D_d(t) + D(t+1)$.
Returns
-------
sid: array
segregation-integration difference. Format: 2d or 3d numpy array (depending on calc) representing (community,community,time) or (community,time)
Notes
------
SID tries to quantify whether there is more segregation or integration compared to other time-points.
If SID > 0, then there is more segregation than usual. If SID < 0, then there is more integration than usual.
There are three different variants of SID, one is a global measure (calc='global'), the second is a value per community (calc='community_avg'),
the third is a value for each community-community pairing (calc='community_pairs').
First we calculate the temporal strength for each edge. This is calculate by
.. math:: S_{i,t} = \sum_j G_{i,j,t}
The pairwise SID, when the network is undirected, is calculated by
.. math:: SID_{A,B,t} = ({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({{1} \over {N_A * N_B}}) S_{A,B,t})
Where :math:`S_{A,t}` is the average temporal strength at time-point t for community A. :math:`N_A` is the number of nodes in community A.
When calculating the SID for a community, it is calculated by:
.. math:: SID_{A,t} = \sum_b^C({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({{1} \over {N_A * N_b}}) S_{A,b,t})
Where C is the number of communities.
When calculating the SID globally, it is calculated by:
.. math:: SID_{t} = \sum_a^C\sum_b^C({2 \over {N_a (N_a - 1)}}) S_{A,t} - ({{1} \over {N_a * N_b}}) S_{a,b,t})
References
-----------
.. [sid-1] Fransson et al (2018) Brain network segregation and integration during an epoch-related working memory fMRI experiment. Neuroimage. 178. [`Link <https://www.sciencedirect.com/science/article/pii/S1053811918304476>`_] |
#vtb
def config_dir_setup(filename):
path = os.path.dirname(filename)
if not os.path.isdir(path):
Shell.mkdir(path) | sets the config file and makes sure the directory exists if it has not yet been created.
:param filename:
:return: |
#vtb
def _get_default(self, obj):
if self.name in obj._property_values:
raise RuntimeError("Bokeh internal error, does not handle the case of self.name already in _property_values")
is_themed = obj.themed_values() is not None and self.name in obj.themed_values()
default = self.instance_default(obj)
if is_themed:
unstable_dict = obj._unstable_themed_values
else:
unstable_dict = obj._unstable_default_values
if self.name in unstable_dict:
return unstable_dict[self.name]
if self.property._may_have_unstable_default():
if isinstance(default, PropertyValueContainer):
default._register_owner(obj, self)
unstable_dict[self.name] = default
return default | Internal implementation of instance attribute access for default
values.
Handles bookkeeping around |PropertyContainer| values, etc. |
#vtb
def remove_labels(self, labels, relabel=False):
self.check_labels(labels)
self.reassign_label(labels, new_label=0)
if relabel:
self.relabel_consecutive() | Remove one or more labels.
Removed labels are assigned a value of zero (i.e., background).
Parameters
----------
labels : int, array-like (1D, int)
The label number(s) to remove.
relabel : bool, optional
If `True`, then the segmentation image will be relabeled
such that the labels are in consecutive order starting from
1.
Examples
--------
>>> from photutils import SegmentationImage
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.remove_labels(labels=[5, 3])
>>> segm.data
array([[1, 1, 0, 0, 4, 4],
[0, 0, 0, 0, 0, 4],
[0, 0, 0, 0, 0, 0],
[7, 0, 0, 0, 0, 0],
[7, 7, 0, 0, 0, 0],
[7, 7, 0, 0, 0, 0]])
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.remove_labels(labels=[5, 3], relabel=True)
>>> segm.data
array([[1, 1, 0, 0, 2, 2],
[0, 0, 0, 0, 0, 2],
[0, 0, 0, 0, 0, 0],
[3, 0, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0]]) |
#vtb
def full_data(self):
data = [
self.chat.title,
self._username(),
self._type(),
self._id()
]
return " ".join(filter(None, data)) | Returns all the info available for the chat in the following format:
title [username] (type) <id>
If any data is not available, it is not added. |
#vtb
def _validate_backend(self):
try:
self.backend_class
except (ImportError, AttributeError) as e:
raise ValidationError(_() % e) | ensure backend string representation is correct |
#vtb
def project_move(object_id, input_params={}, always_retry=False, **kwargs):
return DXHTTPRequest('/%s/move' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /project-xxxx/move API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2Fmove |
#vtb
def Execute(self, message):
self.message = message
if message:
self.require_fastpoll = message.require_fastpoll
args = None
try:
if self.message.args_rdf_name:
if not self.in_rdfvalue:
raise RuntimeError("Did not expect arguments, got %s." %
self.message.args_rdf_name)
if self.in_rdfvalue.__name__ != self.message.args_rdf_name:
raise RuntimeError(
"Unexpected arg type %s != %s." %
(self.message.args_rdf_name, self.in_rdfvalue.__name__))
args = self.message.payload
if self._authentication_required and (
self.message.auth_state !=
rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED):
raise RuntimeError("Message for %s was not Authenticated." %
self.message.name)
self.cpu_start = self.proc.cpu_times()
self.cpu_limit = self.message.cpu_limit
if getattr(flags.FLAGS, "debug_client_actions", False):
pdb.set_trace()
try:
self.Run(args)
finally:
used = self.proc.cpu_times()
self.cpu_used = (used.user - self.cpu_start.user,
used.system - self.cpu_start.system)
except NetworkBytesExceededError as e:
self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.NETWORK_LIMIT_EXCEEDED,
"%r: %s" % (e, e), traceback.format_exc())
except Exception as e:
self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR,
"%r: %s" % (e, e), traceback.format_exc())
if flags.FLAGS.pdb_post_mortem:
self.DisableNanny()
pdb.post_mortem()
if self.status.status != rdf_flows.GrrStatus.ReturnedStatus.OK:
logging.info("Job Error (%s): %s", self.__class__.__name__,
self.status.error_message)
if self.status.backtrace:
logging.debug(self.status.backtrace)
if self.cpu_used:
self.status.cpu_time_used.user_cpu_time = self.cpu_used[0]
self.status.cpu_time_used.system_cpu_time = self.cpu_used[1]
self.SendReply(self.status, message_type=rdf_flows.GrrMessage.Type.STATUS)
self._RunGC() | This function parses the RDFValue from the server.
The Run method will be called with the specified RDFValue.
Args:
message: The GrrMessage that we are called to process.
Returns:
Upon return a callback will be called on the server to register
the end of the function and pass back exceptions.
Raises:
RuntimeError: The arguments from the server do not match the expected
rdf type. |
#vtb
def _shuffle_items(items, bucket_key=None, disable=None, seed=None, session=None):
if seed is not None:
random.seed(seed)
if not bucket_key and not disable:
random.shuffle(items)
return
def get_full_bucket_key(item):
assert bucket_key or disable
if bucket_key and disable:
return ItemKey(bucket=bucket_key(item, session), disabled=disable(item, session))
elif disable:
return ItemKey(disabled=disable(item, session))
else:
return ItemKey(bucket=bucket_key(item, session))
buckets = OrderedDict()
for item in items:
full_bucket_key = get_full_bucket_key(item)
if full_bucket_key not in buckets:
buckets[full_bucket_key] = []
buckets[full_bucket_key].append(item)
bucket_keys = list(buckets.keys())
for full_bucket_key in buckets.keys():
if full_bucket_key.bucket == FAILED_FIRST_LAST_FAILED_BUCKET_KEY:
continue
if not full_bucket_key.disabled:
random.shuffle(buckets[full_bucket_key])
if bucket_keys and bucket_keys[0].bucket == FAILED_FIRST_LAST_FAILED_BUCKET_KEY:
new_bucket_keys = list(buckets.keys())[1:]
random.shuffle(new_bucket_keys)
new_bucket_keys.insert(0, bucket_keys[0])
else:
new_bucket_keys = list(buckets.keys())
random.shuffle(new_bucket_keys)
items[:] = [item for bk in new_bucket_keys for item in buckets[bk]]
return | Shuffles a list of `items` in place.
If `bucket_key` is None, items are shuffled across the entire list.
`bucket_key` is an optional function called for each item in `items` to
calculate the key of the bucket in which the item falls.
A bucket defines the boundaries across which items will not
be shuffled.
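For illustration, a hedged usage sketch (the items, key function and seed are made up; it assumes the module's `ItemKey` accepts a bucket-only key as the code above implies, and the `disable` hook described below is keyed the same way):
>>> items = ['a1', 'a2', 'b1', 'b2', 'b3']
>>> _shuffle_items(items, bucket_key=lambda item, session: item[0], seed=0)
>>> # Items starting with 'a' stay contiguous, as do those starting with 'b';
>>> # the order within each bucket and the order of the buckets themselves is random.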
`disable` is a function that takes an item and returns a falsey value
if this item is ok to be shuffled. It returns a truthy value otherwise and
the truthy value is used as part of the item's key when determining the bucket
it belongs to. |
#vtb
def MCMC_pdf_samples(self, fNew, num_samples=1000, starting_loc=None, stepsize=0.1, burn_in=1000, Y_metadata=None):
print("Warning, using MCMC for sampling y*, needs to be tuned!")
if starting_loc is None:
starting_loc = fNew
from functools import partial
logpdf = partial(self.logpdf, f=fNew, Y_metadata=Y_metadata)
pdf = lambda y_star: np.exp(logpdf(y=y_star[:, None]))
par_chains = starting_loc.shape[0]
chain_values = np.zeros((par_chains, num_samples))
chain_values[:, 0][:,None] = starting_loc
stepsize = np.ones(par_chains)*stepsize
accepted = np.zeros((par_chains, num_samples+burn_in))
accept_ratio = np.zeros(num_samples+burn_in)
burnin_cache = np.zeros(par_chains)
burnin_cache[:] = starting_loc.flatten()
burning_in = True
for i in range(burn_in+num_samples):
next_ind = i-burn_in
if burning_in:
old_y = burnin_cache
else:
old_y = chain_values[:,next_ind-1]
old_lik = pdf(old_y)
new_y = np.random.normal(loc=old_y, scale=stepsize)
new_lik = pdf(new_y)
accept_probability = np.minimum(1, new_lik/old_lik)
u = np.random.uniform(0,1,par_chains)
accepts = u < accept_probability
if burning_in:
burnin_cache[accepts] = new_y[accepts]
burnin_cache[~accepts] = old_y[~accepts]
if i == burn_in:
burning_in = False
chain_values[:,0] = burnin_cache
else:
chain_values[accepts, next_ind] = new_y[accepts]
chain_values[~accepts, next_ind] = old_y[~accepts]
accepted[~accepts, i] = 0
accepted[accepts, i] = 1
accept_ratio[i] = np.sum(accepted[:,i])/float(par_chains)
if i % int((burn_in+num_samples)*0.1) == 0:
print("{}% of samples taken ({})".format((i/int((burn_in+num_samples)*0.1)*10), i))
print("Last run accept ratio: ", accept_ratio[i])
print("Average accept ratio: ", np.mean(accept_ratio))
return chain_values | Simple implementation of Metropolis sampling algorithm
Will run a parallel chain for each input dimension (treats each f independently)
Thus it assumes f*_1 is independent of f*_2, etc.
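A minimal standalone sketch of the same random-walk Metropolis idea, one independent chain per dimension (plain NumPy, independent of this method and its likelihood object; the toy log-density and all names below are illustrative only)::
    import numpy as np
    def metropolis_parallel(logpdf, start, num_samples=1000, burn_in=1000, stepsize=0.1, seed=0):
        rng = np.random.default_rng(seed)
        chains = np.asarray(start, dtype=float).ravel().copy()
        out = np.empty((chains.size, num_samples))
        for i in range(burn_in + num_samples):
            proposal = rng.normal(loc=chains, scale=stepsize)
            # Accept elementwise with probability min(1, p(new)/p(old)), computed in log space.
            accept = np.log(rng.uniform(size=chains.size)) < logpdf(proposal) - logpdf(chains)
            chains = np.where(accept, proposal, chains)
            if i >= burn_in:
                out[:, i - burn_in] = chains
        return out
    # Toy target: independent standard normals, three parallel chains.
    samples = metropolis_parallel(lambda y: -0.5 * y ** 2, start=np.zeros(3), stepsize=1.0)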
:param num_samples: Number of samples to take
:param fNew: f at which to sample around
:param starting_loc: Starting locations of the independent chains (usually will be conditional_mean of likelihood), often link_f
:param stepsize: Stepsize for the normal proposal distribution (will need modifying)
:param burn_in: number of samples to use for burn-in (will need modifying)
:param Y_metadata: Y_metadata for pdf |
#vtb
def HumanReadableStartType(self):
if isinstance(self.start_type, py2to3.STRING_TYPES):
return self.start_type
return human_readable_service_enums.SERVICE_ENUMS[].get(
self.start_type, .format(self.start_type)) | Return a human readable string describing the start type value.
Returns:
str: human readable description of the start type value. |
#vtb
def get_subgraph_peripheral_nodes(graph: BELGraph,
subgraph: Iterable[BaseEntity],
node_predicates: NodePredicates = None,
edge_predicates: EdgePredicates = None,
):
node_filter = concatenate_node_predicates(node_predicates=node_predicates)
edge_filter = and_edge_predicates(edge_predicates=edge_predicates)
result = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for u, v, k, d in get_peripheral_successor_edges(graph, subgraph):
if not node_filter(graph, v) or not node_filter(graph, u) or not edge_filter(graph, u, v, k):
continue
result[v]['predecessor'][u].append((k, d))
for u, v, k, d in get_peripheral_predecessor_edges(graph, subgraph):
if not node_filter(graph, v) or not node_filter(graph, u) or not edge_filter(graph, u, v, k):
continue
result[u]['successor'][v].append((k, d))
return result | Get a summary dictionary of all peripheral nodes to a given sub-graph.
:return: A dictionary of {external node: {'successor': {internal node: list of (key, dict)},
'predecessor': {internal node: list of (key, dict)}}}
:rtype: dict
For example, it might be useful to quantify the number of predecessors and successors:
>>> from pybel.struct.filters import exclude_pathology_filter
>>> value = 'Blood vessel dilation subgraph'
>>> sg = get_subgraph_by_annotation_value(graph, annotation='Subgraph', value=value)
>>> p = get_subgraph_peripheral_nodes(graph, sg, node_predicates=exclude_pathology_filter)
>>> for node in sorted(p, key=lambda n: len(set(p[n]['successor']) | set(p[n]['predecessor'])), reverse=True):
>>> if 1 == len(p[node]['successor']) or 1 == len(p[node]['predecessor']):
>>> continue
>>> print(node,
>>> len(p[node]['successor']),
>>> len(p[node]['predecessor']),
>>> len(set(p[node]['successor']) | set(p[node]['predecessor']))) |
#vtb
def dot_v2(vec1, vec2):
return vec1.x * vec2.x + vec1.y * vec2.y | Return the dot product of two vectors |
#vtb
def fieldvalue_pairs(self, exclude_cache=False):
for field in self._meta.scalarfields:
if exclude_cache and field.as_cache:
continue
name = field.attname
if hasattr(self, name):
yield field, getattr(self, name) | Generator of (field, value) pairs. Fields correspond to
the ones which have been loaded (usually all of them) or
not loaded but modified.
Check the :ref:`load_only <performance-loadonly>` query function for more
details.
If *exclude_cache* evaluates to ``True``, fields with :attr:`Field.as_cache`
attribute set to ``True`` won't be included.
:rtype: a generator of two-element tuples |
#vtb
def process_directory_statements_sorted_by_pmid(directory_name):
s_dict = defaultdict(list)
mp = process_directory(directory_name, lazy=True)
for statement in mp.iter_statements():
s_dict[statement.evidence[0].pmid].append(statement)
return s_dict | Processes a directory filled with CSXML files, first normalizing the
character encoding to utf-8, and then processing into INDRA statements
sorted by pmid.
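A hedged usage sketch (the directory name is hypothetical, and this assumes the CSXML reader dependencies are installed):
>>> stmts_by_pmid = process_directory_statements_sorted_by_pmid('medscan_csxml_dir')  # doctest: +SKIP
>>> for pmid, stmts in stmts_by_pmid.items():  # doctest: +SKIP
...     print(pmid, len(stmts))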
Parameters
----------
directory_name : str
The name of a directory filled with csxml files to process
Returns
-------
pmid_dict : dict
A dictionary mapping pmids to a list of statements corresponding to
that pmid |
#vtb
def delete(cls, resources, background=False, force=False):
if not isinstance(resources, (list, tuple)):
resources = [resources]
ifaces = []
for item in resources:
try:
ip_ = cls.info(item)
except UsageError:
cls.error("Can't find this ip %s" % item)
ifaces.append(ip_['iface_id'])
return Iface.delete(ifaces, background) | Delete an ip by deleting the iface |
#vtb
def resume(self, vehID):
self._connection._beginMessage(
tc.CMD_SET_VEHICLE_VARIABLE, tc.CMD_RESUME, vehID, 1 + 4)
self._connection._string += struct.pack("!Bi", tc.TYPE_COMPOUND, 0)
self._connection._sendExact() | resume(string) -> None
Resumes the vehicle from the current stop (throws an error if the vehicle is not stopped). |
#vtb
def columns_used(self):
return list(tz.unique(tz.concatv(
self.choosers_columns_used(),
self.alts_columns_used(),
self.interaction_columns_used(),
util.columns_in_formula(self.default_model_expr),
[self.segmentation_col]))) | Columns from any table used in the model. May come from either
the choosers or alternatives tables. |
#vtb
def Delete(self):
public_ip_set = [{: o.id} for o in self.parent.public_ips if o!=self]
self.parent.public_ips = [o for o in self.parent.public_ips if o!=self]
return(clc.v2.Requests(clc.v2.API.Call(, % (self.parent.server.alias,self.parent.server.id,self.id),
session=self.session),
alias=self.parent.server.alias,
session=self.session)) | Delete public IP.
>>> clc.v2.Server("WA1BTDIX01").PublicIPs().public_ips[0].Delete().WaitUntilComplete()
0 |
#vtb
def can_use_c_for(self, node):
assert isinstance(node.target, ast.Name)
if sys.version_info.major == 3:
range_name = 'range'
else:
range_name = 'xrange'
pattern_range = ast.Call(func=ast.Attribute(
value=ast.Name(id=,
ctx=ast.Load(),
annotation=None),
attr=range_name, ctx=ast.Load()),
args=AST_any(), keywords=[])
is_assigned = {node.target.id: False}
[is_assigned.update(self.gather(IsAssigned, stmt))
for stmt in node.body]
nodes = ASTMatcher(pattern_range).search(node.iter)
if (node.iter not in nodes or is_assigned[node.target.id]):
return False
args = node.iter.args
if len(args) < 3:
return True
if isinstance(args[2], ast.Num):
return True
return False | Check if a for loop can use classic C syntax.
To use C syntax (illustrated below):
- the target must not be assigned to inside the loop body
- xrange (or range) must be used as the iterator
- the iteration order, i.e. the range step, has to be known at compile time
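A hedged sketch of loops that do and do not meet these criteria (illustrative only; the variables are made up)::
    acc = 0
    # Qualifies: `i` is never reassigned in the body and the step is a literal.
    for i in range(0, 10, 2):
        acc += i
    # Does not qualify: the loop target is reassigned inside the body.
    for i in range(10):
        i = i + 1
        acc += i |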