code stringlengths 26 79.6k | docstring stringlengths 1 46.9k |
|---|---|
def upload(self, payload=None, content_type=None):
payload = payload or self.config.payload
content_type = content_type or self.config.content_type
if payload is None:
raise ValueError()
if not os.path.exists(payload):
raise IOError( % payload)
upload_results = client.upload(
self.config, self.connection, payload, content_type)
return upload_results | Upload the archive at `path` with content type `content_type`
returns (int): upload status code |
def normalizeInternalObjectType(value, cls, name):
if not isinstance(value, cls):
raise TypeError("%s must be a %s instance, not %s."
% (name, name, type(value).__name__))
return value | Normalizes an internal object type.
* **value** must be a instance of **cls**.
* Returned value is the same type as the input value. |
def mergeNewMSBWT(mergedDir, inputBwtDirs, numProcs, logger):
re allowed to use
@param logger - output goes here
NOTE: in practice, since we
msbwts[i] = MultiStringBWT.loadBWT(dirName, logger)
mergedLength += msbwts[i].totalSize
binSize = 2**28
logger.info()
mergedBWT = np.lib.format.open_memmap(mergedDir+, , , (mergedLength,))
logger.info()
placeArray = np.lib.format.open_memmap(mergedDir+, , , (mergedBWT.shape[0],))
copiedPlaceArray = np.lib.format.open_memmap(mergedDir+, , , (mergedBWT.shape[0],))
start = msbwts[0].totalSize
end = 0
for i, msbwt in enumerate(msbwts):
end += msbwt.getTotalSize()
placeArray[start:end].fill(i)
copiedPlaceArray[start:end].fill(i)
start = end
TODO: the below False is there because this only works if you do a full file copy right now. Itt properly updated. Itm going with the no-skip, no-copy form until I can resolve the
problem (if there
if False and not binHasChanged[x] and sameOffset:
for key in binUpdates[x]:
nextOffsetCounts[key] += binUpdates[x][key]
ignored += 1
else:
if i % 2 == 0:
tup = (x, binSize, vcLen, currOffsetCounts[x], mergedDir+, mergedDir+, inputBwtDirs)
else:
tup = (x, binSize, vcLen, currOffsetCounts[x], mergedDir+, mergedDir+, inputBwtDirs)
tups.append(tup)
if numProcs > 1:
myPool = multiprocessing.Pool(numProcs)
rets = myPool.imap(mergeNewMSBWTPoolCall, tups, chunksize=10)
else:
rets = []
for tup in tups:
rets.append(mergeNewMSBWTPoolCall(tup))
progressCounter = ignored
sys.stdout.write(+str(100*progressCounter*binSize/mergedBWT.shape[0])+)
sys.stdout.flush()
for ret in rets:
(x, nBHC, nOC, nMI) = ret
binUpdates[x] = nOC
for k in nBHC:
nextBinHasChanged[k] |= nBHC[k]
for b in nOC:
nextOffsetCounts[b] += nOC[b]
needsMoreIterations |= nMI
progressCounter += 1
sys.stdout.write(+str(min(100*progressCounter*binSize/mergedBWT.shape[0], 100))+)
sys.stdout.flush()
nextOffsetCounts = np.cumsum(nextOffsetCounts, axis=0)-nextOffsetCounts
if numProcs > 1:
myPool.terminate()
myPool.join()
myPool = None
sys.stdout.write()
sys.stdout.flush()
logger.info()
offsets = np.zeros(dtype=, shape=(numInputs,))
for i in xrange(0, mergedBWT.shape[0]/binSize+1):
ind = placeArray[i*binSize:(i+1)*binSize]
if i == mergedBWT.shape[0]/binSize:
ind = ind[0:mergedBWT.shape[0]-i*binSize]
bc = np.bincount(ind, minlength=numInputs)
for x in xrange(0, numInputs):
mergedBWT[np.add(i*binSize, np.where(ind == x))] = msbwts[x].getBWTRange(int(offsets[x]), int(offsets[x]+bc[x]))
offsets += bc
et = time.time()
logger.info(+str(et-st)+) | This function will take a list of input BWTs (compressed or not) and merge them into a single BWT
@param mergedFN - the destination for the final merged MSBWT
@param inputBWTFN1 - the fn of the first BWT to merge
@param inputBWTFN2 - the fn of the second BWT to merge
@param numProcs - number of processes we're allowed to use
@param logger - output goes here |
def long_description(*filenames):
res = []
for filename in filenames:
with open(filename) as fp:
for line in fp:
res.append( + line)
res.append()
res.append()
return EMPTYSTRING.join(res) | Provide a long description. |
def _StatusUpdateThreadMain(self):
while self._status_update_active:
for pid in list(self._process_information_per_pid.keys()):
self._CheckStatusAnalysisProcess(pid)
self._UpdateForemanProcessStatus()
if self._status_update_callback:
self._status_update_callback(self._processing_status)
time.sleep(self._STATUS_UPDATE_INTERVAL) | Main function of the status update thread. |
def transpose(self, name=None, activate_final=None):
if name is None:
name = self.module_name + "_transpose"
if activate_final is None:
activate_final = self.activate_final
output_sizes = [lambda l=layer: l.input_shape[1] for layer in self._layers]
output_sizes.reverse()
return MLP(
name=name,
output_sizes=output_sizes,
activation=self.activation,
activate_final=activate_final,
initializers=self.initializers,
partitioners=self.partitioners,
regularizers=self.regularizers,
use_bias=self.use_bias,
use_dropout=self.use_dropout) | Returns transposed `MLP`.
Args:
name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
activate_final: Optional boolean determining if the activation and batch
normalization, if turned on, are applied to the final layer.
Returns:
Matching transposed `MLP` module. |
def oplog_thread_join(self):
LOG.info("MongoConnector: Stopping all OplogThreads")
for thread in self.shard_set.values():
thread.join() | Stops all the OplogThreads |
def query(self,
watch_key,
time_indices=None,
slicing=None,
mapping=None):
if watch_key not in self._tensor_data:
raise KeyError("watch_key not found: %s" % watch_key)
if time_indices is None:
time_indices =
time_slicing = tensor_helper.parse_time_indices(time_indices)
all_time_indices = list(range(self._tensor_data[watch_key].num_total()))
sliced_time_indices = all_time_indices[time_slicing]
if not isinstance(sliced_time_indices, list):
sliced_time_indices = [sliced_time_indices]
recombine_and_map = False
step_mapping = mapping
if len(sliced_time_indices) > 1 and mapping not in (None, ):
recombine_and_map = True
step_mapping = None
output = []
for index in sliced_time_indices:
value = self._tensor_data[watch_key].query(index)[0]
if (value is not None and
not isinstance(value, debug_data.InconvertibleTensorProto)):
output.append(tensor_helper.array_view(
value, slicing=slicing, mapping=step_mapping)[2])
else:
output.append(None)
if recombine_and_map:
if mapping == :
output = tensor_helper.array_to_base64_png(output)
elif mapping and mapping != :
logger.warn(
,
mapping)
return output | Query tensor store for a given watch_key.
Args:
watch_key: The watch key to query.
time_indices: A numpy-style slicing string for time indices. E.g.,
`-1`, `:-2`, `[::2]`. If not provided (`None`), will use -1.
slicing: A numpy-style slicing string for individual time steps.
mapping: An mapping string or a list of them. Supported mappings:
`{None, 'image/png', 'health-pill'}`.
Returns:
The potentially sliced values as a nested list of values or its mapped
format. A `list` of nested `list` of values.
Raises:
ValueError: If the shape of the sliced array is incompatible with mapping
mode. Or if the mapping type is invalid. |
def stream_mapred(self, inputs, query, timeout):
_validate_timeout(timeout)
def make_op(transport):
return transport.stream_mapred(inputs, query, timeout)
for phase, data in self._stream_with_retry(make_op):
yield phase, data | Streams a MapReduce query as (phase, data) pairs. This is a
generator method which should be iterated over.
The caller should explicitly close the returned iterator,
either using :func:`contextlib.closing` or calling ``close()``
explicitly. Consuming the entire iterator will also close the
stream. If it does not, the associated connection might
not be returned to the pool. Example::
from contextlib import closing
# Using contextlib.closing
with closing(mymapred.stream()) as results:
for phase, result in results:
do_something(phase, result)
# Explicit close()
stream = mymapred.stream()
for phase, result in stream:
do_something(phase, result)
stream.close()
:param inputs: the input list/structure
:type inputs: list, dict
:param query: the list of query phases
:type query: list
:param timeout: the query timeout
:type timeout: integer, None
:rtype: iterator |
def get_vartype(data):
if data.name is not None and data.name in _MEMO:
return _MEMO[data.name]
vartype = None
try:
distinct_count = get_groupby_statistic(data)[1]
leng = len(data)
if distinct_count <= 1:
vartype = S_TYPE_CONST
elif pd.api.types.is_bool_dtype(data) or (distinct_count == 2 and pd.api.types.is_numeric_dtype(data)):
vartype = TYPE_BOOL
elif pd.api.types.is_numeric_dtype(data):
vartype = TYPE_NUM
elif pd.api.types.is_datetime64_dtype(data):
vartype = TYPE_DATE
elif distinct_count == leng:
vartype = S_TYPE_UNIQUE
else:
vartype = TYPE_CAT
except:
vartype = S_TYPE_UNSUPPORTED
if data.name is not None:
_MEMO[data.name] = vartype
return vartype | Infer the type of a variable (technically a Series).
The types supported are split in standard types and special types.
Standard types:
* Categorical (`TYPE_CAT`): the default type if no other one can be determined
* Numerical (`TYPE_NUM`): if it contains numbers
* Boolean (`TYPE_BOOL`): at this time only detected if it contains boolean values, see todo
* Date (`TYPE_DATE`): if it contains datetime
Special types:
* Constant (`S_TYPE_CONST`): if all values in the variable are equal
* Unique (`S_TYPE_UNIQUE`): if all values in the variable are different
* Unsupported (`S_TYPE_UNSUPPORTED`): if the variable is unsupported
The result is cached by column name in a global variable to avoid recomputing.
Parameters
----------
data : Series
The data type of the Series.
Returns
-------
str
The data type of the Series.
Notes
----
* Should improve verification when a categorical or numeric field has 3 values, it could be a categorical field
or just a boolean with NaN values
* #72: Numeric with low Distinct count should be treated as "Categorical" |
def disconnect(self, device):
self.log("TELNET disconnect")
try:
self.device.ctrl.send(chr(4))
except OSError:
self.log("Protocol already disconnected") | Disconnect using protocol specific method. |
def kill(self):
logger.info(.format(self.name))
if hasattr(self, ) and self.remote_client is not None:
self.kill_sent = True
self.remote_client.close()
return
if not self.process:
raise DagobahError()
self.kill_sent = True
self.process.kill() | Send SIGKILL to the task's process. |
def _wait_output(popen, is_slow):
proc = Process(popen.pid)
try:
proc.wait(settings.wait_slow_command if is_slow
else settings.wait_command)
return True
except TimeoutExpired:
for child in proc.children(recursive=True):
_kill_process(child)
_kill_process(proc)
return False | Returns `True` if we can get output of the command in the
`settings.wait_command` time.
Command will be killed if it wasn't finished in the time.
:type popen: Popen
:rtype: bool |
def create_switch(type, settings, pin):
switch = None
if type == "A":
group, device = settings.split(",")
switch = pi_switch.RCSwitchA(group, device)
elif type == "B":
addr, channel = settings.split(",")
addr = int(addr)
channel = int(channel)
switch = pi_switch.RCSwitchB(addr, channel)
elif type == "C":
family, group, device = settings.split(",")
group = int(group)
device = int(device)
switch = pi_switch.RCSwitchC(family, group, device)
elif type == "D":
group, device = settings.split(",")
device = int(device)
switch = pi_switch.RCSwitchD(group, device)
else:
print "Type %s is not supported!" % type
sys.exit()
switch.enableTransmit(pin)
return switch | Create a switch.
Args:
type: (str): type of the switch [A,B,C,D]
settings (str): a comma separted list
pin (int): wiringPi pin
Returns:
switch |
def delete_collection_namespaced_replica_set(self, namespace, **kwargs):
kwargs[] = True
if kwargs.get():
return self.delete_collection_namespaced_replica_set_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_replica_set_with_http_info(namespace, **kwargs)
return data | delete_collection_namespaced_replica_set # noqa: E501
delete collection of ReplicaSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_replica_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread. |
def parse_or_die(self, args=None):
from .cli import die
try:
return self.parse(args)
except KwargvError as e:
die(e) | Like :meth:`ParseKeywords.parse`, but calls :func:`pkwit.cli.die` if a
:exc:`KwargvError` is raised, printing the exception text. Returns
*self* for convenience. |
def copy(input, **params):
PARAM_FIELDS =
def filter_fields(obj, fields):
return {k:v for k,v in obj.items() if k in fields}
if PARAM_FIELDS in params:
fields = params.get(PARAM_FIELDS)
if isinstance(input, list):
res = []
for row in input:
res.append(filter_fields(row, fields))
return res
elif isinstance(input, dict):
return filter_fields(input, fields)
else:
raise NotImplementedError(.format(type(input)))
else:
return input | Copies input or input's selected fields
:param input:
:param params:
:return: input |
def fs_obj_query_info(self, path, follow_symlinks):
if not isinstance(path, basestring):
raise TypeError("path can only be an instance of type basestring")
if not isinstance(follow_symlinks, bool):
raise TypeError("follow_symlinks can only be an instance of type bool")
info = self._call("fsObjQueryInfo",
in_p=[path, follow_symlinks])
info = IGuestFsObjInfo(info)
return info | Queries information about a file system object (file, directory, etc)
in the guest.
in path of type str
Path to the file system object to gather information about.
Guest path style.
in follow_symlinks of type bool
Information about symbolic links is returned if @c false. Otherwise,
symbolic links are followed and the returned information concerns
itself with the symlink target if @c true.
return info of type :class:`IGuestFsObjInfo`
:py:class:`IGuestFsObjInfo` object containing the information.
raises :class:`VBoxErrorObjectNotFound`
The file system object was not found.
raises :class:`VBoxErrorIprtError`
Error while querying information. |
def convert_values(args_list):
rate_map = get_rates(map(itemgetter(1, 2), args_list))
value_map = {}
for value, source, target in args_list:
args = (value, source, target)
if source == target:
value_map[args] = value
else:
value_map[args] = value * rate_map[(source, target)]
return value_map | convert_value in bulk.
:param args_list: list of value, source, target currency pairs
:return: map of converted values |
def explain_weights_dfs(estimator, **kwargs):
kwargs = _set_defaults(kwargs)
return format_as_dataframes(
eli5.explain_weights(estimator, **kwargs)) | Explain weights and export them to a dict with ``pandas.DataFrame``
values (as :func:`eli5.formatters.as_dataframe.format_as_dataframes` does).
All keyword arguments are passed to :func:`eli5.explain_weights`.
Weights of all features are exported by default. |
def zeq_magic(meas_file=, spec_file=,crd=,input_dir_path=, angle=0,
n_plots=5, save_plots=True, fmt="svg", interactive=False, specimen="",
samp_file=, contribution=None,fignum=1):
def plot_interpretations(ZED, spec_container, this_specimen, this_specimen_measurements, datablock):
if cb.is_null(spec_container) or cb.is_null(this_specimen_measurements) or cb.is_null(datablock):
return ZED
if not in spec_container.df.columns:
return ZED
prior_spec_data = spec_container.get_records_for_code(
, strict_match=False)
prior_specimen_interpretations=[]
if not len(prior_spec_data):
return ZED
mpars = {"specimen_direction_type": "Error"}
if len(prior_spec_data):
prior_specimen_interpretations = prior_spec_data[prior_spec_data[].astype(str) == this_specimen]
if len(prior_specimen_interpretations):
if len(prior_specimen_interpretations)>0:
beg_pcas = pd.to_numeric(
prior_specimen_interpretations.meas_step_min.values).tolist()
end_pcas = pd.to_numeric(
prior_specimen_interpretations.meas_step_max.values).tolist()
spec_methods = prior_specimen_interpretations.method_codes.tolist()
for ind in range(len(beg_pcas)):
spec_meths = spec_methods[ind].split()
for m in spec_meths:
if in m:
calculation_type =
if in m:
calculation_type =
if in m:
calculation_type =
if in m:
calculation_type =
treatments = pd.to_numeric(this_specimen_measurements.treatment).tolist()
if len(beg_pcas)!=0:
try:
start, end = treatments.index(beg_pcas[ind]), treatments.index(end_pcas[ind])
mpars = pmag.domean(
datablock, start, end, calculation_type)
except ValueError as ex:
mpars[] = "Error"
try:
if beg_pcas[ind] == 0:
start = 0
else:
start = treatments.index(beg_pcas[ind])
if end_pcas[ind] == 0:
end = 0
else:
end = treatments.index(end_pcas[ind])
mpars = pmag.domean(
datablock, start, end, calculation_type)
except ValueError:
mpars[] = "Error"
if mpars["specimen_direction_type"] != "Error":
pmagplotlib.plot_dir(ZED, mpars, datablock, angle)
else:
print(.format(this_specimen))
print(prior_spec_data.loc[this_specimen][[, ]])
print()
cols = list(set([, ]).intersection(this_specimen_measurements.columns))
print(this_specimen_measurements[cols])
print()
return ZED
def make_plots(spec, cnt, meas_df, spec_container, samp_container=None):
if spec_container:
try:
samps = spec_container.df.loc[spec, ]
except KeyError:
samps = ""
samp_df = []
if isinstance(samps, int) or isinstance(samps, float) or isinstance(samps, np.int64):
if np.isnan(samps):
samp = ""
samp_df = []
else:
samp = str(samps)
samp_container.df.index = samp_container.df.index.astype(str)
samp_df = samp_container.df[samp_container.df.index == samp]
elif isinstance(samps, type(None)):
samp = ""
samp_df = []
elif len(samps):
if isinstance(samps, str):
samp = samps
else:
samp = samps.iloc[0]
samp_df = samp_container.df[samp_container.df.index == samp]
else:
samp_df = []
ZED = {: cnt, : cnt+1, : cnt+2}
spec_df = meas_df[meas_df.specimen == s]
spec_df = spec_df[- spec_df.method_codes.str.contains(
)]
spec_df_nrm = spec_df[spec_df.method_codes.str.contains(
)]
spec_df_th = spec_df[spec_df.method_codes.str.contains(
)]
try:
cond = spec_df.method_codes.str.contains()
spec_df_th = spec_df_th[-cond]
except ValueError:
keep_inds = []
n = 0
for ind, row in spec_df_th.copy().iterrows():
if in row[] and not in row[]:
keep_inds.append(n)
else:
pass
n += 1
if len(keep_inds) < n:
spec_df_th = spec_df_th.iloc[keep_inds]
spec_df_af = spec_df[spec_df.method_codes.str.contains()]
this_spec_meas_df = None
datablock = None
if (not len(spec_df_th.index) > 1) and (not len(spec_df_af.index) > 1):
return
if len(spec_df_th.index) > 1:
this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_th])
n_rows = len(this_spec_meas_df)
this_spec_meas_df = this_spec_meas_df.dropna(how=, subset=[, , ])
if n_rows > len(this_spec_meas_df):
print(.format(s, n_rows - len(this_spec_meas_df)))
if coord != "-1" and len(samp_df):
this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
units =
try:
this_spec_meas_df[] = this_spec_meas_df[].astype(float)
this_spec_meas_df[] = this_spec_meas_df[].astype(float)
except:
print(.format(spec))
return
datablock = this_spec_meas_df[[, , ,
, , ]].values.tolist()
ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units)
if len(spec_df_af.index) > 1:
this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_af])
n_rows = len(this_spec_meas_df)
this_spec_meas_df = this_spec_meas_df.dropna(how=, subset=[, , ])
if n_rows > len(this_spec_meas_df):
print(.format(s, n_rows - len(this_spec_meas_df)))
if coord != "-1" and len(samp_df):
this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
units =
try:
this_spec_meas_df[] = this_spec_meas_df[].astype(float)
this_spec_meas_df[] = this_spec_meas_df[].astype(float)
except:
print(.format(spec))
return
datablock = this_spec_meas_df[[, , ,
, , ]].values.tolist()
ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units)
return plot_interpretations(ZED, spec_container, s, this_spec_meas_df, datablock)
if interactive:
save_plots = False
if not isinstance(contribution, cb.Contribution):
input_dir_path = os.path.realpath(input_dir_path)
file_path = pmag.resolve_file_name(meas_file, input_dir_path)
if not os.path.exists(file_path):
print(, file_path)
return False, []
custom_filenames = {: file_path, : spec_file, : samp_file}
contribution = cb.Contribution(input_dir_path, custom_filenames=custom_filenames,
read_tables=[, ,
, ])
if pmagplotlib.isServer:
try:
contribution.propagate_location_to_samples()
contribution.propagate_location_to_specimens()
contribution.propagate_location_to_measurements()
except KeyError as ex:
pass
meas_container = contribution.tables[]
meas_df = contribution.tables[].df
spec_container = contribution.tables.get(, None)
samp_container = contribution.tables.get(, None)
meas_df[] = ""
if in meas_df.columns:
if in meas_df.columns:
meas_df[] = meas_df[].where(
cond=meas_df[].astype(bool), other=meas_df[])
else:
meas_df[] = meas_df[]
else:
meas_df[] = meas_df[]
if crd == "s":
coord = "-1"
elif crd == "t":
coord = "100"
else:
coord = "0"
specimens = meas_df.specimen.unique()
if len(specimens) == 0:
print()
return False, []
saved.extend(pmagplotlib.save_plots(ZED, titles))
else:
continue
else:
cnt += 3
return True, saved | zeq_magic makes zijderveld and equal area plots for magic formatted measurements files.
Parameters
----------
meas_file : str
input measurement file
spec_file : str
input specimen interpretation file
samp_file : str
input sample orientations file
crd : str
coordinate system [s,g,t] for specimen, geographic, tilt corrected
g,t options require a sample file with specimen and bedding orientation
input_dir_path : str
input directory of meas_file, default "."
angle : float
angle of X direction with respect to specimen X
n_plots : int, default 5
maximum number of plots to make
if you want to make all possible plots, specify "all"
save_plots : bool, default True
if True, create and save all requested plots
fmt : str, default "svg"
format for figures, [svg, jpg, pdf, png]
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line only)
specimen : str, default ""
specimen name to plot
samp_file : str, default 'samples.txt'
name of samples file
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
fignum : matplotlib figure number |
def update(self, dist):
assert isinstance(dist, DDist)
for k, c in iteritems(dist.counts):
self.counts[k] += c
self.total += dist.total | Adds the given distribution's counts to the current distribution. |
def reftag_to_cls(fn):
names, _, _, values = inspect.getargspec(fn)
@wraps(fn)
def wrapped(*args, **kwargs):
i = 0
backend = args[0]
for name in names[1:]:
value = args[i]
if name == "concrete" and isinstance(value, six.string_types):
args[i] = backend.REFTAG_CONCRETE[value]
elif name == "resource" and isinstance(value, six.string_types):
args[i] = backend.REFTAG_RESOURCE[value]
i += 1
return fn(*args, **kwargs)
return wrapped | decorator that checks function arguments for `concrete` and `resource`
and will properly set them to class references if a string (reftag) is
passed as the value |
def predict_proba(self, p):
if p.size != p.shape[0]:
p = p[:, 1]
calibrated_proba = np.zeros(p.shape[0])
for i in range(self.calibration_map.shape[0]):
calibrated_proba[np.logical_and(self.calibration_map[i, 1] <= p, self.calibration_map[i, 0] > p)] = \
self.calibration_map[i, 2]
return calibrated_proba | Calculate the calibrated probabilities
Parameters
----------
y_prob : array-like of shape = [n_samples, 2]
Predicted probabilities to be calibrated using calibration map
Returns
-------
y_prob_cal : array-like of shape = [n_samples, 1]
Predicted calibrated probabilities |
def setup_scrollarea(self):
self.view = QWidget()
self.scene = QGridLayout(self.view)
self.scene.setColumnStretch(0, 100)
self.scene.setColumnStretch(2, 100)
self.scrollarea = QScrollArea()
self.scrollarea.setWidget(self.view)
self.scrollarea.setWidgetResizable(True)
self.scrollarea.setFrameStyle(0)
self.scrollarea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scrollarea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scrollarea.setSizePolicy(QSizePolicy(QSizePolicy.Ignored,
QSizePolicy.Preferred))
self.scrollarea.setVerticalScrollBar(QScrollBar())
return self.scrollarea | Setup the scrollarea that will contain the FigureThumbnails. |
def handle(self, *args, **options):
logger.info("Build started")
self.set_options(*args, **options)
if not options.get("keep_build_dir"):
self.init_build_dir()
if not options.get("skip_static"):
self.build_static()
if not options.get("skip_media"):
self.build_media()
self.build_views()
logger.info("Build finished") | Making it happen. |
def read_data(archive, arc_type, day, stachans, length=86400):
st = []
available_stations = _check_available_data(archive, arc_type, day)
for station in stachans:
if len(station[1]) == 2:
station_map = (station[0], station[1][0] + + station[1][1])
available_stations_map = [(sta[0], sta[1][0] + + sta[1][-1])
for sta in available_stations]
else:
station_map = station
available_stations_map = available_stations
if station_map not in available_stations_map:
msg = .join([station[0], station_map[1], ,
day.strftime()])
warnings.warn(msg)
continue
if arc_type.lower() == :
client = SeishubClient(archive)
st += client.get_waveforms(
network=, station=station_map[0], location=,
channel=station_map[1], starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + length)
elif arc_type.upper() == "FDSN":
client = FDSNClient(archive)
try:
st += client.get_waveforms(
network=, station=station_map[0], location=,
channel=station_map[1], starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + length)
except FDSNException:
warnings.warn( +
)
continue
elif arc_type.lower() == :
wavfiles = _get_station_file(os.path.join(
archive, day.strftime( + os.sep + )),
station_map[0], station_map[1])
for wavfile in wavfiles:
st += read(wavfile, starttime=day, endtime=day + length)
st = Stream(st)
return st | Function to read the appropriate data from an archive for a day.
:type archive: str
:param archive:
The archive source - if arc_type is seishub, this should be a url,
if the arc_type is FDSN then this can be either a url or a known obspy
client. If arc_type is day_vols, then this is the path to the top
directory.
:type arc_type: str
:param arc_type: The type of archive, can be: seishub, FDSN, day_volumes
:type day: datetime.date
:param day: Date to retrieve data for
:type stachans: list
:param stachans: List of tuples of Stations and channels to try and get,
will not fail if stations are not available, but will warn.
:type length: float
:param length: Data length to extract in seconds, defaults to 1 day.
:returns: Stream of data
:rtype: obspy.core.stream.Stream
.. note:: A note on arc_types, if arc_type is day_vols, then this will \
look for directories labelled in the IRIS DMC conventions of \
Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \
Data within these files directories should be stored as day-long, \
single-channel files. This is not implemented in the fasted way \
possible to allow for a more general situation. If you require more \
speed you will need to re-write this.
.. rubric:: Example
>>> from obspy import UTCDateTime
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, missing data
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, local day-volumes
>>> # Get the path to the test data
>>> import eqcorrscan
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')]
>>> st = read_data(TEST_PATH + '/day_vols', 'day_vols',
... t1, stachans)
>>> print(st)
2 Trace(s) in Stream:
AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples |
def append(self, tweet):
    """Add a tweet to the end of the list.

    NOTE(review): the SQL below compares ``label=`` with no value -- the
    string literals appear to have been stripped from this source; the
    intended labels (presumably distinct 'last'/'first' markers) must be
    confirmed upstream.
    """
    c = self.connection.cursor()
    # Current tail of the list (None when the list is empty).
    last_tweet = c.execute("SELECT tweet from tweetlist where label=").next()[0]
    # Insert the new tweet, linked back to the old tail.
    c.execute("INSERT INTO tweets(message, previous_tweet, next_tweet) VALUES (?,?,NULL)", (tweet, last_tweet))
    tweet_id = c.lastrowid
    c.execute("UPDATE tweetlist SET tweet=? WHERE label=", (tweet_id,))
    if last_tweet is None:
        # Empty list: the new tweet is also the head.
        c.execute("UPDATE tweetlist SET tweet=? WHERE label=", (tweet_id,))
    else:
        # Link the previous tail forward to the new tweet.
        c.execute("UPDATE tweets SET next_tweet = ? WHERE id= ? ", (tweet_id, last_tweet))
    self.connection.commit()
    c.close() | Add a tweet to the end of the list.
def verify(self, message, pubkey, rnum, snum):
    """Verify the signature (rnum, snum) over `message` for pubkey Y.

    With r = xcoord(R), verify that G*m + Y*r == R*s.  This is true
    because { Y = G*x, and R = G*k, s = (m + x*r)/k }:
        G*m + G*x*r = G*k*(m + x*r)/k  ->  G*(m + x*r) = G*(m + x*r)

    Several equivalent formulations exist:
        r == xcoord[ G*(m/s) + Y*(r/s) ]   <<< the standard way (used here)
        R * s == G*m + Y*r
        r == xcoord[ (G*m + Y*r)/s ]
    """
    msg_val = self.GFn.value(message)
    r_val = self.GFn.value(rnum)
    s_val = self.GFn.value(snum)
    candidate = self.G * (msg_val / s_val) + pubkey * (r_val / s_val)
    return candidate.x == r_val
def bound_bboxes(bboxes):
    """Find the minimal bbox that contains all given bboxes.

    NOTE(review): the original subscripts read ``l[x0]`` with the key
    literals stripped from this source; restored here as the string keys
    'x0'/'y0'/'x1'/'y1' -- confirm upstream.

    :param bboxes: iterable (re-iterable) of mappings with keys
        'x0', 'y0', 'x1', 'y1'.
    :returns: tuple ``(x0, y0, x1, y1)`` of the enclosing box.
    """
    group_x0 = min(box['x0'] for box in bboxes)
    group_y0 = min(box['y0'] for box in bboxes)
    group_x1 = max(box['x1'] for box in bboxes)
    group_y1 = max(box['y1'] for box in bboxes)
    return (group_x0, group_y0, group_x1, group_y1)
def SegmentMax(a, ids):
    """Segmented max op: reduce `a` over the index groups given by `ids`.

    Returns a 1-tuple, matching the multi-output op convention.
    """
    reducer = lambda group_idx: np.amax(a[group_idx], axis=0)
    return seg_map(reducer, a, ids),
def load_data(self, grid_method="gamma", num_samples=1000, condition_threshold=0.5, zero_inflate=False,
              percentile=None):
    """Read the track forecasts and convert them to grid point values by
    random sampling.

    Args:
        grid_method: "gamma" (sample a fitted gamma distribution) or one of
            "mean"/"median"/"samples" (use the discrete pdf bins).
        num_samples: number of samples drawn from the predicted pdf.
        condition_threshold: objects are not written to the grid when the
            condition-model probability falls below this threshold.
        zero_inflate: whether to multiply samples by Bernoulli draws based
            on the condition-model probability.
        percentile: if None, grid cells receive the mean of the samples,
            otherwise the given percentile (0-100).

    Returns:
        0 if tracks were sampled onto the grid, -1 if no tracks were found.

    NOTE(review): several ``step[]`` subscripts below are missing their
    key literals (stripped from this source); the intended key is
    presumably "properties" as in the gamma branch -- confirm upstream.
    """
    self.percentile = percentile
    if self.track_forecasts == {}:
        self.load_track_forecasts()
    if self.track_forecasts == {}:
        return -1
    if self.data is None:
        # Lazily allocate the (member, time, y, x) output grid.
        self.data = np.zeros((len(self.members), self.times.size, self.grid_shape[0], self.grid_shape[1]),
                             dtype=np.float32)
    else:
        self.data[:] = 0
    if grid_method in ["mean", "median", "samples"]:
        for m, member in enumerate(self.members):
            print("Sampling " + member)
            for track_forecast in self.track_forecasts[member]:
                times = track_forecast["properties"]["times"]
                for s, step in enumerate(track_forecast["features"]):
                    forecast_pdf = np.array(step[][self.variable + "_" +
                                            self.ensemble_name.replace(" ", "-")])
                    forecast_time = self.run_date + timedelta(hours=times[s])
                    t = np.where(self.times == forecast_time)[0][0]
                    mask = np.array(step[]["masks"], dtype=int)
                    i = np.array(step[]["i"], dtype=int)
                    i = i[mask == 1]
                    j = np.array(step[]["j"], dtype=int)
                    j = j[mask == 1]
                    if grid_method == "samples":
                        # Place sampled intensities so their ranking matches
                        # the ranking of the raw timestep intensities.
                        intensities = np.array(step["properties"]["timesteps"], dtype=float)[mask == 1]
                        rankings = np.argsort(intensities)
                        samples = np.random.choice(self.forecast_bins, size=intensities.size, replace=True,
                                                   p=forecast_pdf)
                        self.data[m, t, i[rankings], j[rankings]] = samples
                    else:
                        if grid_method == "mean":
                            forecast_value = np.sum(forecast_pdf * self.forecast_bins)
                        elif grid_method == "median":
                            forecast_cdf = np.cumsum(forecast_pdf)
                            forecast_value = self.forecast_bins[np.argmin(np.abs(forecast_cdf - 0.5))]
                        else:
                            forecast_value = 0
                        self.data[m, t, i, j] = forecast_value
    if grid_method in ["gamma"]:
        full_condition_name = "condition_" + self.condition_model_name.replace(" ", "-")
        dist_model_name = self.variable + "_" + self.ensemble_name.replace(" ", "-")
        for m, member in enumerate(self.members):
            for track_forecast in self.track_forecasts[member]:
                times = track_forecast["properties"]["times"]
                for s, step in enumerate(track_forecast["features"]):
                    forecast_params = step["properties"][dist_model_name]
                    if self.condition_model_name is not None:
                        condition = step["properties"][full_condition_name]
                    else:
                        condition = None
                    forecast_time = self.run_date + timedelta(hours=times[s])
                    if forecast_time in self.times:
                        t = np.where(self.times == forecast_time)[0][0]
                        mask = np.array(step["properties"]["masks"], dtype=int)
                        rankings = np.argsort(step["properties"]["timesteps"])[mask == 1]
                        i = np.array(step["properties"]["i"], dtype=int)[mask == 1][rankings]
                        j = np.array(step["properties"]["j"], dtype=int)[mask == 1][rankings]
                        if rankings.size > 0:
                            # Draw gamma samples per masked cell, sorted so
                            # larger samples land on higher-ranked cells.
                            raw_samples = np.sort(gamma.rvs(forecast_params[0], loc=forecast_params[1],
                                                            scale=forecast_params[2],
                                                            size=(num_samples, rankings.size)),
                                                  axis=1)
                            if zero_inflate:
                                raw_samples *= bernoulli.rvs(condition,
                                                             size=(num_samples, rankings.size))
                            if percentile is None:
                                samples = raw_samples.mean(axis=0)
                            else:
                                samples = np.percentile(raw_samples, percentile, axis=0)
                            if condition is None or condition >= condition_threshold:
                                self.data[m, t, i, j] = samples
    return 0 | Reads the track forecasts and converts them to grid point values based on random sampling.
Args:
grid_method: "gamma" by default
num_samples: Number of samples drawn from predicted pdf
condition_threshold: Objects are not written to the grid if condition model probability is below this
threshold.
zero_inflate: Whether to sample zeros from a Bernoulli sampler based on the condition model probability
percentile: If None, outputs the mean of the samples at each grid point, otherwise outputs the specified
percentile from 0 to 100.
Returns:
0 if tracks are successfully sampled on to grid. If no tracks are found, returns -1. |
def hex_2_rgb(self, color):
    """Convert a hex color ('#rgb' or '#rrggbb') to an (r, g, b) generator
    of floats in [0, 1].

    NOTE(review): the fallback literal on the invalid-color branch was
    truncated in this source; '#FFF' is assumed here -- confirm upstream.
    Also fixed: the short-form branch iterated over the leading '#',
    which would raise ValueError on the first yielded item.
    """
    if not self.RE_HEX.match(color):
        color = "#FFF"
    if len(color) == 7:
        # '#rrggbb': two hex digits per channel, scaled by 255.
        return (int(color[i : i + 2], 16) / 255 for i in [1, 3, 5])
    # '#rgb': one hex digit per channel, scaled by 15 (skip the '#').
    return (int(c, 16) / 15 for c in color[1:])
def apply_activation(
    books,
    x,
    activation,
    activation_args=(),
    activation_kwargs=None):
    """Returns activation(x, *activation_args, **activation_kwargs) and adds
    activation-specific scalar summaries via the bookkeeper.

    NOTE(review): the summary-name format strings preceding each
    ``% y.op.name`` were stripped from this source; the intended name
    templates must be confirmed upstream.
    """
    if activation is None:
        return x
    if activation_kwargs is None:
        activation_kwargs = {}
    y = activation(x, *activation_args, **activation_kwargs)
    if activation in (tf.nn.relu, functions.leaky_relu, functions.softplus):
        # Fraction of pre-activation values below zero (dead units).
        books.add_scalar_summary(
            tf.reduce_mean(tf.cast(tf.less(x, 0.0), tf.float32)),
            % y.op.name)
    elif activation is tf.nn.relu6:
        books.add_scalar_summary(
            tf.reduce_mean(tf.cast(tf.less(x, 0.0), tf.float32)),
            % y.op.name)
        # Fraction of values clipped at relu6's upper bound.
        books.add_scalar_summary(
            tf.reduce_mean(tf.cast(tf.greater(x, 6.0), tf.float32)),
            % y.op.name)
    elif activation in (functions.l2_normalize, tf.nn.l2_normalize,
                        functions.l1_normalize):
        # Mean L2 norm of the pre-activation rows.
        books.add_scalar_summary(
            tf.reduce_mean(tf.sqrt(tf.reduce_sum(
                tf.square(x), 1))), % y.op.name)
    return y | Returns activation(x, *activation_args, **activation_kwargs).
return y | Returns activation(x, *activation_args, **activation_kwargs).
This applies the given activation and adds useful summaries specific to the
activation.
Args:
books: The bookkeeper.
x: The tensor to apply activation to.
activation: An activation function.
activation_args: Optional additional arguments for the activation.
activation_kwargs: Optional keyword args for activation.
Returns:
A tensor with activation applied to x. |
def create_array(self, json):
    """Create :class:`.resources.Array` from JSON.

    NOTE(review): the ``json[]`` subscripts and the ``items_mapped`` keys
    were stripped from this source; the intended keys (presumably
    'items', 'total', 'skip', 'limit' and mapping categories) must be
    confirmed upstream.

    :param json: JSON dict.
    :return: Array instance.
    """
    result = Array(json[])
    result.total = json[]
    result.skip = json[]
    result.limit = json[]
    result.items = []
    result.items_mapped = {: {}, : {}}
    # Populate plain items first, then resolve linked includes.
    self.process_array_items(result, json)
    self.process_array_includes(result, json)
    return result | Create :class:`.resources.Array` from JSON.
:param json: JSON dict.
:return: Array instance. |
def _get_erase_command(self, drive, pattern):
cmd_args = []
cmd_args.append("pd %s" % drive)
cmd_args.extend([, , pattern])
if pattern != :
cmd_args.append()
cmd_args.append()
return cmd_args | Return the command arguments based on the pattern.
Erase command examples:
1) Sanitize: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=overwrite unrestricted=off forced"
2) Zeros: "ssacli ctrl slot=0 pd 1I:1:1 modify erase
erasepattern=zero forced"
:param drive: A string with comma separated list of drives.
:param pattern: A string which defines the type of erase.
:returns: A list of ssacli command arguments. |
def _GetIdentifierFromPath(self, parser_mediator):
file_entry = parser_mediator.GetFileEntry()
path = file_entry.path_spec.location
file_system = file_entry.GetFileSystem()
path_segments = file_system.SplitPath(path)
return path_segments[-2] | Extracts a container or a graph ID from a JSON file's path.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
Returns:
str: container or graph identifier. |
def exists_locked(filepath: str) -> Tuple[bool, bool]:
    """Checks if a file is locked by opening it in append mode.

    (If no exception is thrown in that situation, then the file is not
    locked.)

    NOTE(review): the mode literal in ``open()`` was stripped from this
    source; restored as ``'a'`` per the append-mode description above.

    Args:
        filepath: file to check

    Returns:
        tuple: ``(exists, locked)``; ``locked`` is ``None`` when the file
        does not exist.

    See https://www.calazan.com/how-to-check-if-a-file-is-locked-in-python/.
    """
    exists = False
    locked = None
    file_object = None
    if os.path.exists(filepath):
        exists = True
        locked = True  # assume locked until the open succeeds
        try:
            buffer_size = 8  # tiny buffer; we never actually write
            file_object = open(filepath, 'a', buffer_size)
            if file_object:
                locked = False
        except IOError:
            pass  # open failed: keep treating the file as locked
        finally:
            if file_object:
                file_object.close()
    return exists, locked
def __calculate_dataset_difference(self, amount_clusters):
    """!
    @brief Calculate distance from each point to each cluster center.

    @return (numpy.ndarray) Matrix of shape (amount_clusters, n_points).
    """
    differences = numpy.zeros((amount_clusters, len(self.__pointer_data)))
    for center_index in range(amount_clusters):
        center = self.__centers[center_index]
        if self.__metric.get_type() != type_metric.USER_DEFINED:
            # Built-in metrics accept the whole dataset at once.
            differences[center_index] = self.__metric(self.__pointer_data, center)
        else:
            # User-defined metrics are applied point by point.
            differences[center_index] = [self.__metric(point, center)
                                         for point in self.__pointer_data]
    return differences
def analysis_provenance_details_simplified_extractor(
        impact_report, component_metadata):
    """Extract a simplified version of the provenance details of layers,
    for display in the main report.

    NOTE(review): every dict-key literal in this block was stripped from
    this source (``resolve_from_dictionary`` paths, provenance keys,
    context keys); the intended keys must be confirmed upstream.

    :param impact_report: the impact report that acts as a proxy to fetch
        all the data that extractor needed
    :type impact_report: safe.report.impact_report.ImpactReport

    :param component_metadata: the component metadata. Used to obtain
        information about the component we want to render
    :type component_metadata: safe.report.report_metadata.
        ReportComponentsMetadata

    :return: context for rendering phase
    :rtype: dict

    .. versionadded:: 4.0
    """
    context = {}
    extra_args = component_metadata.extra_args
    default_source = resolve_from_dictionary(
        extra_args, [, ])
    default_reference = resolve_from_dictionary(
        extra_args, [, ])
    provenance_format_args = resolve_from_dictionary(
        extra_args, )
    # Hazard layer provenance (password scrubbed from the source URI).
    hazard_keywords = impact_report.impact_function.provenance[
        ]
    header = resolve_from_dictionary(
        provenance_format_args, )
    provenance_format = resolve_from_dictionary(
        provenance_format_args, )
    hazard_provenance = {
        : header,
        : provenance_format.format(
            layer_name=hazard_keywords.get(),
            source=QgsDataSourceUri.removePassword(
                decode_full_layer_uri(hazard_keywords.get())[0]
                or default_source))
    }
    # Exposure layer provenance.
    exposure_keywords = impact_report.impact_function.provenance[
        ]
    header = resolve_from_dictionary(
        provenance_format_args, )
    provenance_format = resolve_from_dictionary(
        provenance_format_args, )
    exposure_provenance = {
        : header,
        : provenance_format.format(
            layer_name=exposure_keywords.get(),
            source=QgsDataSourceUri.removePassword(
                decode_full_layer_uri(exposure_keywords.get())[0]
                or default_source))
    }
    # Aggregation layer provenance (optional; fall back to a fixed string).
    aggregation_keywords = impact_report.impact_function.provenance[
        ]
    header = resolve_from_dictionary(
        provenance_format_args, )
    provenance_format = resolve_from_dictionary(
        provenance_format_args, )
    if aggregation_keywords:
        provenance_string = provenance_format.format(
            layer_name=aggregation_keywords.get(),
            source=QgsDataSourceUri.removePassword(
                decode_full_layer_uri(aggregation_keywords.get())[0]
                or default_source))
    else:
        aggregation_not_used = resolve_from_dictionary(
            extra_args, [, ])
        provenance_string = aggregation_not_used
    aggregation_provenance = {
        : header,
        : provenance_string
    }
    # Impact function provenance.
    impact_function_name = impact_report.impact_function.name
    header = resolve_from_dictionary(
        provenance_format_args, )
    provenance_format = resolve_from_dictionary(
        provenance_format_args, )
    impact_function_provenance = {
        : header,
        : provenance_format.format(
            impact_function_name=impact_function_name,
            reference=default_reference)
    }
    # Assemble the ordered provenance sections into the rendering context.
    provenance_detail = OrderedDict()
    provenance_detail[] = hazard_provenance
    provenance_detail[] = exposure_provenance
    provenance_detail[] = aggregation_provenance
    provenance_detail[] = impact_function_provenance
    analysis_details_header = resolve_from_dictionary(
        extra_args, [, ])
    context[] = component_metadata.key
    context.update({
        : analysis_details_header,
        : provenance_detail
    })
    return context | Extracting simplified version of provenance details of layers.
This extractor will produce provenance details which will be displayed in
the main report.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0 |
def decode_response(content: bytes) -> set:
    """Parse an adb 'devices' response into the set of connected device ids.

    NOTE(review): the membership tests and the ``split()`` separator
    literals below were stripped from this source; the intended values
    (device-status markers and line/field separators) must be confirmed
    upstream.
    """
    # Skip the 4-byte length header prepended by the adb server.
    content = content[4:].decode(config.ENCODING)
    if not in content and not in content:
        return set()
    connected_devices = set()
    device_list = [i for i in content.split() if i]
    for each_device in device_list:
        device_id, device_status = each_device.split()
        if device_status == :
            connected_devices.add(device_id)
    return connected_devices | adb response text -> device set
def get_files(client, bucket, prefix=''):
    """Lists files/objects on a bucket.

    NOTE(review): the default for ``prefix`` was stripped from this
    source; restored as the empty string (list everything).

    :param client: storage client exposing ``get_bucket``.
    :param bucket: bucket name.
    :param prefix: only list blobs whose names start with this prefix.
    :returns: list of blobs.
    """
    bucket = client.get_bucket(bucket)
    files = list(bucket.list_blobs(prefix=prefix))
    return files
def build_config(config_file=get_system_config_directory()):
    """Construct the config object from necessary elements.

    Scans the system for known applications and records any executable
    not already present in the config's EXECUTABLES section.

    NOTE(review): the default argument calls
    ``get_system_config_directory()`` once at import time -- kept as-is
    to preserve the public interface.

    :param config_file: path to the configuration file.
    :returns: populated ``Config`` instance.
    """
    config = Config(config_file, allow_no_value=True)
    application_versions = find_applications_on_system()
    # items() works on both Python 2 and 3, unlike the Py2-only iteritems().
    for name, version in application_versions.items():
        if not config.has_option(Config.EXECUTABLES, name):
            config.set(Config.EXECUTABLES, name, version)
    return config
def commit(self, offset=None, limit=None, dryrun=False):
    """Start the rsync download.

    :param offset: optional slice start forwarded to the stream.
    :param limit: optional slice size forwarded to the stream.
    :param dryrun: accepted for interface compatibility; not used here.
    """
    stream = self.stream
    stream.command = "rsync -avRK --files-from={path} {source} {destination}"
    stream.append_tasks_to_streamlets(offset=offset, limit=limit)
    stream.commit_streamlets()
    stream.run_streamlets()
    stream.reset_streamlet()
def _record_sort_by_indicators(record):
    """Sort the fields inside the record by indicators (in place)."""
    for tag in record:
        record[tag] = _fields_sort_by_indicators(record[tag])
def main():
    """Process CLI arguments and call appropriate functions."""
    try:
        args = docopt.docopt(__doc__, version=__about__.__version__)
    except docopt.DocoptExit:
        if len(sys.argv) > 1:
            print(f"{Fore.RED}Invalid command syntax, "
                  f"check help:{Fore.RESET}\n")
            print(__doc__)
        sys.exit(1)
    # When no output selector is given, print every representation.
    show_everything = not (
        args["--int-width"] or args["--int-height"] or args["--decimal"])
    width = float(args["WIDTH"])
    height = float(args["HEIGHT"])
    int_pair = as_int(width, height)
    ratio = as_float(width, height)
    if args["--ndigits"]:
        ratio = round(ratio, int(args["--ndigits"]))
    pieces = []
    if args["--int-width"] or show_everything:
        pieces.append(f"{Fore.BLUE}{int_pair[0]!s}")
    if args["--int-height"] or show_everything:
        pieces.append(f"{Fore.BLUE}{int_pair[1]!s}")
    if args["--decimal"] or show_everything:
        pieces.append(f"{Fore.MAGENTA}{ratio!s}")
    print(" ".join(pieces))
def findSector(self,x,y):
    """Find the quadrilateral "sector" for each (x, y) point in the input.

    Only called as a subroutine of _evaluate().  Starting from a central
    guess, each point's sector indices are walked across the curvilinear
    grid until no move is required (or a loop cap is reached).

    Parameters
    ----------
    x : np.array
        Values whose sector should be found.
    y : np.array
        Values whose sector should be found.  Should be same size as x.

    Returns
    -------
    x_pos : np.array
        Sector x-coordinates for each point of the input, of the same size.
    y_pos : np.array
        Sector y-coordinates for each point of the input, of the same size.
    """
    m = x.size
    # Start every point's search at the center of the grid.
    x_pos_guess = (np.ones(m)*self.x_n/2).astype(int)
    y_pos_guess = (np.ones(m)*self.y_n/2).astype(int)
    # 1 if (x_check, y_check) lies on the outside of the directed edge
    # from (x_bound_1, y_bound_1) to (x_bound_2, y_bound_2), else 0.
    violationCheck = lambda x_check,y_check,x_bound_1,y_bound_1,x_bound_2,y_bound_2 : (
        (y_bound_2 - y_bound_1)*x_check - (x_bound_2 - x_bound_1)*y_check > x_bound_1*y_bound_2 - y_bound_1*x_bound_2 ) + 0
    these = np.ones(m,dtype=bool)
    max_loops = self.x_n + self.y_n
    loops = 0
    while np.any(these) and loops < max_loops:
        x_temp = x[these]
        y_temp = y[these]
        # Corner coordinates (A,B,C,D) of each point's candidate sector.
        xA = self.x_values[x_pos_guess[these],y_pos_guess[these]]
        xB = self.x_values[x_pos_guess[these]+1,y_pos_guess[these]]
        xC = self.x_values[x_pos_guess[these],y_pos_guess[these]+1]
        xD = self.x_values[x_pos_guess[these]+1,y_pos_guess[these]+1]
        yA = self.y_values[x_pos_guess[these],y_pos_guess[these]]
        yB = self.y_values[x_pos_guess[these]+1,y_pos_guess[these]]
        yC = self.y_values[x_pos_guess[these],y_pos_guess[these]+1]
        yD = self.y_values[x_pos_guess[these]+1,y_pos_guess[these]+1]
        # Coarse moves: point lies wholly outside the sector's bounding box.
        move_down = (y_temp < np.minimum(yA,yB)) + 0
        move_right = (x_temp > np.maximum(xB,xD)) + 0
        move_up = (y_temp > np.maximum(yC,yD)) + 0
        move_left = (x_temp < np.minimum(xA,xC)) + 0
        c = (move_down + move_right + move_up + move_left) == 0
        # Fine moves: directed-edge side tests for the remaining points.
        move_down[c] = violationCheck(x_temp[c],y_temp[c],xA[c],yA[c],xB[c],yB[c])
        move_right[c] = violationCheck(x_temp[c],y_temp[c],xB[c],yB[c],xD[c],yD[c])
        move_up[c] = violationCheck(x_temp[c],y_temp[c],xD[c],yD[c],xC[c],yC[c])
        move_left[c] = violationCheck(x_temp[c],y_temp[c],xC[c],yC[c],xA[c],yA[c])
        # Step the guesses, clamped to valid sector index ranges.
        x_pos_next = x_pos_guess[these] - move_left + move_right
        x_pos_next[x_pos_next < 0] = 0
        x_pos_next[x_pos_next > (self.x_n-2)] = self.x_n-2
        y_pos_next = y_pos_guess[these] - move_down + move_up
        y_pos_next[y_pos_next < 0] = 0
        y_pos_next[y_pos_next > (self.y_n-2)] = self.y_n-2
        # Points that did not move have converged; stop tracking them.
        no_move = np.array(np.logical_and(x_pos_guess[these] == x_pos_next, y_pos_guess[these] == y_pos_next))
        x_pos_guess[these] = x_pos_next
        y_pos_guess[these] = y_pos_next
        temp = these.nonzero()
        these[temp[0][no_move]] = False
        loops += 1
    x_pos = x_pos_guess
    y_pos = y_pos_guess
    return x_pos, y_pos | Finds the quadrilateral "sector" for each (x,y) point in the input.
Only called as a subroutine of _evaluate().
Parameters
----------
x : np.array
Values whose sector should be found.
y : np.array
Values whose sector should be found. Should be same size as x.
Returns
-------
x_pos : np.array
Sector x-coordinates for each point of the input, of the same size.
y_pos : np.array
Sector y-coordinates for each point of the input, of the same size. |
def fo_pct_by_zone(self):
    """Get the by-team face-off win % by zone.

    NOTE(review): the dict keys in the subscripts, the excluded zone name
    and the final team list were stripped from this source; the intended
    keys (zone names, won/total-style counters, 'home'/'away') must be
    confirmed upstream.

    :returns: dict ``{ 'home/away': { 'off/def/neut': % } }``
    """
    bz = self.by_zone
    return {
        t: {
            # Win ratio per zone; 0.0 when no face-offs were taken there.
            z: bz[t][z][]/(1.0*bz[t][z][]) if bz[t][z][] else 0.0
            for z in self.__zones
            if z !=
        }
        for t in [ , ]
    } | Get the by team face-off win % by zone. Format is
:returns: dict ``{ 'home/away': { 'off/def/neut': % } }`` |
def sub_path(self, path):
    """Return the redirect target for `path`.

    If this redirect is a regular expression, return a rewritten version
    of `path` (substituting `old_path` with `new_path`); otherwise return
    `new_path` unchanged.
    """
    if self.regular_expression:
        return re.sub(self.old_path, self.new_path, path)
    return self.new_path
def check_terminate(self):
    """Returns a Bool of whether to terminate.

    Checks whether a satisfactory minimum has been found or whether
    too many iterations have occurred.
    """
    if not self._has_run:
        # Nothing has been attempted yet, so never terminate early.
        return False
    terminate = self.check_completion()
    terminate |= (self._num_iter >= self.max_iter)
    return terminate
def add(self, dist):
    """Add `dist` if we ``can_add()`` it and it isn't already added."""
    if not (self.can_add(dist) and dist.has_version()):
        return
    dists = self._distmap.setdefault(dist.key, [])
    if dist not in dists:
        dists.append(dist)
        # Keep any cached listing for this key in sorted order.
        if dist.key in self._cache:
            _sort_dists(self._cache[dist.key])
def override_temp(replacement):
    """Monkey-patch tempfile.tempdir with replacement, ensuring it exists."""
    if not os.path.isdir(replacement):
        os.makedirs(replacement)
    previous = tempfile.tempdir
    tempfile.tempdir = replacement
    try:
        yield
    finally:
        # Always restore the original tempdir, even on error.
        tempfile.tempdir = previous
def get_tracks(self, catalog, cache=True):
    """Get the tracks for a song given a catalog.

    NOTE(review): the cache keys and kwargs literals below were stripped
    from this source, and the body appears truncated (the response is
    never stored or returned here); confirm against upstream.

    :param catalog: a string naming the catalog whose tracks to retrieve.
    :returns: a list of Track dicts.
    """
    # Only hit the API when caching is off or the catalog is not cached.
    if not (cache and ( in self.cache) and (catalog in [td[] for td in self.cache[]])):
        kwargs = {
            :[, % catalog],
        }
        response = self.get_attribute(, **kwargs)
        if not in self.cache:
            self.cache[] = []
 | Get the tracks for a song given a catalog.
| Get the tracks for a song given a catalog.
Args:
catalog (str): a string representing the catalog whose track you want to retrieve.
Returns:
A list of Track dicts.
Example:
>>> s = song.Song('SOWDASQ12A6310F24F')
>>> s.get_tracks('7digital')[0]
{u'catalog': u'7digital',
u'foreign_id': u'7digital:track:8445818',
u'id': u'TRJGNNY12903CC625C',
u'preview_url': u'http://previews.7digital.com/clips/34/8445818.clip.mp3',
u'release_image': u'http://cdn.7static.com/static/img/sleeveart/00/007/628/0000762838_200.jpg'}
>>> |
def collect(self, name, arr):
    """Callback function for collecting layer output NDArrays."""
    name = py_str(name)
    # Skip layers filtered out by the optional include predicate.
    if self.include_layer is not None and not self.include_layer(name):
        return
    handle = ctypes.cast(arr, NDArrayHandle)
    copied = NDArray(handle, writable=False).copyto(cpu())
    if self.logger is not None:
        self.logger.info("Collecting layer %s output of shape %s" % (name, copied.shape))
    self.nd_dict.setdefault(name, []).append(copied)
def mergecn(args):
    """%prog mergecn FACE.csv

    Compile matrix of GC-corrected copy numbers. Place a bunch of folders in
    csv file. Each folder will be scanned, one chromosomes after another.
    """
    p = OptionParser(mergecn.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    csvfile, = args
    samples = [x.replace("-cn", "").strip().strip("/") for x in open(csvfile)]
    betadir = "beta"
    mkdir(betadir)
    for seqid in allsomes:
        names = [op.join(s + "-cn", "{}.{}.cn".
                 format(op.basename(s), seqid)) for s in samples]
        arrays = [np.fromfile(name, dtype=np.float) for name in names]
        shapes = [x.shape[0] for x in arrays]
        med_shape = np.median(shapes)
        # Drop samples whose vector length deviates from the median.
        arrays = [x for x in arrays if x.shape[0] == med_shape]
        ploidy = 2 if seqid not in ("chrY", "chrM") else 1
        if seqid in sexsomes:
            # Cluster samples by median nonzero coverage into two karyotype
            # groups and keep only the higher-coverage group.
            chr_med = [np.median([x for x in a if x > 0]) for a in arrays]
            chr_med = np.array(chr_med)
            idx = get_kmeans(chr_med, k=2)
            zero_med = np.median(chr_med[idx == 0])
            one_med = np.median(chr_med[idx == 1])
            logging.debug("K-means with {} c0:{} c1:{}"
                          .format(seqid, zero_med, one_med))
            higher_idx = 1 if one_med > zero_med else 0
            arrays = np.array(arrays)[idx == higher_idx]
            arrays = [[x] for x in arrays]
        ar = np.concatenate(arrays)
        print(seqid, ar.shape)
        rows, columns = ar.shape
        # Per-position median (beta) and coefficient of variation (std).
        beta = []
        std = []
        for j in xrange(columns):
            a = ar[:, j]
            beta.append(np.median(a))
            std.append(np.std(a) / np.mean(a))
        beta = np.array(beta) / ploidy
        betafile = op.join(betadir, "{}.beta".format(seqid))
        beta.tofile(betafile)
        stdfile = op.join(betadir, "{}.std".format(seqid))
        std = np.array(std)
        std.tofile(stdfile)
        logging.debug("Written to `{}`".format(betafile))
        ar.tofile("{}.bin".format(seqid)) | %prog mergecn FACE.csv
Compile matrix of GC-corrected copy numbers. Place a bunch of folders in
csv file. Each folder will be scanned, one chromosomes after another. |
def add_event(self, event):
    """Adds an IEvent event to this command set.

    :param event: an event instance to be added
    """
    self._events.append(event)
    # BUG FIX(review): the original indexed by the bound method object
    # (``event.get_name``) rather than the event's name string; call it
    # so the lookup table is keyed by name -- confirm against upstream.
    self._events_by_name[event.get_name()] = event
def declare_func(self, id_, lineno, type_=None):
    """Declares a function in the current scope.

    Checks whether the id exists (error if already declared as another
    class of symbol) and creates/updates the symbol-table entry.

    NOTE(review): several string literals in this block were stripped
    from this source (error-message templates, the class literal passed
    to ``check_class``, the token literal and the mangling template).
    Also ``entry.forwared`` below looks like a typo for
    ``entry.forwarded`` -- confirm upstream before changing.
    """
    if not self.check_class(id_, , lineno):
        entry = self.get_entry(id_)
        an = if entry.class_.lower()[0] in else
        syntax_error(lineno, " already declared as %s %s at %i" % (id_, an, entry.class_, entry.lineno))
        return None
    entry = self.get_entry(id_)
    if entry is not None:
        if entry.declared and not entry.forwarded:
            # Redefinition of an already-declared, non-forwarded function.
            syntax_error(lineno, "Duplicate function name , previously defined at %i" % (id_, entry.lineno))
            return None
        if entry.class_ != CLASS.unknown and entry.callable is False:
            syntax_error_not_array_nor_func(lineno, id_)
            return None
        # A deprecated type suffix must agree with the entry's type.
        if id_[-1] in DEPRECATED_SUFFIXES and entry.type_ != self.basic_types[SUFFIX_TYPE[id_[-1]]]:
            syntax_error_func_type_mismatch(lineno, entry)
        if entry.token == :
            symbols.VAR.to_function(entry, lineno=lineno)
            entry.mangled = % (self.mangle, entry.name)
    else:
        entry = self.declare(id_, lineno, symbols.FUNCTION(id_, lineno, type_=type_))
    if entry.forwarded:
        entry.forwared = False
        old_type = entry.type_
        if entry.type_ is not None:
            if entry.type_ != old_type:
                syntax_error_func_type_mismatch(lineno, entry)
        else:
            entry.type_ = old_type
    else:
        entry.params_size = 0
        entry.locals_size = 0
    return entry | Declares a function in the current scope.
Checks whether the id exist or not (error if exists).
And creates the entry at the symbol table. |
def with_tz(request):
    """Get the time with TZ enabled.

    NOTE(review): the template source string passed to ``Template()`` was
    stripped from this source (``dt`` is presumably consumed by it);
    confirm against upstream.
    """
    dt = datetime.now()
    t = Template()
    c = RequestContext(request)
    response = t.render(c)
    return HttpResponse(response) | Get the time with TZ enabled
def place_new_order(self, stock, price, qty, direction, order_type):
    """Place an order for a stock.

    https://starfighter.readme.io/docs/place-new-order

    NOTE(review): the URL-fragment literal was stripped from this source;
    restored from the Stockfighter API path documented above -- confirm
    upstream.

    :returns: decoded JSON response from the API.
    """
    url_fragment = 'venues/{venue}/stocks/{stock}/orders'.format(
        venue=self.venue,
        stock=stock,
    )
    data = {
        "stock": stock,
        "price": price,
        "venue": self.venue,
        "account": self.account,
        "qty": qty,
        "direction": direction,
        "orderType": order_type,
    }
    url = urljoin(self.base_url, url_fragment)
    resp = self.session.post(url, json=data)
    return resp.json()
def pretty(price, currency, *, abbrev=True, trim=True):
    """Return the price formatted with its currency symbol, e.g.
    pretty(100, 'USD') -> '$100'.

    NOTE(review): several string literals in this block were stripped
    from this source (the separator space, the format templates and the
    ``rstrip`` arguments); confirm against upstream.

    abbrev:
        True: print value + symbol (placed before or after the value).
        False: print value + currency code (code placed after the value).
    trim:
        True: trim the float to the currency's maximum decimal digits.
        False: keep the number of decimals of the input.
    """
    currency = validate_currency(currency)
    price = validate_price(price)
    space = if nospace(currency) else
    fmtstr =
    if trim:
        fmtstr = .format(price, x=decimals(currency)).rstrip().rstrip()
    else:
        fmtstr = .format(price).rstrip().rstrip()
    if abbrev:
        # Symbol position depends on the currency convention.
        if issuffix(currency):
            return fmtstr + space + symbol(currency)
        return symbol(currency, native=False) + space + fmtstr
    return fmtstr + + code(currency) | return format price with symbol. Example format(100, 'USD') return '$100'
pretty(price, currency, abbrev=True, trim=False)
abbrev:
True: print value + symbol. Symbol can either be placed before or after value
False: print value + currency code. currency code is placed behind value
trim:
True: trim float value to the maximum digit numbers of that currency
False: keep number of decimal in initial argument |
def _recv_ack(self, method_frame):
if self._ack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple = method_frame.args.read_bit()
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
self._ack_listener(self._last_ack_id)
else:
self._last_ack_id = delivery_tag
self._ack_listener(self._last_ack_id) | Receive an ack from the broker. |
def bbox(self):
    """BBox: the (left, top, right, bottom) tuple of this object."""
    return (self.left, self.top, self.right, self.bottom)
def inverse(d):
    """Reverse the k:v pairs: map each value to the list of keys having it."""
    reversed_map = {}
    for key, value in unwrap(d).items():
        reversed_map.setdefault(value, []).append(key)
    return reversed_map
def del_object_from_parent(self):
    """Delete object from parent object (no-op when there is no parent)."""
    if not self.parent:
        return
    self.parent.objects.pop(self.ref)
def parse_post(self, response):
    """Parse a Zhihu Daily article page (fetched from the URL generated by
    ``parse``) and fill in the post item: title, content, authors and
    images.

    NOTE(review): many dict keys / literals in this block were stripped
    from this source (``content.get(...)`` keys, ``post[...]`` keys, the
    encoding, the CSS selector and replacement strings); confirm against
    upstream.

    :param Response response: response object passed in by Scrapy.
    """
    content = json.loads(response.body.decode(), encoding=)
    post = response.meta[]
    post[] = content.get(, )
    if not all([post[]]):
        raise ValueError()
    post[] = html.escape(content.get(, ))
    if not all([post[]]):
        raise ValueError(.format(post.get()))
    if content.get() == 1:
        # External-link post: nothing more to parse here.
        self.logger.warn(.format(post[]))
        return post
    soup = BeautifulSoup(content.get(, ), )
    author_obj = soup.select()
    self.logger.debug(author_obj)
    if author_obj:
        # Collect, normalize and de-duplicate author names.
        author_list = []
        for author in author_obj:
            author_list.append(
                author.string.rstrip().replace(, ))
        author_list = list(set(author_list))
        post[] = html.escape(.join(author_list))
    post[] = str(soup.div)
    # Fall back to the first back-image when no dedicated image is given.
    image_back = content.get(, [None])[0]
    if image_back:
        post[][] = \
            content.get(, image_back)
    self.logger.debug(post) | 根据 :meth:`.ZhihuDailySpider.parse` 中生成的具体文章地址,获取到文章内容,
并对其进行格式化处理,结果填充到对象属性 ``item_list`` 中
:param Response response: 由 ``Scrapy`` 调用并传入的请求响应对象 |
def _call_and_store(getter_func, data, field_name, error_store, index=None):
try:
value = getter_func(data)
except ValidationError as err:
error_store.store_error(err.messages, field_name, index=index)
return err.valid_data or missing
return value | Call ``getter_func`` with ``data`` as its argument, and store any `ValidationErrors`.
:param callable getter_func: Function for getting the serialized/deserialized
value from ``data``.
:param data: The data passed to ``getter_func``.
:param str field_name: Field name.
:param int index: Index of the item being validated, if validating a collection,
otherwise `None`. |
def fetchall(self):
    """Fetch all rows as lists of values."""
    return [row.values() for row in self.query.result()]
async def start(request):
    """Begin the factory-calibration session manager if none is in
    progress, or if the "force" key is specified in the request body:
    { "force": true }

    NOTE(review): several dict-key literals in this block were stripped
    from this source (``body.get(...)`` keys and response payload keys);
    confirm against upstream.

    :return: JSON response with the current session id token or an error
        message (201 created / 403 no pipette / 409 already in progress).
    """
    global session
    try:
        body = await request.json()
    except json.decoder.JSONDecodeError:
        log.debug("No body in {}".format(request))
        body = {}
    if not session or body.get():
        hardware = hw_from_req(request)
        if body.get() and session:
            # Forcing over a live session: release the old one first.
            await release(data={})
        session = SessionManager(hardware)
        res = init_pipette()
        if res:
            status = 201
            data = {: session.id, : res}
        else:
            # No pipette could be initialized; abandon the session.
            session = None
            status = 403
            data = {: }
    else:
        data = {:
                }
        status = 409
    return web.json_response(data, status=status) | Begins the session manager for factory calibration, if a session is not
already in progress, or if the "force" key is specified in the request. To
force, use the following body:
{
"force": true
}
:return: The current session ID token or an error message |
def parse(self, data):
    """Returns a list of path template segments parsed from data.

    NOTE(review): the wildcard literal compared against
    ``segment.literal`` and the ``ValidationException`` message were
    stripped from this source; the check guards against more than one
    path wildcard -- confirm upstream.

    Args:
        data: A path template string.
    Returns:
        A list of _Segment.
    """
    self.binding_var_count = 0
    self.segment_count = 0
    segments = self.parser.parse(data)
    path_wildcard = False
    for segment in segments:
        if segment.kind == _TERMINAL and segment.literal == :
            if path_wildcard:
                raise ValidationException(
                    )
            path_wildcard = True
    return segments | Returns a list of path template segments parsed from data.
Args:
data: A path template string.
Returns:
A list of _Segment. |
def get_client(client_id):
    """Load the client.

    Needed for grant_type client_credentials.

    Add support for OAuth client_credentials access type, with user
    inactivation support.

    :param client_id: The client ID.
    :returns: The client instance or ``None``.
    """
    client = Client.query.get(client_id)
    if client and client.user.active:
        return client
    # Implicitly returns None for unknown clients or inactive users.
def getMechanismName(self):
    """Return the authentication mechanism name (or None if unset).

    NOTE(review): the attribute name passed to ``getattr`` on the
    client-side branch was stripped from this source; confirm upstream.
    """
    if self._server_side:
        mech = self._authenticator.current_mech
        return mech.getMechanismName() if mech else None
    else:
        return getattr(self._authenticator, , None) | Return the authentication mechanism name.
def write_object_array(f, data, options):
    """Write the elements of an object array recursively into the HDF5
    references group and return an array of h5py References to them (see
    the paired docstring for the full contract).

    NOTE(review): this block references ``grp2`` without defining it --
    the lookup of ``options.group_for_references`` appears to have been
    stripped from this source, along with several name/attribute string
    literals and the dtype literal; confirm against upstream.
    """
    ref_dtype = h5py.special_dtype(ref=h5py.Reference)
    data_refs = np.zeros(shape=data.shape, dtype=)
    try:
        # Validate (or rebuild) the canonical-empty dataset used as the
        # target for elements that cannot be written.
        dset_a = grp2[]
        if dset_a.shape != (2,) \
                or not dset_a.dtype.name.startswith() \
                or np.any(dset_a[...] != np.uint64([0, 0])) \
                or get_attribute_string(dset_a, ) != \
                \
                or get_attribute(dset_a, ) != 1:
            del grp2[]
            dset_a = grp2.create_dataset(, data=np.uint64([0, 0]))
            set_attribute_string(dset_a, ,
                                 )
            set_attribute(dset_a, ,
                          np.uint8(1))
    except:
        dset_a = grp2.create_dataset(, data=np.uint64([0, 0]))
        set_attribute_string(dset_a, ,
                             )
        set_attribute(dset_a, ,
                      np.uint8(1))
    grp2name = grp2.name
    for index, x in np.ndenumerate(data):
        # Write each element under a fresh unused name and store its ref.
        name_for_ref = next_unused_name_in_group(grp2, 16)
        write_data(f, grp2, name_for_ref, x, None, options)
        try:
            dset = grp2[name_for_ref]
            data_refs[index] = dset.ref
            if options.matlab_compatible:
                set_attribute_string(dset,
                                     , grp2name)
            else:
                del_attribute(dset, )
        except:
            # Fall back to pointing at the canonical empty.
            data_refs[index] = dset_a.ref
    return data_refs.astype(ref_dtype).copy() | Writes an array of objects recursively.
Writes the elements of the given object array recursively in the
HDF5 Group ``options.group_for_references`` and returns an
``h5py.Reference`` array to all the elements.
Parameters
----------
f : h5py.File
The HDF5 file handle that is open.
data : numpy.ndarray of objects
Numpy object array to write the elements of.
options : hdf5storage.core.Options
hdf5storage options object.
Returns
-------
obj_array : numpy.ndarray of h5py.Reference
A reference array pointing to all the elements written to the
HDF5 file. For those that couldn't be written, the respective
element points to the canonical empty.
Raises
------
TypeNotMatlabCompatibleError
If writing a type not compatible with MATLAB and
`options.action_for_matlab_incompatible` is set to ``'error'``.
See Also
--------
read_object_array
hdf5storage.Options.group_for_references
h5py.Reference |
def train(input_dir, batch_size, max_steps, output_dir, checkpoint=None, cloud=None):
    """Blocking version of train_async(). The only difference is that it
    blocks the caller until the job finishes, and it does not have a
    return value.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        training_job = train_async(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud)
        # Block until completion, then report the terminal state.
        training_job.wait()
        print(training_job.state)
def validate_arg_values(ast, bo):
    """Recursively validate NSArg and StrArg values of *ast*, appending
    ("WARNING", ...) tuples to bo.validation_messages; returns *bo*.

    NOTE(review): this block was damaged in extraction -- several string
    literals were stripped (bare ``f`` where f-strings stood) and the API
    request lines appear to be missing entirely: ``result``, ``r`` and
    ``request_url`` are referenced but never assigned here. Restore from
    the upstream source before relying on this code.
    """
    # Without a configured API endpoint nothing can be validated.
    if not bo.api_url:
        log.info("No API endpoint defined")
        return bo
    log.debug(f"AST: {ast}")
    # --- namespace-qualified arguments ------------------------------------
    if isinstance(ast, NSArg):
        term_id = "{}:{}".format(ast.namespace, ast.value)
        value_types = ast.value_types
        log.debug(f"Value types: {value_types} AST value: {ast.value}")
        if ast.namespace == "DEFAULT":
            # DEFAULT terms must match a name or abbreviation declared in
            # the BEL specification for one of the allowed value types.
            for value_type in value_types:
                default_namespace = [
                    ns["name"] for ns in bo.spec["namespaces"][value_type]["info"]
                ] + [
                    ns["abbreviation"]
                    for ns in bo.spec["namespaces"][value_type]["info"]
                ]
                if ast.value in default_namespace:
                    log.debug("Default namespace valid term: {}".format(term_id))
                    break
            else:
                # for-else: no value type matched. NOTE(review): the
                # f-string message literal was stripped here.
                log.debug(
                    f
                )
            # NOTE(review): ``result`` below comes from the (missing) API
            # response handling; entity types from the statement must
            # overlap the term's entity types.
            if (
                len(
                    set(ast.value_types).intersection(
                        result.get("entity_types", [])
                    )
                )
                == 0
            ):
                log.debug(
                    "Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}".format(
                        term_id, ast.value_types, result.get("entity_types", [])
                    )
                )
                bo.validation_messages.append(
                    (
                        "WARNING",
                        "Invalid Term - statement term {} allowable entity types: {} do not match API term entity types: {}".format(
                            term_id, ast.value_types, result.get("entity_types", [])
                        ),
                    )
                )
            # Obsolete term ids still resolve but should be flagged.
            if term_id in result.get("obsolete_ids", []):
                bo.validation_messages.append(
                    (
                        "WARNING",
                        f,
                    )
                )
        # NOTE(review): ``r``/``request_url`` belong to the stripped API
        # request; this elif has no matching if in the damaged block.
        elif r.status_code == 404:
            bo.validation_messages.append(
                ("WARNING", f"Term: {term_id} not found in namespace")
            )
        else:
            log.error(f"Status {r.status_code} - Bad URL: {request_url}")
    # --- plain string arguments -------------------------------------------
    if isinstance(ast, StrArg):
        log.debug(f" Check String Arg: {ast.value} {ast.value_types}")
        for value_type in ast.value_types:
            # A value type wrapped in slashes is a regex pattern.
            if re.match("/", value_type):
                value_type = re.sub("^/", "", value_type)
                value_type = re.sub("/$", "", value_type)
                match = re.match(value_type, ast.value)
                if match:
                    break
            # Otherwise check against the default namespace for that type.
            if value_type in bo.spec["namespaces"]:
                default_namespace = [
                    ns["name"] for ns in bo.spec["namespaces"][value_type]["info"]
                ] + [
                    ns["abbreviation"]
                    for ns in bo.spec["namespaces"][value_type]["info"]
                ]
                if ast.value in default_namespace:
                    break
        else:
            # for-else: neither a regex nor a default-namespace value matched.
            bo.validation_messages.append(
                (
                    "WARNING",
                    f"String value {ast.value} does not match default namespace value or regex pattern: {ast.value_types}",
                )
            )
    # Recurse into child arguments, if any.
    if hasattr(ast, "args"):
        for arg in ast.args:
            validate_arg_values(arg, bo)
    return bo
Check that NSArgs are found in BELbio API and match appropriate entity_type.
Check that StrArgs match their value - either default namespace or regex string
Generate a WARNING if not.
Args:
bo: bel object
Returns:
bel object |
def get_num_sig(self, alpha=0.05):
    """Count, per p-value field, how many result records are significant
    (field value < alpha); returns a Counter keyed by field name.

    NOTE(review): the four field-name string literals were stripped from
    this block (``set([, , , ])`` is not valid Python); restore the
    p-value attribute names from the upstream source.
    """
    ctr = cx.Counter()
    flds = set([, , , ])
    # Tally each record (namedtuples in self.nts) once per significant field.
    for ntd in self.nts:
        for fld in flds:
            if getattr(ntd, fld) < alpha:
                ctr[fld] += 1
    return ctr
def limit(self, limit):
    """Return a copy of this query with an absolute cap on the number of
    images returned; pass None to fetch as many results as needed
    (default is 50 posts)."""
    merged = join_params(self.parameters, {"limit": limit})
    return type(self)(**merged)
as many results as needed; default 50 posts. |
def _gettype(self):
    """Return the current type of this struct: a typedef object (e.g.
    nstruct) taken from the innermost sub-parser that declares one.

    NOTE(review): the attribute-name string literals were stripped from
    the getattr/hasattr calls below; this does not parse as-is.
    """
    current = self
    lastname = getattr(current._parser, , None)
    # Descend through nested sub-structs, remembering the deepest
    # non-None type declaration found along the way.
    while hasattr(current, ):
        current = current._sub
        tn = getattr(current._parser, , None)
        if tn is not None:
            lastname = tn
    return lastname
:returns: a typedef object (e.g. nstruct) |
def wo_resp(self, resp):
    """Attach the stringified payload (self._data) to *resp* and delegate
    to wo_json; subclasses can override for other response styles.

    NOTE(review): the response key literal was stripped (``resp[] =``);
    restore it from the upstream source.
    """
    if self._data is not None:
        resp[] = self.to_str(self._data)
    return self.wo_json(resp)
def set_attrs(self, username, attrs):
    """Set (replace) the given attributes on LDAP user *username*.

    A change to the DN naming attribute is applied as a rename (modrdn);
    every other attribute is applied as a modify ldif diffed against the
    entry's current values. Raises UserDoesntExist when the user cannot
    be found. The LDAP connection is unbound before returning.
    """
    ldap_client = self._bind()
    tmp = self._get_user(self._byte_p2(username), ALL_ATTRS)
    if tmp is None:
        raise UserDoesntExist(username, self.backend_name)
    # tmp is (dn, attribute-dict) as returned by the user search.
    dn = self._byte_p2(tmp[0])
    old_attrs = tmp[1]
    for attr in attrs:
        bcontent = self._byte_p2(attrs[attr])
        battr = self._byte_p2(attr)
        new = {battr: self._modlist(self._byte_p3(bcontent))}
        # The naming attribute cannot be modified in place: rename the
        # entry, then recompute the local dn to keep later modifies valid.
        if attr.lower() == self.dn_user_attr.lower():
            ldap_client.rename_s(
                dn,
                ldap.dn.dn2str([[(battr, bcontent, 1)]])
            )
            dn = ldap.dn.dn2str(
                [[(battr, bcontent, 1)]] + ldap.dn.str2dn(dn)[1:]
            )
        else:
            if attr in old_attrs:
                # Normalise the existing value(s) to the same byte form so
                # modifyModlist can diff old against new.
                if type(old_attrs[attr]) is list:
                    tmp = []
                    for value in old_attrs[attr]:
                        tmp.append(self._byte_p2(value))
                    bold_value = tmp
                else:
                    bold_value = self._modlist(
                        self._byte_p3(old_attrs[attr])
                    )
                old = {battr: bold_value}
            else:
                old = {}
            ldif = modlist.modifyModlist(old, new)
            if ldif:
                try:
                    ldap_client.modify_s(dn, ldif)
                except Exception as e:
                    # Release the connection before mapping the error
                    # through the backend's exception handler.
                    ldap_client.unbind_s()
                    self._exception_handler(e)
    ldap_client.unbind_s()
def to_graph_tool(self):
    """Convert this Graph to a graph_tool-compatible object (requires the
    graph_tool library; its internal ordering appears to be column-major).

    NOTE(review): the conversion body was lost in extraction -- ``gt`` is
    referenced but never built here; restore from the upstream source.
    """
    return gt
Requires the graph_tool library.
Note that the internal ordering of graph_tool seems to be column-major. |
def rfdist_task(newick_string_a, newick_string_b, normalise, min_overlap=4, overlap_fail_value=0):
    """Distributed wrapper around tree_distance.rfdist.

    Parses the two newick strings into trees and returns their
    (optionally normalised) Robinson-Foulds distance.
    """
    lhs = Tree(newick_string_a)
    rhs = Tree(newick_string_b)
    return treedist.rfdist(lhs, rhs, normalise, min_overlap, overlap_fail_value)
Parameters: two valid newick strings and a boolean |
def sanitize(s, strict=True):
    """Sanitize a string.

    Spaces are converted to underscore; if strict=True they are then
    removed (since '_' is not in the allowed set).

    Parameters
    ----------
    s : str
        String to sanitize (non-strings are converted with str()).
    strict : bool
        If True, only alphanumeric characters are allowed. If False, a
        limited set of additional characters (-._) will be allowed.
    """
    # NOTE(review): the character-class literals were stripped in the
    # original extraction; reconstructed here per the documented contract.
    allowed = ''.join(
        [
            'abcdefghijklmnopqrstuvwxyz',
            'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
            '0123456789',
        ]
    )
    if not strict:
        allowed += '-._'
    s = str(s).replace(' ', '_')
    return ''.join([i for i in s if i in allowed])
Spaces are converted to underscore; if strict=True they are then removed.
Parameters
----------
s : str
String to sanitize
strict : bool
If True, only alphanumeric characters are allowed. If False, a limited
set of additional characters (-._) will be allowed. |
def permutation_entropy(x, n, tau):
    """Compute the permutation entropy of time series *x* for permutation
    order *n* and embedding lag *tau*.

    Each row of the embedding matrix (built by embed_seq) is mapped to its
    ordinal pattern (0 placed where the smallest element was, n-1 where
    the largest was, duplicates resolved first-come as before), and the
    Shannon entropy (base 2) of the pattern distribution is returned.

    Parameters
    ----------
    x : list
        a time series
    n : int
        permutation order (usually 3..7)
    tau : int
        embedding lag

    Returns
    -------
    float
        permutation entropy

    Examples
    --------
    >>> permutation_entropy([1, 2, 4, 5, 12, 3, 4, 5], 5, 1)
    2.0
    """
    # Local import keeps the module's import block untouched.
    from collections import Counter

    Em = embed_seq(x, tau, n)
    patterns = []
    for i in range(len(Em)):
        ranks = []
        work = [Em[i][j] for j in range(len(Em[i]))]
        for j in range(len(Em[i])):
            # Re-sort each pass so consumed slots (-1) sink to the front,
            # reproducing the original duplicate-handling exactly.
            work.sort()
            pos = work.index(Em[i][j])
            ranks.append(pos)
            work[pos] = -1  # mark this occurrence as consumed
        patterns.append(tuple(ranks))
    # Count identical ordinal patterns in one O(N) pass; the original
    # list.count/pop loop was O(N^2). Counter preserves first-occurrence
    # order, matching the original summation order.
    counts = Counter(patterns)
    RankMat = numpy.array(list(counts.values()))
    RankMat = numpy.true_divide(RankMat, RankMat.sum())
    EntropyMat = numpy.multiply(numpy.log2(RankMat), RankMat)
    PE = -1 * EntropyMat.sum()
    return PE
return PE | Compute Permutation Entropy of a given time series x, specified by
permutation order n and embedding lag tau.
Parameters
----------
x
list
a time series
n
integer
Permutation order
tau
integer
Embedding lag
Returns
----------
PE
float
permutation entropy
Notes
----------
Suppose the given time series is X =[x(1),x(2),x(3),...,x(N)].
We first build embedding matrix Em, of dimension(n*N-n+1),
such that the ith row of Em is x(i),x(i+1),..x(i+n-1). Hence
the embedding lag and the embedding dimension are 1 and n
respectively. We build this matrix from a given time series,
X, by calling pyEEg function embed_seq(x,1,n).
We then transform each row of the embedding matrix into
a new sequence, comprising a set of integers in range of 0,..,n-1.
The order in which the integers are placed within a row is the
same as those of the original elements:0 is placed where the smallest
element of the row was and n-1 replaces the largest element of the row.
To calculate the Permutation entropy, we calculate the entropy of PeSeq.
In doing so, we count the number of occurrences of each permutation
in PeSeq and write it in a sequence, RankMat. We then use this sequence to
calculate entropy by using Shannon's entropy formula.
Permutation entropy is usually calculated with n in range of 3 and 7.
References
----------
Bandt, Christoph, and Bernd Pompe. "Permutation entropy: a natural
complexity measure for time series." Physical Review Letters 88.17
(2002): 174102.
Examples
----------
>>> import pyeeg
>>> x = [1,2,4,5,12,3,4,5]
>>> pyeeg.permutation_entropy(x,5,1)
2.0 |
def load_sampleset(self, f, name):
    """Load encoder/decoder arrays for sample set *name* (usually 'train'
    or 'test') from the open HDF5 file handle *f*.

    NOTE(review): the dataset-name suffix literals were stripped
    (``f[name + ]``); restore them from the upstream source.
    """
    self.encoder_x = np.array(f[name + ])
    self.decoder_x = np.array(f[name + ])
    self.decoder_y = np.array(f[name + ])
def setParentAnalysisRequest(self, value):
    """Assign a parent analysis request; a truthy value marks this object
    as a partition, an empty one removes the partition marker."""
    self.Schema().getField("ParentAnalysisRequest").set(self, value)
    if value:
        alsoProvides(self, IAnalysisRequestPartition)
    else:
        noLongerProvides(self, IAnalysisRequestPartition)
def fill(self, *args):
    """Apply a solid fill to your chart (chf API parameter).

    args are of the form <fill type>,<fill style>,...; fill type must be
    one of c, bg, a and fill style must be one of s, lg, ls. The
    remaining args describe the particular style; colour positions are
    validated (index 2 for the 3-arg form, indexes 3 and 5 otherwise)
    before the args are comma-joined onto self.fills. Returns self.
    """
    # NOTE(review): the literal tuples/messages were stripped in the
    # original extraction; reconstructed per the documented contract.
    a, b = args[:2]
    assert a in ('c', 'bg', 'a'), 'Fill type must be one of c, bg or a, not %s' % a
    assert b in ('s', 'lg', 'ls'), 'Fill style must be one of s, lg or ls, not %s' % b
    if len(args) == 3:
        args = color_args(args, 2)
    else:
        args = color_args(args, 3, 5)
    self.fills.append(','.join(map(str, args)))
    return self
return self | Apply a solid fill to your chart
args are of the form <fill type>,<fill style>,...
fill type must be one of c,bg,a
fill style must be one of s,lg,ls
the rest of the args refer to the particular style
APIPARAM: chf |
def get_img_attrs(self, style=None, **kwargs):
    """Build the attribute dict (src, srcset, style, et al) for this image.

    kwargs may name one or more attribute prefixes; any kwarg starting
    with such a prefix is copied into the mapping with the prefix
    stripped, then the merged kwargs are passed to _get_img_attrs.

    NOTE(review): the kwargs key literal was stripped (``if in kwargs``);
    this does not parse as-is.
    """
    add = {}
    if in kwargs:
        attr_prefixes = kwargs.get()
        # A single string prefix is normalised into a one-element list.
        if isinstance(kwargs[], str):
            attr_prefixes = [attr_prefixes]
        for prefix in attr_prefixes:
            for k, val in kwargs.items():
                if k.startswith(prefix):
                    add[k[len(prefix):]] = val
    return self._get_img_attrs(style, {**kwargs, **add})
return self._get_img_attrs(style, {**kwargs, **add}) | Get an attribute list (src, srcset, style, et al) for the image.
style -- an optional list of CSS style fragments
Returns: a dict of attributes e.g. {'src':'foo.jpg','srcset':'foo.jpg 1x, bar.jpg 2x'] |
def get_by_index(self, i):
    """Look up a gene set by its index.

    Parameters
    ----------
    i : int
        The index of the gene set.

    Returns
    -------
    GeneSet
        The gene set.

    Raises
    ------
    ValueError
        If the given index is out of bounds.
    """
    # NOTE(review): the message literals were stripped in the original
    # extraction; reconstructed with an equivalent message.
    if i >= self.n:
        raise ValueError('Index %d out of bounds! ' % i +
                         'There are only %d gene sets.' % self.n)
    return self._gene_sets[self._gene_set_ids[i]]
return self._gene_sets[self._gene_set_ids[i]] | Look up a gene set by its index.
Parameters
----------
i: int
The index of the gene set.
Returns
-------
GeneSet
The gene set.
Raises
------
ValueError
If the given index is out of bounds. |
def getLabel(self):
    """Return the symbolic path to this MIB variable: the sequence of
    symbolic names of parent MIB objects from the top of the tree towards
    this variable.

    Returns
    -------
    tuple
        Sequence of node names in the MIB tree.

    Raises
    ------
    SmiError
        If MIB variable resolution (resolveWithMib) has not been
        performed yet.

    Notes
    -----
    The returned sequence may not contain the full path if some symbols
    are unknown at the moment of MIB look up.
    """
    if self._state & self.ST_CLEAN:
        return self._label
    else:
        # NOTE(review): the message literal was stripped in the original
        # extraction; reconstructed per the upstream pysnmp source.
        raise SmiError(
            '%s object not fully initialized' % self.__class__.__name__)
% self.__class__.__name__) | Returns symbolic path to this MIB variable.
Meaning a sequence of symbolic identifications for each of parent
MIB objects in MIB tree.
Returns
-------
tuple
sequence of names of nodes in a MIB tree from the top of the tree
towards this MIB variable.
Raises
------
SmiError
If MIB variable conversion has not been performed.
Notes
-----
Returned sequence may not contain full path to this MIB variable
if some symbols are now known at the moment of MIB look up.
Examples
--------
>>> objectIdentity = ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)
>>> objectIdentity.resolveWithMib(mibViewController)
>>> objectIdentity.getOid()
('iso', 'org', 'dod', 'internet', 'mgmt', 'mib-2', 'system', 'sysDescr')
>>> |
def open(self):
    """Initialize per-run visit state: reset the try/finally stack and
    register zeroed module/function/method/class counters with the linter."""
    self.stats = self.linter.add_stats(module=0, function=0,
                                       method=0, class_=0)
    self._tryfinallys = []
def _check_for_fail_message(self, transport, exc_info, timeout):
    """Check for a 'FAIL' message from transport. This method always raises.

    Args:
        transport: Transport from which to read for a 'FAIL' message.
        exc_info: sys.exc_info() tuple to re-raise when no 'FAIL' is read.
        timeout: Timeout to use for the read operation.

    Raises:
        AdbRemoteError: if a 'FAIL' was read (raised from within
            read_message); otherwise the original exception from exc_info,
            re-raised with its traceback.
    """
    try:
        transport.read_message(timeout)
    except usb_exceptions.CommonUsbError:
        # read_message surfaces a FAIL as AdbRemoteError; let that one
        # propagate. Any other USB error is swallowed and we fall through
        # to re-raise the original exception instead.
        if sys.exc_info()[0] is usb_exceptions.AdbRemoteError:
            raise
    raise_with_traceback(exc_info[0](exc_info[1]), traceback=exc_info[2])
raise_with_traceback(exc_info[0](exc_info[1]), traceback=exc_info[2]) | Check for a 'FAIL' message from transport.
This method always raises, if 'FAIL' was read, it will raise an
AdbRemoteError with the message, otherwise it will raise based on
exc_info, which should be a tuple as per sys.exc_info().
Args:
transport: Transport from which to read for a 'FAIL' message.
exc_info: Exception info to raise if no 'FAIL' is read.
timeout: Timeout to use for the read operation.
Raises:
AdbRemoteError: If a 'FAIL' is read, otherwise raises exc_info. |
def config_parse(files=None, config=None, config_profile=".fissconfig", **kwargs):
    """Read configuration state from profile/config files into the shared
    __fcconfig attrdict, then layer the passed *config* mapping and
    **kwargs on top (highest precedence); returns the merged config.

    NOTE(review): several literals were stripped from this block --
    ``os.path.expanduser()`` is missing its path argument, and the
    root_url ``endswith()`` / ``+=`` lines lost their '/' strings -- so
    it does not parse as-is. Restore from the upstream source.
    """
    local_config = config
    config = __fcconfig
    cfgparser = configparser.SafeConfigParser()
    filenames = list()
    # Profile in the home directory first, then the working directory,
    # then any explicitly supplied file(s); later files override earlier.
    filenames.append(os.path.join(os.path.expanduser(), config_profile))
    filenames.append(os.path.join(os.getcwd(), config_profile))
    if files:
        if isinstance(files, string_types):
            filenames.append(files)
        elif isinstance(files, Iterable):
            for f in files:
                # Accept open file objects as well as path strings.
                if isinstance(f, IOBase):
                    f = f.name
                filenames.append(f)
    cfgparser.read(filenames)
    # Top-level (DEFAULT) items first, then each named section as a
    # nested attrdict; existing top-level keys are not overwritten.
    for keyval in cfgparser.items():
        __fcconfig[keyval[0]] = keyval[1]
    for section in cfgparser.sections():
        config[section] = attrdict()
        for option in cfgparser.options(section):
            if not config[option]:
                config[section][option] = cfgparser.get(section, option)
    config.verbosity = int(config.verbosity)
    if not config.root_url.endswith():
        config.root_url +=
    # Export credentials for downstream Google auth when the file exists.
    if os.path.isfile(config.credentials):
        os.environ[environment_vars.CREDENTIALS] = config.credentials
    # Explicitly passed config and kwargs take highest precedence.
    if local_config is not None:
        for key, value in local_config.items():
            config[key] = value
    for key, value in kwargs.items():
        config[key] = value
    return config
return config | Read initial configuration state, from named config files; store
this state within a config dictionary (which may be nested) whose keys may
also be referenced as attributes (safely, defaulting to None if unset). A
config object may be passed in, as a way of accumulating or overwriting
configuration state; if one is NOT passed, the default config obj is used |
def get_design_document(self, ddoc_id):
    """Retrieve the design document identified by *ddoc_id*.

    If it exists remotely, its content is loaded into the returned
    DesignDocument; a 404 leaves the returned object as an empty "shell".
    Any other HTTP error propagates to the caller.

    :param str ddoc_id: Design document id
    :returns: a DesignDocument instance
    """
    document = DesignDocument(self, ddoc_id)
    try:
        document.fetch()
    except HTTPError as err:
        if err.response.status_code == 404:
            pass  # not found remotely: hand back the unpopulated shell
        else:
            raise
    return document
then that content is wrapped in a DesignDocument object and returned
to the caller. Otherwise a "shell" DesignDocument object is returned.
:param str ddoc_id: Design document id
:returns: A DesignDocument instance, if exists remotely then it will
be populated accordingly |
def profiling(self):
    """Generator that profiles and broadcasts results while any client is
    connected. Drive it from a sleeping loop, yielding once per interval::

        for __ in self.profiling():
            time.sleep(self.interval)
    """
    self._log_profiler_started()
    while self.clients:
        try:
            self.profiler.start()
        except RuntimeError:
            # Profiler already running; continue with the active run.
            pass
        yield
        self.profiler.stop()
        result = self.profiler.result()
        data = pack_msg(RESULT, result,
                        pickle_protocol=self.pickle_protocol)
        # Keep the latest packed result for late-joining clients.
        self._latest_result_data = data
        closed_clients = []
        for client in self.clients:
            try:
                self._send(client, data)
            except socket.error as exc:
                # Only a broken pipe marks the client for removal; other
                # socket errors are silently ignored here.
                if exc.errno == EPIPE:
                    closed_clients.append(client)
        del data
        # Remove dead clients outside the send loop to avoid mutating
        # self.clients while iterating it.
        for client in closed_clients:
            self.disconnected(client)
    self._log_profiler_stopped()
sleeping loop using this::
def profile_periodically(self):
for __ in self.profiling():
time.sleep(self.interval) |
def get_plan(self, nodes=None):
    """Retrieve a plan: a list of fixtures sorted by dependency.

    :param list nodes: optional subset of nodes to load; when falsy the
        whole graph is resolved.
    :return: the resolved plan
    """
    if not nodes:
        return self.graph.resolve_node()
    return self.graph.resolve_nodes(nodes)
return plan | Retrieve a plan, e.g. a list of fixtures to be loaded sorted on
dependency.
:param list nodes: list of nodes to be loaded.
:return: |
def to_ufo_paths(self, ufo_glyph, layer):
    """Draw the paths of a .glyphs *layer* onto *ufo_glyph* via its point pen."""
    pen = ufo_glyph.getPointPen()
    for path in layer.paths:
        nodes = list(path.nodes)
        # Transfer per-node user data before emitting any points.
        for node in nodes:
            self.to_ufo_node_user_data(ufo_glyph, node)
        pen.beginPath()
        if not nodes:
            pen.endPath()
            continue
        if not path.closed:
            # Open contours begin with an explicit "move" point.
            node = nodes.pop(0)
            assert node.type == "line", "Open path starts with off-curve points"
            pen.addPoint(tuple(node.position), segmentType="move")
        else:
            # Rotate the last node to the front for the UFO point order
            # (presumably Glyphs stores the start point last -- confirm).
            nodes.insert(0, nodes.pop())
        for node in nodes:
            node_type = _to_ufo_node_type(node.type)
            pen.addPoint(
                tuple(node.position), segmentType=node_type, smooth=node.smooth
            )
        pen.endPath()
def get(self, path):
    """Retrieve a config value by name (case-insensitive).

    Args:
        path (String): name of a config entry
    Returns:
        the cached value when present, otherwise whatever _findConfig
        resolves for the upper-cased name (may be None).
    """
    key = path.upper()
    cache = self._configCache
    if key in cache:
        return cache[key]
    return self._findConfig(key)
return self._findConfig(path) | permet de récupérer une config
Args:
path (String): Nom d'une config
Returns:
type: String
la valeur de la config ou None |
def work_cancel(self, hash):
    """Stop generating work for a block.

    .. enable_control required

    :param hash: Hash to stop generating work for
    :type hash: str
    :raises: :py:exc:`nano.rpc.RPCException`
    :returns: True when the node acknowledged with an empty response

    >>> rpc.work_cancel(
    ...     hash="718CC2121C3E641059BC1C2CFC45666C99E8AE922F7A807B7D07B62C995D79E2"
    ... )
    True
    """
    # NOTE(review): the literals were stripped in the original extraction;
    # reconstructed per the nano node RPC protocol (action 'work_cancel').
    hash = self._process_value(hash, 'block')
    payload = {"hash": hash}
    resp = self.call('work_cancel', payload)
    # The node answers an empty object on success.
    return resp == {}
return resp == {} | Stop generating **work** for block
.. enable_control required
:param hash: Hash to stop generating work for
:type hash: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.work_cancel(
... hash="718CC2121C3E641059BC1C2CFC45666C99E8AE922F7A807B7D07B62C995D79E2"
... )
True |
def new(self):
    """The new value present in the event.

    Dispatches on the wrapped admin-log action type and returns the
    matching new_* payload; implicitly returns None for action types not
    handled below.
    """
    ori = self.original.action
    # Scalar changes/toggles all expose a plain .new_value.
    if isinstance(ori, (
        types.ChannelAdminLogEventActionChangeAbout,
        types.ChannelAdminLogEventActionChangeTitle,
        types.ChannelAdminLogEventActionChangeUsername,
        types.ChannelAdminLogEventActionToggleInvites,
        types.ChannelAdminLogEventActionTogglePreHistoryHidden,
        types.ChannelAdminLogEventActionToggleSignatures
    )):
        return ori.new_value
    elif isinstance(ori, types.ChannelAdminLogEventActionChangePhoto):
        return ori.new_photo
    elif isinstance(ori, types.ChannelAdminLogEventActionChangeStickerSet):
        return ori.new_stickerset
    elif isinstance(ori, types.ChannelAdminLogEventActionEditMessage):
        return ori.new_message
    # Both participant toggles carry the updated participant record.
    elif isinstance(ori, (
        types.ChannelAdminLogEventActionParticipantToggleAdmin,
        types.ChannelAdminLogEventActionParticipantToggleBan
    )):
        return ori.new_participant
    elif isinstance(ori, types.ChannelAdminLogEventActionParticipantInvite):
        return ori.participant
    elif isinstance(ori, types.ChannelAdminLogEventActionDefaultBannedRights):
        return ori.new_banned_rights
    elif isinstance(ori, types.ChannelAdminLogEventActionStopPoll):
        return ori.message
def add_namespace_uri(self, ns_uri, prefix=None, schema_location=None):
    """Add *ns_uri* to this set, optionally with a prefix and schema
    location URI.

    If the namespace already exists, the prefix (after a conflict check)
    and schema location are merged into the existing entry; otherwise a
    new _NamespaceInfo is registered. May raise on prefix conflicts or
    conflicting schema locations (see the private helpers called below).

    Args:
        ns_uri (str): URI of the namespace (required, must be truthy)
        prefix (str): desired prefix for the namespace (optional)
        schema_location (str): desired schema location (optional)
    """
    assert ns_uri
    if ns_uri in self.__ns_uri_map:
        # Merge into a deep copy so a conflict raised part-way through
        # does not leave the stored entry half-updated.
        ni = self.__lookup_uri(ns_uri)
        new_ni = copy.deepcopy(ni)
        if prefix:
            self.__check_prefix_conflict(ni, prefix)
            new_ni.prefixes.add(prefix)
        self.__merge_schema_locations(new_ni, schema_location)
        # Re-point every prefix of the merged record at the new info
        # object and swap it in.
        for p in new_ni.prefixes:
            self.__prefix_map[p] = new_ni
        self.__ns_uri_map[new_ni.uri] = new_ni
    else:
        if prefix:
            self.__check_prefix_conflict(ns_uri, prefix)
        ni = _NamespaceInfo(ns_uri, prefix, schema_location)
        self.__add_namespaceinfo(ni)
self.__add_namespaceinfo(ni) | Adds a new namespace to this set, optionally with a prefix and
schema location URI.
If the namespace already exists, the given prefix and schema location
are merged with the existing entry:
* If non-None, ``prefix`` is added to the set. The preferred
prefix is not modified.
* If a schema location is not already associated with the
namespace, it is set to ``schema_location`` (if given).
If the namespace doesn't already exist in this set (so a new one is
being created) and a prefix is given, that prefix becomes preferred.
If not given, a preference as a default namespace is used.
Args:
ns_uri (str): The URI of the new namespace
prefix (str): The desired prefix for the new namespace (optional)
schema_location (str): The desired schema location for the new
namespace (optional).
Raises:
DuplicatePrefixError: If a prefix is given which already maps to a
different namespace
ConflictingSchemaLocationError: If a schema location is given and
the namespace already exists in this set with a different
schema location. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.