code stringlengths 70 11.9k | docstring stringlengths 4 7.08k | text stringlengths 128 15k |
|---|---|---|
def compute_acf(cls, filename, start_index=None, end_index=None,
per_walker=False, walkers=None, parameters=None,
temps=None):
acfs = {}
with cls._io(filename, ) as fp:
if parameters is None:
parameters = fp.variable_params
if isinstance(parameters, str) or isinstance(parameters, unicode):
parameters = [parameters]
if isinstance(temps, int):
temps = [temps]
elif temps == :
temps = numpy.arange(fp.ntemps)
elif temps is None:
temps = [0]
for param in parameters:
subacfs = []
for tk in temps:
if per_walker:
if walkers is None:
walkers = numpy.arange(fp.nwalkers)
arrays = [cls.compute_acfs(filename,
start_index=start_index,
end_index=end_index,
per_walker=False,
walkers=ii,
parameters=param,
temps=tk)[param][0, :]
for ii in walkers]
subacfs.append(numpy.vstack(arrays))
else:
samples = fp.read_raw_samples(
param, thin_start=start_index,
thin_interval=1, thin_end=end_index,
walkers=walkers, temps=tk, flatten=False)[param]
samples = samples.mean(axis=1)[0, :]
thisacf = autocorrelation.calculate_acf(
samples).numpy()
subacfs.append(thisacf)
acfs[param] = numpy.stack(subacfs)
return acfs | Computes the autocorrleation function of the model params in the
given file.
By default, parameter values are averaged over all walkers at each
iteration. The ACF is then calculated over the averaged chain for each
temperature. An ACF per-walker will be returned instead if
``per_walker=True``.
Parameters
-----------
filename : str
Name of a samples file to compute ACFs for.
start_index : {None, int}
The start index to compute the acl from. If None, will try to use
the number of burn-in iterations in the file; otherwise, will start
at the first sample.
end_index : {None, int}
The end index to compute the acl to. If None, will go to the end
of the current iteration.
per_walker : optional, bool
Return the ACF for each walker separately. Default is False.
walkers : optional, int or array
Calculate the ACF using only the given walkers. If None (the
default) all walkers will be used.
parameters : optional, str or array
Calculate the ACF for only the given parameters. If None (the
default) will calculate the ACF for all of the model params.
temps : optional, (list of) int or 'all'
The temperature index (or list of indices) to retrieve. If None
(the default), the ACF will only be computed for the coldest (= 0)
temperature chain. To compute an ACF for all temperates pass 'all',
or a list of all of the temperatures.
Returns
-------
dict :
Dictionary of arrays giving the ACFs for each parameter. If
``per-walker`` is True, the arrays will have shape
``ntemps x nwalkers x niterations``. Otherwise, the returned array
will have shape ``ntemps x niterations``. | ### Input:
Computes the autocorrleation function of the model params in the
given file.
By default, parameter values are averaged over all walkers at each
iteration. The ACF is then calculated over the averaged chain for each
temperature. An ACF per-walker will be returned instead if
``per_walker=True``.
Parameters
-----------
filename : str
Name of a samples file to compute ACFs for.
start_index : {None, int}
The start index to compute the acl from. If None, will try to use
the number of burn-in iterations in the file; otherwise, will start
at the first sample.
end_index : {None, int}
The end index to compute the acl to. If None, will go to the end
of the current iteration.
per_walker : optional, bool
Return the ACF for each walker separately. Default is False.
walkers : optional, int or array
Calculate the ACF using only the given walkers. If None (the
default) all walkers will be used.
parameters : optional, str or array
Calculate the ACF for only the given parameters. If None (the
default) will calculate the ACF for all of the model params.
temps : optional, (list of) int or 'all'
The temperature index (or list of indices) to retrieve. If None
(the default), the ACF will only be computed for the coldest (= 0)
temperature chain. To compute an ACF for all temperates pass 'all',
or a list of all of the temperatures.
Returns
-------
dict :
Dictionary of arrays giving the ACFs for each parameter. If
``per-walker`` is True, the arrays will have shape
``ntemps x nwalkers x niterations``. Otherwise, the returned array
will have shape ``ntemps x niterations``.
### Response:
def compute_acf(cls, filename, start_index=None, end_index=None,
per_walker=False, walkers=None, parameters=None,
temps=None):
acfs = {}
with cls._io(filename, ) as fp:
if parameters is None:
parameters = fp.variable_params
if isinstance(parameters, str) or isinstance(parameters, unicode):
parameters = [parameters]
if isinstance(temps, int):
temps = [temps]
elif temps == :
temps = numpy.arange(fp.ntemps)
elif temps is None:
temps = [0]
for param in parameters:
subacfs = []
for tk in temps:
if per_walker:
if walkers is None:
walkers = numpy.arange(fp.nwalkers)
arrays = [cls.compute_acfs(filename,
start_index=start_index,
end_index=end_index,
per_walker=False,
walkers=ii,
parameters=param,
temps=tk)[param][0, :]
for ii in walkers]
subacfs.append(numpy.vstack(arrays))
else:
samples = fp.read_raw_samples(
param, thin_start=start_index,
thin_interval=1, thin_end=end_index,
walkers=walkers, temps=tk, flatten=False)[param]
samples = samples.mean(axis=1)[0, :]
thisacf = autocorrelation.calculate_acf(
samples).numpy()
subacfs.append(thisacf)
acfs[param] = numpy.stack(subacfs)
return acfs |
def servers(self):
self.__init()
items = []
for k,v in self._json_dict.items():
if k == "servers":
for s in v:
if in s:
url = "%s/%s" % (self.root, s[])
items.append(
self.Server(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port))
del k,v
return items | gets all the server resources | ### Input:
gets all the server resources
### Response:
def servers(self):
self.__init()
items = []
for k,v in self._json_dict.items():
if k == "servers":
for s in v:
if in s:
url = "%s/%s" % (self.root, s[])
items.append(
self.Server(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port))
del k,v
return items |
def run(self,
max_iter=10,
verbose=True,
projection=,
stopping_condition=5e-3,
num_norm_avg=50,
k=20,
distance=,
preprocessing=,
proj_kwargs={}):
self.distance = distance
D = self.adata.X
self.k = k
if(self.k < 5):
self.k = 5
elif(self.k > 100):
self.k = 100
if(self.k > D.shape[0] - 1):
self.k = D.shape[0] - 2
numcells = D.shape[0]
n_genes = 8000
if numcells > 3000 and n_genes > 3000:
n_genes = 3000
elif numcells > 2000 and n_genes > 4500:
n_genes = 4500
elif numcells > 1000 and n_genes > 6000:
n_genes = 6000
elif n_genes > 8000:
n_genes = 8000
npcs = None
if npcs is None and numcells > 3000:
npcs = 150
elif npcs is None and numcells > 2000:
npcs = 250
elif npcs is None and numcells > 1000:
npcs = 350
elif npcs is None:
npcs = 500
tinit = time.time()
edm = sp.coo_matrix((numcells, numcells), dtype=).tolil()
nums = np.arange(edm.shape[1])
RINDS = np.random.randint(
0, numcells, (self.k - 1) * numcells).reshape((numcells,
(self.k - 1)))
RINDS = np.hstack((nums[:, None], RINDS))
edm[np.tile(np.arange(RINDS.shape[0])[:, None],
(1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
edm = edm.tocsr()
print()
W = self.dispersion_ranking_NN(
edm, num_norm_avg=1)
old = np.zeros(W.size)
new = W
i = 0
err = ((new - old)**2).mean()**0.5
if max_iter < 5:
max_iter = 5
nnas = num_norm_avg
self.Ns=[edm]
self.Ws = [W]
while (i < max_iter and err > stopping_condition):
conv = err
if(verbose):
print( + str(i) + + str(conv))
i += 1
old = new
W, wPCA_data, EDM, = self.calculate_nnm(
D, W, n_genes, preprocessing, npcs, numcells, nnas)
new = W
err = ((new - old)**2).mean()**0.5
self.Ns.append(EDM)
self.Ws.append(W)
W, wPCA_data, EDM, = self.calculate_nnm(
D, W, n_genes, preprocessing, npcs, numcells, nnas)
self.Ns.append(EDM)
all_gene_names = np.array(list(self.adata.var_names))
indices = np.argsort(-W)
ranked_genes = all_gene_names[indices]
self.corr_bin_genes(number_of_features=1000)
self.adata.uns[] = ranked_genes
self.adata.obsm[] = wPCA_data
self.adata.uns[] = {}
self.adata.uns[][] = EDM
if(projection == ):
print()
self.run_tsne(**proj_kwargs)
elif(projection == ):
print()
self.run_umap(**proj_kwargs)
elif(projection == ):
print()
self.run_diff_umap(**proj_kwargs)
elapsed = time.time() - tinit
if verbose:
print( + str(elapsed) + ) | Runs the Self-Assembling Manifold algorithm.
Parameters
----------
k - int, optional, default 20
The number of nearest neighbors to identify for each cell.
distance : string, optional, default 'correlation'
The distance metric to use when constructing cell distance
matrices. Can be any of the distance metrics supported by
sklearn's 'pdist'.
max_iter - int, optional, default 10
The maximum number of iterations SAM will run.
stopping_condition - float, optional, default 5e-3
The stopping condition threshold for the RMSE between gene weights
in adjacent iterations.
verbose - bool, optional, default True
If True, the iteration number and error between gene weights in
adjacent iterations will be displayed.
projection - str, optional, default 'umap'
If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
embedding. Otherwise, no embedding will be generated.
preprocessing - str, optional, default 'Normalizer'
If 'Normalizer', use sklearn.preprocessing.Normalizer, which
normalizes expression data prior to PCA such that each cell has
unit L2 norm. If 'StandardScaler', use
sklearn.preprocessing.StandardScaler, which normalizes expression
data prior to PCA such that each gene has zero mean and unit
variance. Otherwise, do not normalize the expression data. We
recommend using 'StandardScaler' for large datasets and
'Normalizer' otherwise.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This prevents
genes with large spatial dispersions from skewing the distribution
of weights.
proj_kwargs - dict, optional, default {}
A dictionary of keyword arguments to pass to the projection
functions. | ### Input:
Runs the Self-Assembling Manifold algorithm.
Parameters
----------
k - int, optional, default 20
The number of nearest neighbors to identify for each cell.
distance : string, optional, default 'correlation'
The distance metric to use when constructing cell distance
matrices. Can be any of the distance metrics supported by
sklearn's 'pdist'.
max_iter - int, optional, default 10
The maximum number of iterations SAM will run.
stopping_condition - float, optional, default 5e-3
The stopping condition threshold for the RMSE between gene weights
in adjacent iterations.
verbose - bool, optional, default True
If True, the iteration number and error between gene weights in
adjacent iterations will be displayed.
projection - str, optional, default 'umap'
If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
embedding. Otherwise, no embedding will be generated.
preprocessing - str, optional, default 'Normalizer'
If 'Normalizer', use sklearn.preprocessing.Normalizer, which
normalizes expression data prior to PCA such that each cell has
unit L2 norm. If 'StandardScaler', use
sklearn.preprocessing.StandardScaler, which normalizes expression
data prior to PCA such that each gene has zero mean and unit
variance. Otherwise, do not normalize the expression data. We
recommend using 'StandardScaler' for large datasets and
'Normalizer' otherwise.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This prevents
genes with large spatial dispersions from skewing the distribution
of weights.
proj_kwargs - dict, optional, default {}
A dictionary of keyword arguments to pass to the projection
functions.
### Response:
def run(self,
max_iter=10,
verbose=True,
projection=,
stopping_condition=5e-3,
num_norm_avg=50,
k=20,
distance=,
preprocessing=,
proj_kwargs={}):
self.distance = distance
D = self.adata.X
self.k = k
if(self.k < 5):
self.k = 5
elif(self.k > 100):
self.k = 100
if(self.k > D.shape[0] - 1):
self.k = D.shape[0] - 2
numcells = D.shape[0]
n_genes = 8000
if numcells > 3000 and n_genes > 3000:
n_genes = 3000
elif numcells > 2000 and n_genes > 4500:
n_genes = 4500
elif numcells > 1000 and n_genes > 6000:
n_genes = 6000
elif n_genes > 8000:
n_genes = 8000
npcs = None
if npcs is None and numcells > 3000:
npcs = 150
elif npcs is None and numcells > 2000:
npcs = 250
elif npcs is None and numcells > 1000:
npcs = 350
elif npcs is None:
npcs = 500
tinit = time.time()
edm = sp.coo_matrix((numcells, numcells), dtype=).tolil()
nums = np.arange(edm.shape[1])
RINDS = np.random.randint(
0, numcells, (self.k - 1) * numcells).reshape((numcells,
(self.k - 1)))
RINDS = np.hstack((nums[:, None], RINDS))
edm[np.tile(np.arange(RINDS.shape[0])[:, None],
(1, RINDS.shape[1])).flatten(), RINDS.flatten()] = 1
edm = edm.tocsr()
print()
W = self.dispersion_ranking_NN(
edm, num_norm_avg=1)
old = np.zeros(W.size)
new = W
i = 0
err = ((new - old)**2).mean()**0.5
if max_iter < 5:
max_iter = 5
nnas = num_norm_avg
self.Ns=[edm]
self.Ws = [W]
while (i < max_iter and err > stopping_condition):
conv = err
if(verbose):
print( + str(i) + + str(conv))
i += 1
old = new
W, wPCA_data, EDM, = self.calculate_nnm(
D, W, n_genes, preprocessing, npcs, numcells, nnas)
new = W
err = ((new - old)**2).mean()**0.5
self.Ns.append(EDM)
self.Ws.append(W)
W, wPCA_data, EDM, = self.calculate_nnm(
D, W, n_genes, preprocessing, npcs, numcells, nnas)
self.Ns.append(EDM)
all_gene_names = np.array(list(self.adata.var_names))
indices = np.argsort(-W)
ranked_genes = all_gene_names[indices]
self.corr_bin_genes(number_of_features=1000)
self.adata.uns[] = ranked_genes
self.adata.obsm[] = wPCA_data
self.adata.uns[] = {}
self.adata.uns[][] = EDM
if(projection == ):
print()
self.run_tsne(**proj_kwargs)
elif(projection == ):
print()
self.run_umap(**proj_kwargs)
elif(projection == ):
print()
self.run_diff_umap(**proj_kwargs)
elapsed = time.time() - tinit
if verbose:
print( + str(elapsed) + ) |
def dict_filter(d, exclude=[]):
if isinstance(d, list):
ret = []
for e in d:
ret.append(dict_filter(e, exclude))
return ret
elif isinstance(d, dict):
ret = {}
for k, v in d.items():
if isinstance(k, builtin_str):
k = str(k)
assert isinstance(k, str)
if k in exclude:
continue
ret[k] = dict_filter(v, exclude)
return ret
return d | Exclude specified keys from a nested dict | ### Input:
Exclude specified keys from a nested dict
### Response:
def dict_filter(d, exclude=[]):
if isinstance(d, list):
ret = []
for e in d:
ret.append(dict_filter(e, exclude))
return ret
elif isinstance(d, dict):
ret = {}
for k, v in d.items():
if isinstance(k, builtin_str):
k = str(k)
assert isinstance(k, str)
if k in exclude:
continue
ret[k] = dict_filter(v, exclude)
return ret
return d |
def train_rdp_classifier_and_assign_taxonomy(
training_seqs_file, taxonomy_file, seqs_to_classify, min_confidence=0.80,
model_output_dir=None, classification_output_fp=None, max_memory=None,
tmp_dir=tempfile.gettempdir()):
if model_output_dir is None:
training_dir = tempfile.mkdtemp(prefix=, dir=tmp_dir)
else:
training_dir = model_output_dir
training_results = train_rdp_classifier(
training_seqs_file, taxonomy_file, training_dir, max_memory=max_memory,
tmp_dir=tmp_dir)
training_data_fp = training_results[].name
assignment_results = assign_taxonomy(
seqs_to_classify, min_confidence=min_confidence,
output_fp=classification_output_fp, training_data_fp=training_data_fp,
max_memory=max_memory, fixrank=False, tmp_dir=tmp_dir)
if model_output_dir is None:
try:
rmtree(training_dir)
except OSError:
msg = (
"Temporary training directory %s not removed" % training_dir)
if os.path.isdir(training_dir):
training_dir_files = os.listdir(training_dir)
msg += "\nDetected files %s" % training_dir_files
warnings.warn(msg, RuntimeWarning)
return assignment_results | Train RDP Classifier and assign taxonomy in one fell swoop
The file objects training_seqs_file and taxonomy_file are used to
train the RDP Classifier (see RdpTrainer documentation for
details). Model data is stored in model_output_dir. If
model_output_dir is not provided, a temporary directory is created
and removed after classification.
The sequences in seqs_to_classify are classified according to the
model and filtered at the desired confidence level (default:
0.80).
The results are saved to classification_output_fp if provided,
otherwise a dict of {seq_id:(taxonomy_assignment,confidence)} is
returned. | ### Input:
Train RDP Classifier and assign taxonomy in one fell swoop
The file objects training_seqs_file and taxonomy_file are used to
train the RDP Classifier (see RdpTrainer documentation for
details). Model data is stored in model_output_dir. If
model_output_dir is not provided, a temporary directory is created
and removed after classification.
The sequences in seqs_to_classify are classified according to the
model and filtered at the desired confidence level (default:
0.80).
The results are saved to classification_output_fp if provided,
otherwise a dict of {seq_id:(taxonomy_assignment,confidence)} is
returned.
### Response:
def train_rdp_classifier_and_assign_taxonomy(
training_seqs_file, taxonomy_file, seqs_to_classify, min_confidence=0.80,
model_output_dir=None, classification_output_fp=None, max_memory=None,
tmp_dir=tempfile.gettempdir()):
if model_output_dir is None:
training_dir = tempfile.mkdtemp(prefix=, dir=tmp_dir)
else:
training_dir = model_output_dir
training_results = train_rdp_classifier(
training_seqs_file, taxonomy_file, training_dir, max_memory=max_memory,
tmp_dir=tmp_dir)
training_data_fp = training_results[].name
assignment_results = assign_taxonomy(
seqs_to_classify, min_confidence=min_confidence,
output_fp=classification_output_fp, training_data_fp=training_data_fp,
max_memory=max_memory, fixrank=False, tmp_dir=tmp_dir)
if model_output_dir is None:
try:
rmtree(training_dir)
except OSError:
msg = (
"Temporary training directory %s not removed" % training_dir)
if os.path.isdir(training_dir):
training_dir_files = os.listdir(training_dir)
msg += "\nDetected files %s" % training_dir_files
warnings.warn(msg, RuntimeWarning)
return assignment_results |
def write_matrix_to_tsv(net, filename=None, df=None):
import pandas as pd
if df is None:
df = net.dat_to_df()
return df[].to_csv(filename, sep=) | This will export the matrix in net.dat or a dataframe (optional df in
arguments) as a tsv file. Row/column categories will be saved as tuples in
tsv, which can be read back into the network object. | ### Input:
This will export the matrix in net.dat or a dataframe (optional df in
arguments) as a tsv file. Row/column categories will be saved as tuples in
tsv, which can be read back into the network object.
### Response:
def write_matrix_to_tsv(net, filename=None, df=None):
import pandas as pd
if df is None:
df = net.dat_to_df()
return df[].to_csv(filename, sep=) |
def count(self, signature = None):
query = self._session.query(CrashDTO.id)
if signature:
sig_pickled = pickle.dumps(signature, protocol = 0)
query = query.filter_by(signature = sig_pickled)
return query.count() | Counts how many crash dumps have been stored in this database.
Optionally filters the count by heuristic signature.
@type signature: object
@param signature: (Optional) Count only the crashes that match
this signature. See L{Crash.signature} for more details.
@rtype: int
@return: Count of crash dumps stored in this database. | ### Input:
Counts how many crash dumps have been stored in this database.
Optionally filters the count by heuristic signature.
@type signature: object
@param signature: (Optional) Count only the crashes that match
this signature. See L{Crash.signature} for more details.
@rtype: int
@return: Count of crash dumps stored in this database.
### Response:
def count(self, signature = None):
query = self._session.query(CrashDTO.id)
if signature:
sig_pickled = pickle.dumps(signature, protocol = 0)
query = query.filter_by(signature = sig_pickled)
return query.count() |
def _prepare_socket_file(self, socket_path, default_prefix):
if socket_path is not None:
if os.path.exists(socket_path):
raise Exception("Socket file {} exists!".format(socket_path))
socket_dir = os.path.dirname(socket_path)
try_to_create_directory(socket_dir)
return socket_path
return self._make_inc_temp(
prefix=default_prefix, directory_name=self._sockets_dir) | Prepare the socket file for raylet and plasma.
This method helps to prepare a socket file.
1. Make the directory if the directory does not exist.
2. If the socket file exists, raise exception.
Args:
socket_path (string): the socket file to prepare. | ### Input:
Prepare the socket file for raylet and plasma.
This method helps to prepare a socket file.
1. Make the directory if the directory does not exist.
2. If the socket file exists, raise exception.
Args:
socket_path (string): the socket file to prepare.
### Response:
def _prepare_socket_file(self, socket_path, default_prefix):
if socket_path is not None:
if os.path.exists(socket_path):
raise Exception("Socket file {} exists!".format(socket_path))
socket_dir = os.path.dirname(socket_path)
try_to_create_directory(socket_dir)
return socket_path
return self._make_inc_temp(
prefix=default_prefix, directory_name=self._sockets_dir) |
def fetch_upstream(self):
set_state(WORKFLOW_STATES.FETCHING_UPSTREAM)
cmd = ["git", "fetch", self.upstream]
self.run_cmd(cmd)
set_state(WORKFLOW_STATES.FETCHED_UPSTREAM) | git fetch <upstream> | ### Input:
git fetch <upstream>
### Response:
def fetch_upstream(self):
set_state(WORKFLOW_STATES.FETCHING_UPSTREAM)
cmd = ["git", "fetch", self.upstream]
self.run_cmd(cmd)
set_state(WORKFLOW_STATES.FETCHED_UPSTREAM) |
def resolve_field(
self,
parent_type: GraphQLObjectType,
source: Any,
field_nodes: List[FieldNode],
path: ResponsePath,
) -> AwaitableOrValue[Any]:
field_node = field_nodes[0]
field_name = field_node.name.value
field_def = get_field_def(self.schema, parent_type, field_name)
if not field_def:
return INVALID
resolve_fn = field_def.resolve or self.field_resolver
if self.middleware_manager:
resolve_fn = self.middleware_manager.get_field_resolver(resolve_fn)
info = self.build_resolve_info(field_def, field_nodes, parent_type, path)
result = self.resolve_field_value_or_error(
field_def, field_nodes, resolve_fn, source, info
)
return self.complete_value_catching_error(
field_def.type, field_nodes, info, path, result
) | Resolve the field on the given source object.
In particular, this figures out the value that the field returns by calling its
resolve function, then calls complete_value to await coroutine objects,
serialize scalars, or execute the sub-selection-set for objects. | ### Input:
Resolve the field on the given source object.
In particular, this figures out the value that the field returns by calling its
resolve function, then calls complete_value to await coroutine objects,
serialize scalars, or execute the sub-selection-set for objects.
### Response:
def resolve_field(
self,
parent_type: GraphQLObjectType,
source: Any,
field_nodes: List[FieldNode],
path: ResponsePath,
) -> AwaitableOrValue[Any]:
field_node = field_nodes[0]
field_name = field_node.name.value
field_def = get_field_def(self.schema, parent_type, field_name)
if not field_def:
return INVALID
resolve_fn = field_def.resolve or self.field_resolver
if self.middleware_manager:
resolve_fn = self.middleware_manager.get_field_resolver(resolve_fn)
info = self.build_resolve_info(field_def, field_nodes, parent_type, path)
result = self.resolve_field_value_or_error(
field_def, field_nodes, resolve_fn, source, info
)
return self.complete_value_catching_error(
field_def.type, field_nodes, info, path, result
) |
def enhance(self):
self.update({:
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemPuppetClasses)})
self.update({:
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemParameter)})
self.update({:
SubDict(self.api, self.objName,
self.payloadObj, self.key,
ItemSmartClassParameter)}) | Function enhance
Enhance the object with new item or enhanced items | ### Input:
Function enhance
Enhance the object with new item or enhanced items
### Response:
def enhance(self):
self.update({:
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemPuppetClasses)})
self.update({:
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemParameter)})
self.update({:
SubDict(self.api, self.objName,
self.payloadObj, self.key,
ItemSmartClassParameter)}) |
def devpiserver_cmdline_run(xom):
semantic-ui
if xom.config.args.theme == :
xom.config.args.theme = resource_filename(, )
xom.log.info("Semantic UI Theme loaded") | Load theme when `theme` parameter is 'semantic-ui'. | ### Input:
Load theme when `theme` parameter is 'semantic-ui'.
### Response:
def devpiserver_cmdline_run(xom):
semantic-ui
if xom.config.args.theme == :
xom.config.args.theme = resource_filename(, )
xom.log.info("Semantic UI Theme loaded") |
def register_function_hooks(self, func):
for hook_kind, hooks in func.xworkflows_hook.items():
for field_name, hook in hooks:
if field_name and field_name != self.state_field:
continue
for transition in self.workflow.transitions:
if hook.applies_to(transition):
implem = self.implementations[transition.name]
implem.add_hook(hook) | Looks at an object method and registers it for relevent transitions. | ### Input:
Looks at an object method and registers it for relevent transitions.
### Response:
def register_function_hooks(self, func):
for hook_kind, hooks in func.xworkflows_hook.items():
for field_name, hook in hooks:
if field_name and field_name != self.state_field:
continue
for transition in self.workflow.transitions:
if hook.applies_to(transition):
implem = self.implementations[transition.name]
implem.add_hook(hook) |
def catch_conn_reset(f):
try:
import OpenSSL
ConnectionError = OpenSSL.SSL.SysCallError
except:
ConnectionError = None
def new_f(self, *args, **kwargs):
if ConnectionError:
try:
return f(self, *args, **kwargs)
except ConnectionError as e:
log.warning("caught connection reset error: %s", e)
self.connect()
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return new_f | A decorator to handle connection reset errors even ones from pyOpenSSL
until https://github.com/edsu/twarc/issues/72 is resolved | ### Input:
A decorator to handle connection reset errors even ones from pyOpenSSL
until https://github.com/edsu/twarc/issues/72 is resolved
### Response:
def catch_conn_reset(f):
try:
import OpenSSL
ConnectionError = OpenSSL.SSL.SysCallError
except:
ConnectionError = None
def new_f(self, *args, **kwargs):
if ConnectionError:
try:
return f(self, *args, **kwargs)
except ConnectionError as e:
log.warning("caught connection reset error: %s", e)
self.connect()
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return new_f |
def write_option(**kwargs):
if len(kwargs) != 1:
raise TypeError(_BAD_OPTION_ERR)
name, value = kwargs.popitem()
if name == "last_update_time":
return _helpers.LastUpdateOption(value)
elif name == "exists":
return _helpers.ExistsOption(value)
else:
extra = "{!r} was provided".format(name)
raise TypeError(_BAD_OPTION_ERR, extra) | Create a write option for write operations.
Write operations include :meth:`~.DocumentReference.set`,
:meth:`~.DocumentReference.update` and
:meth:`~.DocumentReference.delete`.
One of the following keyword arguments must be provided:
* ``last_update_time`` (:class:`google.protobuf.timestamp_pb2.\
Timestamp`): A timestamp. When set, the target document must
exist and have been last updated at that time. Protobuf
``update_time`` timestamps are typically returned from methods
that perform write operations as part of a "write result"
protobuf or directly.
* ``exists`` (:class:`bool`): Indicates if the document being modified
should already exist.
Providing no argument would make the option have no effect (so
it is not allowed). Providing multiple would be an apparent
contradiction, since ``last_update_time`` assumes that the
document **was** updated (it can't have been updated if it
doesn't exist) and ``exists`` indicate that it is unknown if the
document exists or not.
Args:
kwargs (Dict[str, Any]): The keyword arguments described above.
Raises:
TypeError: If anything other than exactly one argument is
provided by the caller. | ### Input:
Create a write option for write operations.
Write operations include :meth:`~.DocumentReference.set`,
:meth:`~.DocumentReference.update` and
:meth:`~.DocumentReference.delete`.
One of the following keyword arguments must be provided:
* ``last_update_time`` (:class:`google.protobuf.timestamp_pb2.\
Timestamp`): A timestamp. When set, the target document must
exist and have been last updated at that time. Protobuf
``update_time`` timestamps are typically returned from methods
that perform write operations as part of a "write result"
protobuf or directly.
* ``exists`` (:class:`bool`): Indicates if the document being modified
should already exist.
Providing no argument would make the option have no effect (so
it is not allowed). Providing multiple would be an apparent
contradiction, since ``last_update_time`` assumes that the
document **was** updated (it can't have been updated if it
doesn't exist) and ``exists`` indicate that it is unknown if the
document exists or not.
Args:
kwargs (Dict[str, Any]): The keyword arguments described above.
Raises:
TypeError: If anything other than exactly one argument is
provided by the caller.
### Response:
def write_option(**kwargs):
if len(kwargs) != 1:
raise TypeError(_BAD_OPTION_ERR)
name, value = kwargs.popitem()
if name == "last_update_time":
return _helpers.LastUpdateOption(value)
elif name == "exists":
return _helpers.ExistsOption(value)
else:
extra = "{!r} was provided".format(name)
raise TypeError(_BAD_OPTION_ERR, extra) |
def resendFaxRN(self, CorpNum, OrgRequestNum, SenderNum, SenderName, ReceiverNum, ReceiverName, ReserveDT=None,
UserID=None, title=None, RequestNum=None):
receivers = None
if ReceiverNum != "" or ReceiverName != "":
receivers = []
receivers.append(FaxReceiver(receiveNum=ReceiverNum,
receiveName=ReceiverName)
)
return self.resendFaxRN_multi(CorpNum, OrgRequestNum, SenderNum, SenderName, receivers, ReserveDT,
UserID, title, RequestNum) | ν©μ€ λ¨κ±΄ μ μ‘
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
OrgRequestNum : μλ³Έ ν©μ€ μ μ‘μ ν λΉν μ μ‘μμ²λ²νΈ
ReceiptNum : ν©μ€ μ μλ²νΈ
SenderNum : λ°μ μ λ²νΈ
SenderName : λ°μ μλͺ
ReceiverNum : μμ λ²νΈ
ReceiverName : μμ μλͺ
ReserveDT : μμ½μκ°(νμ yyyyMMddHHmmss)
UserID : νλΉνμ μμ΄λ
title : ν©μ€μ λͺ©
RequestNum : μ μ‘μμ²μ ν λΉν μ μ‘μμ²λ²νΈ
return
μ μλ²νΈ (receiptNum)
raise
PopbillException | ### Input:
ν©μ€ λ¨κ±΄ μ μ‘
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
OrgRequestNum : μλ³Έ ν©μ€ μ μ‘μ ν λΉν μ μ‘μμ²λ²νΈ
ReceiptNum : ν©μ€ μ μλ²νΈ
SenderNum : λ°μ μ λ²νΈ
SenderName : λ°μ μλͺ
ReceiverNum : μμ λ²νΈ
ReceiverName : μμ μλͺ
ReserveDT : μμ½μκ°(νμ yyyyMMddHHmmss)
UserID : νλΉνμ μμ΄λ
title : ν©μ€μ λͺ©
RequestNum : μ μ‘μμ²μ ν λΉν μ μ‘μμ²λ²νΈ
return
μ μλ²νΈ (receiptNum)
raise
PopbillException
### Response:
def resendFaxRN(self, CorpNum, OrgRequestNum, SenderNum, SenderName, ReceiverNum, ReceiverName, ReserveDT=None,
UserID=None, title=None, RequestNum=None):
receivers = None
if ReceiverNum != "" or ReceiverName != "":
receivers = []
receivers.append(FaxReceiver(receiveNum=ReceiverNum,
receiveName=ReceiverName)
)
return self.resendFaxRN_multi(CorpNum, OrgRequestNum, SenderNum, SenderName, receivers, ReserveDT,
UserID, title, RequestNum) |
def run(context, port):
global ctx
ctx = context
app.run(port=port) | Run the Webserver/SocketIO and app | ### Input:
Run the Webserver/SocketIO and app
### Response:
def run(context, port):
global ctx
ctx = context
app.run(port=port) |
def _yield_callbacks(self):
    """Yield ``(name, callback, named_by_user)`` triples for all callbacks.

    Handles these cases:
      * default and user callbacks
      * callbacks with and without a user-supplied name
      * initialized callback instances and bare callback classes
      * PrintLog callbacks (instance or class) are yielded last
    """
    deferred_print_logs = []
    for entry in self.get_default_callbacks() + (self.callbacks or []):
        is_named = isinstance(entry, (tuple, list))
        if is_named:
            _, cb = entry
        else:
            cb = entry
        # Display name always comes from the callback itself: class name for
        # uninitialized classes, type name for instances.
        name = cb.__name__ if isinstance(cb, type) else cb.__class__.__name__
        triple = (name, cb, is_named)
        if isinstance(cb, PrintLog) or cb == PrintLog:
            deferred_print_logs.append(triple)
        else:
            yield triple
    yield from deferred_print_logs
a set whether its name was set by the user.
Handles these cases:
* default and user callbacks
* callbacks with and without name
* initialized and uninitialized callbacks
* puts PrintLog(s) last | ### Input:
Yield all callbacks set on this instance including
a set whether its name was set by the user.
Handles these cases:
* default and user callbacks
* callbacks with and without name
* initialized and uninitialized callbacks
* puts PrintLog(s) last
### Response:
def _yield_callbacks(self):
print_logs = []
for item in self.get_default_callbacks() + (self.callbacks or []):
if isinstance(item, (tuple, list)):
named_by_user = True
name, cb = item
else:
named_by_user = False
cb = item
if isinstance(cb, type):
name = cb.__name__
else:
name = cb.__class__.__name__
if isinstance(cb, PrintLog) or (cb == PrintLog):
print_logs.append((name, cb, named_by_user))
else:
yield name, cb, named_by_user
yield from print_logs |
def compute_laplacian_matrix(affinity_matrix, method=, **kwargs):
    """Compute the Laplacian matrix of ``affinity_matrix`` with the given
    method, delegating construction to ``Laplacian.init``.

    NOTE(review): the string literals on the ``def`` line and in the two
    lines below (the default method name, the alias it is compared against,
    and the canonical name it is rewritten to) were lost in extraction;
    this block is not valid Python until they are restored.
    """
    if method == :
        method =
    return Laplacian.init(method, **kwargs).laplacian_matrix(affinity_matrix)
Compute the laplacian matrix with the given method
### Response:
def compute_laplacian_matrix(affinity_matrix, method=, **kwargs):
if method == :
method =
return Laplacian.init(method, **kwargs).laplacian_matrix(affinity_matrix) |
def initialize_period(self, period=None):
    """Initialize the period of BF, truncated to midnight.

    :param period: optional ``datetime.datetime`` used to set the period
        explicitly; when omitted the current time is used.
    """
    base = period if period else dt.datetime.now()
    # Keep only the date part: the period is always anchored at midnight.
    self.current_period = dt.datetime(base.year, base.month, base.day)
    self.date = self.current_period.strftime("%Y-%m-%d")
:period: datetime.datetime for setting the period explicity. | ### Input:
Initialize the period of BF.
:period: datetime.datetime for setting the period explicity.
### Response:
def initialize_period(self, period=None):
if not period:
self.current_period = dt.datetime.now()
else:
self.current_period = period
self.current_period = dt.datetime(self.current_period.year, self.current_period.month, self.current_period.day)
self.date = self.current_period.strftime("%Y-%m-%d") |
def add(self, visualization):
    """Create a new visualization document in the index.

    :param visualization: instance of Visualization
    :return: the Elasticsearch create-API response
    """
    # Fall back to a fresh uuid1 when the visualization carries no id.
    doc_id = visualization.id or str(uuid.uuid1())
    return self.es.create(index=self.index,
                          id=doc_id,
                          doc_type=self.doc_type,
                          body=visualization.to_kibana(),
                          refresh=True)
:param visualization: instance of Visualization
:return: | ### Input:
Creates a new visualization
:param visualization: instance of Visualization
:return:
### Response:
def add(self, visualization):
res = self.es.create(index=self.index,
id=visualization.id or str(uuid.uuid1()),
doc_type=self.doc_type,
body=visualization.to_kibana(), refresh=True)
return res |
def server_show_libcloud(self, uuid):
    """Make server-show output look like libcloud output for consistency.

    NOTE(review): the attribute name inside ``hasattr(self, )`` was lost
    in extraction (presumably 'password', given the assignment guarded by
    it); restore it before use -- this line is not valid Python as-is.
    """
    server_info = self.server_show(uuid)
    # server_show returns a single-entry mapping of {name: details}.
    server = next(six.itervalues(server_info))
    server_name = next(six.iterkeys(server_info))
    if not hasattr(self, ):
        self.password = None
    ret = NovaServer(server_name, server, self.password)
    return ret
Make output look like libcloud output for consistency
### Response:
def server_show_libcloud(self, uuid):
server_info = self.server_show(uuid)
server = next(six.itervalues(server_info))
server_name = next(six.iterkeys(server_info))
if not hasattr(self, ):
self.password = None
ret = NovaServer(server_name, server, self.password)
return ret |
def recommend(self, userid, user_items,
              N=10, filter_already_liked_items=True, filter_items=None, recalculate_user=False):
    """Calculate the N best recommendations for a user.

    Abstract hook: concrete recommender implementations override this and
    return a list of ``(itemid, score)`` tuples.

    Parameters
    ----------
    userid : int
        The userid to calculate recommendations for.
    user_items : csr_matrix
        Sparse (number_users, number_items) matrix of the user's liked
        items and weights, used to filter out already-liked items and
        optionally to recalculate the user representation.
    N : int, optional
        The number of results to return.
    filter_already_liked_items : bool, optional
        When True, drop items the user has already liked.
    filter_items : sequence of ints, optional
        List of extra item ids to filter out from the output.
    recalculate_user : bool, optional
        When True, don't rely on stored user state and instead recalculate
        from the passed-in ``user_items``.

    Returns
    -------
    list
        List of (itemid, score) tuples.
    """
    pass
Calculates the N best recommendations for a user, and returns a list of itemids, score.
Parameters
----------
userid : int
The userid to calculate recommendations for
user_items : csr_matrix
A sparse matrix of shape (number_users, number_items). This lets us look
up the liked items and their weights for the user. This is used to filter out
items that have already been liked from the output, and to also potentially
calculate the best items for this user.
N : int, optional
The number of results to return
filter_items : sequence of ints, optional
List of extra item ids to filter out from the output
recalculate_user : bool, optional
When true, don't rely on stored user state and instead recalculate from the
passed in user_items
Returns
-------
list
List of (itemid, score) tuples | ### Input:
Recommends items for a user
Calculates the N best recommendations for a user, and returns a list of itemids, score.
Parameters
----------
userid : int
The userid to calculate recommendations for
user_items : csr_matrix
A sparse matrix of shape (number_users, number_items). This lets us look
up the liked items and their weights for the user. This is used to filter out
items that have already been liked from the output, and to also potentially
calculate the best items for this user.
N : int, optional
The number of results to return
filter_items : sequence of ints, optional
List of extra item ids to filter out from the output
recalculate_user : bool, optional
When true, don't rely on stored user state and instead recalculate from the
passed in user_items
Returns
-------
list
List of (itemid, score) tuples
### Response:
def recommend(self, userid, user_items,
N=10, filter_already_liked_items=True, filter_items=None, recalculate_user=False):
pass |
def _get(self, path, **kwargs):
    """Issue a GET request against the API and return the decoded JSON dict.

    NOTE(review): the format strings passed to ``self._api`` and the HTTP
    method literal in ``request.Request`` were lost in extraction
    (presumably '%s?%s', '%s' and 'GET'); restore them before use --
    these lines are not valid Python as they stand.
    """
    clean_kwargs = clean_dict(kwargs)
    data = urllib.parse.urlencode(clean_kwargs)
    if len(data) > 0:
        api = self._api( % (path, data))
    else:
        api = self._api( % path)
    req = request.Request(api, headers=self._headers, method=)
    try:
        resp = request.urlopen(req).read()
    except urllib.error.HTTPError as e:
        # Error responses are decoded too, rather than raising.
        resp = e.fp.read()
    return json.loads(resp.decode())
return a dict.
### Response:
def _get(self, path, **kwargs):
clean_kwargs = clean_dict(kwargs)
data = urllib.parse.urlencode(clean_kwargs)
if len(data) > 0:
api = self._api( % (path, data))
else:
api = self._api( % path)
req = request.Request(api, headers=self._headers, method=)
try:
resp = request.urlopen(req).read()
except urllib.error.HTTPError as e:
resp = e.fp.read()
return json.loads(resp.decode()) |
def sort_by_fields(items, fields):
    """Sort a list of objects in place on the given fields.

    The field list works analogously to ``queryset.order_by(*fields)``:
    each field is an attribute of the objects, optionally prefixed by '-'
    (e.g. '-name') to indicate reverse ordering.  ``None`` values always
    sort before non-None values on each key.
    """
    # Sort on the least-significant key first; Python's stable sort then
    # makes the leftmost field the primary ordering.
    for key in reversed(fields):
        reverse = False
        if key[0] == '-':  # restored literal: '-' prefix marks descending order
            reverse = True
            key = key[1:]
        items.sort(key=lambda x: (getattr(x, key) is not None, getattr(x, key)),
                   reverse=reverse)
queryset.order_by(*fields): each field is either a property of the object,
or is prefixed by '-' (e.g. '-name') to indicate reverse ordering. | ### Input:
Sort a list of objects on the given fields. The field list works analogously to
queryset.order_by(*fields): each field is either a property of the object,
or is prefixed by '-' (e.g. '-name') to indicate reverse ordering.
### Response:
def sort_by_fields(items, fields):
for key in reversed(fields):
reverse = False
if key[0] == :
reverse = True
key = key[1:]
items.sort(key=lambda x: (getattr(x, key) is not None, getattr(x, key)), reverse=reverse) |
def parse_id(input_id):
    """Parse the load balancer kind and actual id from the "kind:id" form.

    :param input_id: string of the form "kind:id".
    :return: ``(kind, int(id))`` tuple.
    :raises exceptions.CLIAbort: if the input is not exactly "kind:id".
    """
    parts = input_id.split(':')  # restored separator for the "kind:id" form
    if len(parts) != 2:
        raise exceptions.CLIAbort(
            'Invalid ID %s: ID should be of the form "kind:id"' % input_id)
    return parts[0], int(parts[1])
Parse the load balancer kind and actual id from the "kind:id" form.
### Response:
def parse_id(input_id):
parts = input_id.split()
if len(parts) != 2:
raise exceptions.CLIAbort(
% input_id)
return parts[0], int(parts[1]) |
def get_functions(self, dbName, pattern):
    """Fetch metastore functions in *dbName* whose names match *pattern*.

    Thrift client stub: sends the request, then blocks on the reply.

    Parameters:
     - dbName
     - pattern
    """
    self.send_get_functions(dbName, pattern)
    return self.recv_get_functions()
- dbName
- pattern | ### Input:
Parameters:
- dbName
- pattern
### Response:
def get_functions(self, dbName, pattern):
self.send_get_functions(dbName, pattern)
return self.recv_get_functions() |
def _butter(self, data, btype, f3=2, order=2):
b, a = signal.butter(order, f3 / (0.5 * self.fs), btype=btype)
y = signal.filtfilt(b, a, data)
return y | Applies a digital butterworth filter via filtfilt at the specified f3 and order. Default values are set to
correspond to apparently sensible filters that distinguish between vibration and tilt from an accelerometer.
:param data: the data to filter.
:param btype: high or low.
:param f3: the f3 of the filter.
:param order: the filter order.
:return: the filtered signal. | ### Input:
Applies a digital butterworth filter via filtfilt at the specified f3 and order. Default values are set to
correspond to apparently sensible filters that distinguish between vibration and tilt from an accelerometer.
:param data: the data to filter.
:param btype: high or low.
:param f3: the f3 of the filter.
:param order: the filter order.
:return: the filtered signal.
### Response:
def _butter(self, data, btype, f3=2, order=2):
b, a = signal.butter(order, f3 / (0.5 * self.fs), btype=btype)
y = signal.filtfilt(b, a, data)
return y |
def ineichen(apparent_zenith, airmass_absolute, linke_turbidity,
             altitude=0, dni_extra=1364., perez_enhancement=False):
    """Determine clear sky GHI, DNI, and DHI from the Ineichen/Perez model.

    Returns a DataFrame (for Series input) or OrderedDict of arrays with
    columns/keys 'dhi', 'dni', 'ghi'.

    NOTE(review): the body of this function was truncated in extraction --
    the actual GHI/DNI/DHI computation is missing, leaving only a stray
    token line and the final packaging; ``dni`` and ``irrads`` are
    undefined here.  Restore from the upstream implementation before use.
    """
    dhidnighi
    if isinstance(dni, pd.Series):
        irrads = pd.DataFrame.from_dict(irrads)
    return irrads
Implements the Ineichen and Perez clear sky model for global
horizontal irradiance (GHI), direct normal irradiance (DNI), and
calculates the clear-sky diffuse horizontal (DHI) component as the
difference between GHI and DNI*cos(zenith) as presented in [1, 2]. A
report on clear sky models found the Ineichen/Perez model to have
excellent performance with a minimal input data set [3].
Default values for monthly Linke turbidity provided by SoDa [4, 5].
Parameters
-----------
apparent_zenith : numeric
Refraction corrected solar zenith angle in degrees.
airmass_absolute : numeric
Pressure corrected airmass.
linke_turbidity : numeric
Linke Turbidity.
altitude : numeric, default 0
Altitude above sea level in meters.
dni_extra : numeric, default 1364
Extraterrestrial irradiance. The units of ``dni_extra``
determine the units of the output.
perez_enhancement : bool, default False
Controls if the Perez enhancement factor should be applied.
Setting to True may produce spurious results for times when
the Sun is near the horizon and the airmass is high.
See https://github.com/pvlib/pvlib-python/issues/435
Returns
-------
clearsky : DataFrame (if Series input) or OrderedDict of arrays
DataFrame/OrderedDict contains the columns/keys
``'dhi', 'dni', 'ghi'``.
See also
--------
lookup_linke_turbidity
pvlib.location.Location.get_clearsky
References
----------
[1] P. Ineichen and R. Perez, "A New airmass independent formulation for
the Linke turbidity coefficient", Solar Energy, vol 73, pp. 151-157,
2002.
[2] R. Perez et. al., "A New Operational Model for Satellite-Derived
Irradiances: Description and Validation", Solar Energy, vol 73, pp.
307-317, 2002.
[3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance Clear
Sky Models: Implementation and Analysis", Sandia National
Laboratories, SAND2012-2389, 2012.
[4] http://www.soda-is.com/eng/services/climat_free_eng.php#c5 (obtained
July 17, 2012).
[5] J. Remund, et. al., "Worldwide Linke Turbidity Information", Proc.
ISES Solar World Congress, June 2003. Goteborg, Sweden. | ### Input:
Determine clear sky GHI, DNI, and DHI from Ineichen/Perez model.
Implements the Ineichen and Perez clear sky model for global
horizontal irradiance (GHI), direct normal irradiance (DNI), and
calculates the clear-sky diffuse horizontal (DHI) component as the
difference between GHI and DNI*cos(zenith) as presented in [1, 2]. A
report on clear sky models found the Ineichen/Perez model to have
excellent performance with a minimal input data set [3].
Default values for monthly Linke turbidity provided by SoDa [4, 5].
Parameters
-----------
apparent_zenith : numeric
Refraction corrected solar zenith angle in degrees.
airmass_absolute : numeric
Pressure corrected airmass.
linke_turbidity : numeric
Linke Turbidity.
altitude : numeric, default 0
Altitude above sea level in meters.
dni_extra : numeric, default 1364
Extraterrestrial irradiance. The units of ``dni_extra``
determine the units of the output.
perez_enhancement : bool, default False
Controls if the Perez enhancement factor should be applied.
Setting to True may produce spurious results for times when
the Sun is near the horizon and the airmass is high.
See https://github.com/pvlib/pvlib-python/issues/435
Returns
-------
clearsky : DataFrame (if Series input) or OrderedDict of arrays
DataFrame/OrderedDict contains the columns/keys
``'dhi', 'dni', 'ghi'``.
See also
--------
lookup_linke_turbidity
pvlib.location.Location.get_clearsky
References
----------
[1] P. Ineichen and R. Perez, "A New airmass independent formulation for
the Linke turbidity coefficient", Solar Energy, vol 73, pp. 151-157,
2002.
[2] R. Perez et. al., "A New Operational Model for Satellite-Derived
Irradiances: Description and Validation", Solar Energy, vol 73, pp.
307-317, 2002.
[3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance Clear
Sky Models: Implementation and Analysis", Sandia National
Laboratories, SAND2012-2389, 2012.
[4] http://www.soda-is.com/eng/services/climat_free_eng.php#c5 (obtained
July 17, 2012).
[5] J. Remund, et. al., "Worldwide Linke Turbidity Information", Proc.
ISES Solar World Congress, June 2003. Goteborg, Sweden.
### Response:
def ineichen(apparent_zenith, airmass_absolute, linke_turbidity,
altitude=0, dni_extra=1364., perez_enhancement=False):
dhidnighi
if isinstance(dni, pd.Series):
irrads = pd.DataFrame.from_dict(irrads)
return irrads |
def backward_word_extend_selection(self, e):
    u"""Move back to the start of the current or previous word, extending
    the selection.  Words are composed of letters and digits."""
    # The bare ``u`` expression left behind by a stripped docstring was a
    # NameError at call time; it is restored as the docstring above.
    self.l_buffer.backward_word_extend_selection(self.argument_reset)
    self.finalize()
composed of letters and digits. | ### Input:
u"""Move back to the start of the current or previous word. Words are
composed of letters and digits.
### Response:
def backward_word_extend_selection(self, e):
u
self.l_buffer.backward_word_extend_selection(self.argument_reset)
self.finalize() |
def movingAverage(requestContext, seriesList, windowSize):
    """Graph the moving average of each series over a fixed number of past
    points, or a time interval.

    ``windowSize`` is either an int (number of datapoints) or a quoted
    time-length string like '1hour' or '5min'.

    NOTE(review): two string literals were lost in extraction -- the key
    subscripted on ``newContext``/``requestContext`` (presumably
    'startTime' / 'args') and the name format string in the string-window
    branch; this block is not valid Python until they are restored.
    """
    if not seriesList:
        return []
    windowInterval = None
    if isinstance(windowSize, six.string_types):
        # Time-based window: convert the offset string to seconds.
        delta = parseTimeOffset(windowSize)
        windowInterval = to_seconds(delta)
    if windowInterval:
        previewSeconds = windowInterval
    else:
        previewSeconds = max([s.step for s in seriesList]) * int(windowSize)
    # Re-fetch with the start time shifted back so the first window is full.
    newContext = requestContext.copy()
    newContext[] = (requestContext[] -
                    timedelta(seconds=previewSeconds))
    previewList = evaluateTokens(newContext, requestContext[][0])
    result = []
    for series in previewList:
        if windowInterval:
            windowPoints = windowInterval // series.step
        else:
            windowPoints = int(windowSize)
        if isinstance(windowSize, six.string_types):
            newName = % (series.name, windowSize)
        else:
            newName = "movingAverage(%s,%s)" % (series.name, windowSize)
        newSeries = TimeSeries(newName, series.start + previewSeconds,
                               series.end, series.step, [])
        newSeries.pathExpression = newName
        # Running-sum sliding window: subtract the value leaving the window,
        # add the value entering it, tracking the non-None count separately.
        windowSum = safeSum(series[:windowPoints]) or 0
        count = safeLen(series[:windowPoints])
        newSeries.append(safeDiv(windowSum, count))
        for n, last in enumerate(series[windowPoints:-1]):
            if series[n] is not None:
                windowSum -= series[n]
                count -= 1
            if last is not None:
                windowSum += last
                count += 1
            newSeries.append(safeDiv(windowSum, count))
        result.append(newSeries)
    return result
return result | Graphs the moving average of a metric (or metrics) over a fixed number of
past points, or a time interval.
Takes one metric or a wildcard seriesList followed by a number N of
datapoints or a quoted string with a length of time like '1hour' or '5min'
(See ``from / until`` in the render\_api_ for examples of time formats).
Graphs the average of the preceding datapoints for each point on the graph.
Example::
&target=movingAverage(Server.instance01.threads.busy,10)
&target=movingAverage(Server.instance*.threads.idle,'5min') | ### Input:
Graphs the moving average of a metric (or metrics) over a fixed number of
past points, or a time interval.
Takes one metric or a wildcard seriesList followed by a number N of
datapoints or a quoted string with a length of time like '1hour' or '5min'
(See ``from / until`` in the render\_api_ for examples of time formats).
Graphs the average of the preceding datapoints for each point on the graph.
Example::
&target=movingAverage(Server.instance01.threads.busy,10)
&target=movingAverage(Server.instance*.threads.idle,'5min')
### Response:
def movingAverage(requestContext, seriesList, windowSize):
if not seriesList:
return []
windowInterval = None
if isinstance(windowSize, six.string_types):
delta = parseTimeOffset(windowSize)
windowInterval = to_seconds(delta)
if windowInterval:
previewSeconds = windowInterval
else:
previewSeconds = max([s.step for s in seriesList]) * int(windowSize)
newContext = requestContext.copy()
newContext[] = (requestContext[] -
timedelta(seconds=previewSeconds))
previewList = evaluateTokens(newContext, requestContext[][0])
result = []
for series in previewList:
if windowInterval:
windowPoints = windowInterval // series.step
else:
windowPoints = int(windowSize)
if isinstance(windowSize, six.string_types):
newName = % (series.name, windowSize)
else:
newName = "movingAverage(%s,%s)" % (series.name, windowSize)
newSeries = TimeSeries(newName, series.start + previewSeconds,
series.end, series.step, [])
newSeries.pathExpression = newName
windowSum = safeSum(series[:windowPoints]) or 0
count = safeLen(series[:windowPoints])
newSeries.append(safeDiv(windowSum, count))
for n, last in enumerate(series[windowPoints:-1]):
if series[n] is not None:
windowSum -= series[n]
count -= 1
if last is not None:
windowSum += last
count += 1
newSeries.append(safeDiv(windowSum, count))
result.append(newSeries)
return result |
def depth_may_average_ground_temperature(self, value=None):
    """Corresponds to IDD Field `depth_may_average_ground_temperature`.

    Args:
        value (float): value for IDD Field
            `depth_may_average_ground_temperature` (unit: C).  If `value`
            is None it will not be checked against the specification and
            is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value (not convertible
            to float).
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            # Restored message for the string literal lost in extraction.
            raise ValueError(
                'value {} need to be of type float '
                'for field `depth_may_average_ground_temperature`'.format(value))
    self._depth_may_average_ground_temperature = value
Args:
value (float): value for IDD Field `depth_may_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | ### Input:
Corresponds to IDD Field `depth_may_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_may_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
### Response:
def depth_may_average_ground_temperature(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
.format(value))
self._depth_may_average_ground_temperature = value |
def get_token(self, code, **params):
    """Get an access token from the provider token URI.

    :param code: Authorization code.
    :type code: str
    :return: Dict containing access token, refresh token, etc.
    :rtype: dict

    NOTE(review): the dict-key string literals in this block ('code',
    'grant_type', 'client_id', 'client_secret', 'redirect_uri') were lost
    in extraction; this block is not valid Python until they are restored.
    """
    params[] = code
    if not in params:
        params[] = self.default_grant_type
    params.update({: self.client_id,
                   : self.client_secret,
                   : self.redirect_uri})
    response = self.http_post(self.token_uri, params)
    # Some HTTP clients expose ``json`` as an attribute rather than a
    # method; fall back to the attribute when calling it fails.
    try:
        return response.json()
    except TypeError:
        return response.json
return response.json | Get an access token from the provider token URI.
:param code: Authorization code.
:type code: str
:return: Dict containing access token, refresh token, etc.
:rtype: dict | ### Input:
Get an access token from the provider token URI.
:param code: Authorization code.
:type code: str
:return: Dict containing access token, refresh token, etc.
:rtype: dict
### Response:
def get_token(self, code, **params):
params[] = code
if not in params:
params[] = self.default_grant_type
params.update({: self.client_id,
: self.client_secret,
: self.redirect_uri})
response = self.http_post(self.token_uri, params)
try:
return response.json()
except TypeError:
return response.json |
def load(text, match=None):
    """Read a string containing the XML of an Atom Feed and return the data
    as a native Python structure (a ``dict`` or ``list``).

    If a tag name or path to match is provided, only the matching
    sub-elements are loaded.

    :param text: The XML text to load.
    :type text: ``string``
    :param match: A tag name or path to match (optional).
    :type match: ``string``
    """
    if text is None:
        return None
    text = text.strip()
    if len(text) == 0:
        return None
    # Restored dict keys lost in extraction: shared name table used by
    # load_root to track XML namespaces and element names.
    nametable = {
        'namespaces': [],
        'names': {}
    }
    root = XML(text)
    items = [root] if match is None else root.findall(match)
    count = len(items)
    if count == 0:
        return None
    elif count == 1:
        return load_root(items[0], nametable)
    else:
        return [load_root(item, nametable) for item in items]
return [load_root(item, nametable) for item in items] | This function reads a string that contains the XML of an Atom Feed, then
returns the
data in a native Python structure (a ``dict`` or ``list``). If you also
provide a tag name or path to match, only the matching sub-elements are
loaded.
:param text: The XML text to load.
:type text: ``string``
:param match: A tag name or path to match (optional).
:type match: ``string`` | ### Input:
This function reads a string that contains the XML of an Atom Feed, then
returns the
data in a native Python structure (a ``dict`` or ``list``). If you also
provide a tag name or path to match, only the matching sub-elements are
loaded.
:param text: The XML text to load.
:type text: ``string``
:param match: A tag name or path to match (optional).
:type match: ``string``
### Response:
def load(text, match=None):
if text is None: return None
text = text.strip()
if len(text) == 0: return None
nametable = {
: [],
: {}
}
root = XML(text)
items = [root] if match is None else root.findall(match)
count = len(items)
if count == 0:
return None
elif count == 1:
return load_root(items[0], nametable)
else:
return [load_root(item, nametable) for item in items] |
def count(self) -> "CountQuery":
    """Return a CountQuery that resolves to the number of objects in the
    queryset instead of the objects themselves."""
    return CountQuery(
        db=self._db,
        model=self.model,
        q_objects=self._q_objects,
        annotations=self._annotations,
        custom_filters=self._custom_filters,
    )
Return count of objects in queryset instead of objects.
### Response:
def count(self) -> "CountQuery":
return CountQuery(
db=self._db,
model=self.model,
q_objects=self._q_objects,
annotations=self._annotations,
custom_filters=self._custom_filters,
) |
def calculate(cls, calc, formula_reg, data_reg, out_reg,
              timestep=None, idx=None):
    """Execute one calculation and store its results in ``out_reg``.

    :param calc: calculation, with formula, args and return keys
    :type calc: dict
    :param formula_reg: Registry of formulas.
    :param data_reg: Data registry.
    :param out_reg: Outputs registry.
    :param timestep: simulation interval length [time], default ``None``
    :param idx: interval index, default ``None``

    NOTE(review): several string literals were lost in extraction -- the
    keys subscripted on ``calc`` (presumably 'formula', 'args', 'returns'),
    the keys passed to ``args.get`` ('data', 'outputs'), the covariance
    kwarg key, and the LOGGER.debug format strings.  This block is not
    valid Python until they are restored.
    """
    formula = calc[]
    func = formula_reg[formula]
    fargs = formula_reg.args.get(formula, [])
    constants = formula_reg.isconstant.get(formula)
    # Variable (non-constant) arguments participate in uncertainty math.
    vargs = [] if constants is None else [a for a in fargs if a not in constants]
    args = calc[]
    datargs, outargs = args.get(, {}), args.get(, {})
    data = index_registry(datargs, data_reg, timestep, idx)
    outputs = index_registry(outargs, out_reg, timestep, idx)
    kwargs = dict(data, **outputs)
    args = [kwargs.pop(a) for a in fargs if a in kwargs]
    returns = calc[]
    if constants is None:
        cov = None
    else:
        cov = cls.get_covariance(datargs, outargs, vargs,
                                 data_reg.variance, out_reg.variance)
        kwargs[] = cov
    retval = func(*args, **kwargs)
    if cov is not None:
        # Formula returned (values..., covariance, jacobian); peel off the
        # trailing uncertainty outputs and normalize covariance to
        # fractional (relative) terms by scaling with 1/value.
        cov, jac = retval[-2:]
        retval = retval[:-2]
        scale = np.asarray(
            [1 / r.m if isinstance(r, UREG.Quantity) else 1 / r
             for r in retval]
        )
        cov = (np.swapaxes((cov.T * scale), 0, 1) * scale).T
        nret = len(retval)
        for m in xrange(nret):
            a = returns[m]
            out_reg.variance[a] = {}
            out_reg.uncertainty[a] = {}
            out_reg.jacobian[a] = {}
            for n in xrange(nret):
                b = returns[n]
                out_reg.variance[a][b] = cov[:, m, n]
                if a == b:
                    # Diagonal terms give the standard uncertainty in percent.
                    unc = np.sqrt(cov[:, m, n]) * 100 * UREG.percent
                    out_reg.uncertainty[a][b] = unc
            for n in xrange(len(vargs)):
                b = vargs[n]
                # Map the formula arg name back to its registry name.
                try:
                    b = datargs[b]
                except (KeyError, TypeError):
                    b = outargs[b]
                out_reg.jacobian[a][b] = jac[:, m, n]
            LOGGER.debug(, a, out_reg.variance[a])
            LOGGER.debug(, a, out_reg.jacobian[a])
            LOGGER.debug(, a, out_reg.uncertainty[a])
    if len(retval) == 1:
        retval = retval[0]
    if len(returns) > 1:
        if idx is None:
            out_reg.update(zip(returns, retval))
        else:
            for k, v in zip(returns, retval):
                out_reg[k][idx] = v
    else:
        if idx is None:
            out_reg[returns[0]] = retval
        else:
            out_reg[returns[0]][idx] = retval
:param calc: calculation, with formula, args and return keys
:type calc: dict
:param formula_reg: Registry of formulas.
:type formula_reg: :class:`~simkit.core.FormulaRegistry`
:param data_reg: Data registry.
:type data_reg: :class:`~simkit.core.data_sources.DataRegistry`
:param out_reg: Outputs registry.
:type out_reg: :class:`~simkit.core.outputs.OutputRegistry`
:param timestep: simulation interval length [time], default is ``None``
:param idx: interval index, default is ``None``
:type idx: int | ### Input:
Execute calculation
:param calc: calculation, with formula, args and return keys
:type calc: dict
:param formula_reg: Registry of formulas.
:type formula_reg: :class:`~simkit.core.FormulaRegistry`
:param data_reg: Data registry.
:type data_reg: :class:`~simkit.core.data_sources.DataRegistry`
:param out_reg: Outputs registry.
:type out_reg: :class:`~simkit.core.outputs.OutputRegistry`
:param timestep: simulation interval length [time], default is ``None``
:param idx: interval index, default is ``None``
:type idx: int
### Response:
def calculate(cls, calc, formula_reg, data_reg, out_reg,
timestep=None, idx=None):
formula = calc[]
func = formula_reg[formula]
fargs = formula_reg.args.get(formula, [])
constants = formula_reg.isconstant.get(formula)
vargs = [] if constants is None else [a for a in fargs if a not in constants]
args = calc[]
datargs, outargs = args.get(, {}), args.get(, {})
data = index_registry(datargs, data_reg, timestep, idx)
outputs = index_registry(outargs, out_reg, timestep, idx)
kwargs = dict(data, **outputs)
args = [kwargs.pop(a) for a in fargs if a in kwargs]
returns = calc[]
if constants is None:
cov = None
else:
cov = cls.get_covariance(datargs, outargs, vargs,
data_reg.variance, out_reg.variance)
kwargs[] = cov
retval = func(*args, **kwargs)
if cov is not None:
cov, jac = retval[-2:]
retval = retval[:-2]
scale = np.asarray(
[1 / r.m if isinstance(r, UREG.Quantity) else 1 / r
for r in retval]
)
cov = (np.swapaxes((cov.T * scale), 0, 1) * scale).T
nret = len(retval)
for m in xrange(nret):
a = returns[m]
out_reg.variance[a] = {}
out_reg.uncertainty[a] = {}
out_reg.jacobian[a] = {}
for n in xrange(nret):
b = returns[n]
out_reg.variance[a][b] = cov[:, m, n]
if a == b:
unc = np.sqrt(cov[:, m, n]) * 100 * UREG.percent
out_reg.uncertainty[a][b] = unc
for n in xrange(len(vargs)):
b = vargs[n]
try:
b = datargs[b]
except (KeyError, TypeError):
b = outargs[b]
out_reg.jacobian[a][b] = jac[:, m, n]
LOGGER.debug(, a, out_reg.variance[a])
LOGGER.debug(, a, out_reg.jacobian[a])
LOGGER.debug(, a, out_reg.uncertainty[a])
if len(retval) == 1:
retval = retval[0]
if len(returns) > 1:
if idx is None:
out_reg.update(zip(returns, retval))
else:
for k, v in zip(returns, retval):
out_reg[k][idx] = v
else:
if idx is None:
out_reg[returns[0]] = retval
else:
out_reg[returns[0]][idx] = retval |
def heightmap_rain_erosion(
    hm: np.ndarray,
    nbDrops: int,
    erosionCoef: float,
    sedimentationCoef: float,
    rnd: Optional[tcod.random.Random] = None,
) -> None:
    """Simulate the effect of rain drops on the terrain, resulting in erosion.

    ``nbDrops`` should be at least hm.size.

    Args:
        hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
        nbDrops (int): Number of rain drops to simulate.
        erosionCoef (float): Amount of ground eroded on the drop's path.
        sedimentationCoef (float): Amount of ground deposited when the drop
            stops to flow.
        rnd (Optional[Random]): A tcod.Random instance, or None.
    """
    # Fall back to the library's default RNG when no generator is supplied.
    rng_handle = rnd.random_c if rnd else ffi.NULL
    lib.TCOD_heightmap_rain_erosion(
        _heightmap_cdata(hm),
        nbDrops,
        erosionCoef,
        sedimentationCoef,
        rng_handle,
    )
``nbDrops`` should be at least hm.size.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
nbDrops (int): Number of rain drops to simulate.
erosionCoef (float): Amount of ground eroded on the drop's path.
sedimentationCoef (float): Amount of ground deposited when the drops
stops to flow.
rnd (Optional[Random]): A tcod.Random instance, or None. | ### Input:
Simulate the effect of rain drops on the terrain, resulting in erosion.
``nbDrops`` should be at least hm.size.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
nbDrops (int): Number of rain drops to simulate.
erosionCoef (float): Amount of ground eroded on the drop's path.
sedimentationCoef (float): Amount of ground deposited when the drops
stops to flow.
rnd (Optional[Random]): A tcod.Random instance, or None.
### Response:
def heightmap_rain_erosion(
hm: np.ndarray,
nbDrops: int,
erosionCoef: float,
sedimentationCoef: float,
rnd: Optional[tcod.random.Random] = None,
) -> None:
lib.TCOD_heightmap_rain_erosion(
_heightmap_cdata(hm),
nbDrops,
erosionCoef,
sedimentationCoef,
rnd.random_c if rnd else ffi.NULL,
) |
def create_epub(ident_hash, file, format=):
    """Create an epub from an ``ident_hash`` and write it to ``file``
    (a file-like object).  Returns None.

    NOTE(review): the string literals for the ``format`` default and the
    value it is compared against (presumably the raw/baked format name)
    were lost in extraction; this block is not valid Python until they
    are restored.
    """
    model = factory(ident_hash, baked=(format != ))
    # Wrap a bare Document so make_epub always receives a binder.
    if isinstance(model, cnxepub.Document):
        model = cnxepub.TranslucentBinder(nodes=[model])
    cnxepub.make_epub(model, file)
``file`` (a file-like object).
Returns None, writes to the given ``file``. | ### Input:
Creates an epub from an ``ident_hash``, which is output to the given
``file`` (a file-like object).
Returns None, writes to the given ``file``.
### Response:
def create_epub(ident_hash, file, format=):
model = factory(ident_hash, baked=(format != ))
if isinstance(model, cnxepub.Document):
model = cnxepub.TranslucentBinder(nodes=[model])
cnxepub.make_epub(model, file) |
def equate_initial(name1, name2):
    """Evaluate whether names match, or one name is the initial of the other.

    Empty names never match; a single-character name matches when it equals
    the other name's first character; otherwise the full names are compared.
    """
    if not name1 or not name2:
        return False
    if min(len(name1), len(name2)) == 1:
        return name1[0] == name2[0]
    return name1 == name2
Evaluates whether names match, or one name is the initial of the other
### Response:
def equate_initial(name1, name2):
if len(name1) == 0 or len(name2) == 0:
return False
if len(name1) == 1 or len(name2) == 1:
return name1[0] == name2[0]
return name1 == name2 |
def _placeholders_recursif(nodelist, plist, blist):
    """Recursively search a template node list for PlaceholderNode nodes,
    accumulating them in ``plist`` while tracking the block stack ``blist``.

    NOTE(review): the attribute-name string literals inside the
    ``hasattr(node, )`` calls and the key tuple ``(, , )`` (presumably the
    nodelist attribute names such as 'nodelist', 'nodelist_true',
    'nodelist_false') were lost in extraction; this block is not valid
    Python until they are restored.
    """
    from django.template.loader_tags import BlockNode
    for node in nodelist:
        # Recurse into parent templates ({% extends %}) and included ones.
        if hasattr(node, ):
            _placeholders_recursif(node.get_parent(dummy_context).nodelist,
                                   plist, blist)
        elif hasattr(node, ) and hasattr(node.template, ):
            _placeholders_recursif(node.template.nodelist, plist, blist)
        # Duck-typed placeholder detection via its expected attributes.
        if hasattr(node, ) and hasattr(node, ) and \
                hasattr(node, ) and hasattr(node, ):
            already_in_plist = False
            for placeholder in plist:
                if placeholder.name == node.name:
                    already_in_plist = True
            if not already_in_plist:
                if len(blist):
                    # Remember which block this placeholder was found in.
                    node.found_in_block = blist[len(blist) - 1]
                plist.append(node)
            node.render(Context())
        for key in (, , ):
            if isinstance(node, BlockNode):
                # A redefined block overrides placeholders found in the
                # parent's block of the same name: drop those.
                offset = 0
                _plist = [(i, v) for i, v in enumerate(plist)]
                for index, pl in _plist:
                    if pl.found_in_block and \
                            pl.found_in_block.name == node.name \
                            and pl.found_in_block != node:
                        del plist[index - offset]
                        offset += 1
                blist.append(node)
            if hasattr(node, key):
                try:
                    _placeholders_recursif(getattr(node, key), plist, blist)
                except:
                    pass
            if isinstance(node, BlockNode):
                blist.pop()
node. | ### Input:
Recursively search into a template node list for PlaceholderNode
node.
### Response:
def _placeholders_recursif(nodelist, plist, blist):
from django.template.loader_tags import BlockNode
for node in nodelist:
if hasattr(node, ):
_placeholders_recursif(node.get_parent(dummy_context).nodelist,
plist, blist)
elif hasattr(node, ) and hasattr(node.template, ):
_placeholders_recursif(node.template.nodelist, plist, blist)
if hasattr(node, ) and hasattr(node, ) and \
hasattr(node, ) and hasattr(node, ):
already_in_plist = False
for placeholder in plist:
if placeholder.name == node.name:
already_in_plist = True
if not already_in_plist:
if len(blist):
node.found_in_block = blist[len(blist) - 1]
plist.append(node)
node.render(Context())
for key in (, , ):
if isinstance(node, BlockNode):
offset = 0
_plist = [(i, v) for i, v in enumerate(plist)]
for index, pl in _plist:
if pl.found_in_block and \
pl.found_in_block.name == node.name \
and pl.found_in_block != node:
del plist[index - offset]
offset += 1
blist.append(node)
if hasattr(node, key):
try:
_placeholders_recursif(getattr(node, key), plist, blist)
except:
pass
if isinstance(node, BlockNode):
blist.pop() |
def seqids(args):
p = OptionParser(seqids.__doc__)
p.add_option("--pad0", default=0, help="How many zeros to pad")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
prefix, start, end = args
pad0 = opts.pad0
start, end = int(start), int(end)
step = 1 if start <= end else -1
print(",".join(["{}{:0{}d}".format(prefix, x, pad0) \
for x in xrange(start, end + step, step)])) | %prog seqids prefix start end
Make a list of seqids for graphics.karyotype. For example:
$ python -m jcvi.formats.base seqids chromosome_ 1 3
chromosome_1,chromosome_2,chromosome_3
$ python -m jcvi.formats.base seqids A 3 1 --pad0=2
A03,A02,A01 | ### Input:
%prog seqids prefix start end
Make a list of seqids for graphics.karyotype. For example:
$ python -m jcvi.formats.base seqids chromosome_ 1 3
chromosome_1,chromosome_2,chromosome_3
$ python -m jcvi.formats.base seqids A 3 1 --pad0=2
A03,A02,A01
### Response:
def seqids(args):
p = OptionParser(seqids.__doc__)
p.add_option("--pad0", default=0, help="How many zeros to pad")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
prefix, start, end = args
pad0 = opts.pad0
start, end = int(start), int(end)
step = 1 if start <= end else -1
print(",".join(["{}{:0{}d}".format(prefix, x, pad0) \
for x in xrange(start, end + step, step)])) |
def get_default_backend_config(appdirs):
return {
: ,
: datetime.time(5, 30, 0),
: 1,
: os.path.join(appdirs.user_data_dir, .format(appdirs.appname)),
: ,
: os.path.join(appdirs.user_data_dir, .format(appdirs.appname)),
} | Return a default config dictionary.
Args:
appdirs (HamsterAppDirs): ``HamsterAppDirs`` instance encapsulating the apps details.
Returns:
dict: Dictionary with a default configuration.
Note:
Those defaults are independent of the particular config-store. | ### Input:
Return a default config dictionary.
Args:
appdirs (HamsterAppDirs): ``HamsterAppDirs`` instance encapsulating the apps details.
Returns:
dict: Dictionary with a default configuration.
Note:
Those defaults are independent of the particular config-store.
### Response:
def get_default_backend_config(appdirs):
return {
: ,
: datetime.time(5, 30, 0),
: 1,
: os.path.join(appdirs.user_data_dir, .format(appdirs.appname)),
: ,
: os.path.join(appdirs.user_data_dir, .format(appdirs.appname)),
} |
def decorate(self, record):
attachments = {}
if record.levelno >= logging.ERROR:
attachments[] =
if record.levelno >= logging.CRITICAL:
attachments[] =
attach_text = .format(
levelname=record.levelname,
name=record.name,
module=record.module,
funcName=record.funcName,
lineno=record.lineno
)
attachments[] = attach_text
attachments[] = attach_text
return attachments | add slack-specific flourishes to responses
https://api.slack.com/docs/message-attachments
Args:
record (:obj:`logging.record`): message to log
Returns:
(:obj:`dict`): attachments object for reporting | ### Input:
add slack-specific flourishes to responses
https://api.slack.com/docs/message-attachments
Args:
record (:obj:`logging.record`): message to log
Returns:
(:obj:`dict`): attachments object for reporting
### Response:
def decorate(self, record):
attachments = {}
if record.levelno >= logging.ERROR:
attachments[] =
if record.levelno >= logging.CRITICAL:
attachments[] =
attach_text = .format(
levelname=record.levelname,
name=record.name,
module=record.module,
funcName=record.funcName,
lineno=record.lineno
)
attachments[] = attach_text
attachments[] = attach_text
return attachments |
def find_descriptor(self, uuid):
for desc in self.list_descriptors():
if desc.uuid == uuid:
return desc
return None | Return the first child descriptor found that has the specified
UUID. Will return None if no descriptor that matches is found. | ### Input:
Return the first child descriptor found that has the specified
UUID. Will return None if no descriptor that matches is found.
### Response:
def find_descriptor(self, uuid):
for desc in self.list_descriptors():
if desc.uuid == uuid:
return desc
return None |
def GetSoapXMLForComplexType(self, type_name, value):
schema = self.suds_client.wsdl.schema
definition_type = schema.elements[(type_name, self._namespace_override)]
marshaller = suds.mx.literal.Literal(schema)
content = suds.mx.Content(
tag=type_name, value=value,
name=type_name, type=definition_type)
data = marshaller.process(content)
return data | Return an XML string representing a SOAP complex type.
Args:
type_name: The name of the type with namespace prefix if necessary.
value: A python dictionary to hydrate the type instance with.
Returns:
A string containing the SOAP XML for the type. | ### Input:
Return an XML string representing a SOAP complex type.
Args:
type_name: The name of the type with namespace prefix if necessary.
value: A python dictionary to hydrate the type instance with.
Returns:
A string containing the SOAP XML for the type.
### Response:
def GetSoapXMLForComplexType(self, type_name, value):
schema = self.suds_client.wsdl.schema
definition_type = schema.elements[(type_name, self._namespace_override)]
marshaller = suds.mx.literal.Literal(schema)
content = suds.mx.Content(
tag=type_name, value=value,
name=type_name, type=definition_type)
data = marshaller.process(content)
return data |
def exclude(prop):
t replicate property that is normally replicated: ordering column,
many-to-one relation that is marked for replication from other side.'
if isinstance(prop, QueryableAttribute):
prop = prop.property
assert isinstance(prop, (Column, ColumnProperty, RelationshipProperty))
_excluded.add(prop)
if isinstance(prop, RelationshipProperty):
for local in prop.local_columns:
_excluded.add(local) | Don't replicate property that is normally replicated: ordering column,
many-to-one relation that is marked for replication from other side. | ### Input:
Don't replicate property that is normally replicated: ordering column,
many-to-one relation that is marked for replication from other side.
### Response:
def exclude(prop):
t replicate property that is normally replicated: ordering column,
many-to-one relation that is marked for replication from other side.'
if isinstance(prop, QueryableAttribute):
prop = prop.property
assert isinstance(prop, (Column, ColumnProperty, RelationshipProperty))
_excluded.add(prop)
if isinstance(prop, RelationshipProperty):
for local in prop.local_columns:
_excluded.add(local) |
def _get_unique_variable_name(vname, variables):
count = 2
vname_base = vname
while vname in variables:
vname = .format(vname_base, count)
count += 1
return vname | Get a unique variable name.
Args:
vname(str): A candidate name.
variable(OrderedDict of str and Variable)
Returns: str
A unique variable name | ### Input:
Get a unique variable name.
Args:
vname(str): A candidate name.
variable(OrderedDict of str and Variable)
Returns: str
A unique variable name
### Response:
def _get_unique_variable_name(vname, variables):
count = 2
vname_base = vname
while vname in variables:
vname = .format(vname_base, count)
count += 1
return vname |
def write_molpro(basis):
basis = manip.uncontract_spdf(basis, 0, True)
basis = manip.make_general(basis, False)
basis = sort.sort_basis(basis, True)
s =
electron_elements = [k for k, v in basis[].items() if in v]
ecp_elements = [k for k, v in basis[].items() if in v]
if len(electron_elements) > 0:
s +=
for z in electron_elements:
data = basis[][z]
sym = lut.element_sym_from_Z(z).upper()
s +=
s += .format(lut.element_name_from_Z(z), misc.contraction_string(data))
for shell in data[]:
exponents = shell[]
coefficients = shell[]
am = shell[]
amchar = lut.amint_to_char(am).lower()
s += .format(amchar, sym, .join(exponents))
for c in coefficients:
first, last = find_range(c)
s += .format(first + 1, last + 1, .join(c[first:last + 1]))
s +=
if len(ecp_elements) > 0:
s +=
for z in ecp_elements:
data = basis[][z]
sym = lut.element_sym_from_Z(z).lower()
max_ecp_am = max([x[][0] for x in data[]])
ecp_list = sorted(data[], key=lambda x: x[])
ecp_list.insert(0, ecp_list.pop())
s += .format(sym, data[], max_ecp_am)
for pot in ecp_list:
rexponents = pot[]
gexponents = pot[]
coefficients = pot[]
am = pot[]
amchar = lut.amint_to_char(am).lower()
s += .format(len(rexponents))
if am[0] == max_ecp_am:
s +=
else:
s += .format(amchar)
for p in range(len(rexponents)):
s += .format(rexponents[p], gexponents[p], coefficients[0][p])
return s | Converts a basis set to Molpro format | ### Input:
Converts a basis set to Molpro format
### Response:
def write_molpro(basis):
basis = manip.uncontract_spdf(basis, 0, True)
basis = manip.make_general(basis, False)
basis = sort.sort_basis(basis, True)
s =
electron_elements = [k for k, v in basis[].items() if in v]
ecp_elements = [k for k, v in basis[].items() if in v]
if len(electron_elements) > 0:
s +=
for z in electron_elements:
data = basis[][z]
sym = lut.element_sym_from_Z(z).upper()
s +=
s += .format(lut.element_name_from_Z(z), misc.contraction_string(data))
for shell in data[]:
exponents = shell[]
coefficients = shell[]
am = shell[]
amchar = lut.amint_to_char(am).lower()
s += .format(amchar, sym, .join(exponents))
for c in coefficients:
first, last = find_range(c)
s += .format(first + 1, last + 1, .join(c[first:last + 1]))
s +=
if len(ecp_elements) > 0:
s +=
for z in ecp_elements:
data = basis[][z]
sym = lut.element_sym_from_Z(z).lower()
max_ecp_am = max([x[][0] for x in data[]])
ecp_list = sorted(data[], key=lambda x: x[])
ecp_list.insert(0, ecp_list.pop())
s += .format(sym, data[], max_ecp_am)
for pot in ecp_list:
rexponents = pot[]
gexponents = pot[]
coefficients = pot[]
am = pot[]
amchar = lut.amint_to_char(am).lower()
s += .format(len(rexponents))
if am[0] == max_ecp_am:
s +=
else:
s += .format(amchar)
for p in range(len(rexponents)):
s += .format(rexponents[p], gexponents[p], coefficients[0][p])
return s |
def get_table(table_name):
table = get_raw_table(table_name)
if isinstance(table, TableFuncWrapper):
table = table()
return table | Get a registered table.
Decorated functions will be converted to `DataFrameWrapper`.
Parameters
----------
table_name : str
Returns
-------
table : `DataFrameWrapper` | ### Input:
Get a registered table.
Decorated functions will be converted to `DataFrameWrapper`.
Parameters
----------
table_name : str
Returns
-------
table : `DataFrameWrapper`
### Response:
def get_table(table_name):
table = get_raw_table(table_name)
if isinstance(table, TableFuncWrapper):
table = table()
return table |
def update_event_hub(self, hub_name, hub=None):
_validate_not_none(, hub_name)
request = HTTPRequest()
request.method =
request.host = self._get_host()
request.path = + _str(hub_name) +
request.body = _get_request_body(_convert_event_hub_to_xml(hub))
request.path, request.query = self._httpclient._update_request_uri_query(request)
request.headers.append((, ))
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_event_hub(response) | Updates an Event Hub.
hub_name:
Name of event hub.
hub:
Optional. Event hub properties. Instance of EventHub class.
hub.message_retention_in_days:
Number of days to retain the events for this Event Hub. | ### Input:
Updates an Event Hub.
hub_name:
Name of event hub.
hub:
Optional. Event hub properties. Instance of EventHub class.
hub.message_retention_in_days:
Number of days to retain the events for this Event Hub.
### Response:
def update_event_hub(self, hub_name, hub=None):
_validate_not_none(, hub_name)
request = HTTPRequest()
request.method =
request.host = self._get_host()
request.path = + _str(hub_name) +
request.body = _get_request_body(_convert_event_hub_to_xml(hub))
request.path, request.query = self._httpclient._update_request_uri_query(request)
request.headers.append((, ))
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_event_hub(response) |
def t_newline(self, t):
r
t.lexer.lineno += len(t.value)
self.latest_newline = t.lexpos | r'\n+ | ### Input:
r'\n+
### Response:
def t_newline(self, t):
r
t.lexer.lineno += len(t.value)
self.latest_newline = t.lexpos |
def attach_process_classic(self, command_or_pid_path, background, control=False, for_legion=False):
prefix = if for_legion else
if in command_or_pid_path:
if background:
self._set(prefix + , command_or_pid_path, multi=True)
else:
self._set(prefix + , command_or_pid_path, multi=True)
else:
if background:
raise ConfigurationError()
if control:
self._set(, command_or_pid_path, multi=True)
else:
self._set(prefix + , command_or_pid_path, multi=True)
return self._section | Attaches a command/daemon to the master process optionally managed by a pidfile.
This will allow the uWSGI master to control/monitor/respawn this process.
.. note:: This uses old classic uWSGI means of process attaching
To have more control use ``.attach_process()`` method (requires uWSGI 2.0+)
http://uwsgi-docs.readthedocs.io/en/latest/AttachingDaemons.html
:param str|unicode command_or_pid_path:
:param bool background: Must indicate whether process is in background.
:param bool control: Consider this process a control: when the daemon dies, the master exits.
.. note:: pidfile managed processed not supported.
:param bool for_legion: Legion daemons will be executed only on the legion lord node,
so there will always be a single daemon instance running in each legion.
Once the lord dies a daemon will be spawned on another node.
.. note:: uWSGI 1.9.9+ required. | ### Input:
Attaches a command/daemon to the master process optionally managed by a pidfile.
This will allow the uWSGI master to control/monitor/respawn this process.
.. note:: This uses old classic uWSGI means of process attaching
To have more control use ``.attach_process()`` method (requires uWSGI 2.0+)
http://uwsgi-docs.readthedocs.io/en/latest/AttachingDaemons.html
:param str|unicode command_or_pid_path:
:param bool background: Must indicate whether process is in background.
:param bool control: Consider this process a control: when the daemon dies, the master exits.
.. note:: pidfile managed processed not supported.
:param bool for_legion: Legion daemons will be executed only on the legion lord node,
so there will always be a single daemon instance running in each legion.
Once the lord dies a daemon will be spawned on another node.
.. note:: uWSGI 1.9.9+ required.
### Response:
def attach_process_classic(self, command_or_pid_path, background, control=False, for_legion=False):
prefix = if for_legion else
if in command_or_pid_path:
if background:
self._set(prefix + , command_or_pid_path, multi=True)
else:
self._set(prefix + , command_or_pid_path, multi=True)
else:
if background:
raise ConfigurationError()
if control:
self._set(, command_or_pid_path, multi=True)
else:
self._set(prefix + , command_or_pid_path, multi=True)
return self._section |
def set_recent_config(self, max_samples=0):
evt = self._client._request_point_recent_config(self._type, self.lid, self.pid, max_samples)
self._client._wait_and_except_if_failed(evt)
return evt.payload | Update/configure recent data settings for this Feed. If the container does not support recent storage or it
is not enabled for this owner, this function will have no effect.
`max_samples` (optional) (int) how many shares to store for later retrieval. If not supported by container, this
argument will be ignored. A value of zero disables this feature whilst a negative value requests the maximum
sample store amount.
Returns QAPI recent config function payload
#!python
{
"maxSamples": 0
}
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure | ### Input:
Update/configure recent data settings for this Feed. If the container does not support recent storage or it
is not enabled for this owner, this function will have no effect.
`max_samples` (optional) (int) how many shares to store for later retrieval. If not supported by container, this
argument will be ignored. A value of zero disables this feature whilst a negative value requests the maximum
sample store amount.
Returns QAPI recent config function payload
#!python
{
"maxSamples": 0
}
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
### Response:
def set_recent_config(self, max_samples=0):
evt = self._client._request_point_recent_config(self._type, self.lid, self.pid, max_samples)
self._client._wait_and_except_if_failed(evt)
return evt.payload |
def RunJob(self, job):
if not job.leased_until:
raise LockError("CronJob must be leased for Run() to be called.")
if job.leased_until < rdfvalue.RDFDatetime.Now():
raise LockError("CronJob lease expired for %s." % job.cron_job_id)
logging.info("Starting cron job: %s", job.cron_job_id)
if job.args.action_type == job.args.ActionType.SYSTEM_CRON_ACTION:
cls_name = job.args.system_cron_action.job_class_name
job_cls = registry.SystemCronJobRegistry.CronJobClassByName(cls_name)
name = "%s runner" % cls_name
elif job.args.action_type == job.args.ActionType.HUNT_CRON_ACTION:
job_cls = registry.CronJobRegistry.CronJobClassByName("RunHunt")
name = "Hunt runner"
else:
raise ValueError(
"CronJob %s doesnt put more things on the queue by returning
return False
signal_event.set()
wait_for_write_event.wait(TASK_STARTUP_WAIT)
return True
except threadpool.Full:
return False | Does the actual work of the Cron, if the job is due to run.
Args:
job: The cronjob rdfvalue that should be run. Must be leased.
Returns:
A boolean indicating if this cron job was started or not. False may
be returned when the threadpool is already full.
Raises:
LockError: if the object is not locked.
ValueError: If the job argument is invalid. | ### Input:
Does the actual work of the Cron, if the job is due to run.
Args:
job: The cronjob rdfvalue that should be run. Must be leased.
Returns:
A boolean indicating if this cron job was started or not. False may
be returned when the threadpool is already full.
Raises:
LockError: if the object is not locked.
ValueError: If the job argument is invalid.
### Response:
def RunJob(self, job):
if not job.leased_until:
raise LockError("CronJob must be leased for Run() to be called.")
if job.leased_until < rdfvalue.RDFDatetime.Now():
raise LockError("CronJob lease expired for %s." % job.cron_job_id)
logging.info("Starting cron job: %s", job.cron_job_id)
if job.args.action_type == job.args.ActionType.SYSTEM_CRON_ACTION:
cls_name = job.args.system_cron_action.job_class_name
job_cls = registry.SystemCronJobRegistry.CronJobClassByName(cls_name)
name = "%s runner" % cls_name
elif job.args.action_type == job.args.ActionType.HUNT_CRON_ACTION:
job_cls = registry.CronJobRegistry.CronJobClassByName("RunHunt")
name = "Hunt runner"
else:
raise ValueError(
"CronJob %s doesnt put more things on the queue by returning
return False
signal_event.set()
wait_for_write_event.wait(TASK_STARTUP_WAIT)
return True
except threadpool.Full:
return False |
def _IncludeFields(encoded_message, message, include_fields):
if include_fields is None:
return encoded_message
result = json.loads(encoded_message)
for field_name in include_fields:
try:
value = _GetField(message, field_name.split())
nullvalue = None
if isinstance(value, list):
nullvalue = []
except KeyError:
raise exceptions.InvalidDataError(
% (
field_name, type(message)))
_SetField(result, field_name.split(), nullvalue)
return json.dumps(result) | Add the requested fields to the encoded message. | ### Input:
Add the requested fields to the encoded message.
### Response:
def _IncludeFields(encoded_message, message, include_fields):
if include_fields is None:
return encoded_message
result = json.loads(encoded_message)
for field_name in include_fields:
try:
value = _GetField(message, field_name.split())
nullvalue = None
if isinstance(value, list):
nullvalue = []
except KeyError:
raise exceptions.InvalidDataError(
% (
field_name, type(message)))
_SetField(result, field_name.split(), nullvalue)
return json.dumps(result) |
def url_value_preprocessor(self, fn):
self._defer(lambda app: app.url_value_preprocessor(fn))
return fn | Register a URL value preprocessor function for all view
functions in the application. These functions will be called before the
:meth:`before_request` functions.
The function can modify the values captured from the matched url before
they are passed to the view. For example, this can be used to pop a
common language code value and place it in ``g`` rather than pass it to
every view.
The function is passed the endpoint name and values dict. The return
value is ignored. | ### Input:
Register a URL value preprocessor function for all view
functions in the application. These functions will be called before the
:meth:`before_request` functions.
The function can modify the values captured from the matched url before
they are passed to the view. For example, this can be used to pop a
common language code value and place it in ``g`` rather than pass it to
every view.
The function is passed the endpoint name and values dict. The return
value is ignored.
### Response:
def url_value_preprocessor(self, fn):
self._defer(lambda app: app.url_value_preprocessor(fn))
return fn |
def moveTo(self, vector):
self._position = vector
if self.isPenDown():
self._pointsOfPolyline.append(self._position) | Moves the turtle to the new position. Orientation is kept as it is.
If the pen is lowered it will also add to the currently drawn polyline. | ### Input:
Moves the turtle to the new position. Orientation is kept as it is.
If the pen is lowered it will also add to the currently drawn polyline.
### Response:
def moveTo(self, vector):
self._position = vector
if self.isPenDown():
self._pointsOfPolyline.append(self._position) |
def transform_annotation(self, ann, duration):
pitchrootbasss pitch class at each frame.
`bass` is a one-hot matrix indicating the chord
bass (lowest note) pitch class at each frame.
If sparsely encoded, `root` and `bass` are integers
in the range [0, 12] where 12 indicates no chord.
If densely encoded, `root` and `bass` have an extra
final dimension which is active when there is no chord
sounding.
t have any labeled intervals, fill in a no-chord
if not chords:
intervals = np.asarray([[0, duration]])
chords = []
pitches = []
roots = []
basses = []
if self.sparse:
fill = 12
else:
fill = False
for chord in chords:
root, semi, bass = mir_eval.chord.encode(chord)
pitches.append(np.roll(semi, root))
if self.sparse:
if root in self._classes:
roots.append([root])
basses.append([(root + bass) % 12])
else:
roots.append([fill])
basses.append([fill])
else:
if root in self._classes:
roots.extend(self.encoder.transform([[root]]))
basses.extend(self.encoder.transform([[(root + bass) % 12]]))
else:
roots.extend(self.encoder.transform([[]]))
basses.extend(self.encoder.transform([[]]))
pitches = np.asarray(pitches, dtype=np.bool)
roots = np.asarray(roots, dtype=dtype)
basses = np.asarray(basses, dtype=dtype)
target_pitch = self.encode_intervals(duration, intervals, pitches)
target_root = self.encode_intervals(duration, intervals, roots,
multi=False,
dtype=dtype,
fill=fill)
target_bass = self.encode_intervals(duration, intervals, basses,
multi=False,
dtype=dtype,
fill=fill)
if not self.sparse:
target_root = _pad_nochord(target_root)
target_bass = _pad_nochord(target_bass)
return {: target_pitch,
: target_root,
: target_bass} | Apply the chord transformation.
Parameters
----------
ann : jams.Annotation
The chord annotation
duration : number > 0
The target duration
Returns
-------
data : dict
data['pitch'] : np.ndarray, shape=(n, 12)
data['root'] : np.ndarray, shape=(n, 13) or (n, 1)
data['bass'] : np.ndarray, shape=(n, 13) or (n, 1)
`pitch` is a binary matrix indicating pitch class
activation at each frame.
`root` is a one-hot matrix indicating the chord
root's pitch class at each frame.
`bass` is a one-hot matrix indicating the chord
bass (lowest note) pitch class at each frame.
If sparsely encoded, `root` and `bass` are integers
in the range [0, 12] where 12 indicates no chord.
If densely encoded, `root` and `bass` have an extra
final dimension which is active when there is no chord
sounding. | ### Input:
Apply the chord transformation.
Parameters
----------
ann : jams.Annotation
The chord annotation
duration : number > 0
The target duration
Returns
-------
data : dict
data['pitch'] : np.ndarray, shape=(n, 12)
data['root'] : np.ndarray, shape=(n, 13) or (n, 1)
data['bass'] : np.ndarray, shape=(n, 13) or (n, 1)
`pitch` is a binary matrix indicating pitch class
activation at each frame.
`root` is a one-hot matrix indicating the chord
root's pitch class at each frame.
`bass` is a one-hot matrix indicating the chord
bass (lowest note) pitch class at each frame.
If sparsely encoded, `root` and `bass` are integers
in the range [0, 12] where 12 indicates no chord.
If densely encoded, `root` and `bass` have an extra
final dimension which is active when there is no chord
sounding.
### Response:
def transform_annotation(self, ann, duration):
pitchrootbasss pitch class at each frame.
`bass` is a one-hot matrix indicating the chord
bass (lowest note) pitch class at each frame.
If sparsely encoded, `root` and `bass` are integers
in the range [0, 12] where 12 indicates no chord.
If densely encoded, `root` and `bass` have an extra
final dimension which is active when there is no chord
sounding.
t have any labeled intervals, fill in a no-chord
if not chords:
intervals = np.asarray([[0, duration]])
chords = []
pitches = []
roots = []
basses = []
if self.sparse:
fill = 12
else:
fill = False
for chord in chords:
root, semi, bass = mir_eval.chord.encode(chord)
pitches.append(np.roll(semi, root))
if self.sparse:
if root in self._classes:
roots.append([root])
basses.append([(root + bass) % 12])
else:
roots.append([fill])
basses.append([fill])
else:
if root in self._classes:
roots.extend(self.encoder.transform([[root]]))
basses.extend(self.encoder.transform([[(root + bass) % 12]]))
else:
roots.extend(self.encoder.transform([[]]))
basses.extend(self.encoder.transform([[]]))
pitches = np.asarray(pitches, dtype=np.bool)
roots = np.asarray(roots, dtype=dtype)
basses = np.asarray(basses, dtype=dtype)
target_pitch = self.encode_intervals(duration, intervals, pitches)
target_root = self.encode_intervals(duration, intervals, roots,
multi=False,
dtype=dtype,
fill=fill)
target_bass = self.encode_intervals(duration, intervals, basses,
multi=False,
dtype=dtype,
fill=fill)
if not self.sparse:
target_root = _pad_nochord(target_root)
target_bass = _pad_nochord(target_bass)
return {: target_pitch,
: target_root,
: target_bass} |
def computeHWE(prefix, threshold, outPrefix):
plinkCommand = ["plink", "--noweb", "--bfile", prefix, "--hwe", threshold,
"--make-bed", "--out", outPrefix]
runCommand(plinkCommand) | Compute the Hardy Weinberg test using Plink.
:param prefix: the prefix of all the files.
:param threshold: the Hardy Weinberg threshold.
:param outPrefix: the prefix of the output file.
:type prefix: str
:type threshold: str
:type outPrefix: str
Uses Plink to exclude markers that failed the Hardy-Weinberg test at a
specified significance threshold. | ### Input:
Compute the Hardy Weinberg test using Plink.
:param prefix: the prefix of all the files.
:param threshold: the Hardy Weinberg threshold.
:param outPrefix: the prefix of the output file.
:type prefix: str
:type threshold: str
:type outPrefix: str
Uses Plink to exclude markers that failed the Hardy-Weinberg test at a
specified significance threshold.
### Response:
def computeHWE(prefix, threshold, outPrefix):
plinkCommand = ["plink", "--noweb", "--bfile", prefix, "--hwe", threshold,
"--make-bed", "--out", outPrefix]
runCommand(plinkCommand) |
def update( self, jump ):
atom = jump.initial_site.atom
dr = jump.dr( self.cell_lengths )
jump.final_site.occupation = atom.number
jump.final_site.atom = atom
jump.final_site.is_occupied = True
jump.initial_site.occupation = 0
jump.initial_site.atom = None
jump.initial_site.is_occupied = False
atom.site = jump.final_site
atom.number_of_hops += 1
atom.dr += dr
atom.summed_dr2 += np.dot( dr, dr ) | Update the lattice state by accepting a specific jump
Args:
jump (Jump): The jump that has been accepted.
Returns:
None. | ### Input:
Update the lattice state by accepting a specific jump
Args:
jump (Jump): The jump that has been accepted.
Returns:
None.
### Response:
def update( self, jump ):
atom = jump.initial_site.atom
dr = jump.dr( self.cell_lengths )
jump.final_site.occupation = atom.number
jump.final_site.atom = atom
jump.final_site.is_occupied = True
jump.initial_site.occupation = 0
jump.initial_site.atom = None
jump.initial_site.is_occupied = False
atom.site = jump.final_site
atom.number_of_hops += 1
atom.dr += dr
atom.summed_dr2 += np.dot( dr, dr ) |
def user_exists(name, database=None, user=None, password=None, host=None, port=None):
***
users = user_list(database, user, password, host, port)
if not isinstance(users, list):
return False
for user in users:
username = user.get(, user.get())
if username:
if username == name:
return True
else:
log.warning(, user)
return False | Checks if a cluster admin or database user exists.
If a database is specified: it will check for database user existence.
If a database is not specified: it will check for cluster admin existence.
name
User name
database
The database to check for the user to exist
user
The user to connect as
password
The password of the user
host
The host to connect to
port
The port to connect to
CLI Example:
.. code-block:: bash
salt '*' influxdb08.user_exists <name>
salt '*' influxdb08.user_exists <name> <database>
salt '*' influxdb08.user_exists <name> <database> <user> <password> <host> <port> | ### Input:
Checks if a cluster admin or database user exists.
If a database is specified: it will check for database user existence.
If a database is not specified: it will check for cluster admin existence.
name
User name
database
The database to check for the user to exist
user
The user to connect as
password
The password of the user
host
The host to connect to
port
The port to connect to
CLI Example:
.. code-block:: bash
salt '*' influxdb08.user_exists <name>
salt '*' influxdb08.user_exists <name> <database>
salt '*' influxdb08.user_exists <name> <database> <user> <password> <host> <port>
### Response:
def user_exists(name, database=None, user=None, password=None, host=None, port=None):
***
users = user_list(database, user, password, host, port)
if not isinstance(users, list):
return False
for user in users:
username = user.get(, user.get())
if username:
if username == name:
return True
else:
log.warning(, user)
return False |
def execute_and_reset(
expr, params=None, scope=None, aggcontext=None, **kwargs
):
result = execute(
expr, params=params, scope=scope, aggcontext=aggcontext, **kwargs
)
if isinstance(result, pd.DataFrame):
schema = expr.schema()
df = result.reset_index()
return df.loc[:, schema.names]
elif isinstance(result, pd.Series):
return result.reset_index(drop=True)
return result | Execute an expression against data that are bound to it. If no data
are bound, raise an Exception.
Notes
-----
The difference between this function and :func:`~ibis.pandas.core.execute`
is that this function resets the index of the result, if the result has
an index.
Parameters
----------
expr : ibis.expr.types.Expr
The expression to execute
params : Mapping[ibis.expr.types.Expr, object]
The data that an unbound parameter in `expr` maps to
scope : Mapping[ibis.expr.operations.Node, object]
Additional scope, mapping ibis operations to data
aggcontext : Optional[ibis.pandas.aggcontext.AggregationContext]
An object indicating how to compute aggregations. For example,
a rolling mean needs to be computed differently than the mean of a
column.
kwargs : Dict[str, object]
Additional arguments that can potentially be used by individual node
execution
Returns
-------
result : Union[
pandas.Series, pandas.DataFrame, ibis.pandas.core.simple_types
]
Raises
------
ValueError
* If no data are bound to the input expression | ### Input:
Execute an expression against data that are bound to it. If no data
are bound, raise an Exception.
Notes
-----
The difference between this function and :func:`~ibis.pandas.core.execute`
is that this function resets the index of the result, if the result has
an index.
Parameters
----------
expr : ibis.expr.types.Expr
The expression to execute
params : Mapping[ibis.expr.types.Expr, object]
The data that an unbound parameter in `expr` maps to
scope : Mapping[ibis.expr.operations.Node, object]
Additional scope, mapping ibis operations to data
aggcontext : Optional[ibis.pandas.aggcontext.AggregationContext]
An object indicating how to compute aggregations. For example,
a rolling mean needs to be computed differently than the mean of a
column.
kwargs : Dict[str, object]
Additional arguments that can potentially be used by individual node
execution
Returns
-------
result : Union[
pandas.Series, pandas.DataFrame, ibis.pandas.core.simple_types
]
Raises
------
ValueError
* If no data are bound to the input expression
### Response:
def execute_and_reset(
expr, params=None, scope=None, aggcontext=None, **kwargs
):
result = execute(
expr, params=params, scope=scope, aggcontext=aggcontext, **kwargs
)
if isinstance(result, pd.DataFrame):
schema = expr.schema()
df = result.reset_index()
return df.loc[:, schema.names]
elif isinstance(result, pd.Series):
return result.reset_index(drop=True)
return result |
def plot_conv_weights(layer, figsize=(6, 6)):
W = layer.W.get_value()
shape = W.shape
nrows = np.ceil(np.sqrt(shape[0])).astype(int)
ncols = nrows
for feature_map in range(shape[1]):
figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis()
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[0]:
break
axes[r, c].imshow(W[i, feature_map], cmap=,
interpolation=)
return plt | Plot the weights of a specific layer.
Only really makes sense with convolutional layers.
Parameters
----------
layer : lasagne.layers.Layer | ### Input:
Plot the weights of a specific layer.
Only really makes sense with convolutional layers.
Parameters
----------
layer : lasagne.layers.Layer
### Response:
def plot_conv_weights(layer, figsize=(6, 6)):
W = layer.W.get_value()
shape = W.shape
nrows = np.ceil(np.sqrt(shape[0])).astype(int)
ncols = nrows
for feature_map in range(shape[1]):
figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.axis()
for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
if i >= shape[0]:
break
axes[r, c].imshow(W[i, feature_map], cmap=,
interpolation=)
return plt |
def convert_msg(self, msg):
source = msg.msgid
if not source:
}
msg.msgstr_plural = plural
else:
foreign = self.convert(source)
msg.msgstr = self.final_newline(source, foreign) | Takes one POEntry object and converts it (adds a dummy translation to it)
msg is an instance of polib.POEntry | ### Input:
Takes one POEntry object and converts it (adds a dummy translation to it)
msg is an instance of polib.POEntry
### Response:
def convert_msg(self, msg):
source = msg.msgid
if not source:
}
msg.msgstr_plural = plural
else:
foreign = self.convert(source)
msg.msgstr = self.final_newline(source, foreign) |
def prepare(query, params):
def repl(match):
name = match.group(1)[1:]
if name in params:
return marshal(params[name])
return ":%s" % name
new, count = re.subn(_param_re, repl, query)
if len(params) > count:
raise cql.ProgrammingError("More keywords were provided "
"than parameters")
return new | For every match of the form ":param_name", call marshal
on kwargs['param_name'] and replace that section of the query
with the result | ### Input:
For every match of the form ":param_name", call marshal
on kwargs['param_name'] and replace that section of the query
with the result
### Response:
def prepare(query, params):
def repl(match):
name = match.group(1)[1:]
if name in params:
return marshal(params[name])
return ":%s" % name
new, count = re.subn(_param_re, repl, query)
if len(params) > count:
raise cql.ProgrammingError("More keywords were provided "
"than parameters")
return new |
def end_datetime(self) -> Optional[datetime.datetime]:
if not self.intervals:
return None
return max([x.end for x in self.intervals]) | Returns the end date of the set of intervals, or ``None`` if empty. | ### Input:
Returns the end date of the set of intervals, or ``None`` if empty.
### Response:
def end_datetime(self) -> Optional[datetime.datetime]:
if not self.intervals:
return None
return max([x.end for x in self.intervals]) |
def _compress(self):
rank = 0.0
current = self._head
while current and current._successor:
if current._rank + current._successor._rank + current._successor._delta <= self._invariant(rank, self._observations):
removed = current._successor
current._value = removed._value
current._rank += removed._rank
current._delta = removed._delta
current._successor = removed._successor
rank += current._rank
current = current._successor | Prunes the cataloged observations. | ### Input:
Prunes the cataloged observations.
### Response:
def _compress(self):
rank = 0.0
current = self._head
while current and current._successor:
if current._rank + current._successor._rank + current._successor._delta <= self._invariant(rank, self._observations):
removed = current._successor
current._value = removed._value
current._rank += removed._rank
current._delta = removed._delta
current._successor = removed._successor
rank += current._rank
current = current._successor |
def get_base_addr(self, addr):
base_addr, container = self._get_container(addr)
if container is None:
return None
else:
return base_addr | Get the base offset (the key we are using to index objects covering the given offset) of a specific offset.
:param int addr:
:return:
:rtype: int or None | ### Input:
Get the base offset (the key we are using to index objects covering the given offset) of a specific offset.
:param int addr:
:return:
:rtype: int or None
### Response:
def get_base_addr(self, addr):
base_addr, container = self._get_container(addr)
if container is None:
return None
else:
return base_addr |
def __get_activity_by_name(self, name, category_id = None, resurrect = True):
if category_id:
query =
res = self.fetchone(query, (self._unsorted_localized, name, category_id))
else:
query =
res = self.fetchone(query, (self._unsorted_localized, name, ))
if res:
keys = (, , , )
res = dict([(key, res[key]) for key in keys])
res[] = res[] or False
if res[] and resurrect:
update =
self.execute(update, (res[], ))
return res
return None | get most recent, preferably not deleted activity by it's name | ### Input:
get most recent, preferably not deleted activity by it's name
### Response:
def __get_activity_by_name(self, name, category_id = None, resurrect = True):
if category_id:
query =
res = self.fetchone(query, (self._unsorted_localized, name, category_id))
else:
query =
res = self.fetchone(query, (self._unsorted_localized, name, ))
if res:
keys = (, , , )
res = dict([(key, res[key]) for key in keys])
res[] = res[] or False
if res[] and resurrect:
update =
self.execute(update, (res[], ))
return res
return None |
def download_object(self, instance, bucket_name, object_name):
url = .format(instance, bucket_name, object_name)
response = self._client.get_proto(path=url)
return response.content | Download an object.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
:param str object_name: The object to fetch. | ### Input:
Download an object.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
:param str object_name: The object to fetch.
### Response:
def download_object(self, instance, bucket_name, object_name):
url = .format(instance, bucket_name, object_name)
response = self._client.get_proto(path=url)
return response.content |
def supports_heading_type(self, heading_type=None):
from .osid_errors import IllegalState, NullArgument
if not heading_type:
raise NullArgument()
if self._kwargs[] not in []:
raise IllegalState()
return heading_type in self.get_heading_types | Tests if the given heading type is supported.
arg: heading_type (osid.type.Type): a heading Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not a ``HEADING``
raise: NullArgument - ``heading_type`` is ``null``
*compliance: mandatory -- This method must be implemented.* | ### Input:
Tests if the given heading type is supported.
arg: heading_type (osid.type.Type): a heading Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not a ``HEADING``
raise: NullArgument - ``heading_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
### Response:
def supports_heading_type(self, heading_type=None):
from .osid_errors import IllegalState, NullArgument
if not heading_type:
raise NullArgument()
if self._kwargs[] not in []:
raise IllegalState()
return heading_type in self.get_heading_types |
def _from_rest_ignore(model, props):
fields = model.all_fields
for prop in props.keys():
if prop not in fields:
del props[prop] | Purge fields that are completely unknown | ### Input:
Purge fields that are completely unknown
### Response:
def _from_rest_ignore(model, props):
fields = model.all_fields
for prop in props.keys():
if prop not in fields:
del props[prop] |
def bfgs_method(f, x, line_search=1.0, maxiter=1000, tol=1e-15, num_store=None,
hessinv_estimate=None, callback=None):
r
grad = f.gradient
if x not in grad.domain:
raise TypeError(
.format(x, grad.domain))
if not callable(line_search):
line_search = ConstantLineSearch(line_search)
ys = []
ss = []
grad_x = grad(x)
for i in range(maxiter):
search_dir = -_bfgs_direction(ss, ys, grad_x, hessinv_estimate)
dir_deriv = search_dir.inner(grad_x)
if np.abs(dir_deriv) == 0:
return
step = line_search(x, direction=search_dir, dir_derivative=dir_deriv)
x_update = search_dir
x_update *= step
x += x_update
grad_x, grad_diff = grad(x), grad_x
grad_diff.lincomb(-1, grad_diff, 1, grad_x)
y_inner_s = grad_diff.inner(x_update)
if np.abs(y_inner_s) < tol:
if grad_x.norm() < tol:
return
else:
ys = []
ss = []
continue
ys.append(grad_diff)
ss.append(x_update)
if num_store is not None:
ss = ss[-num_store:]
ys = ys[-num_store:]
if callback is not None:
callback(x) | r"""Quasi-Newton BFGS method to minimize a differentiable function.
Can use either the regular BFGS method, or the limited memory BFGS method.
Notes
-----
This is a general and optimized implementation of a quasi-Newton
method with BFGS update for solving a general unconstrained
optimization problem
.. math::
\min f(x)
for a differentiable function
:math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space
:math:`\mathcal{X}`. It does so by finding a zero of the gradient
.. math::
\nabla f: \mathcal{X} \to \mathcal{X}.
The QN method is an approximate Newton method, where the Hessian
is approximated and gradually updated in each step. This
implementation uses the rank-one BFGS update schema where the
inverse of the Hessian is recalculated in each iteration.
The algorithm is described in [GNS2009], Section 12.3 and in the
`BFGS Wikipedia article
<https://en.wikipedia.org/wiki/Broyden%E2%80%93Fletcher%E2%80%93\
Goldfarb%E2%80%93Shanno_algorithm>`_
Parameters
----------
f : `Functional`
Functional with ``f.gradient``.
x : ``f.domain`` element
Starting point of the iteration
line_search : float or `LineSearch`, optional
Strategy to choose the step length. If a float is given, uses it as a
fixed step length.
maxiter : int, optional
Maximum number of iterations.
tol : float, optional
Tolerance that should be used for terminating the iteration.
num_store : int, optional
Maximum number of correction factors to store. For ``None``, the method
is the regular BFGS method. For an integer, the method becomes the
Limited Memory BFGS method.
hessinv_estimate : `Operator`, optional
Initial estimate of the inverse of the Hessian operator. Needs to be an
operator from ``f.domain`` to ``f.domain``.
Default: Identity on ``f.domain``
callback : callable, optional
Object executing code per iteration, e.g. plotting each iterate.
References
----------
[GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear
optimization*. Siam, 2009. | ### Input:
r"""Quasi-Newton BFGS method to minimize a differentiable function.
Can use either the regular BFGS method, or the limited memory BFGS method.
Notes
-----
This is a general and optimized implementation of a quasi-Newton
method with BFGS update for solving a general unconstrained
optimization problem
.. math::
\min f(x)
for a differentiable function
:math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space
:math:`\mathcal{X}`. It does so by finding a zero of the gradient
.. math::
\nabla f: \mathcal{X} \to \mathcal{X}.
The QN method is an approximate Newton method, where the Hessian
is approximated and gradually updated in each step. This
implementation uses the rank-one BFGS update schema where the
inverse of the Hessian is recalculated in each iteration.
The algorithm is described in [GNS2009], Section 12.3 and in the
`BFGS Wikipedia article
<https://en.wikipedia.org/wiki/Broyden%E2%80%93Fletcher%E2%80%93\
Goldfarb%E2%80%93Shanno_algorithm>`_
Parameters
----------
f : `Functional`
Functional with ``f.gradient``.
x : ``f.domain`` element
Starting point of the iteration
line_search : float or `LineSearch`, optional
Strategy to choose the step length. If a float is given, uses it as a
fixed step length.
maxiter : int, optional
Maximum number of iterations.
tol : float, optional
Tolerance that should be used for terminating the iteration.
num_store : int, optional
Maximum number of correction factors to store. For ``None``, the method
is the regular BFGS method. For an integer, the method becomes the
Limited Memory BFGS method.
hessinv_estimate : `Operator`, optional
Initial estimate of the inverse of the Hessian operator. Needs to be an
operator from ``f.domain`` to ``f.domain``.
Default: Identity on ``f.domain``
callback : callable, optional
Object executing code per iteration, e.g. plotting each iterate.
References
----------
[GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear
optimization*. Siam, 2009.
### Response:
def bfgs_method(f, x, line_search=1.0, maxiter=1000, tol=1e-15, num_store=None,
hessinv_estimate=None, callback=None):
r
grad = f.gradient
if x not in grad.domain:
raise TypeError(
.format(x, grad.domain))
if not callable(line_search):
line_search = ConstantLineSearch(line_search)
ys = []
ss = []
grad_x = grad(x)
for i in range(maxiter):
search_dir = -_bfgs_direction(ss, ys, grad_x, hessinv_estimate)
dir_deriv = search_dir.inner(grad_x)
if np.abs(dir_deriv) == 0:
return
step = line_search(x, direction=search_dir, dir_derivative=dir_deriv)
x_update = search_dir
x_update *= step
x += x_update
grad_x, grad_diff = grad(x), grad_x
grad_diff.lincomb(-1, grad_diff, 1, grad_x)
y_inner_s = grad_diff.inner(x_update)
if np.abs(y_inner_s) < tol:
if grad_x.norm() < tol:
return
else:
ys = []
ss = []
continue
ys.append(grad_diff)
ss.append(x_update)
if num_store is not None:
ss = ss[-num_store:]
ys = ys[-num_store:]
if callback is not None:
callback(x) |
def precision(y, z):
tp, tn, fp, fn = contingency_table(y, z)
return tp / (tp + fp) | Precision `tp / (tp + fp)` | ### Input:
Precision `tp / (tp + fp)`
### Response:
def precision(y, z):
tp, tn, fp, fn = contingency_table(y, z)
return tp / (tp + fp) |
def get_listening(self, listen=[]):
if listen == []:
return listen
value = []
for network in listen:
try:
ip = get_address_in_network(network=network, fatal=True)
except ValueError:
if is_ip(network):
ip = network
else:
try:
ip = get_iface_addr(iface=network, fatal=False)[0]
except IndexError:
continue
value.append(ip)
if value == []:
return []
return value | Returns a list of addresses SSH can list on
Turns input into a sensible list of IPs SSH can listen on. Input
must be a python list of interface names, IPs and/or CIDRs.
:param listen: list of IPs, CIDRs, interface names
:returns: list of IPs available on the host | ### Input:
Returns a list of addresses SSH can list on
Turns input into a sensible list of IPs SSH can listen on. Input
must be a python list of interface names, IPs and/or CIDRs.
:param listen: list of IPs, CIDRs, interface names
:returns: list of IPs available on the host
### Response:
def get_listening(self, listen=[]):
if listen == []:
return listen
value = []
for network in listen:
try:
ip = get_address_in_network(network=network, fatal=True)
except ValueError:
if is_ip(network):
ip = network
else:
try:
ip = get_iface_addr(iface=network, fatal=False)[0]
except IndexError:
continue
value.append(ip)
if value == []:
return []
return value |
async def starttls(
self,
server_hostname: str = None,
validate_certs: bool = None,
client_cert: DefaultStrType = _default,
client_key: DefaultStrType = _default,
cert_bundle: DefaultStrType = _default,
tls_context: DefaultSSLContextType = _default,
timeout: DefaultNumType = _default,
) -> SMTPResponse:
self._raise_error_if_disconnected()
await self._ehlo_or_helo_if_needed()
if validate_certs is not None:
self.validate_certs = validate_certs
if timeout is _default:
timeout = self.timeout
if client_cert is not _default:
self.client_cert = client_cert
if client_key is not _default:
self.client_key = client_key
if cert_bundle is not _default:
self.cert_bundle = cert_bundle
if tls_context is not _default:
self.tls_context = tls_context
if self.tls_context is not None and self.client_cert is not None:
raise ValueError(
"Either a TLS context or a certificate/key must be provided"
)
if server_hostname is None:
server_hostname = self.hostname
tls_context = self._get_tls_context()
if not self.supports_extension("starttls"):
raise SMTPException("SMTP STARTTLS extension not supported by server.")
async with self._command_lock:
try:
response, protocol = await self.protocol.starttls(
tls_context, server_hostname=server_hostname, timeout=timeout
)
except SMTPServerDisconnected:
self.close()
raise
self.transport = protocol._app_transport
self._reset_server_state()
return response | Puts the connection to the SMTP server into TLS mode.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked (if
validate_certs is True). You can also provide a custom SSLContext
object. If no certs or SSLContext is given, and TLS config was
provided when initializing the class, STARTTLS will use to that,
otherwise it will use the Python defaults.
:raises SMTPException: server does not support STARTTLS
:raises SMTPServerDisconnected: connection lost
:raises ValueError: invalid options provided | ### Input:
Puts the connection to the SMTP server into TLS mode.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked (if
validate_certs is True). You can also provide a custom SSLContext
object. If no certs or SSLContext is given, and TLS config was
provided when initializing the class, STARTTLS will use to that,
otherwise it will use the Python defaults.
:raises SMTPException: server does not support STARTTLS
:raises SMTPServerDisconnected: connection lost
:raises ValueError: invalid options provided
### Response:
async def starttls(
self,
server_hostname: str = None,
validate_certs: bool = None,
client_cert: DefaultStrType = _default,
client_key: DefaultStrType = _default,
cert_bundle: DefaultStrType = _default,
tls_context: DefaultSSLContextType = _default,
timeout: DefaultNumType = _default,
) -> SMTPResponse:
self._raise_error_if_disconnected()
await self._ehlo_or_helo_if_needed()
if validate_certs is not None:
self.validate_certs = validate_certs
if timeout is _default:
timeout = self.timeout
if client_cert is not _default:
self.client_cert = client_cert
if client_key is not _default:
self.client_key = client_key
if cert_bundle is not _default:
self.cert_bundle = cert_bundle
if tls_context is not _default:
self.tls_context = tls_context
if self.tls_context is not None and self.client_cert is not None:
raise ValueError(
"Either a TLS context or a certificate/key must be provided"
)
if server_hostname is None:
server_hostname = self.hostname
tls_context = self._get_tls_context()
if not self.supports_extension("starttls"):
raise SMTPException("SMTP STARTTLS extension not supported by server.")
async with self._command_lock:
try:
response, protocol = await self.protocol.starttls(
tls_context, server_hostname=server_hostname, timeout=timeout
)
except SMTPServerDisconnected:
self.close()
raise
self.transport = protocol._app_transport
self._reset_server_state()
return response |
def get_email_forwarding(netid):
subscriptions = get_netid_subscriptions(netid, Subscription.SUBS_CODE_U_FORWARDING)
for subscription in subscriptions:
if subscription.subscription_code == Subscription.SUBS_CODE_U_FORWARDING:
return_obj = UwEmailForwarding()
if subscription.data_value:
return_obj.fwd = subscription.data_value
return_obj.permitted = subscription.permitted
return_obj.status = subscription.status_name
return return_obj
return None | Return a restclients.models.uwnetid.UwEmailForwarding object
on the given uwnetid | ### Input:
Return a restclients.models.uwnetid.UwEmailForwarding object
on the given uwnetid
### Response:
def get_email_forwarding(netid):
subscriptions = get_netid_subscriptions(netid, Subscription.SUBS_CODE_U_FORWARDING)
for subscription in subscriptions:
if subscription.subscription_code == Subscription.SUBS_CODE_U_FORWARDING:
return_obj = UwEmailForwarding()
if subscription.data_value:
return_obj.fwd = subscription.data_value
return_obj.permitted = subscription.permitted
return_obj.status = subscription.status_name
return return_obj
return None |
def _parse_multi_byte(self, s):
assert(len(s) >= 2)
tmp_len = len(s)
value = 0
i = 1
byte = orb(s[i])
max_value = 1 << 64
while byte & 0x80:
value += (byte ^ 0x80) << (7 * (i - 1))
if value > max_value:
raise error.Scapy_Exception(
.format(value)
)
i += 1
assert i < tmp_len,
byte = orb(s[i])
value += byte << (7 * (i - 1))
value += self._max_value
assert(value >= 0)
return value | _parse_multi_byte parses x as a multibyte representation to get the
int value of this AbstractUVarIntField.
@param str s: the multibyte string to parse.
@return int: The parsed int value represented by this AbstractUVarIntField. # noqa: E501
@raise: AssertionError
@raise: Scapy_Exception if the input value encodes an integer larger than 1<<64 # noqa: E501 | ### Input:
_parse_multi_byte parses x as a multibyte representation to get the
int value of this AbstractUVarIntField.
@param str s: the multibyte string to parse.
@return int: The parsed int value represented by this AbstractUVarIntField. # noqa: E501
@raise: AssertionError
@raise: Scapy_Exception if the input value encodes an integer larger than 1<<64 # noqa: E501
### Response:
def _parse_multi_byte(self, s):
assert(len(s) >= 2)
tmp_len = len(s)
value = 0
i = 1
byte = orb(s[i])
max_value = 1 << 64
while byte & 0x80:
value += (byte ^ 0x80) << (7 * (i - 1))
if value > max_value:
raise error.Scapy_Exception(
.format(value)
)
i += 1
assert i < tmp_len,
byte = orb(s[i])
value += byte << (7 * (i - 1))
value += self._max_value
assert(value >= 0)
return value |
def _parse_schema_resource(info):
if "fields" not in info:
return ()
schema = []
for r_field in info["fields"]:
name = r_field["name"]
field_type = r_field["type"]
mode = r_field.get("mode", "NULLABLE")
description = r_field.get("description")
sub_fields = _parse_schema_resource(r_field)
schema.append(SchemaField(name, field_type, mode, description, sub_fields))
return schema | Parse a resource fragment into a schema field.
Args:
info: (Mapping[str->dict]): should contain a "fields" key to be parsed
Returns:
(Union[Sequence[:class:`google.cloud.bigquery.schema.SchemaField`],None])
a list of parsed fields, or ``None`` if no "fields" key found. | ### Input:
Parse a resource fragment into a schema field.
Args:
info: (Mapping[str->dict]): should contain a "fields" key to be parsed
Returns:
(Union[Sequence[:class:`google.cloud.bigquery.schema.SchemaField`],None])
a list of parsed fields, or ``None`` if no "fields" key found.
### Response:
def _parse_schema_resource(info):
if "fields" not in info:
return ()
schema = []
for r_field in info["fields"]:
name = r_field["name"]
field_type = r_field["type"]
mode = r_field.get("mode", "NULLABLE")
description = r_field.get("description")
sub_fields = _parse_schema_resource(r_field)
schema.append(SchemaField(name, field_type, mode, description, sub_fields))
return schema |
def getFeatureSet(self, id_):
if id_ not in self._featureSetIdMap:
raise exceptions.FeatureSetNotFoundException(id_)
return self._featureSetIdMap[id_] | Returns the FeatureSet with the specified id, or raises a
FeatureSetNotFoundException otherwise. | ### Input:
Returns the FeatureSet with the specified id, or raises a
FeatureSetNotFoundException otherwise.
### Response:
def getFeatureSet(self, id_):
if id_ not in self._featureSetIdMap:
raise exceptions.FeatureSetNotFoundException(id_)
return self._featureSetIdMap[id_] |
def undecorate(func):
orig_call_wrapper = lambda x: x
for call_wrapper, unwrap in SUPPORTED_DECORATOR.items():
if isinstance(func, call_wrapper):
func = unwrap(func)
orig_call_wrapper = call_wrapper
break
return orig_call_wrapper, func | Returns the decorator and the undecorated function of given object. | ### Input:
Returns the decorator and the undecorated function of given object.
### Response:
def undecorate(func):
orig_call_wrapper = lambda x: x
for call_wrapper, unwrap in SUPPORTED_DECORATOR.items():
if isinstance(func, call_wrapper):
func = unwrap(func)
orig_call_wrapper = call_wrapper
break
return orig_call_wrapper, func |
def get_invoices_per_page(self, per_page=1000, page=1, params=None):
return self._get_resource_per_page(resource=INVOICES, per_page=per_page, page=page, params=params) | Get invoices per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list | ### Input:
Get invoices per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
### Response:
def get_invoices_per_page(self, per_page=1000, page=1, params=None):
return self._get_resource_per_page(resource=INVOICES, per_page=per_page, page=page, params=params) |
def _get_on_poweroff(dom):
*
node = ElementTree.fromstring(get_xml(dom)).find()
return node.text if node is not None else | Return `on_poweroff` setting from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_on_restart <domain> | ### Input:
Return `on_poweroff` setting from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_on_restart <domain>
### Response:
def _get_on_poweroff(dom):
*
node = ElementTree.fromstring(get_xml(dom)).find()
return node.text if node is not None else |
def get_or_create_direct_channel(cls, initiator_key, receiver_key):
existing = cls.objects.OR().filter(
code_name= % (initiator_key, receiver_key)).filter(
code_name= % (receiver_key, initiator_key))
receiver_name = UserModel.objects.get(receiver_key).full_name
if existing:
channel = existing[0]
else:
channel_name = % (initiator_key, receiver_key)
channel = cls(is_direct=True, code_name=channel_name, typ=10).blocking_save()
with BlockSave(Subscriber):
Subscriber.objects.get_or_create(channel=channel,
user_id=initiator_key,
name=receiver_name)
Subscriber.objects.get_or_create(channel=channel,
user_id=receiver_key,
name=UserModel.objects.get(initiator_key).full_name)
return channel, receiver_name | Creates a direct messaging channel between two user
Args:
initiator: User, who want's to make first contact
receiver: User, other party
Returns:
(Channel, receiver_name) | ### Input:
Creates a direct messaging channel between two user
Args:
initiator: User, who want's to make first contact
receiver: User, other party
Returns:
(Channel, receiver_name)
### Response:
def get_or_create_direct_channel(cls, initiator_key, receiver_key):
existing = cls.objects.OR().filter(
code_name= % (initiator_key, receiver_key)).filter(
code_name= % (receiver_key, initiator_key))
receiver_name = UserModel.objects.get(receiver_key).full_name
if existing:
channel = existing[0]
else:
channel_name = % (initiator_key, receiver_key)
channel = cls(is_direct=True, code_name=channel_name, typ=10).blocking_save()
with BlockSave(Subscriber):
Subscriber.objects.get_or_create(channel=channel,
user_id=initiator_key,
name=receiver_name)
Subscriber.objects.get_or_create(channel=channel,
user_id=receiver_key,
name=UserModel.objects.get(initiator_key).full_name)
return channel, receiver_name |
def timeRange(*args):
gmt_arg_present = (len(args) == 2 and args[1] == ) or (len(args) % 2 == 1 and len(args) > 1)
if gmt_arg_present:
today = _now(args[-1])
args = args[:-1]
else:
today = _now()
num_args = len(args)
if num_args == 1:
h1 = args[0]
return h1 == today.hour
if num_args == 2:
h1, h2 = args
return h1 <= today.hour < h2
if num_args == 4:
h1, m1, h2, m2 = args
return time(h1, m1) <= today.time() <= time(h2, m2)
if num_args == 6:
h1, m1, s1, h2, m2, s2 = args
return time(h1, m1, s1) <= today.time() <= time(h2, m2, s2)
return False | Accepted forms:
* ``timeRange(hour)``
* ``timeRange(hour1, hour2)``
* ``timeRange(hour1, min1, hour2, min2)``
* ``timeRange(hour1, min1, sec1, hour2, min2, sec2)``
* ``timeRange(hour1, min1, sec1, hour2, min2, sec2, gmt)``
``hour``
is the hour from 0 to 23. (0 is midnight, 23 is 11 pm.)
``min``
minutes from 0 to 59.
``sec``
seconds from 0 to 59.
``gmt``
either the string "GMT" for GMT timezone, or not specified, for local timezone.
Again, even though the above list doesn't show it, this parameter may be present in each of
the different parameter profiles, always as the last parameter.
:return: True during (or between) the specified time(s).
:rtype: bool | ### Input:
Accepted forms:
* ``timeRange(hour)``
* ``timeRange(hour1, hour2)``
* ``timeRange(hour1, min1, hour2, min2)``
* ``timeRange(hour1, min1, sec1, hour2, min2, sec2)``
* ``timeRange(hour1, min1, sec1, hour2, min2, sec2, gmt)``
``hour``
is the hour from 0 to 23. (0 is midnight, 23 is 11 pm.)
``min``
minutes from 0 to 59.
``sec``
seconds from 0 to 59.
``gmt``
either the string "GMT" for GMT timezone, or not specified, for local timezone.
Again, even though the above list doesn't show it, this parameter may be present in each of
the different parameter profiles, always as the last parameter.
:return: True during (or between) the specified time(s).
:rtype: bool
### Response:
def timeRange(*args):
gmt_arg_present = (len(args) == 2 and args[1] == ) or (len(args) % 2 == 1 and len(args) > 1)
if gmt_arg_present:
today = _now(args[-1])
args = args[:-1]
else:
today = _now()
num_args = len(args)
if num_args == 1:
h1 = args[0]
return h1 == today.hour
if num_args == 2:
h1, h2 = args
return h1 <= today.hour < h2
if num_args == 4:
h1, m1, h2, m2 = args
return time(h1, m1) <= today.time() <= time(h2, m2)
if num_args == 6:
h1, m1, s1, h2, m2, s2 = args
return time(h1, m1, s1) <= today.time() <= time(h2, m2, s2)
return False |
def async_raise(self, exc_type):
assert self.ident is not None,
raise ValueError(, self.ident)
elif result > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(self.ident, None)
raise RuntimeError( % (
exc_type, self.name, self.ident, result)) | Raise the exception. | ### Input:
Raise the exception.
### Response:
def async_raise(self, exc_type):
assert self.ident is not None,
raise ValueError(, self.ident)
elif result > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(self.ident, None)
raise RuntimeError( % (
exc_type, self.name, self.ident, result)) |
def ok_cred_def_id(token: str, issuer_did: str = None) -> bool:
cd_id_m = re.match(.format(B58), token or )
return bool(cd_id_m) and ((not issuer_did) or cd_id_m.group(1) == issuer_did) | Whether input token looks like a valid credential definition identifier from input issuer DID (default any); i.e.,
<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag> for protocol >= 1.4, or
<issuer-did>:3:CL:<schema-seq-no> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid credential definition identifier | ### Input:
Whether input token looks like a valid credential definition identifier from input issuer DID (default any); i.e.,
<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag> for protocol >= 1.4, or
<issuer-did>:3:CL:<schema-seq-no> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid credential definition identifier
### Response:
def ok_cred_def_id(token: str, issuer_did: str = None) -> bool:
cd_id_m = re.match(.format(B58), token or )
return bool(cd_id_m) and ((not issuer_did) or cd_id_m.group(1) == issuer_did) |
def _list_files(path, suffix=""):
if os.path.isdir(path):
incomplete = os.listdir(path)
complete = [os.path.join(path, entry) for entry in incomplete]
lists = [_list_files(subpath, suffix) for subpath in complete]
flattened = []
for one_list in lists:
for elem in one_list:
flattened.append(elem)
return flattened
else:
assert os.path.exists(path), "couldn%s'" % path
if path.endswith(suffix):
return [path]
return [] | Returns a list of all files ending in `suffix` contained within `path`.
Parameters
----------
path : str
a filepath
suffix : str
Returns
-------
l : list
A list of all files ending in `suffix` contained within `path`.
(If `path` is a file rather than a directory, it is considered
to "contain" itself) | ### Input:
Returns a list of all files ending in `suffix` contained within `path`.
Parameters
----------
path : str
a filepath
suffix : str
Returns
-------
l : list
A list of all files ending in `suffix` contained within `path`.
(If `path` is a file rather than a directory, it is considered
to "contain" itself)
### Response:
def _list_files(path, suffix=""):
if os.path.isdir(path):
incomplete = os.listdir(path)
complete = [os.path.join(path, entry) for entry in incomplete]
lists = [_list_files(subpath, suffix) for subpath in complete]
flattened = []
for one_list in lists:
for elem in one_list:
flattened.append(elem)
return flattened
else:
assert os.path.exists(path), "couldn%s'" % path
if path.endswith(suffix):
return [path]
return [] |
def _wave(self):
try:
return wave.open(StringIO(self.contents))
except wave.Error, err:
err.message += "\nInvalid wave file: %s" % self
err.args = (err.message,)
raise | Return a wave.Wave_read instance from the ``wave`` module. | ### Input:
Return a wave.Wave_read instance from the ``wave`` module.
### Response:
def _wave(self):
try:
return wave.open(StringIO(self.contents))
except wave.Error, err:
err.message += "\nInvalid wave file: %s" % self
err.args = (err.message,)
raise |
def apply_complement(self, a, return_Ya=False):
if self.V.shape[1] == 0:
if return_Ya:
return a.copy(), numpy.zeros((0, a.shape[1]))
return a.copy()
if return_Ya:
x, Ya = self._apply(a, return_Ya=True)
else:
x = self._apply(a)
z = a - x
for i in range(self.iterations-1):
w = self._apply(z)
z = z - w
if return_Ya:
return z, Ya
return z | Apply the complementary projection to an array.
:param z: array with ``shape==(N,m)``.
:return: :math:`P_{\\mathcal{Y}^\\perp,\\mathcal{X}}z =
z - P_{\\mathcal{X},\\mathcal{Y}^\\perp} z`. | ### Input:
Apply the complementary projection to an array.
:param z: array with ``shape==(N,m)``.
:return: :math:`P_{\\mathcal{Y}^\\perp,\\mathcal{X}}z =
z - P_{\\mathcal{X},\\mathcal{Y}^\\perp} z`.
### Response:
def apply_complement(self, a, return_Ya=False):
if self.V.shape[1] == 0:
if return_Ya:
return a.copy(), numpy.zeros((0, a.shape[1]))
return a.copy()
if return_Ya:
x, Ya = self._apply(a, return_Ya=True)
else:
x = self._apply(a)
z = a - x
for i in range(self.iterations-1):
w = self._apply(z)
z = z - w
if return_Ya:
return z, Ya
return z |
def handle_request(self, request, *args, **kwargs):
paginator = self.get_paginator()
search = self.get_search()
page = self.get_page(paginator)
items = self.get_items(paginator, page)
return {
: search,
: page,
: self.get_page_size(),
: paginator.num_pages,
: self.get_sort(),
: self.get_current_fields(),
: self.get_all_fields(),
: items,
} | Give back list items + config | ### Input:
Give back list items + config
### Response:
def handle_request(self, request, *args, **kwargs):
paginator = self.get_paginator()
search = self.get_search()
page = self.get_page(paginator)
items = self.get_items(paginator, page)
return {
: search,
: page,
: self.get_page_size(),
: paginator.num_pages,
: self.get_sort(),
: self.get_current_fields(),
: self.get_all_fields(),
: items,
} |
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=generate_help())
subparsers = parser.add_subparsers()
for registered_recipe in config.Config.get_registered_recipes():
recipe, recipe_args, documentation = registered_recipe
subparser = subparsers.add_parser(
recipe[],
formatter_class=utils.DFTimewolfFormatterClass,
description=.format(documentation))
subparser.set_defaults(recipe=recipe)
for switch, help_text, default in recipe_args:
subparser.add_argument(switch, help=help_text, default=default)
subparser.set_defaults(**config.Config.get_extra())
args = parser.parse_args()
recipe = args.recipe
state = DFTimewolfState(config.Config)
print()
state.load_recipe(recipe)
print(.format(
recipe[], len(recipe[])))
print()
state.setup_modules(args)
print()
print()
state.run_modules()
print(.format(recipe[])) | Main function for DFTimewolf. | ### Input:
Main function for DFTimewolf.
### Response:
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=generate_help())
subparsers = parser.add_subparsers()
for registered_recipe in config.Config.get_registered_recipes():
recipe, recipe_args, documentation = registered_recipe
subparser = subparsers.add_parser(
recipe[],
formatter_class=utils.DFTimewolfFormatterClass,
description=.format(documentation))
subparser.set_defaults(recipe=recipe)
for switch, help_text, default in recipe_args:
subparser.add_argument(switch, help=help_text, default=default)
subparser.set_defaults(**config.Config.get_extra())
args = parser.parse_args()
recipe = args.recipe
state = DFTimewolfState(config.Config)
print()
state.load_recipe(recipe)
print(.format(
recipe[], len(recipe[])))
print()
state.setup_modules(args)
print()
print()
state.run_modules()
print(.format(recipe[])) |
def add(self, other):
if not isinstance(other, BlockMatrix):
raise TypeError("Other should be a BlockMatrix, got %s" % type(other))
other_java_block_matrix = other._java_matrix_wrapper._java_model
java_block_matrix = self._java_matrix_wrapper.call("add", other_java_block_matrix)
return BlockMatrix(java_block_matrix, self.rowsPerBlock, self.colsPerBlock) | Adds two block matrices together. The matrices must have the
same size and matching `rowsPerBlock` and `colsPerBlock` values.
If one of the sub matrix blocks that are being added is a
SparseMatrix, the resulting sub matrix block will also be a
SparseMatrix, even if it is being added to a DenseMatrix. If
two dense sub matrix blocks are added, the output block will
also be a DenseMatrix.
>>> dm1 = Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])
>>> dm2 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12])
>>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [7, 11, 12])
>>> blocks1 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks2 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm2)])
>>> mat1 = BlockMatrix(blocks1, 3, 2)
>>> mat2 = BlockMatrix(blocks2, 3, 2)
>>> mat3 = BlockMatrix(blocks3, 3, 2)
>>> mat1.add(mat2).toLocalMatrix()
DenseMatrix(6, 2, [2.0, 4.0, 6.0, 14.0, 16.0, 18.0, 8.0, 10.0, 12.0, 20.0, 22.0, 24.0], 0)
>>> mat1.add(mat3).toLocalMatrix()
DenseMatrix(6, 2, [8.0, 2.0, 3.0, 14.0, 16.0, 18.0, 4.0, 16.0, 18.0, 20.0, 22.0, 24.0], 0) | ### Input:
Adds two block matrices together. The matrices must have the
same size and matching `rowsPerBlock` and `colsPerBlock` values.
If one of the sub matrix blocks that are being added is a
SparseMatrix, the resulting sub matrix block will also be a
SparseMatrix, even if it is being added to a DenseMatrix. If
two dense sub matrix blocks are added, the output block will
also be a DenseMatrix.
>>> dm1 = Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])
>>> dm2 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12])
>>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [7, 11, 12])
>>> blocks1 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks2 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm2)])
>>> mat1 = BlockMatrix(blocks1, 3, 2)
>>> mat2 = BlockMatrix(blocks2, 3, 2)
>>> mat3 = BlockMatrix(blocks3, 3, 2)
>>> mat1.add(mat2).toLocalMatrix()
DenseMatrix(6, 2, [2.0, 4.0, 6.0, 14.0, 16.0, 18.0, 8.0, 10.0, 12.0, 20.0, 22.0, 24.0], 0)
>>> mat1.add(mat3).toLocalMatrix()
DenseMatrix(6, 2, [8.0, 2.0, 3.0, 14.0, 16.0, 18.0, 4.0, 16.0, 18.0, 20.0, 22.0, 24.0], 0)
### Response:
def add(self, other):
if not isinstance(other, BlockMatrix):
raise TypeError("Other should be a BlockMatrix, got %s" % type(other))
other_java_block_matrix = other._java_matrix_wrapper._java_model
java_block_matrix = self._java_matrix_wrapper.call("add", other_java_block_matrix)
return BlockMatrix(java_block_matrix, self.rowsPerBlock, self.colsPerBlock) |
def connect(self):
if self._conn:
return self._conn
self._conn = psycopg2.connect(
self.config,
cursor_factory=psycopg2.extras.RealDictCursor,
)
self._conn.set_session(autocommit=True)
psycopg2.extras.register_hstore(self._conn)
return self._conn | Construct the psycopg2 connection instance
:return: psycopg2.connect instance | ### Input:
Construct the psycopg2 connection instance
:return: psycopg2.connect instance
### Response:
def connect(self):
if self._conn:
return self._conn
self._conn = psycopg2.connect(
self.config,
cursor_factory=psycopg2.extras.RealDictCursor,
)
self._conn.set_session(autocommit=True)
psycopg2.extras.register_hstore(self._conn)
return self._conn |
def join(self, other, *args, **kwarg):
event = Event(*args, **kwarg)
if self.intersects(other):
if self.starts_within(other):
event.begin = other.begin
else:
event.begin = self.begin
if self.ends_within(other):
event.end = other.end
else:
event.end = self.end
return event
raise ValueError(t intersect.'.format(self, other)) | Create a new event which covers the time range of two intersecting events
All extra parameters are passed to the Event constructor.
Args:
other: the other event
Returns:
a new Event instance | ### Input:
Create a new event which covers the time range of two intersecting events
All extra parameters are passed to the Event constructor.
Args:
other: the other event
Returns:
a new Event instance
### Response:
def join(self, other, *args, **kwarg):
event = Event(*args, **kwarg)
if self.intersects(other):
if self.starts_within(other):
event.begin = other.begin
else:
event.begin = self.begin
if self.ends_within(other):
event.end = other.end
else:
event.end = self.end
return event
raise ValueError(t intersect.'.format(self, other)) |
def attach_video(self, video: typing.Union[InputMediaVideo, base.InputFile],
thumb: typing.Union[base.InputFile, base.String] = None,
caption: base.String = None,
width: base.Integer = None, height: base.Integer = None, duration: base.Integer = None):
if not isinstance(video, InputMedia):
video = InputMediaVideo(media=video, thumb=thumb, caption=caption,
width=width, height=height, duration=duration)
self.attach(video) | Attach video
:param video:
:param caption:
:param width:
:param height:
:param duration: | ### Input:
Attach video
:param video:
:param caption:
:param width:
:param height:
:param duration:
### Response:
def attach_video(self, video: typing.Union[InputMediaVideo, base.InputFile],
thumb: typing.Union[base.InputFile, base.String] = None,
caption: base.String = None,
width: base.Integer = None, height: base.Integer = None, duration: base.Integer = None):
if not isinstance(video, InputMedia):
video = InputMediaVideo(media=video, thumb=thumb, caption=caption,
width=width, height=height, duration=duration)
self.attach(video) |
def explain_weights_xgboost(xgb,
vec=None,
top=20,
target_names=None,
targets=None,
feature_names=None,
feature_re=None,
feature_filter=None,
importance_type=,
):
booster, is_regression = _check_booster_args(xgb)
xgb_feature_names = booster.feature_names
coef = _xgb_feature_importances(booster, importance_type=importance_type)
return get_feature_importance_explanation(
xgb, vec, coef,
feature_names=feature_names,
estimator_feature_names=xgb_feature_names,
feature_filter=feature_filter,
feature_re=feature_re,
top=top,
description=DESCRIPTION_XGBOOST,
is_regression=is_regression,
num_features=coef.shape[-1],
) | Return an explanation of an XGBoost estimator (via scikit-learn wrapper
XGBClassifier or XGBRegressor, or via xgboost.Booster)
as feature importances.
See :func:`eli5.explain_weights` for description of
``top``, ``feature_names``,
``feature_re`` and ``feature_filter`` parameters.
``target_names`` and ``targets`` parameters are ignored.
Parameters
----------
importance_type : str, optional
A way to get feature importance. Possible values are:
- 'gain' - the average gain of the feature when it is used in trees
(default)
- 'weight' - the number of times a feature is used to split the data
across all trees
- 'cover' - the average coverage of the feature when it is used in trees | ### Input:
Return an explanation of an XGBoost estimator (via scikit-learn wrapper
XGBClassifier or XGBRegressor, or via xgboost.Booster)
as feature importances.
See :func:`eli5.explain_weights` for description of
``top``, ``feature_names``,
``feature_re`` and ``feature_filter`` parameters.
``target_names`` and ``targets`` parameters are ignored.
Parameters
----------
importance_type : str, optional
A way to get feature importance. Possible values are:
- 'gain' - the average gain of the feature when it is used in trees
(default)
- 'weight' - the number of times a feature is used to split the data
across all trees
- 'cover' - the average coverage of the feature when it is used in trees
### Response:
def explain_weights_xgboost(xgb,
vec=None,
top=20,
target_names=None,
targets=None,
feature_names=None,
feature_re=None,
feature_filter=None,
importance_type=,
):
booster, is_regression = _check_booster_args(xgb)
xgb_feature_names = booster.feature_names
coef = _xgb_feature_importances(booster, importance_type=importance_type)
return get_feature_importance_explanation(
xgb, vec, coef,
feature_names=feature_names,
estimator_feature_names=xgb_feature_names,
feature_filter=feature_filter,
feature_re=feature_re,
top=top,
description=DESCRIPTION_XGBOOST,
is_regression=is_regression,
num_features=coef.shape[-1],
) |
def login(self):
if self.args.snmp_force:
self.client_mode =
else:
if not self._login_glances():
return False
if self.client_mode == :
if not self._login_snmp():
return False
logger.debug("Load limits from the client configuration file")
self.stats.load_limits(self.config)
if self.quiet:
logger.info("Quiet mode is ON: Nothing will be displayed")
else:
self.screen = GlancesCursesClient(config=self.config, args=self.args)
return True | Logon to the server. | ### Input:
Logon to the server.
### Response:
def login(self):
if self.args.snmp_force:
self.client_mode =
else:
if not self._login_glances():
return False
if self.client_mode == :
if not self._login_snmp():
return False
logger.debug("Load limits from the client configuration file")
self.stats.load_limits(self.config)
if self.quiet:
logger.info("Quiet mode is ON: Nothing will be displayed")
else:
self.screen = GlancesCursesClient(config=self.config, args=self.args)
return True |
def sup_inf(u):
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
erosions = []
for P_i in P:
erosions.append(ndi.binary_erosion(u, P_i))
return np.array(erosions, dtype=np.int8).max(0) | SI operator. | ### Input:
SI operator.
### Response:
def sup_inf(u):
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
erosions = []
for P_i in P:
erosions.append(ndi.binary_erosion(u, P_i))
return np.array(erosions, dtype=np.int8).max(0) |
def _retrieve_all_teams(self, year):
team_data_dict = {}
if not year:
year = utils._find_year_for_season()
doc = pq(SEASON_PAGE_URL % year)
teams_list = utils._get_stats_table(doc, )
opp_teams_list = utils._get_stats_table(doc,
)
for stats_list in [teams_list, opp_teams_list]:
team_data_dict = self._add_stats_data(stats_list, team_data_dict)
for team_data in team_data_dict.values():
team = Team(team_data[], team_data[], year)
self._teams.append(team) | Find and create Team instances for all teams in the given season.
For a given season, parses the specified NBA stats table and finds all
requested stats. Each team then has a Team instance created which
includes all requested stats and a few identifiers, such as the team's
name and abbreviation. All of the individual Team instances are added
to a list.
Note that this method is called directly once Teams is invoked and does
not need to be called manually.
Parameters
----------
year : string
The requested year to pull stats from. | ### Input:
Find and create Team instances for all teams in the given season.
For a given season, parses the specified NBA stats table and finds all
requested stats. Each team then has a Team instance created which
includes all requested stats and a few identifiers, such as the team's
name and abbreviation. All of the individual Team instances are added
to a list.
Note that this method is called directly once Teams is invoked and does
not need to be called manually.
Parameters
----------
year : string
The requested year to pull stats from.
### Response:
def _retrieve_all_teams(self, year):
team_data_dict = {}
if not year:
year = utils._find_year_for_season()
doc = pq(SEASON_PAGE_URL % year)
teams_list = utils._get_stats_table(doc, )
opp_teams_list = utils._get_stats_table(doc,
)
for stats_list in [teams_list, opp_teams_list]:
team_data_dict = self._add_stats_data(stats_list, team_data_dict)
for team_data in team_data_dict.values():
team = Team(team_data[], team_data[], year)
self._teams.append(team) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.