code | docstring
|---|---|
def rollforward(self, dt):
if not self.onOffset(dt):
if self.n >= 0:
return self._next_opening_time(dt)
else:
return self._prev_opening_time(dt)
return dt | Roll provided date forward to next offset only if not on offset. |
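A minimal usage sketch, assuming this is the rollforward of a pandas-style DateOffset such as BusinessHour (timestamp chosen for illustration):
import pandas as pd
offset = pd.offsets.BusinessHour()  # Mon-Fri, 09:00-17:00 by default
ts = pd.Timestamp("2024-01-06 10:00")  # a Saturday, so not on offset
offset.rollforward(ts)  # rolls forward to the next opening time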
def dump(o, f):
if not f.write:
raise TypeError("You can only dump an object to a file descriptor")
d = dumps(o)
f.write(d)
return d | Writes out dict as toml to a file
Args:
o: Object to dump into toml
f: File descriptor where the toml should be stored
Returns:
String containing the toml corresponding to dictionary
Raises:
TypeError: When anything other than file descriptor is passed |
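A short usage sketch, assuming this is the toml package's dump (file name hypothetical):
import toml
config = {"server": {"host": "localhost", "port": 8080}}
with open("config.toml", "w") as fh:
    toml.dump(config, fh)  # writes the TOML text and also returns it as a string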
def check_file(
state,
fname,
missing_msg="Did you create a file named `{}`?",
is_dir_msg="Want to check a file named `{}`, but found a directory.",
parse=True,
use_fs=True,
use_solution=False,
):
if use_fs:
p = Path(fname)
if not p.exists():
state.report(Feedback(missing_msg.format(fname)))
if p.is_dir():
state.report(Feedback(is_dir_msg.format(fname)))
code = p.read_text()
else:
code = _get_fname(state, "student_code", fname)
if code is None:
state.report(Feedback(missing_msg.format(fname)))
sol_kwargs = {"solution_code": None, "solution_ast": None}
if use_solution:
sol_code = _get_fname(state, "solution_code", fname)
if sol_code is None:
raise Exception("Solution code does not have file named: %s" % fname)
sol_kwargs["solution_code"] = sol_code
sol_kwargs["solution_ast"] = (
state.parse(sol_code, test=False) if parse else None
)
return state.to_child(
student_code=code,
student_ast=state.parse(code) if parse else None,
fname=fname,
**sol_kwargs
) | Test whether file exists, and make its contents the student code.
Note: this SCT fails if the file is a directory. |
def _set_sport(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u: {: 3}, u: {: 2}, u: {: 1}, u: {: 5}, u: {: 4}},), is_leaf=True, yang_name="sport", rest_name="sport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: None, u: None, u: None, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "brocade-ipv6-access-list:enumeration",
: ,
})
self.__sport = t
if hasattr(self, '_set'):
self._set() | Setter method for sport, mapped from YANG variable /ipv6_acl/ipv6/access_list/extended/seq/sport (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_sport is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sport() directly. |
def get_obsmeta(self, lcid):
if self._obsdata is None:
self._obsdata = fetch_rrlyrae_fitdata()
i = np.where(self._obsdata[] == lcid)[0]
if len(i) == 0:
raise ValueError("invalid lcid: {0}".format(lcid))
return self._obsdata[i[0]] | Get the observation metadata for the given id.
This is table 3 of Sesar 2010 |
def _growing_step_sequence(interval_growth, max_interval, init_interval, start_level=None):
interval = init_interval
next_level = start_level or init_interval
while True:
yield next_level
interval = min(interval * interval_growth, max_interval)
next_level += interval | Returns an iterator that constructs a sequence of trigger levels with growing intervals.
The interval grows exponentially until it reaches the maximum value, after which it
stays the same and the sequence becomes linear.
An optional starting level `start_level` defaults to the initial interval. The interval
starts out as `init_interval`, multiplied by `interval_growth` in each step until it
reaches the `max_interval`. |
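A quick illustration of the generated levels (parameter values chosen arbitrarily): with init_interval=1, interval_growth=2 and max_interval=4, the gaps after the first yield are 2, 4, 4, 4, ...
import itertools
levels = _growing_step_sequence(interval_growth=2, max_interval=4, init_interval=1)
print(list(itertools.islice(levels, 6)))  # [1, 3, 7, 11, 15, 19]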
def convert(model, input_shape, class_labels=None, mode=None,
preprocessor_args=None, builder=None, verbose=True):
if not isinstance(input_shape, list):
raise TypeError("Must provide a list for input shape. e.g input_shape=[(, (3,224,224))]")
def remove_batch(dim):
return dim[1:]
input_names, input_dims = zip(*input_shape)
input_dims = list(map(remove_batch, input_dims))
net = model.symbol
shapes = net.infer_shape(**dict(input_shape))
arg_names = net.list_arguments()
output_names = net.list_outputs()
aux_names = net.list_auxiliary_states()
shape_dict = {}
for idx, op in enumerate(arg_names):
shape_dict[op] = shapes[0][idx]
for idx, op in enumerate(output_names):
shape_dict[op] = shapes[1][idx]
for idx, op in enumerate(aux_names):
shape_dict[op] = shapes[2][idx]
output_dims = shapes[1]
if mode is None:
output_dims = list(map(remove_batch, output_dims))
input_types = [_datatypes.Array(*dim) for dim in input_dims]
output_types = [_datatypes.Array(*dim) for dim in output_dims]
input_features = list(zip(input_names, input_types))
output_features = list(zip(output_names, output_types))
finalize = builder is None
if builder is None:
builder = _neural_network.NeuralNetworkBuilder(input_features, output_features, mode)
net = _json.loads(net.tojson())
nodes = net[]
for i, node in enumerate(nodes):
node[] = i
if node[] in shape_dict:
node[] = shape_dict[node[]]
node[] = []
if in node:
for ip in node[]:
nodes[ip[0]][].append([i, 0])
else:
node[] = []
for head in net[]:
head_id = head[0]
head_node = nodes[head_id]
head_node[] = [head]
head_node[] += "_output"
head_node[] = shape_dict[head_node[]]
for node in nodes:
op = node[]
inputs = node[]
outputs = node[]
if op in _MXNET_SKIP_LAYERS:
nodes[inputs[0][0]][][0] = outputs[0]
nodes[outputs[0][0]][][0] = inputs[0]
for idx, node in enumerate(nodes):
op = node[]
if op == or op in _MXNET_SKIP_LAYERS:
continue
name = node[]
if verbose:
print("%d : %s, %s" % (idx, name, op))
converter_func = _get_layer_converter_fn(op)
converter_func(net, node, model, builder)
if finalize:
_set_input_output_layers(builder, input_names, output_names)
builder.set_input(input_names, input_dims)
builder.set_output(output_names, output_dims)
if preprocessor_args is not None:
builder.set_pre_processing_parameters(**preprocessor_args)
if class_labels is not None:
if type(class_labels) is str:
labels = [l.strip() for l in open(class_labels).readlines()]
elif type(class_labels) is list:
labels = class_labels
else:
raise TypeError("synset variable of unknown type. Type found: %s. Expected either string or list of strings." % type(class_labels))
builder.set_class_labels(class_labels = labels)
return _coremltools.models.MLModel(builder.spec) | Convert an MXNet model to the protobuf spec.
Parameters
----------
model: MXNet model
A trained MXNet neural network model.
input_shape: list of tuples
A list of (name, shape) tuples, defining the input names and their
shapes. The list also serves to define the desired order of the inputs.
class_labels: A string or list of strings.
As a string it represents the name of the file which contains the classification labels (one per line).
As a list of strings it represents a list of categories that map the index of the output of a neural network to labels in a classifier.
mode: str ('classifier', 'regressor' or None)
Mode of the converted coreml model.
When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed.
When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed.
builder: `NeuralNetworkBuilder`
If `None`, a builder will be created internally. This also means the
builder will not be finalized and returned as an `MLModel`.
Post-processing arguments will be ignored and class labels will not be
integrated. This option is meant for advanced users.
verbose: bool
Print exported layers.
**kwargs :
Provide keyword arguments for:
- input shapes. Supplied as a dictionary object with keyword "input_shape".
- pre-processing arguments: Supplied as a dictionary object with keyword "preprocessor_args". The parameters in the dictionary
tell the converted coreml model how to pre-process any input before an inference is run on it.
For the list of pre-processing arguments see
http://pythonhosted.org/coremltools/generated/coremltools.models.neural_network.html#coremltools.models.neural_network.NeuralNetworkBuilder.set_pre_processing_parameters
Returns
-------
model: A coreml model. |
def convert_models_for_lang(language):
language = language.title()
lang_metadata = LANGUAGES.get(language)
if not lang_metadata:
raise ValueError(
.format(language))
lang_mod_name = .format(language.lower())
if not os.path.exists(os.path.join(, lang_mod_name + )):
print(
.format(language))
return
lang_mod = getattr(chardet, lang_mod_name)
print(
.format(language))
print(.format(lang_metadata.use_ascii))
print(.format(lang_metadata.alphabet))
charset_models = {}
char_ranks = {}
order_to_chars = {}
for var_name in dir(lang_mod):
if not ( in var_name and not in var_name):
continue
old_model = getattr(lang_mod, var_name)
charset_name = old_model[]
print(.format(charset_name))
sys.stdout.flush()
charset_models[charset_name] = convert_sbcs_model(old_model,
lang_metadata.alphabet)
for byte_hex, order in iteritems(charset_models[charset_name].char_to_order_map):
file=output_file) | Convert old SingleByteCharSetModels for the given language |
def updateBar(self, bar):
self.count += 1
if not self.inited and self.count >= self.size:
self.inited = True
self.openArray[0:self.size - 1] = self.openArray[1:self.size]
self.highArray[0:self.size - 1] = self.highArray[1:self.size]
self.lowArray[0:self.size - 1] = self.lowArray[1:self.size]
self.closeArray[0:self.size - 1] = self.closeArray[1:self.size]
self.volumeArray[0:self.size - 1] = self.volumeArray[1:self.size]
self.openArray[-1] = bar.open
self.highArray[-1] = bar.high
self.lowArray[-1] = bar.low
self.closeArray[-1] = bar.close
self.volumeArray[-1] = bar.volume | Update the K-line (candlestick) bar data. |
def update_flowspec_global_table(self, flowspec_family, rules,
actions=None, is_withdraw=False):
from ryu.services.protocols.bgp.core import BgpCoreError
from ryu.services.protocols.bgp.api.prefix import (
FLOWSPEC_FAMILY_IPV4,
FLOWSPEC_FAMILY_IPV6,
FLOWSPEC_FAMILY_L2VPN,
)
src_ver_num = 1
peer = None
origin = BGPPathAttributeOrigin(BGP_ATTR_ORIGIN_IGP)
aspath = BGPPathAttributeAsPath([[]])
pathattrs = OrderedDict()
pathattrs[BGP_ATTR_TYPE_ORIGIN] = origin
pathattrs[BGP_ATTR_TYPE_AS_PATH] = aspath
if flowspec_family == FLOWSPEC_FAMILY_IPV4:
_nlri = FlowSpecIPv4NLRI.from_user(**rules)
p = IPv4FlowSpecPath
try:
communities = create_v4flowspec_actions(actions)
except ValueError as e:
raise BgpCoreError(desc=str(e))
if communities:
pathattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = (
BGPPathAttributeExtendedCommunities(
communities=communities))
elif flowspec_family == FLOWSPEC_FAMILY_IPV6:
_nlri = FlowSpecIPv6NLRI.from_user(**rules)
p = IPv6FlowSpecPath
try:
communities = create_v6flowspec_actions(actions)
except ValueError as e:
raise BgpCoreError(desc=str(e))
if communities:
pathattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = (
BGPPathAttributeExtendedCommunities(
communities=communities))
elif flowspec_family == FLOWSPEC_FAMILY_L2VPN:
_nlri = FlowSpecL2VPNNLRI.from_user(**rules)
p = L2vpnFlowSpecPath
try:
communities = create_l2vpnflowspec_actions(actions)
except ValueError as e:
raise BgpCoreError(desc=str(e))
if communities:
pathattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = (
BGPPathAttributeExtendedCommunities(
communities=communities))
else:
raise BgpCoreError(
desc= % flowspec_family)
new_path = p(peer, _nlri, src_ver_num,
pattrs=pathattrs, is_withdraw=is_withdraw)
self.learn_path(new_path) | Update a BGP route in the Global table for Flow Specification.
``flowspec_family`` specifies one of the Flow Specification
family names.
``rules`` specifies NLRIs of Flow Specification as
a dictionary type value.
``actions`` specifies Traffic Filtering Actions of
Flow Specification as a dictionary type value.
If `is_withdraw` is False, which is the default, add a BGP route
to the Global table.
If `is_withdraw` is True, remove a BGP route from the Global table. |
def save_metadata(self, data_dir, feature_name=None):
for feature_key, feature in six.iteritems(self._feature_dict):
if feature_name:
feature_key = .join((feature_name, feature_key))
feature.save_metadata(data_dir, feature_name=feature_key) | See base class for details. |
def rex_assert(self, rex, byte=False):
self.rex_search(rex, byte=byte) | If `rex` expression is not found then raise `DataNotFound` exception. |
def audits(self, ticket=None, include=None, **kwargs):
if ticket is not None:
return self._query_zendesk(self.endpoint.audits, , id=ticket, include=include)
else:
return self._query_zendesk(self.endpoint.audits.cursor, , include=include, **kwargs) | Retrieve TicketAudits. If a ticket is passed, return the audits for that specific ticket.
If ticket_id is None, a TicketAuditGenerator is returned to handle pagination. The way this generator
works is different from the other Zenpy generators as it is cursor based, allowing you to change the
direction that you are consuming objects. This is done with the reversed() python method.
For example:
.. code-block:: python
for audit in reversed(zenpy_client.tickets.audits()):
print(audit)
See the `Zendesk docs <https://developer.zendesk.com/rest_api/docs/core/ticket_audits#pagination>`__ for
information on additional parameters.
:param include: list of objects to sideload. `Side-loading API Docs
<https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
:param ticket: Ticket object or id |
def whereis(self, channel):
if channel in self:
return tuple(self.channels_db[channel])
else:
return tuple() | get occurrences of channel name in the file
Parameters
----------
channel : str
channel name string
Returns
-------
occurrences : tuple
Examples
--------
>>> mdf = MDF(file_name)
>>> mdf.whereis('VehicleSpeed') # "VehicleSpeed" exists in the file
((1, 2), (2, 4))
>>> mdf.whereis('VehicleSPD') # "VehicleSPD" doesn't exist in the file
() |
def configure_logging(level):
global logging_level
logging_level = logging.ERROR
if "info" == level.lower():
logging_level = logging.INFO
elif "warn" == level.lower():
logging_level = logging.WARNING
elif "debug" == level.lower():
logging_level = logging.DEBUG | Configure the global log level to the given one
:param level: Level (INFO | DEBUG | WARN | ERROR)
:return: |
def in_memory(self, op_in_mem):
old_state = self.in_memory
if not old_state and op_in_mem:
self._map_to_memory()
elif not op_in_mem and old_state:
self._clear_in_memory() | r"""
If set to True, the output will be stored in memory. |
def all_subclasses(cls):
subclasses = cls.__subclasses__()
descendants = (descendant for subclass in subclasses
for descendant in all_subclasses(subclass))
return set(subclasses) | set(descendants) | Recursively returns all the subclasses of the provided class. |
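A tiny self-contained illustration (class names hypothetical):
class Base: pass
class Child(Base): pass
class GrandChild(Child): pass
all_subclasses(Base)  # {Child, GrandChild}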
def connect(url, username, password):
bb_session = stashy.connect(url, username, password)
logger.info(, url, username)
return bb_session | Return a connected Bitbucket session |
def project_with_metadata(self, term_doc_mat, x_dim=0, y_dim=1):
return self._project_category_corpus(self._get_category_metadata_corpus_and_replace_terms(term_doc_mat),
x_dim, y_dim) | Returns a projection of the category corpus, with metadata used in place of terms.
:param term_doc_mat: a TermDocMatrix
:return: CategoryProjection |
def send_request_email(
authorised_text, authorised_role, authorised_persons, application,
link, is_secret):
context = CONTEXT.copy()
context[] = application.applicant
context[] = link
context[] = is_secret
context[] = application
context[] = authorised_text
_send_request_email(
context,
authorised_role, authorised_persons,
"common_request") | Sends an email to admin asking to approve user application |
def gelman_rubin(self, chain=None, threshold=0.05):
if chain is None:
return np.all([self.gelman_rubin(k, threshold=threshold) for k in range(len(self.parent.chains))])
index = self.parent._get_chain(chain)
assert len(index) == 1, "Please specify only one chain, have %d chains" % len(index)
chain = self.parent.chains[index[0]]
num_walkers = chain.walkers
parameters = chain.parameters
name = chain.name
data = chain.chain
chains = np.split(data, num_walkers)
assert num_walkers > 1, "Cannot run Gelman-Rubin statistic with only one walker"
m = 1.0 * len(chains)
n = 1.0 * chains[0].shape[0]
all_mean = np.mean(data, axis=0)
chain_means = np.array([np.mean(c, axis=0) for c in chains])
chain_var = np.array([np.var(c, axis=0, ddof=1) for c in chains])
b = n / (m - 1) * ((chain_means - all_mean)**2).sum(axis=0)
w = (1 / m) * chain_var.sum(axis=0)
var = (n - 1) * w / n + b / n
v = var + b / (n * m)
R = np.sqrt(v / w)
passed = np.abs(R - 1) < threshold
print("Gelman-Rubin Statistic values for chain %s" % name)
for p, v, pas in zip(parameters, R, passed):
param = "Param %d" % p if isinstance(p, int) else p
print("%s: %7.5f (%s)" % (param, v, "Passed" if pas else "Failed"))
return np.all(passed) | r""" Runs the Gelman Rubin diagnostic on the supplied chains.
Parameters
----------
chain : int|str, optional
Which chain to run the diagnostic on. By default, this is `None`,
which will run the diagnostic on all chains. You can also
supply an integer (the chain index) or a string, for the chain
name (if you set one).
threshold : float, optional
The maximum deviation permitted from 1 for the final value
:math:`\hat{R}`
Returns
-------
bool
whether or not the chains pass the test
Notes
-----
I follow PyMC in calculating the Gelman-Rubin statistic, where,
having :math:`m` chains of length :math:`n`, we compute
.. math::
B = \frac{n}{m-1} \sum_{j=1}^{m} \left(\bar{\theta}_{.j} - \bar{\theta}_{..}\right)^2
W = \frac{1}{m} \sum_{j=1}^{m} \left[ \frac{1}{n-1} \sum_{i=1}^{n} \left( \theta_{ij} - \bar{\theta_{.j}}\right)^2 \right]
where :math:`\theta` represents each model parameter. We then compute
:math:`\hat{V} = \frac{n_1}{n}W + \frac{1}{n}B`, and have our convergence ratio
:math:`\hat{R} = \sqrt{\frac{\hat{V}}{W}}`. We check that for all parameters,
this ratio deviates from unity by less than the supplied threshold. |
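A hedged usage sketch, assuming the ChainConsumer-style API this diagnostic appears to belong to (sample data and names hypothetical):
import numpy as np
from chainconsumer import ChainConsumer
samples = np.random.normal(size=(4000, 2))  # flattened MCMC samples for two parameters
c = ChainConsumer()
c.add_chain(samples, parameters=["a", "b"], walkers=8, name="demo")
c.diagnostic.gelman_rubin(threshold=0.05)  # prints per-parameter R-hat values and returns pass/fail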
def relative_root_dir(self):
return Path(self.bundle.name) / str(self.created_at.date()) | Build the relative root dir path for the bundle version. |
def solve_apply(expr, vars):
func = __solve_for_scalar(expr.func, vars)
args = []
kwargs = {}
for arg in expr.args:
if isinstance(arg, ast.Pair):
if not isinstance(arg.lhs, ast.Var):
raise errors.EfilterError(
root=arg.lhs,
message="Invalid argument name.")
kwargs[arg.key.value] = solve(arg.value, vars).value
else:
args.append(solve(arg, vars).value)
result = applicative.apply(func, args, kwargs)
return Result(result, ()) | Returns the result of applying function (lhs) to its arguments (rest).
We use IApplicative to apply the function, because that gives the host
application an opportunity to compare the function being called against
a whitelist. EFILTER will never directly call a function that wasn't
provided through a protocol implementation. |
def __expire_files(self):
self.__files = OrderedDict(
item for item in self.__files.items() if not item[1].expired
) | Because files are always unclean |
def _next_offset(self):
if self._filestream:
offset = self._filestream.tell()
if offset:
offset -= 1
else:
offset = self._initial_offset
return offset | Return the offset of the next line to read. |
def mem_size(self):
data_len = self._data_mem_size
node_count = len(list(self.xml_doc.iter(tag=etree.Element)))
if self.compressed:
size = 52 * node_count + data_len + 630
else:
tags_len = 0
for e in self.xml_doc.iter(tag=etree.Element):
e_len = max(len(e.tag), 8)
e_len = (e_len + 3) & ~3
tags_len += e_len
size = 56 * node_count + data_len + 630 + tags_len
return (size + 8) & ~7 | used when allocating memory in-game |
def _history_locked(self):
return (self.history_lock and
(self._get_edited_history(self._history_index) !=
self.input_buffer) and
(self._get_prompt_cursor().blockNumber() !=
self._get_end_cursor().blockNumber())) | Returns whether history movement is locked. |
def create_volume(kwargs=None, call=None, wait_to_finish=False):
{"tag1": "val1", "tag2", "val2"}
if call != :
log.error(
)
return False
if not in kwargs:
log.error()
return False
if not in kwargs and not in kwargs:
kwargs[] =
params = {: ,
: kwargs[]}
if in kwargs:
params[] = kwargs[]
if in kwargs:
params[] = kwargs[]
if in kwargs:
params[] = kwargs[]
if in kwargs and kwargs.get(, ) == :
params[] = kwargs[]
return r_data | Create a volume.
zone
The availability zone used to create the volume. Required. String.
size
The size of the volume, in GiBs. Defaults to ``10``. Integer.
snapshot
The snapshot-id from which to create the volume. Integer.
type
The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned
IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for
Magnetic volumes. String.
iops
The number of I/O operations per second (IOPS) to provision for the volume,
with a maximum ratio of 50 IOPS/GiB. Only valid for Provisioned IOPS SSD
volumes. Integer.
This option will only be set if ``type`` is also specified as ``io1``.
encrypted
Specifies whether the volume will be encrypted. Boolean.
If ``snapshot`` is also given in the list of kwargs, then this value is ignored
since volumes that are created from encrypted snapshots are also automatically
encrypted.
tags
The tags to apply to the volume during creation. Dictionary.
call
The ``create_volume`` function must be called with ``-f`` or ``--function``.
String.
wait_to_finish
Whether or not to wait for the volume to be available. Boolean. Defaults to
``False``.
CLI Examples:
.. code-block:: bash
salt-cloud -f create_volume my-ec2-config zone=us-east-1b
salt-cloud -f create_volume my-ec2-config zone=us-east-1b tags='{"tag1": "val1", "tag2": "val2"}' |
def _mask_cov_func(self, *args):
if self.num_dim == 1:
return self.cov_func(args[0], args[1], *self.params)
else:
return self.cov_func(args[:self.num_dim], args[self.num_dim:], *self.params) | Masks the covariance function into a form usable by :py:func:`mpmath.diff`.
Parameters
----------
*args : `num_dim` * 2 floats
The individual elements of Xi and Xj to be passed to :py:attr:`cov_func`. |
def reserveIdentifierResponse(self, pid, vendorSpecific=None):
mmp_dict = {: pid}
return self.POST([, pid], fields=mmp_dict, headers=vendorSpecific) | CNCore.getLogRecords(session[, fromDate][, toDate][, event][, start][,
count]) → Log https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNCore.getLogRecords Implemented in
d1_client.baseclient.py.
CNCore.reserveIdentifier(session, pid) → Identifier
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNCore.reserveIdentifier
Args:
pid:
vendorSpecific:
Returns: |
def time(self):
"Return the time part, with tzinfo None."
return time(self.hour, self.minute, self.second, self.microsecond) | Return the time part, with tzinfo None. |
def get_next_instance(self, obj):
ordered_siblings = obj.get_siblings().filter(placeholder=obj.placeholder).order_by()
pos = list(ordered_siblings).index(obj.cmsplugin_ptr)
if pos < ordered_siblings.count() - 1:
next_sibling = ordered_siblings[pos + 1]
return next_sibling.get_bound_plugin() | Return the next plugin instance for the given object.
This differs from `obj.get_next_sibling()` which returns an unsorted sibling. |
def find( cls, name ):
if ( cls._plugins is None ):
cls._plugins = {}
return cls._plugins.get(nativestring(name)) | Finds a particular wizard plugin based on its name.
:param name | <str> || None |
def get_dict_repr(self):
return dict(
phase_name = self.phase_name,
phase_type = self.phase_type,
actions = self.actions
) | Return a dictionary representation of this phase.
This will be used for checksumming, in order to uniquely compare
instance images against their requirements |
def from_dict(d):
return Specs(
qubits_specs=sorted([QubitSpecs(id=int(q),
fRO=qspecs.get(),
f1QRB=qspecs.get(),
T1=qspecs.get(),
T2=qspecs.get(),
fActiveReset=qspecs.get())
for q, qspecs in d["1Q"].items()],
key=lambda qubit_specs: qubit_specs.id),
edges_specs=sorted([EdgeSpecs(targets=[int(q) for q in e.split()],
fBellState=especs.get(),
fCZ=especs.get(),
fCZ_std_err=especs.get(),
fCPHASE=especs.get())
for e, especs in d["2Q"].items()],
key=lambda edge_specs: edge_specs.targets)
) | Re-create the Specs from a dictionary representation.
:param Dict[str, Any] d: The dictionary representation.
:return: The restored Specs.
:rtype: Specs |
def get_large_image(self, page=1):
url = self.get_large_image_url(page=page)
return self._get_url(url) | Downloads and returns the large sized image of a single page.
The page kwarg specifies which page to return. One is the default. |
def _null_ac_sia(transition, direction, alpha=0.0):
return AcSystemIrreducibilityAnalysis(
transition=transition,
direction=direction,
alpha=alpha,
account=(),
partitioned_account=()
) | Return an |AcSystemIrreducibilityAnalysis| with zero |big_alpha| and
empty accounts. |
def setcontents(source, identifier, pointer):
record = Record.get_record(identifier)
Document(record, pointer).setcontents(source) | Patch existing bibliographic record. |
def _create_memory_database_interface(self) -> GraphDatabaseInterface:
Base = declarative_base()
engine = sqlalchemy.create_engine("sqlite://", poolclass=StaticPool)
Session = sessionmaker(bind=engine)
dbi: GraphDatabaseInterface = create_graph_database_interface(
sqlalchemy, Session(), Base, sqlalchemy.orm.relationship
)
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
return dbi | Creates and returns the in-memory database interface the graph will use. |
def createEditor( self, parent, option, index ):
item = self.parent().itemFromIndex(index)
column = self.column(index)
if column and \
not column.isReadOnly() and \
isinstance(item, XOrbRecordItem):
plugin = self.plugin(column)
if not plugin:
return None
return plugin.createEditor(parent, item.record(), column)
return super(XOrbTreeWidgetDelegate, self).createEditor(parent,
option,
index) | Creates a new editor for the given index parented to the inputed widget.
:param parent | <QWidget>
option | <QStyleOption>
index | <QModelIndex>
:return <QWidget> || None |
def Bernoulli(prob_true: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().BernoulliVertex, label, cast_to_double_vertex(prob_true)) | One to one constructor for mapping some shape of probTrue to
a matching shaped Bernoulli.
:param prob_true: probTrue with same shape as desired Bernoulli tensor or scalar |
def onPollCreated(
self,
mid=None,
poll=None,
author_id=None,
thread_id=None,
thread_type=None,
ts=None,
metadata=None,
msg=None,
):
log.info(
"{} created poll {} in {} ({})".format(
author_id, poll, thread_id, thread_type.name
)
) | Called when the client is listening, and somebody creates a group poll
:param mid: The action ID
:param poll: Created poll
:param author_id: The ID of the person who created the poll
:param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
:param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
:param ts: A timestamp of the action
:param metadata: Extra metadata about the action
:param msg: A full set of the data received
:type poll: models.Poll
:type thread_type: models.ThreadType |
def discover(self, details = False):
if details and not (isinstance(details, str) and details.lower() == ):
return copy.deepcopy(self.discoverinfo)
else:
return dict((k,v.get(, )) for k,v in self.discoverinfo.items()) | Discover API definitions. Set details=true to show details |
def getfile(self):
current_file = str(self.selectedFiles()[0])
if os.path.isfile(current_file):
print , current_file
if current_file.endswith() or current_file.endswith():
fmode =
else:
fmode =
else:
if not current_file.endswith() and not current_file.endswith():
current_file +=
fmode =
return current_file, fmode | Gets the full file path of the entered/selected file
:returns: str -- the name of the data file to open/create |
def _copy(self, axis=True, attr=True, data=False):
cdata = type(self)()
cdata.s_freq = self.s_freq
cdata.start_time = self.start_time
if axis:
cdata.axis = deepcopy(self.axis)
else:
cdata_axis = OrderedDict()
for axis_name in self.axis:
cdata_axis[axis_name] = array([], dtype=)
cdata.axis = cdata_axis
if attr:
cdata.attr = deepcopy(self.attr)
if data:
cdata.data = deepcopy(self.data)
else:
cdata.data = empty(self.number_of(), dtype=)
return cdata | Create a new instance of Data, but do not necessarily copy the data.
Parameters
----------
axis : bool, optional
deep copy the axes (default: True)
attr : bool, optional
deep copy the attributes (default: True)
data : bool, optional
deep copy the data (default: False)
Returns
-------
instance of Data (or ChanTime, ChanFreq, ChanTimeFreq)
copy of the data, but without the actual data
Notes
-----
It's important that we copy all the relevant information here. If you
add new attributes, you should add them here.
Remember that it deep-copies all the information, so if you copy data
the size might become really large. |
def factor_mark(field_name, markers, factors, start=0, end=None):
return field(field_name, CategoricalMarkerMapper(markers=markers,
factors=factors,
start=start,
end=end)) | Create a ``DataSpec`` dict that applies a client-side
``CategoricalMarkerMapper`` transformation to a ``ColumnDataSource``
column.
.. note::
This transform is primarily only useful with ``scatter``, which
can be parameterized by glyph type.
Args:
field_name (str) : a field name to configure ``DataSpec`` with
markers (seq[string]) : a list of markers to use to map to
factors (seq) : a sequence of categorical factors corresponding to
the palette
start (int, optional) : a start slice index to apply when the column
data has factors with multiple levels. (default: 0)
end (int, optional) : an end slice index to apply when the column
data has factors with multiple levels. (default: None)
Returns:
dict |
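A short Bokeh usage sketch (column and factor names hypothetical); factor_mark is typically paired with scatter so each category gets its own marker shape:
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
source = ColumnDataSource(data=dict(x=[1, 2, 3], y=[2, 5, 3], species=["setosa", "versicolor", "virginica"]))
p = figure()
p.scatter("x", "y", source=source,
          marker=factor_mark("species", ["circle", "square", "triangle"],
                             ["setosa", "versicolor", "virginica"]))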
def shutdown(self, force=False):
if not force:
self.join()
self._dbg(2, )
self.workqueue.shutdown(True)
self._dbg(2, )
self._del_status_bar() | Stop executing any further jobs. If the force argument is True,
the function does not wait until any queued jobs are completed but
stops immediately.
After emptying the queue it is restarted, so you may still call run()
after using this method.
:type force: bool
:param force: If True, stop immediately instead of waiting for queued jobs to complete.
def find_version_by_string_lib(line):
if not line:
return None
simplified_line = simplify_line(line)
version = None
if simplified_line.startswith("version="):
if not in simplified_line:
pass
else:
if "=" in simplified_line:
post_equals = simplified_line.split("=")[0]
if in post_equals:
parts = post_equals.split()
if len(parts) != 3:
version = parts[0]
return version | No regex parsing. Or at least, mostly, not regex. |
def generateLowerBoundList(confidence, numUniqueFeatures, numLocationsPerObject,
maxNumObjects):
maxNumOtherLocations = maxNumObjects*10 - 1
results = zip(itertools.count(1),
findBinomialNsWithLowerBoundSampleMinimum(
confidence,
itertools.count(1), 1./numUniqueFeatures, numLocationsPerObject,
maxNumOtherLocations))
finalResults = [(numOtherLocations, interpolatedN / numLocationsPerObject)
for numOtherLocations, (interpolatedN, _, _) in results]
return finalResults | Metric: How unique is each object's most unique feature? Calculate the
probabilistic lower bound for the number of occurrences of an object's most
unique feature. For example, if confidence is 0.8, the tick "3" will be placed
at the point where 80% of objects are completely composed of features with 3
or more total occurrences, and 20% of objects have at least one feature that
has 2 or fewer total occurrences. |
def ext_pillar(minion_id,
pillar,
config_file):
config_template = None
try:
config_template = _render_template(config_file)
except jinja2.exceptions.TemplateNotFound:
log.debug(, config_file)
except Exception:
log.debug(,
config_file, exc_info=True)
if not config_template:
if result:
data = _result_to_dict(data, result, config, source)
return data | Execute LDAP searches and return the aggregated data |
def get_system_uptime_output_show_system_uptime_rbridge_id(self, **kwargs):
config = ET.Element("config")
get_system_uptime = ET.Element("get_system_uptime")
config = get_system_uptime
output = ET.SubElement(get_system_uptime, "output")
show_system_uptime = ET.SubElement(output, "show-system-uptime")
rbridge_id = ET.SubElement(show_system_uptime, "rbridge-id")
rbridge_id.text = kwargs.pop()
callback = kwargs.pop(, self._callback)
return callback(config) | Auto Generated Code |
def export_module_spec_with_checkpoint(module_spec,
checkpoint_path,
export_path,
scope_prefix=""):
with tf.Graph().as_default():
m = hub.Module(module_spec)
assign_map = {
scope_prefix + name: value for name, value in m.variable_map.items()
}
tf.train.init_from_checkpoint(checkpoint_path, assign_map)
init_op = tf.initializers.global_variables()
with tf.Session() as session:
session.run(init_op)
m.export(export_path, session) | Exports given checkpoint as tfhub module with given spec. |
def gen_smul(src1, src2, dst):
assert src1.size == src2.size
return ReilBuilder.build(ReilMnemonic.SMUL, src1, src2, dst) | Return a SMUL instruction. |
def update(self, path):
self._reset()
self.path = path
self._refresh_synced()
if self.is_synced:
self._refresh_path()
self._refresh_signed()
self._refresh_nvr() | Update the attributes of this CartItem. |
def urlencode(txt):
if isinstance(txt, unicode):
txt = txt.encode()
return urllib.quote_plus(txt) | Url encode a path. |
def bulk_insert(self, rows, return_model=False):
if self.conflict_target or self.conflict_action:
compiler = self._build_insert_compiler(rows)
objs = compiler.execute_sql(return_id=True)
if return_model:
return [self.model(**dict(r, **k)) for r, k in zip(rows, objs)]
else:
return [dict(r, **k) for r, k in zip(rows, objs)]
return super().bulk_create([self.model(**fields) for fields in rows]) | Creates multiple new records in the database.
This allows specifying custom conflict behavior using .on_conflict().
If no special behavior was specified, this uses the normal Django create(..)
Arguments:
rows:
An array of dictionaries, where each dictionary
describes the fields to insert.
return_model (default: False):
If model instances should be returned rather than
just dicts.
Returns:
A list of either the dicts of the rows inserted, including the pk or
the models of the rows inserted with defaults for any fields not specified |
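A hedged usage sketch assuming the django-postgres-extra style manager this appears to come from (model and field names hypothetical):
from psqlextra.types import ConflictAction
rows = [{"name": "alice"}, {"name": "bob"}]
# upsert on the unique "name" column and get model instances back
objs = MyModel.objects.on_conflict(["name"], ConflictAction.UPDATE).bulk_insert(rows, return_model=True)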
def get_all_user(self, **kwargs):
kwargs[] = True
if kwargs.get():
return self.get_all_user_with_http_info(**kwargs)
else:
(data) = self.get_all_user_with_http_info(**kwargs)
return data | Get all users # noqa: E501
Returns all users # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_user(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[UserModel]
If the method is called asynchronously,
returns the request thread. |
def _get_keycachelike(self, keycache, keys, get_adds_dels, parentity, branch, turn, tick, *, forward):
keycache_key = parentity + (branch,)
keycache2 = keycache3 = None
if keycache_key in keycache:
keycache2 = keycache[keycache_key]
if turn in keycache2:
keycache3 = keycache2[turn]
if tick in keycache3:
return keycache3[tick]
if forward:
if turn not in keycache2:
old_turn = keycache2.rev_before(turn)
old_turn_kc = keycache2[turn]
added, deleted = get_adds_dels(
keys[parentity], branch, turn, tick, stoptime=(
branch, old_turn, old_turn_kc.end
)
)
ret = old_turn_kc[old_turn_kc.end].union(added).difference(deleted)
new_turn_kc = WindowDict()
new_turn_kc[tick] = ret
keycache2[turn] = new_turn_kc
return ret
if not keycache3:
keycache3 = keycache2[turn]
if tick not in keycache3:
if keycache3.rev_gettable(tick):
added, deleted = get_adds_dels(
keys[parentity], branch, turn, tick, stoptime=(
branch, turn, keycache3.rev_before(tick)
)
)
ret = keycache3[tick].union(added).difference(deleted)
keycache3[tick] = ret
return ret
else:
turn_before = keycache2.rev_before(turn)
tick_before = keycache2[turn_before].end
keys_before = keycache2[turn_before][tick_before]
added, deleted = get_adds_dels(
keys[parentity], branch, turn, tick, stoptime=(
branch, turn_before, tick_before
)
)
ret = keycache3[tick] = keys_before.union(added).difference(deleted)
return ret
return keycache3[tick]
else:
for (parbranch, parturn, partick) in self.db._iter_parent_btt(branch, turn, tick):
par_kc_key = parentity + (parbranch,)
if par_kc_key in keycache:
kcpkc = keycache[par_kc_key]
if parturn in kcpkc and kcpkc[parturn].rev_gettable(partick):
parkeys = kcpkc[parturn][partick]
break
elif kcpkc.rev_gettable(parturn-1):
partkeys = kcpkc[parturn-1]
parkeys = partkeys[partkeys.end]
break
else:
parkeys = frozenset()
keycache2 = SettingsTurnDict()
added, deleted = get_adds_dels(
keys[parentity], branch, turn, tick, stoptime=(
parbranch, parturn, partick
)
)
ret = parkeys.union(added).difference(deleted)
keycache2[turn] = {tick: ret}
keycache[keycache_key] = keycache2
return ret
ret = frozenset(get_adds_dels(keys[parentity], branch, turn, tick)[0])
if keycache2:
if keycache3:
keycache3[tick] = ret
else:
keycache2[turn] = {tick: ret}
else:
kcc = SettingsTurnDict()
kcc[turn] = {tick: ret}
keycache[keycache_key] = kcc
return ret | Try to retrieve a frozenset representing extant keys.
If I can't, generate one, store it, and return it. |
def _file_not_empty(tmpfile):
if os.path.exists(tmpfile):
return os.stat(tmpfile).st_size != 0
else:
return False | Returns True if the file exists and is not empty; used to check
whether it is time to read the container ID from the cidfile.
:param tmpfile: str, path to file
:return: bool, True if container id is written to the file |
def is_kanji(data):
data_len = len(data)
if not data_len or data_len % 2:
return False
if _PY2:
data = (ord(c) for c in data)
data_iter = iter(data)
for i in range(0, data_len, 2):
code = (next(data_iter) << 8) | next(data_iter)
if not (0x8140 <= code <= 0x9ffc or 0xe040 <= code <= 0xebbf):
return False
return True | \
Returns whether the `data` can be encoded in "kanji" mode.
:param bytes data: The data to check.
:rtype: bool |
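A small check of the byte-pair logic above (Shift JIS encodes CJK characters into the 0x8140-0x9FFC / 0xE040-0xEBBF ranges tested here):
is_kanji(u'漢字'.encode('shift_jis'))  # True: both 2-byte codes fall in the kanji ranges
is_kanji(b'abcd')                      # False: 0x6162 is outside both ranges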
def __build_level(self, previous_level_blocks, level):
current_level_blocks = []
split_dimension = level % len(self.__data[0])
cache_require = (level == self.__levels - 1)
for block in previous_level_blocks:
self.__split_block(block, split_dimension, cache_require, current_level_blocks)
if cache_require:
self.__leafs += current_level_blocks
return current_level_blocks | !
@brief Build new level of directory.
@param[in] previous_level_blocks (list): BANG-blocks on the previous level.
@param[in] level (uint): Level number that should be built.
@return (list) New block on the specified level. |
def quaternion_inverse(quaternion):
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
numpy.negative(q[1:], q[1:])
return q / numpy.dot(q, q) | Return inverse of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_inverse(q0)
>>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
True |
def decode(self, longval, nbits):
val = Fraction(longval, long(1) << nbits)
letters = []
probs_items = [
(char, minval, maxval)
for (char, (minval, maxval)) in self._probs.items()
]
char =
while True:
for (char, minval, maxval) in probs_items:
if minval <= val < maxval:
break
if char == :
break
letters.append(char)
delta = maxval - minval
val = (val - minval) / delta
return .join(letters) | Decode the number to a string using the given statistics.
Parameters
----------
longval : int
The first part of an encoded tuple from encode
nbits : int
The second part of an encoded tuple from encode
Returns
-------
str
The arithmetically decoded text
Example
-------
>>> ac = Arithmetic('the quick brown fox jumped over the lazy dog')
>>> ac.decode(16720586181, 34)
'align' |
def _load_converted_gssha_data_from_lsm(self, gssha_var, lsm_var, load_type, time_step=None):
if in gssha_var:
conversion_factor = self.netcdf_attributes[gssha_var][][load_type]
if gssha_var.startswith() and not isinstance(lsm_var, basestring):
global_radiation_var, diffusive_fraction_var = lsm_var
global_radiation = self._load_lsm_data(global_radiation_var, conversion_factor)
diffusive_fraction = self._load_lsm_data(diffusive_fraction_var)
if gssha_var.endswith("cc"):
diffusive_fraction /= 100.0
self.data = ((1-diffusive_fraction)*global_radiation)
elif gssha_var.startswith() and not isinstance(lsm_var, basestring):
global_radiation_var, diffusive_fraction_var = lsm_var
global_radiation = self._load_lsm_data(global_radiation_var, conversion_factor)
diffusive_fraction = self._load_lsm_data(diffusive_fraction_var)
if gssha_var.endswith("cc"):
diffusive_fraction /= 100
self.data = (diffusive_fraction*global_radiation)
elif isinstance(lsm_var, basestring):
self.data = self._load_lsm_data(lsm_var, self.netcdf_attributes[gssha_var][][load_type])
else:
raise ValueError("Invalid LSM variable ({0}) for GSSHA variable {1}".format(lsm_var, gssha_var))
elif gssha_var == and not isinstance(lsm_var, str):
specific_humidity_var, pressure_var, temperature_var = lsm_var
specific_humidity = self._load_lsm_data(specific_humidity_var)
pressure = self._load_lsm_data(pressure_var)
temperature = self._load_lsm_data(temperature_var)
es = esat(temperature)
self.data = 100 * specific_humidity/((0.622*es)/(pressure-es))
elif gssha_var == :
dew_point_temp_var, temperature_var = lsm_var
dew_point_temp = self._load_lsm_data(dew_point_temp_var)
temperature = self._load_lsm_data(temperature_var)
self.data = 100 * esat(dew_point_temp)/esat(temperature)
elif gssha_var == and not isinstance(lsm_var, str):
u_vector_var, v_vector_var = lsm_var
conversion_factor = self.netcdf_attributes[gssha_var][][load_type]
u_vector = self._load_lsm_data(u_vector_var, conversion_factor)
v_vector = self._load_lsm_data(v_vector_var, conversion_factor)
self.data = (xu.sqrt(u_vector**2 + v_vector**2))
elif in gssha_var and not isinstance(lsm_var, str):
rain_c_var, rain_nc_var = lsm_var
conversion_factor = self.netcdf_attributes[gssha_var][][load_type]
rain_c = self._load_lsm_data(rain_c_var, conversion_factor)
rain_nc = self._load_lsm_data(rain_nc_var, conversion_factor)
self.data = rain_c + rain_nc
else:
self.data = self._load_lsm_data(lsm_var,
self.netcdf_attributes[gssha_var][][load_type],
self.netcdf_attributes[gssha_var].get(),
self.netcdf_attributes[gssha_var].get(),
time_step=time_step)
conversion_function = self.netcdf_attributes[gssha_var].get()
if conversion_function:
self.data.values = self.netcdf_attributes[gssha_var][][load_type](self.data.values)
if in gssha_var:
if in self.data.attrs:
if self.data.attrs[] == :
self.data.values *= 1000
if load_type == or load_type == :
if gssha_var == :
self.data.values = np.lib.pad(self.data.diff(self.lsm_time_dim).values,
((1, 0), (0, 0), (0, 0)),
,
constant_values=0)
if gssha_var == or gssha_var == :
time_step_hours = np.diff(self.xd[self.lsm_time_var].values)[0]/np.timedelta64(1, )
self.data.values /= time_step_hours
gssha_data_var_name = self.netcdf_attributes[gssha_var][]
self.data = self.data.to_dataset(name=gssha_data_var_name)
self.data.rename(
{
self.lsm_lon_dim: ,
self.lsm_lat_dim: ,
self.lsm_lon_var: ,
self.lsm_lat_var:
},
inplace=True
)
self.data.attrs = {: self.xd.lsm.projection.ExportToProj4()}
self.data[gssha_data_var_name].attrs = {
: self.netcdf_attributes[gssha_var][],
: self.netcdf_attributes[gssha_var][],
: self.netcdf_attributes[gssha_var][][load_type],
} | This function loads data from LSM and converts to GSSHA format |
def _get_parselypage(self, body):
parser = ParselyPageParser()
ret = None
try:
parser.feed(body)
except HTMLParseError:
pass
if parser.ppage is None:
return
ret = parser.ppage
if ret:
ret = {parser.original_unescape(k): parser.original_unescape(v)
for k, v in iteritems(ret)}
return ret | extract the parsely-page meta content from a page |
def message_received(self, message):
identifier = message.identifier or + str(message.type)
if identifier in self._outstanding:
outstanding = OutstandingMessage(
self._outstanding[identifier].semaphore, message)
self._outstanding[identifier] = outstanding
self._outstanding[identifier].semaphore.release()
else:
asyncio.ensure_future(self._dispatch(message), loop=self.loop) | Message was received from device. |
def check_roles(self, account, aws_policies, aws_roles):
self.log.debug(.format(account.account_name))
max_session_duration = self.dbconfig.get(, self.ns, 8) * 60 * 60
sess = get_aws_session(account)
iam = sess.client()
account_roles = copy.deepcopy(self.cfg_roles)
if account.account_name in self.git_policies:
for role in self.git_policies[account.account_name]:
if role in account_roles:
account_roles[role][] += list(self.git_policies[account.account_name][role].keys())
for role_name, data in list(account_roles.items()):
if role_name not in aws_roles:
iam.create_role(
Path=,
RoleName=role_name,
AssumeRolePolicyDocument=json.dumps(data[], indent=4),
MaxSessionDuration=max_session_duration
)
self.log.info(.format(account.account_name, role_name))
else:
try:
if aws_roles[role_name][] != max_session_duration:
iam.update_role(
RoleName=aws_roles[role_name][],
MaxSessionDuration=max_session_duration
)
self.log.info(.format(
role_name,
account.account_name,
max_session_duration
))
except ClientError:
self.log.exception(.format(
role_name,
account.account_name
))
aws_role_policies = [x[] for x in iam.list_attached_role_policies(
RoleName=role_name)[]
]
aws_role_inline_policies = iam.list_role_policies(RoleName=role_name)[]
cfg_role_policies = data[]
missing_policies = list(set(cfg_role_policies) - set(aws_role_policies))
extra_policies = list(set(aws_role_policies) - set(cfg_role_policies))
if aws_role_inline_policies:
self.log.info(.format(
role_name,
account.account_name,
.join(aws_role_inline_policies)
))
if self.dbconfig.get(, self.ns, False) and self.manage_roles:
for policy in aws_role_inline_policies:
iam.delete_role_policy(RoleName=role_name, PolicyName=policy)
auditlog(
event=,
actor=self.ns,
data={
: account.account_name,
: role_name,
: policy
}
)
if missing_policies:
self.log.info(.format(
role_name,
account.account_name,
.join(missing_policies)
))
if self.manage_roles:
for policy in missing_policies:
iam.attach_role_policy(RoleName=role_name, PolicyArn=aws_policies[policy][])
auditlog(
event=,
actor=self.ns,
data={
: account.account_name,
: role_name,
: aws_policies[policy][]
}
)
if extra_policies:
self.log.info(.format(
role_name,
account.account_name,
.join(extra_policies)
))
for policy in extra_policies:
if policy in aws_policies:
polArn = aws_policies[policy][]
elif policy in self.aws_managed_policies:
polArn = self.aws_managed_policies[policy][]
else:
polArn = None
self.log.info(.format(
role_name,
account.account_name,
policy
))
if self.manage_roles and polArn:
iam.detach_role_policy(RoleName=role_name, PolicyArn=polArn)
auditlog(
event=,
actor=self.ns,
data={
: account.account_name,
: role_name,
: polArn
}
) | Iterate through the roles of a specific account and create or update the roles if they're missing or
do not match the roles from Git.
Args:
account (:obj:`Account`): The account to check roles on
aws_policies (:obj:`dict` of `str`: `dict`): A dictionary containing all the policies for the specific
account
aws_roles (:obj:`dict` of `str`: `dict`): A dictionary containing all the roles for the specific account
Returns:
`None` |
def process_image(self, image, image_format, save_kwargs={}):
imagefile = BytesIO()
inv_image = ImageOps.invert(image)
inv_image.save(
imagefile,
**save_kwargs
)
return imagefile | Return a BytesIO instance of `image` with inverted colors. |
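A standalone sketch of the same inversion outside the filter class (file name and save kwargs hypothetical; note the format normally arrives via save_kwargs, since image_format is not passed to save above):
from io import BytesIO
from PIL import Image, ImageOps
img = Image.open("photo.jpg").convert("RGB")
buf = BytesIO()
ImageOps.invert(img).save(buf, format="JPEG", quality=85)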
def jd2dt(jd):
n = int(round(float(jd)))
a = n + 32044
b = (4*a + 3)//146097
c = a - (146097*b)//4
d = (4*c + 3)//1461
e = c - (1461*d)//4
m = (5*e + 2)//153
day = e + 1 - (153*m + 2)//5
month = m + 3 - 12*(m//10)
year = 100*b + d - 4800 + m//10
tfrac = 0.5 + float(jd) - n
tfrac_s = 86400.0 * tfrac
minfrac, hours = np.modf(tfrac_s / 3600.)
secfrac, minutes = np.modf(minfrac * 60.)
microsec, seconds = np.modf(secfrac * 60.)
return datetime(year, month, day, int(hours), int(minutes), int(seconds), int(microsec*1E6)) | Convert julian date to datetime |
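A quick sanity check using the well-known J2000 epoch (JD 2451545.0 falls at 2000-01-01 12:00 UT):
jd2dt(2451545.0)  # datetime(2000, 1, 1, 12, 0, 0)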
def register_regex_entity(self, regex_str, domain=0):
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_regex_entity(regex_str=regex_str) | A regular expression making use of python named group expressions.
Example: (?P<Artist>.*)
Args:
regex_str(str): a string representing a regular expression as defined above
domain(str): a string representing the domain you wish to add the entity to |
def find_DST():
if SYS_NAME == "Windows":
return os.path.join(site.getsitepackages()[1], PKG_NAME)
elif SYS_NAME in ["Darwin", "Linux"]:
return os.path.join(site.getsitepackages()[0], PKG_NAME) | Find where this package should be installed to. |
def artist_update(self, artist_id, name=None, other_names_comma=None,
group_name=None, url_string=None, body=None):
params = {
: name,
: other_names_comma,
: group_name,
: url_string,
: body
}
return self .get(.format(artist_id), params,
method=, auth=True) | Function to update artists (Requires login) (UNTESTED).
Parameters:
artist_id (str):
name (str): Artist name.
other_names_comma (str): List of alternative names for this
artist, comma delimited.
group_name (str): The name of the group this artist belongs to.
url_string (str): List of URLs associated with this artist,
whitespace or newline delimited.
body (str): DText that will be used to create/update a wiki entry
at the same time. |
def get_attachment(self, ticket_id, attachment_id):
msg = self.__request(.format(str(ticket_id), str(attachment_id)),
text_response=False)
msg = msg.split(b)
if (len(msg) > 2) and (self.RE_PATTERNS[].match(msg[2]) or self.RE_PATTERNS[].match(msg[2])):
return None
msg = msg[2:]
head_matching = [i for i, m in enumerate(msg) if self.RE_PATTERNS[].match(m)]
head_id = head_matching[0] if head_matching else None
if not head_id:
raise UnexpectedMessageFormat()
msg[head_id] = re.sub(b, r, msg[head_id])
cont_matching = [i for i, m in enumerate(msg) if self.RE_PATTERNS[].match(m)]
cont_id = cont_matching[0] if cont_matching else None
if not cont_matching:
raise UnexpectedMessageFormat()
pairs = {}
for i in range(head_id):
if b in msg[i]:
header, content = msg[i].split(b, 1)
pairs[header.strip().decode()] = content.strip().decode()
headers = {}
for i in range(head_id, cont_id):
if b in msg[i]:
header, content = msg[i].split(b, 1)
headers[header.strip().decode()] = content.strip().decode()
pairs[] = headers
content = msg[cont_id][9:]
for i in range(cont_id + 1, len(msg)):
if msg[i][:9] == (b * 9):
content += b + msg[i][9:]
pairs[] = content
return pairs | Get attachment.
:param ticket_id: ID of ticket
:param attachment_id: ID of attachment for obtain
:returns: Attachment as dictionary with these keys:
* Transaction
* ContentType
* Parent
* Creator
* Created
* Filename
* Content (bytes type)
* Headers
* MessageId
* ContentEncoding
* id
* Subject
All these fields are strings, just 'Headers' holds another
dictionary with attachment headers as strings e.g.:
* Delivered-To
* From
* Return-Path
* Content-Length
* To
* X-Seznam-User
* X-QM-Mark
* Domainkey-Signature
* RT-Message-ID
* X-RT-Incoming-Encryption
* X-Original-To
* Message-ID
* X-Spam-Status
* In-Reply-To
* Date
* Received
* X-Country
* X-Spam-Checker-Version
* X-Abuse
* MIME-Version
* Content-Type
* Subject
.. warning:: Content-Length parameter is set after opening
ticket in web interface!
Set of headers available depends on mailservers sending
emails not on Request Tracker!
Returns None if ticket or attachment does not exist.
:raises UnexpectedMessageFormat: Unexpected format of returned message. |
def load():
with within_proj_dir():
if os.path.exists():
load_yaml_config()
if os.path.exists():
load_py_config() | Load configuration from file.
This will search the directory structure upwards to find the project root
(directory containing ``pelconf.py`` file). Once found it will import the
config file which should initialize all the configuration (using
`peltak.core.conf.init()` function).
You can also have both yaml (configuration) and python (custom commands)
living together. Just remember that calling `conf.init()` will overwrite
the config defined in YAML. |
def photbw(self, floor=0):
mywaveunits = self.waveunits.name
self.convert()
wave = self.wave
thru = self.throughput
self.convert(mywaveunits)
num = self.trapezoidIntegration(wave, thru * N.log(wave) / wave)
den = self.trapezoidIntegration(wave, thru / wave)
if num == 0 or den == 0:
return 0.0
avg_wave = N.exp(num/den)
if floor != 0:
idx = N.where(thru >= floor)
wave = wave[idx]
thru = thru[idx]
integrand = thru * N.log(wave / avg_wave)**2 / wave
num = self.trapezoidIntegration(wave, integrand)
if num == 0 or den == 0:
return 0.0
return avg_wave * N.sqrt(num/den) | Calculate :ref:`pysynphot-formula-bandw`.
.. note:: For backward-compatibility with IRAF STSDAS SYNPHOT only.
Parameters
----------
floor : float
Same as :meth:`rmswidth`.
Returns
-------
ans : float
RMS band width (deprecated). |
def potential_purviews(self, direction, mechanism, purviews=False):
system = self.system[direction]
return [
purview for purview in system.potential_purviews(
direction, mechanism, purviews)
if set(purview).issubset(self.purview_indices(direction))
] | Return all purviews that could belong to the |MIC|/|MIE|.
Filters out trivially-reducible purviews.
Args:
direction (str): Either |CAUSE| or |EFFECT|.
mechanism (tuple[int]): The mechanism of interest.
Keyword Args:
purviews (tuple[int]): Optional subset of purviews of interest. |
def update_cache_settings(self, service_id, version_number, name_key, **kwargs):
body = self._formdata(kwargs, FastlyCacheSettings.FIELDS)
content = self._fetch("/service/%s/version/%d/cache_settings/%s" % (service_id, version_number, name_key), method="PUT", body=body)
return FastlyCacheSettings(self, content) | Update a specific cache settings object. |
def user_present(name, uid, password, channel=14, callback=False,
link_auth=True, ipmi_msg=True, privilege_level=, **kwargs):
ret = {: name, : False, : , : {}}
org_user = __salt__[](uid=uid, channel=channel, **kwargs)
change = False
if org_user[][] != callback:
change = True
if org_user[][] != link_auth:
change = True
if org_user[][] != ipmi_msg:
change = True
if org_user[][] != privilege_level:
change = True
if __salt__[](uid, mode=,
password=password, **kwargs) is False:
change = True
if change is False:
ret[] = True
ret[] =
return ret
if __opts__[]:
ret[] =
ret[] = None
ret[] = {: org_user, : name}
return ret
__salt__[](uid,
name,
password,
channel,
callback,
link_auth,
ipmi_msg,
privilege_level,
**kwargs)
current_user = __salt__[](uid=uid, channel=channel, **kwargs)
ret[] =
ret[] = True
ret[] = {: org_user, : current_user}
return ret | Ensure IPMI user and user privileges.
name
name of user (limit 16 bytes)
uid
user id number (1 to 7)
password
user password (limit 16 bytes)
channel
ipmi channel defaults to 14 for auto
callback
User Restricted to Callback
False = User Privilege Limit is determined by the User Privilege Limit
parameter privilege_level, for both callback and non-callback connections.
True = User Privilege Limit is determined by the privilege_level
parameter for callback connections, but is restricted to Callback
level for non-callback connections. Thus, a user can only initiate
a Callback when they 'call in' to the BMC, but once the callback
connection has been made, the user could potentially establish a
session as an Operator.
link_auth
User Link authentication
True/False
user name and password information will be used for link
authentication, e.g. PPP CHAP) for the given channel. Link
authentication itself is a global setting for the channel and is
enabled/disabled via the serial/modem configuration parameters.
ipmi_msg
User IPMI Messaging
True/False
user name and password information will be used for IPMI
Messaging. In this case, 'IPMI Messaging' refers to the ability to
execute generic IPMI commands that are not associated with a
particular payload type. For example, if IPMI Messaging is disabled for
a user, but that user is enabled for activating the SOL
payload type, then IPMI commands associated with SOL and session
management, such as Get SOL Configuration Parameters and Close Session
are available, but generic IPMI commands such as Get SEL Time are
unavailable.)
ipmi_msg
privilege_level
* callback
* user
* operator
* administrator
* proprietary
* no_access
kwargs
- api_host=localhost
- api_user=admin
- api_pass=
- api_port=623
- api_kg=None |
def properties(self):
_type = type(self)
return [_property for _property in dir(_type) if self._is_property(_property)] | Returns:
(list[str]) List of public properties |
def qualified_note_rate(pianoroll, threshold=2):
_validate_pianoroll(pianoroll)
if np.issubdtype(pianoroll.dtype, np.bool_):
pianoroll = pianoroll.astype(np.uint8)
padded = np.pad(pianoroll, ((1, 1), (0, 0)), 'constant')
diff = np.diff(padded, axis=0).reshape(-1)
onsets = (diff > 0).nonzero()[0]
offsets = (diff < 0).nonzero()[0]
n_qualified_notes = np.count_nonzero(offsets - onsets >= threshold)
return n_qualified_notes / len(onsets) | Return the ratio of the number of the qualified notes (notes longer than
`threshold` (in time step)) to the total number of notes in a pianoroll. |
def main (args):
if len(args) != 2:
print(main.__doc__)
sys.exit(1)
doc = libsedml.readSedML(args[1]);
if ( doc.getErrorLog().getNumFailsWithSeverity(libsedml.LIBSEDML_SEV_ERROR) > 0):
print doc.getErrorLog().toString();
sys.exit(2);
print('The document has {0} simulation(s).'.format(doc.getNumSimulations()));
for i in range(0, doc.getNumSimulations()):
current = doc.getSimulation(i);
if (current.getTypeCode() == libsedml.SEDML_SIMULATION_UNIFORMTIMECOURSE):
tc = current;
kisaoid="none"
if tc.isSetAlgorithm():
kisaoid=tc.getAlgorithm().getKisaoID()
print "\tTimecourse id=", tc.getId()," start=",tc.getOutputStartTime()," end=",tc.getOutputEndTime()," numPoints=",tc.getNumberOfPoints()," kisao=",kisaoid,"\n";
else:
print "\tEncountered unknown simulation. ",current.getId(),"\n";
print "\n"
print "The document has ",doc.getNumModels() , " model(s)." , "\n";
for i in range(0,doc.getNumModels()):
current = doc.getModel(i);
print "\tModel id=" , current.getId() , " language=" , current.getLanguage() , " source=" , current.getSource() , " numChanges=" , current.getNumChanges() , "\n";
print "\n";
print "The document has " , doc.getNumTasks() , " task(s)." , "\n";
for i in range(0,doc.getNumTasks()):
current = doc.getTask(i);
print "\tTask id=" , current.getId() , " model=" , current.getModelReference() , " sim=" , current.getSimulationReference() , "\n";
print "\n";
print "The document has " , doc.getNumDataGenerators() , " datagenerators(s)." , "\n";
for i in range( 0, doc.getNumDataGenerators()):
current = doc.getDataGenerator(i);
print "\tDG id=" , current.getId() , " math=" , libsedml.formulaToString(current.getMath()) , "\n";
print "\n";
print "The document has " , doc.getNumOutputs() , " output(s)." , "\n";
for i in range (0, doc.getNumOutputs()):
current = doc.getOutput(i);
tc = current.getTypeCode();
if tc == libsedml.SEDML_OUTPUT_REPORT:
r = (current);
print "\tReport id=" , current.getId() , " numDataSets=" , r.getNumDataSets() , "\n";
elif tc == libsedml.SEDML_OUTPUT_PLOT2D:
p = (current);
print "\tPlot2d id=" , current.getId() , " numCurves=" , p.getNumCurves() , "\n";
elif tc == libsedml.SEDML_OUTPUT_PLOT3D:
p = (current);
print "\tPlot3d id=" , current.getId() , " numSurfaces=" , p.getNumSurfaces() , "\n";
else:
print "\tEncountered unknown output " , current.getId() , "\n"; | Usage: print_sedml input-filename |
def convert(self, values, nan_rep, encoding, errors):
    if values.dtype.fields is not None:
        values = values[self.cname]
    values = _maybe_convert(values, self.kind, encoding, errors)
    kwargs = dict()
    if self.freq is not None:
        kwargs['freq'] = _ensure_decoded(self.freq)
    if self.index_name is not None:
        kwargs['name'] = _ensure_decoded(self.index_name)
    try:
        self.values = Index(values, **kwargs)
    except Exception:
        if 'freq' in kwargs:
            kwargs['freq'] = None
        self.values = Index(values, **kwargs)
    self.values = _set_tz(self.values, self.tz)
    return self | set the values from this selection: take = take ownership |
def _set_sip_ipv4_address(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={: u}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u, u), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "brocade-bgp:sip-ipv4-address",
: ,
})
self.__sip_ipv4_address = t
if hasattr(self, ):
self._set() | Setter method for sip_ipv4_address, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ips/neighbor_addr/update_source/sip_ipv4_address (sip-ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_sip_ipv4_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sip_ipv4_address() directly. |
def fill_translation_cache(instance):
if hasattr(instance, '_translation_cache'):
return
instance._translation_cache = {}
if not instance.pk:
return
for language_code in get_language_code_list():
field_alias = get_translated_field_alias(, language_code)
if getattr(instance, field_alias, None) is not None:
field_names = [f.attname for f in instance._meta.translation_model._meta.fields]
field_data = {}
for fname in field_names:
field_data[fname] = getattr(instance,
get_translated_field_alias(fname, language_code))
translation = instance._meta.translation_model(**field_data)
instance._translation_cache[language_code] = translation
if len(instance._translation_cache.keys()) == 0:
for translation in instance.translations.all():
instance._translation_cache[translation.language_code] = translation | Fill the translation cache using information received in the
instance objects as extra fields.
You can not do this in post_init because the extra fields are
assigned by QuerySet.iterator after model initialization. |
def lasts(iterable, items=1, default=None):
last_items = deque(iterable, maxlen=items)
for _ in range(items - len(last_items)):
yield default
for y in last_items:
yield y | Lazily return the last x items from this iterable or default. |
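For example (values made up):
from collections import deque  # required by lasts()

print(list(lasts([1, 2, 3, 4], items=2)))        # [3, 4]
print(list(lasts([7], items=3, default=None)))   # [None, None, 7]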
def all_subclasses(cls):
for subcls in cls.__subclasses__():
yield subcls
for subsubcls in all_subclasses(subcls):
yield subsubcls | Generator yielding all subclasses of `cls` recursively |
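For example:
class Base: pass
class Child(Base): pass
class GrandChild(Child): pass

print([c.__name__ for c in all_subclasses(Base)])   # ['Child', 'GrandChild']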
def login_defs():
    uid_min = None
    uid_max = None
    login_defs_path = '/etc/login.defs'
    if os.path.exists(login_defs_path):
        with io.open(text_type(login_defs_path), encoding=text_type('utf-8')) as log_defs_file:
            login_data = log_defs_file.readlines()
        for line in login_data:
            if PY3:
                line = str(line)
            if PY2:
                line = line.encode(text_type('utf-8'))
            if line[:7] == text_type('UID_MIN'):
                uid_min = int(line.split()[1].strip())
            if line[:7] == text_type('UID_MAX'):
                uid_max = int(line.split()[1].strip())
    if not uid_min:
        uid_min = DEFAULT_UID_MIN
    if not uid_max:
        uid_max = DEFAULT_UID_MAX
    return uid_min, uid_max | Discover the minimum and maximum UID number. |
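A small usage sketch: picking the first unused UID inside the discovered range (assumes a Unix-like system; the pwd lookup is only for illustration).
import pwd

uid_min, uid_max = login_defs()
taken = {entry.pw_uid for entry in pwd.getpwall()}
next_uid = next(uid for uid in range(uid_min, uid_max + 1) if uid not in taken)
print("UID range %d-%d, next free UID: %d" % (uid_min, uid_max, next_uid))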
def _dump_to_json(self, with_stats):
import json
trees_json_str = tc.extensions._xgboost_dump_model(self.__proxy__, with_stats=with_stats, format='json')
trees_json = [json.loads(x) for x in trees_json_str]
import struct
import sys
def hexadecimal_to_float(s):
    if sys.version_info[0] >= 3:
        return struct.unpack('<f', bytes.fromhex(s))[0]
    else:
        return struct.unpack('<f', s.decode('hex'))[0]
for d in trees_json:
nodes = d[]
for n in nodes:
if in n:
n[] = hexadecimal_to_float(n[])
return trees_json | Dump the models into a list of strings. Each
string is a text representation of a tree.
Parameters
----------
with_stats : bool
If true, include node statistics in the output.
Returns
-------
out : SFrame
A table with two columns: feature, count,
ordered by 'count' in descending order. |
def set_memory_cache(self, results, key=None):
key = self.model.hash if key is None else key
self.memory_cache[key] = results | Store result in memory cache with key matching model state. |
def _update_trial_queue(self, blocking=False, timeout=600):
trials = self._search_alg.next_trials()
if blocking and not trials:
start = time.time()
while (not trials and not self.is_finished()
and time.time() - start < timeout):
logger.info("Blocking for next trial...")
trials = self._search_alg.next_trials()
time.sleep(1)
for trial in trials:
self.add_trial(trial) | Adds next trials to queue if possible.
Note that the timeout is currently unexposed to the user.
Args:
blocking (bool): Blocks until either a trial is available
or is_finished (timeout or search algorithm finishes).
timeout (int): Seconds before blocking times out. |
def racks(self):
if not self.__racks:
self.__racks = Racks(self.__connection)
return self.__racks | Gets the Racks API client.
Returns:
Racks: |
def get_yaml_files_at_env_root(self):
    yaml_files = glob.glob(
        os.path.join(self.env_root, '*.yaml')
    )
    yml_files = glob.glob(
        os.path.join(self.env_root, '*.yml')
    )
    return yaml_files + yml_files | Return list of yaml files in env_root. |
def hide_button_span(self, mode, file=sys.stdout):
file.write("\033[83;%iu" % mode)
yield
file.write("\033[83;0u") | :param int mode: 1 or 2
:param io.TextIOBase|io.StringIO file: |
def _ns_var(
py_ns_var: str = _NS_VAR, lisp_ns_var: str = LISP_NS_VAR, lisp_ns_ns: str = CORE_NS
) -> ast.Assign:
return ast.Assign(
targets=[ast.Name(id=py_ns_var, ctx=ast.Store())],
value=ast.Call(
func=_FIND_VAR_FN_NAME,
args=[
ast.Call(
func=_NEW_SYM_FN_NAME,
args=[ast.Str(lisp_ns_var)],
keywords=[ast.keyword(arg="ns", value=ast.Str(lisp_ns_ns))],
)
],
keywords=[],
),
) | Assign a Python variable named `ns_var` to the value of the current
namespace. |
def reciprocal_rank(
model,
test_interactions,
train_interactions=None,
user_features=None,
item_features=None,
preserve_rows=False,
num_threads=1,
check_intersections=True,
):
if num_threads < 1:
raise ValueError("Number of threads must be 1 or larger.")
ranks = model.predict_rank(
test_interactions,
train_interactions=train_interactions,
user_features=user_features,
item_features=item_features,
num_threads=num_threads,
check_intersections=check_intersections,
)
ranks.data = 1.0 / (ranks.data + 1.0)
ranks = np.squeeze(np.array(ranks.max(axis=1).todense()))
if not preserve_rows:
ranks = ranks[test_interactions.getnnz(axis=1) > 0]
return ranks | Measure the reciprocal rank metric for a model: 1 / the rank of the highest
ranked positive example. A perfect score is 1.0.
Parameters
----------
model: LightFM instance
the fitted model to be evaluated
test_interactions: np.float32 csr_matrix of shape [n_users, n_items]
Non-zero entries representing known positives in the evaluation set.
train_interactions: np.float32 csr_matrix of shape [n_users, n_items], optional
Non-zero entries representing known positives in the train set. These
will be omitted from the score calculations to avoid re-recommending
known positives.
user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional
Each row contains that user's weights over features.
item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional
Each row contains that item's weights over features.
preserve_rows: boolean, optional
When False (default), the number of rows in the output will be equal
to the number of users with interactions in the evaluation set.
When True, the number of rows in the output will be equal to the
number of users.
num_threads: int, optional
Number of parallel computation threads to use. Should
not be higher than the number of physical cores.
check_intersections: bool, optional, True by default,
Only relevant when train_interactions are supplied.
A flag that signals whether the test and train matrices should be checked
for intersections to prevent optimistic ranks / wrong evaluation / bad data split.
Returns
-------
np.array of shape [n_users with interactions or n_users,]
Numpy array containing reciprocal rank scores for each user.
If there are no interactions for a given user the returned value will
be 0.0. |
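A minimal evaluation sketch, assuming this is the function exposed as lightfm.evaluation.reciprocal_rank and that lightfm, numpy, and scipy are installed; the interaction matrix is random toy data, so the score itself is meaningless.
import numpy as np
from scipy.sparse import csr_matrix
from lightfm import LightFM
from lightfm.evaluation import reciprocal_rank

rng = np.random.default_rng(0)
interactions = csr_matrix(rng.integers(0, 2, size=(50, 30)), dtype=np.float32)

model = LightFM(loss="warp", random_state=0)
model.fit(interactions, epochs=5)

# One score per user with at least one interaction; 1.0 means the top-ranked
# item is a known positive for that user.
scores = reciprocal_rank(model, interactions)
print(scores.mean())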
def sample(self, size=(), rule="R", antithetic=None):
size_ = numpy.prod(size, dtype=int)
dim = len(self)
if dim > 1:
if isinstance(size, (tuple, list, numpy.ndarray)):
shape = (dim,) + tuple(size)
else:
shape = (dim, size)
else:
shape = size
from . import sampler
out = sampler.generator.generate_samples(
order=size_, domain=self, rule=rule, antithetic=antithetic)
try:
out = out.reshape(shape)
except:
if len(self) == 1:
out = out.flatten()
else:
out = out.reshape(dim, int(out.size/dim))
return out | Create pseudo-random generated samples.
By default, the samples are created using standard (pseudo-)random
samples. However, if needed, the samples can also be created by either
low-discrepancy sequences, and/or variance reduction techniques.
Changing the sampling scheme, use the following ``rule`` flag:
+-------+-------------------------------------------------+
| key | Description |
+=======+=================================================+
| ``C`` | Roots of the first order Chebyshev polynomials. |
+-------+-------------------------------------------------+
| ``NC``| Chebyshev nodes adjusted to ensure nested. |
+-------+-------------------------------------------------+
| ``K`` | Korobov lattice. |
+-------+-------------------------------------------------+
| ``R`` | Classical (Pseudo-)Random samples. |
+-------+-------------------------------------------------+
| ``RG``| Regular spaced grid. |
+-------+-------------------------------------------------+
| ``NG``| Nested regular spaced grid. |
+-------+-------------------------------------------------+
| ``L`` | Latin hypercube samples. |
+-------+-------------------------------------------------+
| ``S`` | Sobol low-discrepancy sequence. |
+-------+-------------------------------------------------+
| ``H`` | Halton low-discrepancy sequence. |
+-------+-------------------------------------------------+
| ``M`` | Hammersley low-discrepancy sequence. |
+-------+-------------------------------------------------+
All samples are created on the ``[0, 1]``-hypercube, which then is
mapped into the domain of the distribution using the inverse Rosenblatt
transformation.
Args:
size (numpy.ndarray):
The size of the samples to generate.
rule (str):
Indicator defining the sampling scheme.
antithetic (bool, numpy.ndarray):
If provided, will be used to setup antithetic variables. If
array, defines the axes to mirror.
Returns:
(numpy.ndarray):
Random samples with shape ``(len(self),)+self.shape``. |
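A usage sketch, assuming this is the sample method of a chaospy distribution (the sampler module used above matches chaospy); the joint distribution and sample sizes are arbitrary, and the rule flags are the ones listed in the table.
import chaospy

joint = chaospy.J(chaospy.Uniform(0, 1), chaospy.Normal(0, 1))

pseudo = joint.sample(size=1000, rule="R")   # classical pseudo-random draws
halton = joint.sample(size=1000, rule="H")   # Halton low-discrepancy sequence
print(pseudo.shape, halton.shape)            # (2, 1000) (2, 1000)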
def add_data(self, request, pk=None):
resp = super().add_data(request, pk)
entity = self.get_object()
for collection in entity.collections.all():
collection.data.add(*request.data['ids'])
return resp | Add data to Entity and its collection. |
def all(self, data={}, **kwargs):
return super(Subscription, self).all(data, **kwargs) | Fetch all Subscription entities
Returns:
Dictionary of Subscription data |
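A usage sketch assuming this is the Subscription resource of the razorpay Python SDK; the key, secret, and query parameters are placeholders.
import razorpay

client = razorpay.Client(auth=("rzp_test_xxxxxxxx", "your_secret"))
subscriptions = client.subscription.all({"count": 10})
print(subscriptions["count"], [s["id"] for s in subscriptions["items"]])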
def set_notebook(note_store, my_notebook, notebook_id):
if notebook_id == 0:
new_notebook = Types.Notebook()
new_notebook.name = my_notebook
new_notebook.defaultNotebook = False
notebook_id = note_store.createNotebook(new_notebook).guid
return notebook_id | create a notebook |
def get_assessment_form_for_create(self, assessment_record_types):
for arg in assessment_record_types:
if not isinstance(arg, ABCType):
raise errors.InvalidArgument()
if assessment_record_types == []:
obj_form = objects.AssessmentForm(
bank_id=self._catalog_id,
runtime=self._runtime,
effective_agent_id=self.get_effective_agent_id(),
proxy=self._proxy)
else:
obj_form = objects.AssessmentForm(
bank_id=self._catalog_id,
record_types=assessment_record_types,
runtime=self._runtime,
effective_agent_id=self.get_effective_agent_id(),
proxy=self._proxy)
self._forms[obj_form.get_id().get_identifier()] = not CREATED
return obj_form | Gets the assessment form for creating new assessments.
A new form should be requested for each create transaction.
arg: assessment_record_types (osid.type.Type[]): array of
assessment record types to be included in the create
operation or an empty list if none
return: (osid.assessment.AssessmentForm) - the assessment form
raise: NullArgument - ``assessment_record_types`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - unable to get form for requested record
types
*compliance: mandatory -- This method must be implemented.* |