code (string, lengths 51–2.38k) | docstring (string, lengths 4–15.2k)
---|---|
def load_distant(self):
print("Loading distant Zotero data...")
self._references = self.get_references()
self.reference_types = self.get_reference_types()
self.reference_templates = self.get_reference_templates(self.reference_types)
print("Distant Zotero data loaded.")
self.cache()
|
Load the distant Zotero data.
|
def extract_audio(filename, channels=1, rate=16000):
temp = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)
if not os.path.isfile(filename):
print("The given file does not exist: {}".format(filename))
raise Exception("Invalid filepath: {}".format(filename))
if not which("ffmpeg"):
print("ffmpeg: Executable not found on machine.")
raise Exception("Dependency not found: ffmpeg")
command = ["ffmpeg", "-y", "-i", filename,
"-ac", str(channels), "-ar", str(rate),
"-loglevel", "error", temp.name]
use_shell = True if os.name == "nt" else False
subprocess.check_output(command, stdin=open(os.devnull), shell=use_shell)
return temp.name, rate
|
Extract audio from an input file to a temporary WAV file.
|
def on_any_event(self, event):
for delegate in self.delegates:
if hasattr(delegate, "on_any_event"):
delegate.on_any_event(event)
|
Forward any filesystem event to every delegate that implements ``on_any_event``.
|
def processing_blocks(self):
sbi_ids = Subarray(self.get_name()).sbi_ids
pbs = []
for sbi_id in sbi_ids:
sbi = SchedulingBlockInstance(sbi_id)
pbs.append(sbi.processing_block_ids)
return 'PB', pbs
|
Return list of PBs associated with the subarray.
<http://www.esrf.eu/computing/cs/tango/pytango/v920/server_api/server.html#PyTango.server.pipe>
|
def write (self, s):
if isinstance(s, bytes):
s = self._decode(s)
for c in s:
self.process(c)
|
Process text, writing it to the virtual screen while handling
ANSI escape codes.
|
def _get_service_config(self):
if not os.path.exists(self.config_path):
try:
os.makedirs(os.path.dirname(self.config_path))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
return {}
with open(self.config_path, 'r') as data:
return json.load(data)
|
Reads in config file of UAA credential information
or generates one as a side-effect if not yet
initialized.
|
def transform_e1e2(x, y, e1, e2, center_x=0, center_y=0):
x_shift = x - center_x
y_shift = y - center_y
x_ = (1-e1) * x_shift - e2 * y_shift
y_ = -e2 * x_shift + (1 + e1) * y_shift
det = np.sqrt((1-e1)*(1+e1) + e2**2)
return x_ / det, y_ / det
|
Maps the coordinates x, y with eccentricities e1, e2 into a new elliptical coordinate system.
:param x: x-coordinate
:param y: y-coordinate
:param e1: eccentricity component
:param e2: eccentricity component
:param center_x: x-coordinate of the center
:param center_y: y-coordinate of the center
:return: transformed coordinates
|
def parse_string(self):
word = ''
if self.prior_delim:
delim = self.prior_delim
self.prior_delim = None
else:
delim = self.char
word += self.char
self.update_chars()
while True:
if self.char == delim:
self.update_chars()
if self.char == delim:
word += 2 * delim
self.update_chars()
else:
word += delim
break
elif self.char == '\n':
self.prior_delim = delim
break
else:
word += self.char
self.update_chars()
return word
|
Tokenize a Fortran string.
|
def prepare_read(data, method='readlines', mode='r'):
if hasattr(data, 'readlines'):
data = getattr(data, method)()
elif isinstance(data, list):
if method == 'read':
return ''.join(data)
elif isinstance(data, basestring):
data = getattr(open(data, mode), method)()
else:
raise TypeError('Unable to handle data of type %r' % type(data))
return data
|
Prepare various input types for parsing.
Args:
data (iter): Data to read
method (str): Method to process data with
mode (str): Custom mode to process with, if data is a file
Returns:
list: List suitable for parsing
Raises:
TypeError: Invalid value for data
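Example (a minimal sketch, assuming the function is in scope):
>>> prepare_read(['line one\n', 'line two\n'])
['line one\n', 'line two\n']
>>> prepare_read(['a', 'b'], method='read')
'ab'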
|
def set_log_block_num(self, bl):
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('CL record not yet initialized!')
self.child_log_block_num = bl
|
Set the logical block number for the child.
Parameters:
bl - Logical block number of the child.
Returns:
Nothing.
|
def get_access_token(self):
if self.is_access_token_expired():
if is_debug_enabled():
debug('requesting new access_token')
token = get_access_token(username=self.username,
password=self.password,
client_id=self.client_id,
client_secret=self.client_secret,
app_url=self.app_url)
self.expires_at = time.time() + token['expires_in']/2
self.access_token = token['access_token']
return self.access_token
|
get a valid access token
|
def match(self, device):
return all(match_value(getattr(device, k), v)
for k, v in self._match.items())
|
Check if the device object matches this filter.
|
def threaded_start(self, no_init=False):
thread = Thread(target=self.init_connections, kwargs={
'no_init': no_init})
thread.setDaemon(True)
thread.start()
thread.join()
|
Spawns a worker thread to set up the zookeeper connection
|
def write(self, destination, filename, template_name, **kwargs):
template = self.env.get_template(template_name)
content = template.render(kwargs)
super(TemplateFileWriter, self).write(destination=destination, filename=filename, content=content)
|
Write a file according to the template name
Args:
destination (string): the destination location
filename (string): the filename that will be written
template_name (string): the name of the template
kwargs (dict): all attribute that will be passed to the template
|
def _effective_perm_list_from_iter(self, perm_iter):
highest_perm_str = self._highest_perm_from_iter(perm_iter)
return (
self._equal_or_lower_perm_list(highest_perm_str)
if highest_perm_str is not None
else None
)
|
Return list of effective permissions for the highest permission in
``perm_iter``, ordered lower to higher, or None if ``perm_iter`` is empty.
|
def _get_block_publisher(self, state_hash):
state_view = self._state_view_factory.create_view(state_hash)
try:
class BatchPublisher:
def send(self, transactions):
raise InvalidGenesisConsensusError(
'Consensus cannot send transactions during genesis.')
consensus = ConsensusFactory.get_configured_consensus_module(
NULL_BLOCK_IDENTIFIER,
state_view)
return consensus.BlockPublisher(
BlockCache(self._block_store),
state_view_factory=self._state_view_factory,
batch_publisher=BatchPublisher(),
data_dir=self._data_dir,
config_dir=self._config_dir,
validator_id=self._identity_signer.get_public_key().as_hex())
except UnknownConsensusModuleError as e:
raise InvalidGenesisStateError(e)
|
Returns the block publisher based on the consensus module set by the
"sawtooth_settings" transaction family.
Args:
state_hash (str): The current state root hash for reading settings.
Raises:
InvalidGenesisStateError: if any errors occur getting the
BlockPublisher.
|
def unlock(self, password: str):
if self.locked:
self._privkey = decode_keyfile_json(self.keystore, password.encode('UTF-8'))
self.locked = False
self._fill_address()
|
Unlock the account with a password.
If the account is already unlocked, nothing happens, even if the password is wrong.
Raises:
ValueError: (originating in ethereum.keys) if the password is wrong
(and the account is locked)
|
def hybrid_meco_frequency(m1, m2, chi1, chi2, qm1=None, qm2=None):
if qm1 is None:
qm1 = 1
if qm2 is None:
qm2 = 1
return velocity_to_frequency(hybrid_meco_velocity(m1, m2, chi1, chi2, qm1, qm2), m1 + m2)
|
Return the frequency of the hybrid MECO
Parameters
----------
m1 : float
Mass of the primary object in solar masses.
m2 : float
Mass of the secondary object in solar masses.
chi1: float
Dimensionless spin of the primary object.
chi2: float
Dimensionless spin of the secondary object.
qm1: {None, float}, optional
Quadrupole-monopole term of the primary object (1 for black holes).
If None, will be set to qm1 = 1.
qm2: {None, float}, optional
Quadrupole-monopole term of the secondary object (1 for black holes).
If None, will be set to qm2 = 1.
Returns
-------
f: float
The frequency (in Hz) of the hybrid MECO
|
def write_tex():
datadir = livvkit.index_dir
outdir = os.path.join(datadir, "tex")
print(outdir)
data_files = glob.glob(datadir + "/**/*.json", recursive=True)
for each in data_files:
data = functions.read_json(each)
tex = translate_page(data)
outfile = os.path.join(outdir, os.path.basename(each).replace('json', 'tex'))
with open(outfile, 'w') as f:
f.write(tex)
|
Finds all of the output data files, and writes them out to .tex
|
def parse_tibiadata_datetime(date_dict) -> Optional[datetime.datetime]:
try:
t = datetime.datetime.strptime(date_dict["date"], "%Y-%m-%d %H:%M:%S.%f")
except (KeyError, ValueError, TypeError):
return None
if date_dict["timezone"] == "CET":
timezone_offset = 1
elif date_dict["timezone"] == "CEST":
timezone_offset = 2
else:
return None
t = t - datetime.timedelta(hours=timezone_offset)
return t.replace(tzinfo=datetime.timezone.utc)
|
Parses time objects from the TibiaData API.
Time objects are made of a dictionary with three keys:
date: contains a string representation of the time
timezone: a string representation of the timezone the date time is based on
timezone_type: the type of representation used in the timezone key
Parameters
----------
date_dict: :class:`dict`
Dictionary representing the time object.
Returns
-------
:class:`datetime.datetime`, optional
The represented datetime, in UTC.
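Example (a minimal sketch with made-up values):
>>> parse_tibiadata_datetime({"date": "2015-07-23 22:01:45.000000",
...                           "timezone": "CEST", "timezone_type": 2})
datetime.datetime(2015, 7, 23, 20, 1, 45, tzinfo=datetime.timezone.utc)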
|
def set_properties(self, pathobj, props, recursive):
url = '/'.join([pathobj.drive,
'api/storage',
str(pathobj.relative_to(pathobj.drive)).strip('/')])
params = {'properties': encode_properties(props)}
if not recursive:
params['recursive'] = '0'
text, code = self.rest_put(url,
params=params,
auth=pathobj.auth,
verify=pathobj.verify,
cert=pathobj.cert)
if code == 404 and "Unable to find item" in text:
raise OSError(2, "No such file or directory: '%s'" % url)
if code != 204:
raise RuntimeError(text)
|
Set artifact properties
|
async def power_on(
self, comment: str = None,
wait: bool = False, wait_interval: int = 5):
params = {"system_id": self.system_id}
if comment is not None:
params["comment"] = comment
try:
self._data = await self._handler.power_on(**params)
except CallError as error:
if error.status == HTTPStatus.FORBIDDEN:
message = "Not allowed to power on machine."
raise OperationNotAllowed(message) from error
else:
raise
if not wait or self.power_state == PowerState.UNKNOWN:
return self
else:
while self.power_state == PowerState.OFF:
await asyncio.sleep(wait_interval)
self._data = await self._handler.read(system_id=self.system_id)
if self.power_state == PowerState.ERROR:
msg = "{hostname} failed to power on.".format(
hostname=self.hostname
)
raise PowerError(msg, self)
return self
|
Power on.
:param comment: Reason machine was powered on.
:type comment: `str`
:param wait: If specified, wait until the machine is powered on.
:type wait: `bool`
:param wait_interval: How often to poll, defaults to 5 seconds.
:type wait_interval: `int`
|
def handle(self, *args, **options):
trigger_id = options.get('trigger_id')
trigger = TriggerService.objects.filter(
id=int(trigger_id),
status=True,
user__is_active=True,
provider_failed__lt=settings.DJANGO_TH.get('failed_tries', 10),
consumer_failed__lt=settings.DJANGO_TH.get('failed_tries', 10)
).select_related('consumer__name', 'provider__name')
try:
with Pool(processes=1) as pool:
r = Read()
result = pool.map_async(r.reading, trigger)
result.get(timeout=360)
p = Pub()
result = pool.map_async(p.publishing, trigger)
result.get(timeout=360)
cache.delete('django_th' + '_fire_trigger_' + str(trigger_id))
except TimeoutError as e:
logger.warning(e)
|
get the trigger to fire
|
def from_hising(cls, h, J, offset=None):
poly = {(k,): v for k, v in h.items()}
poly.update(J)
if offset is not None:
poly[frozenset([])] = offset
return cls(poly, Vartype.SPIN)
|
Construct a binary polynomial from a higher-order Ising problem.
Args:
h (dict):
The linear biases.
J (dict):
The higher-order biases.
offset (optional, default=0.0):
Constant offset applied to the model.
Returns:
:obj:`.BinaryPolynomial`
Examples:
>>> poly = dimod.BinaryPolynomial.from_hising({'a': 2}, {'ab': -1}, 0)
|
def authenticate(self, username, password):
r = self._query_('/users/authenticate', 'POST',
params={'username': username,
'password': password})
if r.status_code == 201:
return r.text.strip('"')
else:
raise ValueError('Authentication invalid.')
|
Authenticates your user and returns an auth token.
:param str username: Hummingbird username.
:param str password: Hummingbird password.
:returns: str -- The Auth Token
:raises: ValueError -- If the Authentication is wrong
|
def age(self):
aff4_type = self.Get(self.Schema.TYPE)
if aff4_type:
return aff4_type.age
else:
return rdfvalue.RDFDatetime.Now()
|
RDFDatetime at which the object was created.
|
def getLogs(self,
argument_filters=None,
fromBlock=None,
toBlock=None,
blockHash=None):
if not self.address:
raise TypeError("This method can be only called on "
"an instated contract with an address")
abi = self._get_event_abi()
if argument_filters is None:
argument_filters = dict()
_filters = dict(**argument_filters)
blkhash_set = blockHash is not None
blknum_set = fromBlock is not None or toBlock is not None
if blkhash_set and blknum_set:
raise ValidationError(
'blockHash cannot be set at the same'
' time as fromBlock or toBlock')
data_filter_set, event_filter_params = construct_event_filter_params(
abi,
contract_address=self.address,
argument_filters=_filters,
fromBlock=fromBlock,
toBlock=toBlock,
address=self.address,
)
if blockHash is not None:
event_filter_params['blockHash'] = blockHash
logs = self.web3.eth.getLogs(event_filter_params)
return tuple(get_event_data(abi, entry) for entry in logs)
|
Get events for this contract instance using eth_getLogs API.
This is a stateless method, as opposed to createFilter.
It can be safely called against nodes which do not provide
eth_newFilter API, like Infura nodes.
If there are many events,
like ``Transfer`` events for a popular token,
the Ethereum node might be overloaded and timeout
on the underlying JSON-RPC call.
Example - how to get all ERC-20 token transactions
for the latest 10 blocks:
.. code-block:: python
from_block = max(mycontract.web3.eth.blockNumber - 10, 1)
to_block = mycontract.web3.eth.blockNumber
events = mycontract.events.Transfer.getLogs(fromBlock=from_block, toBlock=to_block)
for e in events:
print(e["args"]["from"],
e["args"]["to"],
e["args"]["value"])
The returned processed log values will look like:
.. code-block:: python
(
AttributeDict({
'args': AttributeDict({}),
'event': 'LogNoArguments',
'logIndex': 0,
'transactionIndex': 0,
'transactionHash': HexBytes('...'),
'address': '0xF2E246BB76DF876Cef8b38ae84130F4F55De395b',
'blockHash': HexBytes('...'),
'blockNumber': 3
}),
AttributeDict(...),
...
)
See also: :func:`web3.middleware.filter.local_filter_middleware`.
:param argument_filters:
:param fromBlock: block number or "latest", defaults to "latest"
:param toBlock: block number or "latest". Defaults to "latest"
:param blockHash: block hash. blockHash cannot be set at the
same time as fromBlock or toBlock
:yield: Tuple of :class:`AttributeDict` instances
|
def to(self, unit):
from astropy.units import au, d
return (self.au_per_d * au / d).to(unit)
|
Convert this velocity to the given AstroPy unit.
|
def shape(self) -> Tuple[int, ...]:
nmb_place = len(self.sequences)
nmb_time = len(hydpy.pub.timegrids.init)
nmb_others = collections.deque()
for sequence in self.sequences.values():
nmb_others.append(sequence.shape)
nmb_others_max = tuple(numpy.max(nmb_others, axis=0))
return self.sort_timeplaceentries(nmb_time, nmb_place) + nmb_others_max
|
Required shape of |NetCDFVariableDeep.array|.
For the default configuration, the first axis corresponds to the
number of devices, and the second one to the number of timesteps.
We show this for the 0-dimensional input sequence |lland_inputs.Nied|:
>>> from hydpy.core.examples import prepare_io_example_1
>>> nodes, elements = prepare_io_example_1()
>>> from hydpy.core.netcdftools import NetCDFVariableDeep
>>> ncvar = NetCDFVariableDeep('input_nied', isolate=False, timeaxis=1)
>>> for element in elements:
... ncvar.log(element.model.sequences.inputs.nied, None)
>>> ncvar.shape
(3, 4)
For higher dimensional sequences, each new entry corresponds
to the maximum number of fields the respective sequences require.
In the next example, we select the 1-dimensional sequence
|lland_fluxes.NKor|. The maximum number 3 (last value of the
returned |tuple|) is due to the third element defining three
hydrological response units:
>>> ncvar = NetCDFVariableDeep('flux_nkor', isolate=False, timeaxis=1)
>>> for element in elements:
... ncvar.log(element.model.sequences.fluxes.nkor, None)
>>> ncvar.shape
(3, 4, 3)
When using the first axis for time (`timeaxis=0`) the order of the
first two |tuple| entries turns:
>>> ncvar = NetCDFVariableDeep('flux_nkor', isolate=False, timeaxis=0)
>>> for element in elements:
... ncvar.log(element.model.sequences.fluxes.nkor, None)
>>> ncvar.shape
(4, 3, 3)
|
def any_ends_with(self, string_list, pattern):
try:
s_base = basestring
except:
s_base = str
is_string = isinstance(pattern, s_base)
if not is_string:
return False
for s in string_list:
if pattern.endswith(s):
return True
return False
|
Returns True iff ``pattern`` ends with one of the strings in
``string_list``.
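A hypothetical usage sketch (``helper`` stands in for the owning object):
>>> helper.any_ends_with(['.jpg', '.png'], 'photo.png')
True
>>> helper.any_ends_with(['.jpg', '.png'], 'notes.txt')
False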
|
def tokenize_string(cls, string, separator):
results = []
token = ""
found_escape = False
for c in string:
if found_escape:
if c == separator:
token += separator
else:
token += "\\" + c
found_escape = False
continue
if c == "\\":
found_escape = True
elif c == separator:
results.append(token)
token = ""
else:
token += c
results.append(token)
return results
|
Split string with given separator unless the separator is escaped with backslash
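A hypothetical usage sketch (``Tokenizer`` stands in for the owning class):
>>> Tokenizer.tokenize_string('a,b\\,c', ',')
['a', 'b,c']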
|
def dispatch(self, method_frame):
method = self.dispatch_map.get(method_frame.method_id)
if method:
callback = self.channel.clear_synchronous_cb(method)
callback(method_frame)
else:
raise self.InvalidMethod(
"no method is registered with id: %d" % method_frame.method_id)
|
Dispatch a method for this protocol.
|
def jsonld(client, datasets):
from renku.models._json import dumps
from renku.models._jsonld import asjsonld
data = [
asjsonld(
dataset,
basedir=os.path.relpath(
'.', start=str(dataset.__reference__.parent)
)
) for dataset in datasets
]
click.echo(dumps(data, indent=2))
|
Format datasets as JSON-LD.
|
def updateDatasetMenu(self):
enabled = True
current = self.vtgui.dbs_tree_view.currentIndex()
if current:
leaf = self.vtgui.dbs_tree_model.nodeFromIndex(current)
if leaf.node_kind in (u'group', u'root group'):
enabled = False
self.plot_action.setEnabled(enabled)
|
Update the `export` QAction when the Dataset menu is pulled down.
This method is a slot. See class ctor for details.
|
def get_typecast_value(self, value, type):
if type == entities.Variable.Type.BOOLEAN:
return value == 'true'
elif type == entities.Variable.Type.INTEGER:
return int(value)
elif type == entities.Variable.Type.DOUBLE:
return float(value)
else:
return value
|
Helper method to determine actual value based on type of feature variable.
Args:
value: Value in string form as it was parsed from datafile.
type: Type denoting the feature flag type.
Return:
Value type-casted based on type of feature variable.
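A hypothetical usage sketch (``config`` stands in for the owning object):
>>> config.get_typecast_value('true', entities.Variable.Type.BOOLEAN)
True
>>> config.get_typecast_value('42', entities.Variable.Type.INTEGER)
42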
|
def publish_topology_closed(self, topology_id):
event = TopologyClosedEvent(topology_id)
for subscriber in self.__topology_listeners:
try:
subscriber.closed(event)
except Exception:
_handle_exception()
|
Publish a TopologyClosedEvent to all topology listeners.
:Parameters:
- `topology_id`: A unique identifier for the topology this server
is a part of.
|
def _validate_columns(self):
geom_cols = {'the_geom', 'the_geom_webmercator', }
col_overlap = set(self.style_cols) & geom_cols
if col_overlap:
raise ValueError('Style columns cannot be geometry '
'columns. `{col}` was chosen.'.format(
col=','.join(col_overlap)))
|
Validate the options in the styles
|
def go(gconfig, args):
rconfig = gconfig['rejester']
which_worker = rconfig.get('worker', 'fork_worker')
if which_worker == 'fork_worker':
yakonfig.set_default_config([rejester], config=gconfig)
else:
start_logging(gconfig, args.logpath)
return start_worker(which_worker, rconfig)
|
Actually run the worker.
This does some required housekeeping, like setting up logging for
:class:`~rejester.workers.MultiWorker` and establishing the global
:mod:`yakonfig` configuration. This expects to be called with the
:mod:`yakonfig` configuration unset.
:param dict gconfig: the :mod:`yakonfig` global configuration
:param args: command-line arguments
|
def restore(self, hist_uid):
if self.check_post_role()['ADMIN']:
pass
else:
return False
histinfo = MWikiHist.get_by_uid(hist_uid)
if histinfo:
pass
else:
return False
postinfo = MWiki.get_by_uid(histinfo.wiki_id)
cur_cnt = tornado.escape.xhtml_unescape(postinfo.cnt_md)
old_cnt = tornado.escape.xhtml_unescape(histinfo.cnt_md)
MWiki.update_cnt(
histinfo.wiki_id,
{'cnt_md': old_cnt, 'user_name': self.userinfo.user_name}
)
MWikiHist.update_cnt(
histinfo.uid,
{'cnt_md': cur_cnt, 'user_name': postinfo.user_name}
)
if postinfo.kind == '1':
self.redirect('/wiki/{0}'.format(postinfo.title))
elif postinfo.kind == '2':
self.redirect('/page/{0}.html'.format(postinfo.uid))
|
Restore a wiki page to the revision identified by ``hist_uid``.
|
def can_execute(self):
return not self._disabled and all(dep.status == dep.node.S_OK for dep in self.deps)
|
True if we can execute the callback.
|
def get_value(self):
if self.__is_value_array:
if self.__bit_size == 8:
return list(self.__value)
else:
result = []
for i in range(self.__report_count):
result.append(self.__getitem__(i))
return result
else:
return self.__value
|
Retrieve usage value within report
|
def delete_answer(self, answer_id):
from dlkit.abstract_osid.id.primitives import Id as ABCId
from .objects import Answer
collection = JSONClientValidated('assessment',
collection='Item',
runtime=self._runtime)
if not isinstance(answer_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
item = collection.find_one({'answers._id': ObjectId(answer_id.get_identifier())})
index = 0
found = False
for i in item['answers']:
if i['_id'] == ObjectId(answer_id.get_identifier()):
answer_map = item['answers'].pop(index)
found = True
break
index += 1
if not found:
raise errors.OperationFailed()
Answer(
osid_object_map=answer_map,
runtime=self._runtime,
proxy=self._proxy)._delete()
collection.save(item)
|
Deletes the ``Answer`` identified by the given ``Id``.
arg: answer_id (osid.id.Id): the ``Id`` of the ``Answer`` to
delete
raise: NotFound - an ``Answer`` was not found identified by the
given ``Id``
raise: NullArgument - ``answer_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
|
def parse_json(target, json, create_sections = False, create_options = False):
is_dict = isinstance(json, dict)
for o in json:
if is_dict:
section = o
else:
section = o[0]
if not target.has_section(section):
if create_sections:
target.add_section(section)
else:
continue
for k, v in (json[o].items() if is_dict else o[1]):
if target.has_option(section, k) or create_options:
target.set(section, k, v)
return target
|
Given a confmanager object and a dictionary object, import the values from the dictionary into the object, optionally adding sections and options as it goes.
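A minimal usage sketch, assuming the target exposes a ``configparser``-like interface:
>>> import configparser
>>> cp = configparser.ConfigParser()
>>> cp = parse_json(cp, {'general': {'debug': 'yes'}}, create_sections=True, create_options=True)
>>> cp.get('general', 'debug')
'yes'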
|
def show_error(self, message):
assert isinstance(message, string_types)
self.post('error', data=message)
|
Send an error message to the active client. The new error will be
displayed on any active GUI clients.
Args:
message (str): Plain-text message to display.
Returns:
None
>>> s = _syncthing()
>>> s.system.show_error('my error msg')
>>> s.system.errors()[0]
... # doctest: +ELLIPSIS
ErrorEvent(when=datetime.datetime(...), message='"my error msg"')
>>> s.system.clear_errors()
>>> s.system.errors()
[]
|
def generate_lifetime_subparser(subparsers):
parser = subparsers.add_parser(
'lifetime', description=constants.LIFETIME_DESCRIPTION,
epilog=constants.LIFETIME_EPILOG, formatter_class=ParagraphFormatter,
help=constants.LIFETIME_HELP)
parser.set_defaults(func=lifetime_report)
utils.add_tokenizer_argument(parser)
utils.add_common_arguments(parser)
utils.add_query_arguments(parser)
parser.add_argument('results', help=constants.LIFETIME_RESULTS_HELP,
metavar='RESULTS')
parser.add_argument('label', help=constants.LIFETIME_LABEL_HELP,
metavar='LABEL')
parser.add_argument('output', help=constants.REPORT_OUTPUT_HELP,
metavar='OUTPUT')
|
Adds a sub-command parser to `subparsers` to make a lifetime report.
|
def results(project, apikey, run, watch, server, output):
status = run_get_status(server, project, run, apikey)
log(format_run_status(status))
if watch:
for status in watch_run_status(server, project, run, apikey, 24*60*60):
log(format_run_status(status))
if status['state'] == 'completed':
log("Downloading result")
response = run_get_result_text(server, project, run, apikey)
log("Received result")
print(response, file=output)
elif status['state'] == 'error':
log("There was an error")
error_result = run_get_result_text(server, project, run, apikey)
print(error_result, file=output)
else:
log("No result yet")
|
Check to see if results are available for a particular mapping
and if so download.
Authentication is carried out using the --apikey option which
must be provided. Depending on the server operating mode this
may return a mask, a linkage table, or a permutation. Consult
the entity service documentation for details.
|
def check_cups_allowed(func):
@wraps(func)
def decorator(*args, **kwargs):
cups = kwargs.get('cups')
if (cups and current_user.is_authenticated()
and not current_user.allowed(cups, 'cups')):
return current_app.login_manager.unauthorized()
return func(*args, **kwargs)
return decorator
|
Check if the CUPS is allowed by the token
|
def start(self):
yield from self._do_connect()
_LOGGER.info('connected to snapserver on %s:%s', self._host, self._port)
status = yield from self.status()
self.synchronize(status)
self._on_server_connect()
|
Initiate server connection.
|
def add_key_filters(self, key_filters):
if self._input_mode == 'query':
raise ValueError('Key filters are not supported in a query.')
self._key_filters.extend(key_filters)
return self
|
Adds key filters to the inputs.
:param key_filters: a list of filters
:type key_filters: list
:rtype: :class:`RiakMapReduce`
|
def _emit(self, s):
if os.path.exists(self._html_dir):
self._report_file.write(s)
self._report_file.flush()
|
Append content to the main report file.
|
def get_allow_future(self):
qs = self.get_queryset()
post_edit_permission = '{}.edit_{}'.format(
qs.model._meta.app_label, qs.model._meta.model_name
)
if self.request.user.has_perm(post_edit_permission):
return True
return False
|
Only superusers and users with the permission can edit the post.
|
def set_managing_editor(self):
try:
self.managing_editor = self.soup.find('managingeditor').string
except AttributeError:
self.managing_editor = None
|
Parses the managing editor and sets the value
|
def date_this_decade(self, before_today=True, after_today=False):
today = date.today()
this_decade_start = date(today.year - (today.year % 10), 1, 1)
next_decade_start = date(this_decade_start.year + 10, 1, 1)
if before_today and after_today:
return self.date_between_dates(this_decade_start, next_decade_start)
elif not before_today and after_today:
return self.date_between_dates(today, next_decade_start)
elif not after_today and before_today:
return self.date_between_dates(this_decade_start, today)
else:
return today
|
Gets a Date object for the decade year.
:param before_today: include days in current decade before today
:param after_today: include days in current decade after today
:example Date('2012-04-04')
:return Date
|
def binary_op(data, op, other, blen=None, storage=None, create='array',
**kwargs):
if hasattr(other, 'shape') and len(other.shape) == 0:
other = other[()]
if np.isscalar(other):
def f(block):
return op(block, other)
return map_blocks(data, f, blen=blen, storage=storage, create=create, **kwargs)
elif len(data) == len(other):
def f(a, b):
return op(a, b)
return map_blocks((data, other), f, blen=blen, storage=storage, create=create,
**kwargs)
else:
raise NotImplementedError('argument type not supported')
|
Compute a binary operation block-wise over `data`.
|
def iter_identities(self, stanza=None):
for (category, type_), names in self._identities.items():
for lang, name in names.items():
yield category, type_, lang, name
if not names:
yield category, type_, None, None
|
Return an iterator of tuples describing the identities of the node.
:param stanza: The IQ request stanza
:type stanza: :class:`~aioxmpp.IQ` or :data:`None`
:rtype: iterable of (:class:`str`, :class:`str`, :class:`str` or
:data:`None`, :class:`str` or :data:`None`) tuples
:return: :xep:`30` identities of this node
`stanza` can be the :class:`aioxmpp.IQ` stanza of the request. This can
be used to hide a node depending on who is asking. If the returned
iterable is empty, the :class:`~.DiscoServer` returns an
``<item-not-found/>`` error.
`stanza` may be :data:`None` if the identities are queried without
a specific request context. In that case, implementors should assume
that the result is visible to everybody.
.. note::
Subclasses must allow :data:`None` for `stanza` and default it to
:data:`None`.
Return an iterator which yields tuples consisting of the category, the
type, the language code and the name of each identity declared in this
:class:`Node`.
Both the language code and the name may be :data:`None`, if no names or
a name without language code have been declared.
|
def delete_tenant(self, tenant):
tenant_id = utils.get_id(tenant)
uri = "tenants/%s" % tenant_id
resp, resp_body = self.method_delete(uri)
if resp.status_code == 404:
raise exc.TenantNotFound("Tenant '%s' does not exist." % tenant)
|
ADMIN ONLY. Removes the tenant from the system. There is no 'undo'
available, so you should be certain that the tenant specified is the
tenant you wish to delete.
|
def _find_neighbor_and_lambda(neighbor_indices, neighbor_distances,
core_distances, min_samples):
neighbor_core_distances = core_distances[neighbor_indices]
point_core_distances = neighbor_distances[min_samples] * np.ones(
neighbor_indices.shape[0])
mr_distances = np.vstack((
neighbor_core_distances,
point_core_distances,
neighbor_distances
)).max(axis=0)
nn_index = mr_distances.argmin()
nearest_neighbor = neighbor_indices[nn_index]
if mr_distances[nn_index] > 0.0:
lambda_ = 1. / mr_distances[nn_index]
else:
lambda_ = np.finfo(np.double).max
return nearest_neighbor, lambda_
|
Find the nearest mutual reachability neighbor of a point, and compute
the associated lambda value for the point, given the mutual reachability
distance to a nearest neighbor.
Parameters
----------
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
neighbor : int
The index into the full raw data set of the nearest mutual reachability
distance neighbor of the point.
lambda_ : float
The lambda value at which this point joins/merges with `neighbor`.
|
def getNameOwner(self, busName):
d = self.callRemote(
'/org/freedesktop/DBus',
'GetNameOwner',
interface='org.freedesktop.DBus',
signature='s',
body=[busName],
destination='org.freedesktop.DBus',
)
return d
|
Calls org.freedesktop.DBus.GetNameOwner
@rtype: L{twisted.internet.defer.Deferred}
@returns: a Deferred to the unique connection name owning the bus name
|
def get_named_graph(identifier, store_id=DEFAULT_STORE, create=True):
if not isinstance(identifier, URIRef):
identifier = URIRef(identifier)
store = DjangoStore(store_id)
graph = Graph(store, identifier=identifier)
if graph.open(None, create=create) != VALID_STORE:
raise ValueError("The store identified by {0} is not a valid store".format(store_id))
return graph
|
Returns an open named graph.
|
def DbGetExportdDeviceListForClass(self, argin):
self._log.debug("In DbGetExportdDeviceListForClass()")
argin = replace_wildcard(argin)
return self.db.get_exported_device_list_for_class(argin)
|
Query the database for device exported for the specified class.
:param argin: Class name
:type: tango.DevString
:return: Device exported list
:rtype: tango.DevVarStringArray
|
def _compile_to_sklearn(self, expr):
sklearn_pipeline_str = generate_pipeline_code(expr_to_tree(expr, self._pset), self.operators)
sklearn_pipeline = eval(sklearn_pipeline_str, self.operators_context)
sklearn_pipeline.memory = self._memory
return sklearn_pipeline
|
Compile a DEAP pipeline into a sklearn pipeline.
Parameters
----------
expr: DEAP individual
The DEAP pipeline to be compiled
Returns
-------
sklearn_pipeline: sklearn.pipeline.Pipeline
|
def update_policy(self,defaultHeaders):
if self.inputs is not None:
for k,v in defaultHeaders.items():
if k not in self.inputs:
self.inputs[k] = v
if k == 'pins':
self.inputs[k] = self.inputs[k] + defaultHeaders[k]
return self.inputs
else:
return self.inputs
|
Rewrite the update policy so that additional pins are added rather than overwritten.
|
def organization_deidentify_template_path(cls, organization, deidentify_template):
return google.api_core.path_template.expand(
"organizations/{organization}/deidentifyTemplates/{deidentify_template}",
organization=organization,
deidentify_template=deidentify_template,
)
|
Return a fully-qualified organization_deidentify_template string.
|
def show_calltip(self, signature, doc='', parameter='', parameter_doc='',
color=_DEFAULT_TITLE_COLOR, is_python=False):
point = self._calculate_position()
tiptext, wrapped_lines = self._format_signature(
signature,
doc,
parameter,
parameter_doc,
color,
is_python,
)
self._update_stylesheet(self.calltip_widget)
self.calltip_widget.show_tip(point, tiptext, wrapped_lines)
|
Show calltip.
Calltips look like tooltips but will not disappear if mouse hovers
them. They are useful for displaying signature information on methods
and functions.
|
def copy_func(func: Callable) -> Callable:
copied = types.FunctionType(
func.__code__, func.__globals__, name=func.__name__,
argdefs=func.__defaults__, closure=func.__closure__)
copied = functools.update_wrapper(copied, func)
copied.__kwdefaults__ = func.__kwdefaults__
return copied
|
Returns a copy of a function.
:param func: The function to copy.
:returns: The copied function.
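Example (a minimal sketch, assuming the function is in scope):
>>> def add(a, b=1):
...     return a + b
>>> add_copy = copy_func(add)
>>> add_copy(2)
3
>>> add_copy is add
False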
|
def hyphenation(phrase, format='json'):
base_url = Vocabulary.__get_api_link("wordnik")
url = base_url.format(word=phrase.lower(), action="hyphenation")
json_obj = Vocabulary.__return_json(url)
if json_obj:
return Response().respond(json_obj, format)
else:
return False
|
Returns the stress points (hyphenation) of the given phrase
:param phrase: word for which hyphenation is to be found
:param format: response structure type. Defaults to: "json"
:returns: returns a json object as str, False if invalid phrase
|
def wait(msg='', exceptions=None, timeout=10):
exc = [StaleElementReferenceException]
if exceptions is not None:
try:
exc.extend(iter(exceptions))
except TypeError:
exc.append(exceptions)
exc = tuple(exc)
if not msg:
msg = "Could not recover from Exception(s): {}".format(', '.join([e.__name__ for e in exc]))
def wrapper(func):
def wait_handler(*args, **kwargs):
import time
poll_freq = 0.5
end_time = time.time() + timeout
while time.time() <= end_time:
try:
value = func(*args, **kwargs)
if value:
return value
except exc as e:
LOGGER.debug(e)
pass
time.sleep(poll_freq)
poll_freq *= 1.25
raise RuntimeError(msg)
return wait_handler
return wrapper
|
Decorator to handle generic waiting situations.
Will handle ``StaleElementReferenceException`` by default.
:param msg: Error message
:param exceptions: Extra exceptions to handle
:param timeout: time to keep trying (default: 10 seconds)
:return: the result of the decorated function
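A hypothetical usage sketch (``source`` and its ``poll`` method are made up):
>>> @wait(msg='value never became ready', timeout=5)
... def read_value(source):
...     return source.poll()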
|
def rotation(self):
rotation = self._libinput.libinput_event_tablet_tool_get_rotation(
self._handle)
changed = self._libinput. \
libinput_event_tablet_tool_rotation_has_changed(self._handle)
return rotation, changed
|
The current Z rotation of the tool in degrees, clockwise
from the tool's logical neutral position and whether it has changed
in this event.
For tools of type :attr:`~libinput.constant.TabletToolType.MOUSE`
and :attr:`~libinput.constant.TabletToolType.LENS` the logical
neutral position is pointing to the current logical north
of the tablet. For tools of type
:attr:`~libinput.constant.TabletToolType.BRUSH`, the logical
neutral position is with the buttons pointing up.
If this axis does not exist on the current tool, this property is
(0, :obj:`False`).
Returns:
(float, bool): The current value of the axis and whether it has
changed.
|
def anscombe():
_, ((axa, axb), (axc, axd)) = plt.subplots(2, 2, sharex='col', sharey='row')
colors = get_color_cycle()
for arr, ax, color in zip(ANSCOMBE, (axa, axb, axc, axd), colors):
x = arr[0]
y = arr[1]
ax.set_xlim(0, 15)
ax.set_ylim(0, 15)
ax.scatter(x, y, c=color)
draw_best_fit(x, y, ax, c=color)
return (axa, axb, axc, axd)
|
Creates 2x2 grid plot of the 4 anscombe datasets for illustration.
|
def create_ui(self):
self.entry = gtk.Entry()
self.widget.add(self.entry)
|
Create the user interface
create_ui is a method called during the Delegate's
initialisation process, to create, add to, or modify any UI
created by GtkBuilder files.
|
def integrate(self, wave=None):
if wave is None:
wave = self.wave
ans = self.trapezoidIntegration(wave, self(wave))
return ans
|
Integrate the throughput over the specified wavelength set.
If no wavelength set is specified, the built-in one is used.
Integration is done using :meth:`~Integrator.trapezoidIntegration`
with ``x=wave`` and ``y=throughput``.
Also see :ref:`pysynphot-formula-equvw`.
Parameters
----------
wave : array_like or `None`
Wavelength set for integration.
Returns
-------
ans : float
Integrated sum.
|
def get_all_parcels(self, view = None):
return parcels.get_all_parcels(self._get_resource_root(), self.name, view)
|
Get all parcels in this cluster.
@return: A list of ApiParcel objects.
|
def get_attr_info(binary_view):
global _ATTR_BASIC
attr_type, attr_len, non_resident = _ATTR_BASIC.unpack(binary_view[:9])
return (AttrTypes(attr_type), attr_len, bool(non_resident))
|
Gets basic information from a binary stream to allow correct processing of
the attribute header.
This function allows the interpretation of the Attribute type, attribute length
and if the attribute is non resident.
Args:
binary_view (memoryview of bytearray) - A binary stream with the
information of the attribute
Returns:
A tuple with the attribute type, the attribute length in bytes, and
whether the attribute is non resident.
|
def _render_internal_label(self):
ncc = self._num_complete_chars
bar = self._lbl.center(self.iwidth)
cm_chars = self._comp_style(bar[:ncc])
em_chars = self._empt_style(bar[ncc:])
return f'{self._first}{cm_chars}{em_chars}{self._last}'
|
Render with a label inside the bar graph.
|
def get_action_group_names(self):
return self.get_group_names(
list(itertools.chain(
*[self._get_array('add'),
self._get_array('remove'),
self._get_array('isolation-group')])))
|
Return all the security group names configured in this action.
|
def run():
logging.basicConfig(level=logging.DEBUG)
load_config.ConfigLoader().load()
config.debug = True
print(repr(config.engine.item(sys.argv[1])))
|
Module level test.
|
def A_hollow_cylinder(Di, Do, L):
side_o = pi*Do*L
side_i = pi*Di*L
cap_circle = pi*Do**2/4*2
cap_removed = pi*Di**2/4*2
return side_o + side_i + cap_circle - cap_removed
|
Returns the surface area of a hollow cylinder.
.. math::
A = \pi D_o L + \pi D_i L + 2\cdot \frac{\pi D_o^2}{4}
- 2\cdot \frac{\pi D_i^2}{4}
Parameters
----------
Di : float
Diameter of the hollow in the cylinder, [m]
Do : float
Diameter of the exterior of the cylinder, [m]
L : float
Length of the cylinder, [m]
Returns
-------
A : float
Surface area [m^2]
Examples
--------
>>> A_hollow_cylinder(0.005, 0.01, 0.1)
0.004830198704894308
|
def update_milestone(self, milestone_id, title, deadline, party_id, notify,
move_upcoming_milestones=None,
move_upcoming_milestones_off_weekends=None):
path = '/milestones/update/%u' % milestone_id
req = ET.Element('request')
req.append(
self._create_milestone_elem(title, deadline, party_id, notify))
if move_upcoming_milestones is not None:
ET.SubElement(req, 'move-upcoming-milestones').text \
= str(bool(move_upcoming_milestones)).lower()
if move_upcoming_milestones_off_weekends is not None:
ET.SubElement(req, 'move-upcoming-milestones-off-weekends').text \
= str(bool(move_upcoming_milestones_off_weekends)).lower()
return self._request(path, req)
|
Modifies a single milestone. You can use this to shift the deadline of
a single milestone, and optionally shift the deadlines of subsequent
milestones as well.
|
def _read_mode_qsopt(self, size, kind):
rvrr = self._read_binary(1)
ttld = self._read_unpack(1)
noun = self._read_fileng(4)
data = dict(
kind=kind,
length=size,
req_rate=int(rvrr[4:], base=2),
ttl_diff=ttld,
nounce=noun[:-2],
)
return data
|
Read Quick-Start Response option.
Positional arguments:
* size - int, length of option
* kind - int, 27 (Quick-Start Response)
Returns:
* dict -- extracted Quick-Start Response (QS) option
Structure of TCP QSopt [RFC 4782]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Kind | Length=8 | Resv. | Rate | TTL Diff |
| | | |Request| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| QS Nonce | R |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 tcp.qs.kind Kind (27)
1 8 tcp.qs.length Length (8)
2 16 - Reserved (must be zero)
2 20 tcp.qs.req_rate Request Rate
3 24 tcp.qs.ttl_diff TTL Difference
4 32 tcp.qs.nounce QS Nounce
7 62 - Reserved (must be zero)
|
def get_item_key(self, item):
try:
ret = item[item['key']]
except KeyError:
logger.error("No 'key' available in {}".format(item))
return None
if isinstance(ret, list):
return ret[0]
else:
return ret
|
Return the value of the item 'key'.
|
def get_fw_dict(self):
fw_dict = {}
if self.fw_id is None:
return fw_dict
fw_dict = {'rules': {}, 'tenant_name': self.tenant_name,
'tenant_id': self.tenant_id, 'fw_id': self.fw_id,
'fw_name': self.fw_name,
'firewall_policy_id': self.active_pol_id,
'fw_type': self.fw_type, 'router_id': self.router_id}
if self.active_pol_id not in self.policies:
return fw_dict
pol_dict = self.policies[self.active_pol_id]
for rule in pol_dict['rule_dict']:
fw_dict['rules'][rule] = self.rules[rule]
return fw_dict
|
This API creates a FW dictionary from the local attributes.
|
def fill_nulls(self, col: str):
n = [None, ""]
try:
self.df[col] = self.df[col].replace(n, nan)
except Exception as e:
self.err(e)
|
Fill all null values with NaN values in a column.
Null values are ``None`` or an empty string
:param col: column name
:type col: str
:example: ``ds.fill_nulls("mycol")``
|
def search_function(encoding):
if encoding in _CACHE:
return _CACHE[encoding]
norm_encoding = normalize_encoding(encoding)
codec = None
if norm_encoding in UTF8_VAR_NAMES:
from ftfy.bad_codecs.utf8_variants import CODEC_INFO
codec = CODEC_INFO
elif norm_encoding.startswith('sloppy_'):
from ftfy.bad_codecs.sloppy import CODECS
codec = CODECS.get(norm_encoding)
if codec is not None:
_CACHE[encoding] = codec
return codec
|
Register our "bad codecs" with Python's codecs API. This involves adding
a search function that takes in an encoding name, and returns a codec
for that encoding if it knows one, or None if it doesn't.
The encodings this will match are:
- Encodings of the form 'sloppy-windows-NNNN' or 'sloppy-iso-8859-N',
where the non-sloppy version is an encoding that leaves some bytes
unmapped to characters.
- The 'utf-8-variants' encoding, which has the several aliases seen
above.
|
def lock(key, text):
return hmac.new(key.encode('utf-8'), text.encode('utf-8')).hexdigest()
|
Computes an HMAC hex digest of the given text using the given key and returns it
|
def _calc_d(aod700, p):
p0 = 101325.
dp = 1/(18 + 152*aod700)
d = -0.337*aod700**2 + 0.63*aod700 + 0.116 + dp*np.log(p/p0)
return d
|
Calculate the d coefficient.
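Example (a minimal sketch with made-up inputs; at p = 101325 Pa the pressure term vanishes):
>>> round(_calc_d(0.1, 101325.), 5)
0.17563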
|
def init0(self, dae):
dae.y[self.v] = self.v0
dae.y[self.q] = mul(self.u, self.qg)
|
Set initial voltage and reactive power for PQ.
Overwrites Bus.voltage values
|
def safe_unicode(string):
if not isinstance(string, basestring):
string = unicode(string)
if isinstance(string, unicode):
string = string.encode('utf8')
return string
|
Safely transform any object into utf8 encoded bytes
|
def separate_struct_array(array, dtypes):
try:
offsets = np.cumsum([np.dtype(dtype).itemsize for dtype in dtypes])
except TypeError:
dtype_size = np.dtype(dtypes).itemsize
num_fields = int(array.nbytes / (array.size * dtype_size))
offsets = np.cumsum([dtype_size] * num_fields)
dtypes = [dtypes] * num_fields
offsets = np.concatenate([[0], offsets]).astype(int)
uint_array = array.view(np.uint8).reshape(array.shape + (-1,))
return [
uint_array[..., offsets[idx]:offsets[idx+1]].flatten().view(dtype)
for idx, dtype in enumerate(dtypes)
]
|
Takes an array with a structured dtype, and separates it out into
a list of arrays with dtypes coming from the input ``dtypes``.
Does the inverse of ``join_struct_arrays``.
:param np.ndarray array: Structured array.
:param dtypes: List of ``np.dtype``, or just a ``np.dtype`` and the number of
them is figured out automatically by counting bytes.
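A usage sketch with made-up data (assumes the function and NumPy are in scope):
>>> import numpy as np
>>> arr = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)])
>>> parts = separate_struct_array(arr, [np.int32, np.float64])
>>> [p.dtype.name for p in parts]
['int32', 'float64']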
|
def write_blockdata(self, obj, parent=None):
if type(obj) is str:
obj = to_bytes(obj, "latin-1")
length = len(obj)
if length <= 256:
self._writeStruct(">B", 1, (self.TC_BLOCKDATA,))
self._writeStruct(">B", 1, (length,))
else:
self._writeStruct(">B", 1, (self.TC_BLOCKDATALONG,))
self._writeStruct(">I", 1, (length,))
self.object_stream.write(obj)
|
Appends a block of data to the serialization stream
:param obj: String form of the data block
|
def preproc_directive(self) -> bool:
self._stream.save_context()
if self.read_until("\n", '\\'):
return self._stream.validate_context()
return self._stream.restore_context()
|
Consume a preproc directive.
|
def hostname(name, hostname=None):
ret = _default_ret(name)
current_name = __salt__['cimc.get_hostname']()
req_change = False
try:
if current_name != hostname:
req_change = True
if req_change:
update = __salt__['cimc.set_hostname'](hostname)
if not update:
ret['result'] = False
ret['comment'] = "Error setting hostname."
return ret
ret['changes']['before'] = current_name
ret['changes']['after'] = hostname
ret['comment'] = "Hostname modified."
else:
ret['comment'] = "Hostname already configured. No changes required."
except Exception as err:
ret['result'] = False
ret['comment'] = "Error occurred setting hostname."
log.error(err)
return ret
ret['result'] = True
return ret
|
Ensures that the hostname is set to the specified value.
.. versionadded:: 2019.2.0
name: The name of the module function to execute.
hostname(str): The hostname of the server.
SLS Example:
.. code-block:: yaml
set_name:
cimc.hostname:
- hostname: foobar
|
def component_acting_parent_tag(parent_tag, tag):
if parent_tag.name == "fig-group":
if (len(tag.find_previous_siblings("fig")) > 0):
acting_parent_tag = first(extract_nodes(parent_tag, "fig"))
else:
return None
else:
acting_parent_tag = parent_tag
return acting_parent_tag
|
Only intended for use when getting components: look for a parent tag named
fig-group and, if found, use the first fig tag inside it as the acting parent tag
|
def restore_site_properties(self, site_property="ff_map", filename=None):
if not self.control_params["filetype"] == "pdb":
raise ValueError()
filename = filename or self.control_params["output"]
bma = BabelMolAdaptor.from_file(filename, "pdb")
pbm = pb.Molecule(bma._obmol)
assert len(pbm.residues) == sum([x["number"]
for x in self.param_list])
packed_mol = self.convert_obatoms_to_molecule(
pbm.residues[0].atoms, residue_name=pbm.residues[0].name,
site_property=site_property)
for resid in pbm.residues[1:]:
mol = self.convert_obatoms_to_molecule(
resid.atoms, residue_name=resid.name,
site_property=site_property)
for site in mol:
packed_mol.append(site.species, site.coords,
properties=site.properties)
return packed_mol
|
Restore the site properties for the final packed molecule.
Args:
site_property (str):
filename (str): path to the final packed molecule.
Returns:
Molecule
|
def predict_files(self, files):
imgs = [0]*len(files)
for i, file in enumerate(files):
img = cv2.imread(file)
if img is None:
print('failed to open: {}, continuing...'.format(file))
continue
img = cv2.resize(img.astype('float64'), (224,224))
img = preprocess_input(img)
imgs[i] = img
return self.model.predict(np.array(imgs))
|
Reads files off disk, resizes them and then predicts them. ``files`` should
be a list or iterable of file paths to images; they are loaded with OpenCV,
resized, and predicted.
|
def delete(self, invoice_id, **kwargs):
url = "{}/{}".format(self.base_url, invoice_id)
return self.delete_url(url, {}, **kwargs)
|
Delete an invoice
You can delete an invoice which is in the draft state.
Args:
invoice_id : Id for delete the invoice
Returns:
The response is always an empty array like this - []
|
def add_resource(self, resource_id, attributes, parents=[],
issuer='default'):
assert isinstance(attributes, (dict)), "attributes expected to be dict"
attrs = []
for key in attributes.keys():
attrs.append({
'issuer': issuer,
'name': key,
'value': attributes[key]
})
body = {
"resourceIdentifier": resource_id,
"parents": parents,
"attributes": attrs,
}
return self._put_resource(resource_id, body)
|
Will add the given resource with a given identifier and attribute
dictionary.
Example:
add_resource('/asset/12', {'id': 12, 'manufacturer': 'GE'})
|
def register(scraper):
global scrapers
language = scraper('').language
if not language:
raise Exception('No language specified for your scraper.')
if scrapers.has_key(language):
scrapers[language].append(scraper)
else:
scrapers[language] = [scraper]
|
Registers a scraper to make it available for the generic scraping interface.
|
def _red_listing_validation(key, listing):
if listing:
try:
jsonschema.validate(listing, cwl_job_listing_schema)
except ValidationError as e:
raise RedValidationError('REDFILE listing of input "{}" does not comply with jsonschema: {}'
.format(key, e.context))
|
Raises a RedValidationError if the given listing does not comply with cwl_job_listing_schema.
If listing is None or an empty list, no exception is thrown.
:param key: The input key to build an error message if needed.
:param listing: The listing to validate
:raise RedValidationError: If the given listing does not comply with cwl_job_listing_schema
|
def get_ilwdchar_class(tbl_name, col_name, namespace = globals()):
key = (str(tbl_name), str(col_name))
cls_name = "%s_%s_class" % key
assert cls_name != "get_ilwdchar_class"
try:
return namespace[cls_name]
except KeyError:
pass
class new_class(_ilwd.ilwdchar):
__slots__ = ()
table_name, column_name = key
index_offset = len("%s:%s:" % key)
new_class.__name__ = cls_name
namespace[cls_name] = new_class
copy_reg.pickle(new_class, lambda x: (ilwdchar, (unicode(x),)))
return new_class
|
Searches this module's namespace for a subclass of _ilwd.ilwdchar
whose table_name and column_name attributes match those provided.
If a matching subclass is found it is returned; otherwise a new
class is defined, added to this module's namespace, and returned.
Example:
>>> process_id = get_ilwdchar_class("process", "process_id")
>>> x = process_id(10)
>>> str(type(x))
"<class 'pycbc_glue.ligolw.ilwd.process_process_id_class'>"
>>> str(x)
'process:process_id:10'
Retrieving and storing the class provides a convenient mechanism
for quickly constructing new ID objects.
Example:
>>> for i in range(10):
... print str(process_id(i))
...
process:process_id:0
process:process_id:1
process:process_id:2
process:process_id:3
process:process_id:4
process:process_id:5
process:process_id:6
process:process_id:7
process:process_id:8
process:process_id:9
|
def _infer_decorator_callchain(node):
if not isinstance(node, FunctionDef):
return None
if not node.parent:
return None
try:
result = next(node.infer_call_result(node.parent))
except exceptions.InferenceError:
return None
if isinstance(result, bases.Instance):
result = result._proxied
if isinstance(result, ClassDef):
if result.is_subtype_of("%s.classmethod" % BUILTINS):
return "classmethod"
if result.is_subtype_of("%s.staticmethod" % BUILTINS):
return "staticmethod"
return None
|
Detect decorator call chaining and see if the end result is a
static or a classmethod.
|