| code | docstring |
|---|---|
def remove_empty_dirs(path):
for root, dirs, files in os.walk(path):
for d in dirs:
dir_path = os.path.join(root, d)
if not os.listdir(dir_path):
os.rmdir(dir_path) | removes empty dirs under a given path |
def _at_block_start(tc, line):
if tc.atBlockStart():
return True
column = tc.columnNumber()
indentation = len(line) - len(line.lstrip())
return column <= indentation | Improve QTextCursor.atBlockStart to ignore spaces |
def interpretAsOpenMath(x):
if hasattr(x, "_ishelper") and x._ishelper:
return x._toOM()
elif isinstance(x, om.OMAny):
return x
elif isinstance(x, six.integer_types):
return om.OMInteger(x)
elif isinstance(x, float):
return om.OMFloat(x)
elif isinstance(x, six.string_types):
return om.OMString(x)
elif isinstance(x, WrappedHelper):
return x.toOM()
elif inspect.isfunction(x):
paramMap = inspect.signature(x).parameters
params = [v for k, v in six.iteritems(paramMap)]
posArgKinds = [inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD]
if not all([p.kind in posArgKinds for p in params]):
raise CannotInterpretAsOpenMath("no sequence arguments allowed")
paramsOM = [om.OMVariable(name=p.name) for p in params]
bodyOM = interpretAsOpenMath(x(*paramsOM))
return OMBinding(om.OMSymbol(name="lambda", cd="python", cdbase="http://python.org"), paramsOM, bodyOM)
else:
raise CannotInterpretAsOpenMath("unknown kind of object: " + str(x)) | tries to convert a Python object into an OpenMath object
this is not a replacement for using a Converter for exporting Python objects
instead, it is used for conveniently building OM objects in a DSL embedded in Python
in particular, it converts Python functions into OMBinding objects using lambdaOM as the binder |
def adjust_all_to_360(dictionary):
for key in dictionary:
dictionary[key] = adjust_to_360(dictionary[key], key)
return dictionary | Take a dictionary and check each key/value pair.
If this key is of type: declination/longitude/azimuth/direction,
adjust it to be within 0-360 as required by the MagIC data model |
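The helper `adjust_to_360` is defined elsewhere; a minimal sketch of what such a helper plausibly looks like (the key fragments and wrap-around rule here are assumptions, not the MagIC implementation):

def adjust_to_360(value, key):
    # Hypothetical sketch: wrap angle-like values into [0, 360).
    angle_key_fragments = ('dec', 'lon', 'azimuth', 'dir')  # assumed
    if any(fragment in key for fragment in angle_key_fragments):
        try:
            return float(value) % 360
        except (TypeError, ValueError):
            return value
    return value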
def show_one(request, post_process_fun, object_class, id, template=None):
obj = get_object_or_404(object_class, pk=id)
json = post_process_fun(request, obj)
return render_json(request, json, template=template, help_text=show_one.__doc__) | Return object of the given type with the specified identifier.
GET parameters:
user:
identifier of the current user
stats:
turn on the enrichment of the objects by some statistics
html:
turn on the HTML version of the API |
def features(self):
mycols = []
for col in dfn.feature_names:
if col in self:
mycols.append(col)
mycols.sort()
return mycols | All available features |
def text2labels(text, sents):
    # Body reconstructed from the docstring below; the original implementation
    # was lost in extraction.
    labels = ['1'] * len(text)
    pos = 0
    for sent in sents:
        start = text.find(sent, pos)
        if start == -1:
            continue
        for i in range(start, start + len(sent)):
            labels[i] = '0'
        pos = start + len(sent)
    return labels | Marks all characters in given `text` that don't exist within any
element of `sents` with `1`; other characters (within sentences)
will be marked with `0`.
Used in training process
>>> text = 'привет. меня зовут аня.'
>>> sents = ['привет.', 'меня зовут аня.']
>>> labels = text2labels(text, sents)
>>> ' '.join(text)
'п р и в е т . м е н я з о в у т а н я .'
>>> ' '.join(labels)
'0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0' |
def _recurse(self, inputs, output):
if inputs:
my_input = inputs[0]
name = my_input.name
if my_input.state:
my_options = my_input.options(self.state)
else:
my_options = my_input.options
for option in my_options:
my_output = list(output)
my_output.append({name: option})
self._recurse(inputs[1:], my_output)
else:
try:
valid, result = self._function(output)
except ValueError:
raise RuntimeError("function must return 2 values")
print(output, valid, result) | internal recursion routine called by the run method that generates
all input combinations |
def get_source(self, objtxt):
from spyder_kernels.utils.dochelpers import getsource
obj, valid = self._eval(objtxt)
if valid:
return getsource(obj) | Get object source |
def description(self):
if self._meta and self._meta.get_payload():
return utils.TrueCallableProxy(self._description)
return utils.CallableProxy(None) | Get the textual description of the category |
def gradient(self, mu, dist):
return dist.levels/(mu*(dist.levels - mu)) | derivative of the link function wrt mu
Parameters
----------
mu : array-like of length n
dist : Distribution instance
Returns
-------
grad : np.array of length n |
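A quick numerical check of this gradient (hedged: it assumes the link is the logit g(mu) = log(mu / (levels - mu)), which this row does not state):

import numpy as np

levels, mu, eps = 1.0, 0.3, 1e-6
g = lambda m: np.log(m / (levels - m))
numeric = (g(mu + eps) - g(mu - eps)) / (2 * eps)
analytic = levels / (mu * (levels - mu))
assert np.isclose(numeric, analytic)  # both ~4.7619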
def read(self, queue):
conn = serial_asyncio.open_serial_connection(**self.serial_settings)
reader, _ = yield from conn
while True:
data = yield from reader.readline()
self.telegram_buffer.append(data.decode())
for telegram in self.telegram_buffer.get_all():
try:
queue.put_nowait(
self.telegram_parser.parse(telegram)
)
except ParseError as e:
logger.warning('Failed to parse telegram: %s', e)  # message reconstructed; literal lost in extraction | Read complete DSMR telegrams from the serial interface and parse them
into CosemObject's and MbusObject's.
Instead of being a generator, values are pushed to provided queue for
asynchronous processing.
:rtype: None |
def handle(self, *args, **options):
def get_user(username):
try:
return User.objects.get(username=username)
except ObjectDoesNotExist as e:
raise CommandError("This user doesnruseruserusernamefields_permissionsname'])
except Exception as e:
raise CommandError(e) | dump fields permissions for a user |
def PrintAllTables(self):
goodlogging.Log.Info("DB", "Database contents:\n")
for table in self._tableDict.keys():
self._PrintDatabaseTable(table) | Prints contents of every table. |
def _get_baseline_from_tag(config, tag):
last_snapshot = None
    # Module/key names below were lost in extraction; 'snapper.list_snapshots',
    # 'userdata' and 'timestamp' are assumptions following salt's snapper module.
    for snapshot in __salt__['snapper.list_snapshots'](config):
        if tag == snapshot['userdata'].get("baseline_tag"):
            if not last_snapshot or last_snapshot['timestamp'] < snapshot['timestamp']:
last_snapshot = snapshot
return last_snapshot | Returns the last created baseline snapshot marked with `tag` |
def assign_power_curve(self, wake_losses_model='power_efficiency_curve',
                       smoothing=False, block_width=0.5,
                       standard_deviation_method='turbulence_intensity',
                       smoothing_order='wind_farm_power_curves',
                       turbulence_intensity=None, **kwargs):
for farm in self.wind_farms:
farm.mean_hub_height()
farm.assign_power_curve(
wake_losses_model=wake_losses_model,
smoothing=smoothing, block_width=block_width,
standard_deviation_method=standard_deviation_method,
smoothing_order=smoothing_order,
turbulence_intensity=turbulence_intensity, **kwargs)
    # Column names were lost in extraction; 'wind_speed' and 'value' are
    # assumed, matching the power-curve DataFrame convention.
    df = pd.concat([farm.power_curve.set_index(['wind_speed']).rename(
        columns={'value': farm.name}) for
        farm in self.wind_farms], axis=1)
    cluster_power_curve = pd.DataFrame(
        df.interpolate(method='index').sum(axis=1))
    cluster_power_curve.columns = ['value']
    cluster_power_curve.reset_index('wind_speed', inplace=True)
self.power_curve = cluster_power_curve
return self | r"""
Calculates the power curve of a wind turbine cluster.
The turbine cluster power curve is calculated by aggregating the wind
farm power curves of wind farms within the turbine cluster. Depending
on the parameters the power curves are smoothed (before or after the
aggregation) and/or a wind farm efficiency is applied before the
aggregation.
After the calculations the power curve is assigned to the attribute
`power_curve`.
Parameters
----------
wake_losses_model : string
Defines the method for taking wake losses within the farm into
consideration. Options: 'power_efficiency_curve',
'constant_efficiency' or None. Default: 'power_efficiency_curve'.
smoothing : boolean
If True the power curves will be smoothed before or after the
aggregation of power curves depending on `smoothing_order`.
Default: False.
block_width : float
Width between the wind speeds in the sum of the equation in
:py:func:`~.power_curves.smooth_power_curve`. Default: 0.5.
standard_deviation_method : string
Method for calculating the standard deviation for the Gauss
distribution. Options: 'turbulence_intensity',
'Staffell_Pfenninger'. Default: 'turbulence_intensity'.
smoothing_order : string
Defines when the smoothing takes place if `smoothing` is True.
Options: 'turbine_power_curves' (to the single turbine power
curves), 'wind_farm_power_curves'.
Default: 'wind_farm_power_curves'.
turbulence_intensity : float
Turbulence intensity at hub height of the wind farm or
wind turbine cluster for power curve smoothing with
'turbulence_intensity' method. Can be calculated from
`roughness_length` instead. Default: None.
Other Parameters
----------------
roughness_length : float, optional.
Roughness length. If `standard_deviation_method` is
'turbulence_intensity' and `turbulence_intensity` is not given
the turbulence intensity is calculated via the roughness length.
Returns
-------
self |
def get_states(self):
outcome_tag = {}
cpds = self.model.get_cpds()
for cpd in cpds:
var = cpd.variable
outcome_tag[var] = []
if cpd.state_names is None or cpd.state_names.get(var) is None:
states = range(cpd.get_cardinality([var])[var])
else:
states = cpd.state_names[var]
for state in states:
state_tag = etree.SubElement(self.variables[var], "OUTCOME")
state_tag.text = self._make_valid_state_name(state)
outcome_tag[var].append(state_tag)
return outcome_tag | Add outcome to variables of XMLBIF
Return
------
dict: dict of type {variable: outcome tags}
Examples
--------
>>> writer = XMLBIFWriter(model)
>>> writer.get_states()
{'dog-out': [<Element OUTCOME at 0x7ffbabfcdec8>, <Element OUTCOME at 0x7ffbabfcdf08>],
'family-out': [<Element OUTCOME at 0x7ffbabfd4108>, <Element OUTCOME at 0x7ffbabfd4148>],
'bowel-problem': [<Element OUTCOME at 0x7ffbabfd4088>, <Element OUTCOME at 0x7ffbabfd40c8>],
'hear-bark': [<Element OUTCOME at 0x7ffbabfcdf48>, <Element OUTCOME at 0x7ffbabfcdf88>],
'light-on': [<Element OUTCOME at 0x7ffbabfcdfc8>, <Element OUTCOME at 0x7ffbabfd4048>]} |
def user(username, password, all):
    # String literals below were lost in extraction; the config keys and
    # messages are reconstructed assumptions.
    if current_app.config['AUTH_PROVIDER'] != 'basic':
        raise click.UsageError('Not required for {} users'.format(current_app.config['AUTH_PROVIDER']))
    if username and username not in current_app.config['ADMIN_USERS']:
        raise click.UsageError('User {} not an admin'.format(username))
    if not username and not all:
        raise click.UsageError('Missing option "--username".')
    def create_user(admin):
        email = admin if '@' in admin else None
        user = User(
            name='Admin user',
            login=admin,
            password=generate_password_hash(password),
            roles=['admin'],
            text='Created by admin script',
            email=email,
            email_verified=bool(email)
        )
        try:
            db.get_db()
            user = user.create()
        except Exception as e:
            click.echo('ERROR: {}'.format(e))
        else:
            click.echo('{} {}'.format(user.id, user.name))
    if all:
        for admin in current_app.config['ADMIN_USERS']:
            create_user(admin)
    else:
        create_user(username) | Create admin users (BasicAuth only). |
def _GetNormalizedTimestamp(self):
if self._normalized_timestamp is None:
if self._number_of_seconds is not None:
self._normalized_timestamp = (
decimal.Decimal(self._microseconds) /
definitions.MICROSECONDS_PER_SECOND)
self._normalized_timestamp += decimal.Decimal(self._number_of_seconds)
return self._normalized_timestamp | Retrieves the normalized timestamp.
Returns:
decimal.Decimal: normalized timestamp, which contains the number of
seconds since January 1, 1970 00:00:00 and a fraction of second used
for increased precision, or None if the normalized timestamp cannot be
determined. |
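Concretely, with `_number_of_seconds = 10` and `_microseconds = 250000` the property computes (the same arithmetic as above, shown standalone):

import decimal

MICROSECONDS_PER_SECOND = 1000000  # mirrors the definitions module
normalized = (decimal.Decimal(250000) / MICROSECONDS_PER_SECOND
              + decimal.Decimal(10))
assert normalized == decimal.Decimal('10.25')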
def set_cache_buster(redis, path, hash):
redis.hset("cache-buster:{}:v3".format(oz.settings["s3_bucket"]), path, hash) | Sets the cache buster value for a given file path |
def get_narrow_url(self, instance):
text = instance[0]
request = self.context["request"]
query_params = request.GET.copy()
page_query_param = self.get_paginate_by_param()
if page_query_param and page_query_param in query_params:
del query_params[page_query_param]
selected_facets = set(query_params.pop(self.root.facet_query_params_text, []))
selected_facets.add("%(field)s_exact:%(text)s" % {"field": self.parent_field, "text": text})
query_params.setlist(self.root.facet_query_params_text, sorted(selected_facets))
path = "%(path)s?%(query)s" % {"path": request.path_info, "query": query_params.urlencode()}
url = request.build_absolute_uri(path)
return serializers.Hyperlink(url, "narrow-url") | Return a link suitable for narrowing on the current item. |
def matplotlib_scraper(block, block_vars, gallery_conf, **kwargs):
matplotlib, plt = _import_matplotlib()
    # String keys below were lost in extraction; reconstructed following
    # sphinx-gallery's matplotlib scraper.
    image_path_iterator = block_vars['image_path_iterator']
    image_paths = list()
    for fig_num, image_path in zip(plt.get_fignums(), image_path_iterator):
        if 'format' in kwargs:
            image_path = '%s.%s' % (os.path.splitext(image_path)[0],
                                    kwargs['format'])
        fig = plt.figure(fig_num)
        to_rgba = matplotlib.colors.colorConverter.to_rgba
        for attr in ['facecolor', 'edgecolor']:
            fig_attr = getattr(fig, 'get_' + attr)()
            default_attr = matplotlib.rcParams['figure.' + attr]
            if to_rgba(fig_attr) != to_rgba(default_attr) and \
                    attr not in kwargs:
                kwargs[attr] = fig_attr
        fig.savefig(image_path, **kwargs)
        image_paths.append(image_path)
    plt.close('all')
    return figure_rst(image_paths, gallery_conf['src_dir']) | Scrape Matplotlib images.
Parameters
----------
block : tuple
A tuple containing the (label, content, line_number) of the block.
block_vars : dict
Dict of block variables.
gallery_conf : dict
Contains the configuration of Sphinx-Gallery
**kwargs : dict
Additional keyword arguments to pass to
:meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``.
The ``format`` kwarg in particular is used to set the file extension
of the output file (currently only 'png' and 'svg' are supported).
Returns
-------
rst : str
The ReSTructuredText that will be rendered to HTML containing
the images. This is often produced by :func:`figure_rst`. |
def dl_full_file(url, save_file_name):
response = requests.get(url)
with open(save_file_name, 'wb') as writefile:
writefile.write(response.content)
return | Download a file. No checks are performed.
Parameters
----------
url : str
The url of the file to download
save_file_name : str
The name to save the file as |
def _new_conn(self):
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = connection.create_connection(
(self._dns_host, self.port), self.timeout, **extra_kw)
except SocketTimeout as e:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
except SocketError as e:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e)
return conn | Establish a socket connection and set nodelay settings on it.
:return: New socket connection. |
def get_user_autocompletions(ctx, args, incomplete, cmd_param):
results = []
if isinstance(cmd_param.type, Choice):
results = [(c, None)
for c in cmd_param.type.choices if str(c).startswith(incomplete)]
elif cmd_param.autocompletion is not None:
dynamic_completions = cmd_param.autocompletion(ctx=ctx,
args=args,
incomplete=incomplete)
results = [c if isinstance(c, tuple) else (c, None)
for c in dynamic_completions]
return results | :param ctx: context associated with the parsed command
:param args: full list of args
:param incomplete: the incomplete text to autocomplete
:param cmd_param: command definition
:return: all the possible user-specified completions for the param |
def assignEditor(self):
plugin = self.currentPlugin()
column = self.currentColumn()
value = self.currentValue()
if not plugin:
self.setEditor(None)
return
self.setUpdatesEnabled(False)
self.blockSignals(True)
op = self.uiOperatorDDL.currentText()
self.setEditor(plugin.createEditor(self, column, op, value))
self.setUpdatesEnabled(True)
self.blockSignals(False) | Assigns the editor for this entry based on the plugin. |
def extract_names(sender):
sender = to_unicode(sender, precise=True)
sender = "".join([char if char.isalpha() else for char in sender])
sender = [word for word in sender.split() if len(word) > 1 and
not word in BAD_SENDER_NAMES]
names = list(set(sender))
return names | Tries to extract sender's names from `From:` header.
It could extract not only the actual names but e.g.
the name of the company, parts of email, etc.
>>> extract_names('Sergey N. Obukhov <serobnic@mail.ru>')
['Sergey', 'Obukhov', 'serobnic']
>>> extract_names('')
[] |
def filter(self, query, output_fields=None):
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
if not output_fields:
output_fields = [self.def_field]
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
return [] | Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database. |
def reset_full(self, force=False, _meta=None):
strwarn = None
for df in self.__basic__:
if (getattr(self, df)) is None:
if force:
strwarn = ("Reset system warning - Recalculation after "
"reset not possible "
"because {} missing".format(df))
warnings.warn(strwarn, ResetWarning)
else:
raise ResetError("To few tables to recalculate the "
"system after reset ({} missing) "
"- reset can be forced by passing "
")".format(df))
if _meta:
_meta._add_modify("Reset system to Z and Y")
if strwarn:
_meta._add_modify(strwarn)
[setattr(self, key, None)
for key in self.get_DataFrame(
data=False,
with_unit=False,
with_population=False)
if key not in self.__basic__]
return self | Remove all accounts which can be recalculated based on Z, Y, F, FY
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
_meta: MRIOMetaData, optional
Metadata handler for logging, optional. Internal |
def is_unprocessed_local_replica(pid):
return d1_gmn.app.models.LocalReplica.objects.filter(
pid__did=pid, info__status__status='queued'
).exists() | Is local replica with status "queued". |
def assumed_state(self):
return (not self._controller.car_online[self.id()] and
(self._controller._last_update_time[self.id()] -
self._controller._last_wake_up_time[self.id()] >
self._controller.update_interval)) | Return whether the data is from an online vehicle. |
def _std_err(self):
return np.sqrt(np.sum(np.square(self._resids), axis=1) / self._df_err) | Standard error of the estimate (SEE). A scalar.
For standard errors of parameters, see _se_all, se_alpha, and se_beta. |
def get_charge(chebi_id):
if len(__CHARGES) == 0:
__parse_chemical_data()
return __CHARGES[chebi_id] if chebi_id in __CHARGES else float('NaN') | Returns charge |
def run_simulation(wdir, arp=True, **kwargs):
wdir = pathlib.Path(wdir)
cmd = "{pathbhfield} {mpdigit} {wl:f} {r_core:f} {r_coat:f} " \
+ "{n_grid_x:d} {xspan_min:f} {xspan_max:f} " \
+ "{n_grid_y:d} {yspan_min:f} {yspan_max:f} " \
+ "{n_grid_z:d} {zspan_min:f} {zspan_max:f} " \
+ "{case} {Kreibig:f} {n_med:f} {n_core:f} {k_core:f} " \
+ "{n_coat:f} {k_coat:f}"
old_dir = pathlib.Path.cwd()
os.chdir(wdir)
kwargs["pathbhfield"] = get_binary(arp=arp)
if arp:
kwargs["mpdigit"] = 16
else:
kwargs["mpdigit"] = ""
sp.check_output(cmd.format(**kwargs), shell=True)
os.chdir(old_dir)
check_simulation(wdir) | Example
-------
100-nm silica sphere with 10-nm thick Ag coating,
embedded in water; arprec 20 digits; illuminated with YAG (1064nm);
scan xz plane (21x21, +-200nm)
bhfield-arp-db.exe mpdigit wl r_core r_coat
n_grid_x xspan_min xspan_max
n_grid_y yspan_min yspan_max
n_grid_z zspan_min zspan_max
case Kreibig
[n_med n_core k_core n_coat k_coat (case=other)]
bhfield-arp-db.exe 20 1.064 0.050 0.060
21 -0.2 0.2
1 0 0
21 -0.2 0.2
other 0
1.3205 1.53413 0 0.565838 7.23262
Explanation of parameters
-------------------------
mpdigit:
arprec's number of precision digits;
increase it to overcome round-off errors
wl[um]:
light wavelength in vacuum
r_core[um], r_coat[um]:
core & coat radii
n_grid_x xspan_min[um] xspan_max[um]:
number & span of grid points for field computation; x span
n_grid_y yspan_min[um] yspan_max[um]:
y span
n_grid_z zspan_min[um] zspan_max[um]:
z span
Kreibig:
Kreibig mean free path correction for Ag (0.0 - 1.0)
case:
nanoshell/liposome/HPC/barber/other
n_med n_core k_core n_coat k_coat (case=other only):
refractive indices of medium (real), core & coat (n, k)
If `case=other`, complex refractive indices
(n, k at the particular wavelength) must be specified.
Otherwise (case = nanoshell etc) the medium/core/coat
materials are predefined and the n,k values
are taken from the data file (Ag_palik.nk etc).
The latter reflects our own interest and is intended
for use in our lab, so general users may not find it useful :-) |
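For reference, the command-line example in the docstring maps onto a Python call roughly like this (the working-directory name is hypothetical; mpdigit is set internally when arp=True):

run_simulation(
    "ag-coated-silica-sim",       # hypothetical directory
    arp=True,
    wl=1.064, r_core=0.050, r_coat=0.060,
    n_grid_x=21, xspan_min=-0.2, xspan_max=0.2,
    n_grid_y=1, yspan_min=0, yspan_max=0,
    n_grid_z=21, zspan_min=-0.2, zspan_max=0.2,
    case="other", Kreibig=0,
    n_med=1.3205, n_core=1.53413, k_core=0,
    n_coat=0.565838, k_coat=7.23262)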
def load(self: T, **kwargs) -> T:
lazy_data = {k: v._data for k, v in self.variables.items()
if isinstance(v._data, dask_array_type)}
if lazy_data:
import dask.array as da
evaluated_data = da.compute(*lazy_data.values(), **kwargs)
for k, data in zip(lazy_data, evaluated_data):
self.variables[k].data = data
for k, v in self.variables.items():
if k not in lazy_data:
v.load()
return self | Manually trigger loading of this dataset's data from disk or a
remote source into memory and return this dataset.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute |
def use_kwargs(args, locations=None, inherit=None, apply=None, **kwargs):
    # Dict keys and the annotation name below were lost in extraction;
    # reconstructed following flask-apispec's annotate() convention.
    kwargs.update({'locations': locations})
    def wrapper(func):
        options = {
            'args': args,
            'kwargs': kwargs,
        }
        annotate(func, 'args', [options], inherit=inherit, apply=apply)
return activate(func)
return wrapper | Inject keyword arguments from the specified webargs arguments into the
decorated view function.
Usage:
.. code-block:: python
from marshmallow import fields
@use_kwargs({'name': fields.Str(), 'category': fields.Str()})
def get_pets(**kwargs):
return Pet.query.filter_by(**kwargs).all()
:param args: Mapping of argument names to :class:`Field <marshmallow.fields.Field>`
objects, :class:`Schema <marshmallow.Schema>`, or a callable which accepts a
request and returns a :class:`Schema <marshmallow.Schema>`
:param locations: Default request locations to parse
:param inherit: Inherit args from parent classes
:param apply: Parse request with specified args |
def _register(self, session, url):
dist = self._poetry.file.parent / "dist"
file = dist / "{}-{}.tar.gz".format(
self._package.name, normalize_version(self._package.version.text)
)
if not file.exists():
raise RuntimeError('"{0}" does not exist.'.format(file.name))
data = self.post_data(file)
data.update({":action": "submit", "protocol_version": "1"})
data_to_send = self._prepare_data(data)
encoder = MultipartEncoder(data_to_send)
resp = session.post(
url,
data=encoder,
allow_redirects=False,
headers={"Content-Type": encoder.content_type},
)
resp.raise_for_status()
return resp | Register a package to a repository. |
def set_description(self):
    # Key names and the format string were lost in extraction; 'description',
    # 'vendor' and 'model' are assumptions.
    if self.device_info['description'] == '':
        self.node['description'] = '%s %s' % (self.device_info['vendor'],
                                              self.device_info['model'])
    else:
        self.node['description'] = self.device_info['description'] | Set the node description |
def json_request(endpoint, verb='GET', session_options=None, **options):
req = functools.partial(_request, endpoint, verb, session_options,
json=True, **options)
return _run_in_fresh_loop(req) | Like :func:`molotov.request` but extracts json from the response. |
def trace_to_next_plane(self):
return list(map(lambda positions, deflections: np.subtract(positions, deflections),
self.positions, self.deflections)) | Trace the positions to the next plane. |
def check_if_username_exists(self, username):
    # String literals below were lost in extraction; the messages and the
    # 'handle' key are reconstructed assumptions following b2handle.
    LOGGER.debug('check_if_username_exists...')
    _, handle = b2handle.utilhandle.remove_index_from_handle(username)
    resp = self.send_handle_get_request(handle)
    resp_content = decoded_response(resp)
    if b2handle.hsresponses.does_handle_exist(resp):
        handlerecord_json = json.loads(resp_content)
        if not handlerecord_json['handle'] == handle:
            raise GenericHandleError(
                operation='checking if username exists',
                handle=handle,
                response=resp,
                msg='The check returned the record of another handle than asked for.'
            )
        return True
    elif b2handle.hsresponses.handle_not_found(resp):
        msg = 'The username handle does not exist'
        raise HandleNotFoundException(handle=handle, msg=msg, response=resp)
    else:
        op = 'checking if username exists'
        msg = 'Checking if username exists went wrong'
        raise GenericHandleError(operation=op, handle=handle, response=resp, msg=msg) | Check if the username handle exists.
:param username: The username, in the form index:prefix/suffix
:raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException`
:raises: :exc:`~b2handle.handleexceptions.GenericHandleError`
:return: True. If it does not exist, an exception is raised.
*Note:* Only the existence of the handle is verified. The existence or
validity of the index is not checked, because entries containing
a key are hidden anyway. |
def is_correct(self):
state = True
cls = self.__class__
if hasattr(self, 'host_name'):
for char in cls.illegal_object_name_chars:
if char in self.host_name:
self.add_error("[%s::%s] host_name contains an illegal character: %s"
% (self.my_type, self.get_name(), char))
state = False
if self.notifications_enabled and not self.contacts:
self.add_warning("[%s::%s] notifications are enabled but no contacts nor "
"contact_groups property is defined for this host"
% (self.my_type, self.get_name()))
return super(Host, self).is_correct() and state | Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool |
def AddMethod(self, function, name=None):
method = MethodWrapper(self, function, name)
self.added_methods.append(method) | Adds the specified function as a method of this construction
environment with the specified name. If the name is omitted,
the default name is the name of the function itself. |
def fd(self):
if isinstance(self._rlist.path_or_fd(), string_types()):
raise ValueError("File descriptor queried although mapping was generated from path")
return self._rlist.path_or_fd() | :return: file descriptor used to create the underlying mapping.
**Note:** it is not required to be valid anymore
:raise ValueError: if the mapping was not created by a file descriptor |
def get_version():
major_value = ctypes.c_int(0)
major = ctypes.pointer(major_value)
minor_value = ctypes.c_int(0)
minor = ctypes.pointer(minor_value)
rev_value = ctypes.c_int(0)
rev = ctypes.pointer(rev_value)
_glfw.glfwGetVersion(major, minor, rev)
return major_value.value, minor_value.value, rev_value.value | Retrieves the version of the GLFW library.
Wrapper for:
void glfwGetVersion(int* major, int* minor, int* rev); |
def _explore_storage(self):
path = ''
dirs = [path]
while dirs:
path = dirs.pop()
subdirs, files = self.media_storage.listdir(path)
for media_filename in files:
yield os.path.join(path, media_filename)
dirs.extend([os.path.join(path, subdir) for subdir in subdirs]) | Generator of all files contained in media storage. |
def status(self):
myNow = timezone.localtime(timezone=self.tz)
daysDelta = dt.timedelta(days=self.num_days - 1)
todayStart = getAwareDatetime(myNow.date(), dt.time.min, self.tz)
eventStart, event = self.__afterOrPostponedTo(todayStart - daysDelta)
if eventStart is None:
return "finished"
eventFinish = getAwareDatetime(eventStart.date() + daysDelta,
event.time_to, self.tz)
if event.time_from is None:
eventStart += _1day
if eventStart < myNow < eventFinish:
return "started"
if (self.repeat.until and eventFinish < myNow and
self.__afterOrPostponedTo(myNow)[0] is None):
return "finished" | The current status of the event (started, finished or pending). |
def setbit(self, key, offset, value):
if not isinstance(offset, int):
raise TypeError("offset argument must be int")
if offset < 0:
raise ValueError("offset must be greater equal 0")
if value not in (0, 1):
raise ValueError("value argument must be either 1 or 0")
return self.execute(b'SETBIT', key, offset, value) | Sets or clears the bit at offset in the string value stored at key.
:raises TypeError: if offset is not int
:raises ValueError: if offset is less than 0 or value is not 0 or 1 |
def parse_map_Ka(self):
Kd = os.path.join(self.dir, " ".join(self.values[1:]))
self.this_material.set_texture_ambient(Kd) | Ambient map |
def get_parent_vault_ids(self, vault_id):
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalog_ids(catalog_id=vault_id)
return self._hierarchy_session.get_parents(id_=vault_id) | Gets the parent ``Ids`` of the given vault.
arg: vault_id (osid.id.Id): a vault ``Id``
return: (osid.id.IdList) - the parent ``Ids`` of the vault
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def load(self, filename="temp.pkl"):
    inputFile = open(filename, 'rb')
    self.tm = cPickle.load(inputFile) | Load the TM from the filename specified above |
def list_connection_channels(self, name):
return self._api_get('connections/{0}/channels'.format(  # path assumed; literal lost in extraction
urllib.parse.quote_plus(name)
)) | List of all channels for a given connection.
:param name: The connection name
:type name: str |
def _get_object(data, position, obj_end, opts, dummy):
obj_size = _UNPACK_INT(data[position:position + 4])[0]
end = position + obj_size - 1
if data[end:position + obj_size] != b"\x00":
raise InvalidBSON("bad eoo")
if end >= obj_end:
raise InvalidBSON("invalid object length")
if _raw_document_class(opts.document_class):
return (opts.document_class(data[position:end + 1], opts),
position + obj_size)
obj = _elements_to_dict(data, position + 4, end, opts)
position += obj_size
if "$ref" in obj:
return (DBRef(obj.pop("$ref"), obj.pop("$id", None),
obj.pop("$db", None), obj), position)
return obj, position | Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef. |
def build(env, ciprcfg, console):
    # String literals throughout this function were lost in extraction; the
    # environment variable names, file names, and messages below are
    # reconstructed assumptions.
    os.putenv('CIPR_PACKAGE_DIR', env.package_dir)
    os.putenv('CIPR_PROJECT_DIR', env.project_directory)
    build_settings = path.join(env.project_directory, 'build.settings')
    with open(build_settings, 'r') as f:
        data = f.read()
    m = _build_re.search(data)
    if m:
        ver = int(m.group(2))
        data = data.replace(m.group(0), 'build = %d' % (ver + 1))  # replacement text assumed
    with open(build_settings, 'w') as f:
        f.write(data)
    if path.exists(env.build_dir):
        shutil.rmtree(env.build_dir)
    os.makedirs(env.build_dir)
    if path.exists(env.dist_dir):
        shutil.rmtree(env.dist_dir)
    os.makedirs(env.dist_dir)
    console.normal('Building in %s' % env.build_dir)
    console.normal('Syncing project files...')
    for src, dst in util.sync_dir_to(env.project_directory, env.build_dir,
                                     exclude=['.git', '.cipr', 'build', 'dist', '.DS_Store']):
        console.quiet('%s -> %s' % (src, dst))
        if src.endswith('.lua'):
            _fix_lua_module_name(src, dst)
    console.normal('Syncing cipr packages...')
    for package in ciprcfg.packages.keys():
        for src, dst in util.sync_lua_dir_to(path.join(env.package_dir, package),
                                             env.build_dir, exclude=[], include=['*.lua']):
            console.quiet('%s -> %s' % (src, dst))
            if src.endswith('.lua'):
                _fix_lua_module_name(src, dst)
    src = path.join(env.code_dir, 'cipr.lua')  # file name assumed
    dst = path.join(env.build_dir, 'cipr.lua')
    shutil.copy(src, dst)
    cmd = AND(clom.cd(env.build_dir), clom[CORONA_SIMULATOR_PATH](env.build_dir))
    console.normal('Distribution will be in %s' % env.dist_dir)
    try:
        cmd.shell.execute()
    except KeyboardInterrupt:
        pass | Build the current project for distribution |
def amchar_to_int(amchar, hij=False):
    if hij:
        amchar_map = _amchar_map_hij
    else:
        amchar_map = _amchar_map_hik
    amchar_lower = amchar.lower()
    amint = []
    for c in amchar_lower:
        if c not in amchar_map:
            raise KeyError('Angular momentum character {} is not valid'.format(c))
        amint.append(amchar_map.index(c))
    return amint | Convert an angular momentum character to an integer
The return value is a list of integers (to handle sp, spd, ... orbitals)
For example, converts 'p' to [1] and 'sp' to [0,1]
If hij is True, the ordering spdfghijkl is used. Otherwise, the
ordering will be spdfghikl (skipping j) |
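The two lookup lists referenced above are not shown in this row; a plausible sketch of their layout, plus usage, is (an assumption based on the docstring's orderings):

_amchar_map_hij = ['s', 'p', 'd', 'f', 'g', 'h', 'i', 'j', 'k', 'l']
_amchar_map_hik = ['s', 'p', 'd', 'f', 'g', 'h', 'i', 'k', 'l']

amchar_to_int('sp')           # -> [0, 1]
amchar_to_int('j', hij=True)  # -> [7]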
def draw_tree_grid(self,
nrows=None,
ncols=None,
start=0,
fixed_order=False,
shared_axis=False,
**kwargs):
if not self.treelist:
print("Treelist is empty")
return None, None
if not fixed_order:
treelist = self.copy().treelist
else:
if fixed_order is True:
fixed_order = self.treelist[0].get_tip_labels()
treelist = [
ToyTree(i, fixed_order=fixed_order)
for i in self.copy().treelist
]
for tree in treelist:
tree.style.update(kwargs)
if not (ncols or nrows):
ncols = 5
nrows = 1
elif not (ncols and nrows):
if ncols:
if ncols == 1:
if self.ntrees <= 5:
nrows = self.ntrees
else:
nrows = 2
else:
if self.ntrees <= 10:
nrows = 2
else:
nrows = 3
if nrows:
if nrows == 1:
if self.ntrees <= 5:
ncols = self.ntrees
else:
ncols = 5
else:
if self.ntrees <= 10:
ncols = 5
else:
ncols = 3
else:
pass
draw = TreeGrid(treelist)
if kwargs.get("debug"):
return draw
canvas, axes = draw.update(nrows, ncols, start, shared_axis, **kwargs)
return canvas, axes | Draw a slice of x*y trees into a x,y grid non-overlapping.
Parameters:
-----------
nrows (int):
Number of grid rows. Default=automatically set.
ncols (int):
Number of grid columns. Default=automatically set.
start (int):
Starting index of tree slice from .treelist.
kwargs (dict):
Toytree .draw() arguments as a dictionary. |
def main(config, host, port, logfile, debug, daemon, uid, gid, pidfile, umask, rundir):
_main(**locals()) | Main entry point for running a socket server from the commandline.
This method will read in options from the commandline and call the L{config.init_config} method
to get everything set up. Then, depending on whether daemon mode was specified or not,
the process may be forked (or not) and the server will be started. |
def stopped(name, connection=None, username=None, password=None):
return _virt_call(name, 'shutdown', 'stopped', "Machine has been shut down",
connection=connection, username=username, password=password) | Stops a VM by shutting it down nicely.
.. versionadded:: 2016.3.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: yaml
domain_name:
virt.stopped |
def _create_table_and_update_context(node, context):
schema_type_name = sql_context_helpers.get_schema_type_name(node, context)
table = context.compiler_metadata.get_table(schema_type_name).alias()
context.query_path_to_selectable[node.query_path] = table
return table | Create an aliased table for a SqlNode.
Updates the relevant Selectable global context.
Args:
node: SqlNode, the current node.
context: CompilationContext, global compilation state and metadata.
Returns:
Table, the newly aliased SQLAlchemy table. |
def attributes(self, **kwargs):
path = "/directory-sync-service/v1/attributes"
r = self._httpclient.request(
method="GET",
path=path,
url=self.url,
**kwargs
)
return r | Retrieve the attribute configuration object.
Retrieves a mapping that identifies the custom directory
attributes configured for the Directory SyncService instance,
and the mapping of the custom attributes to standard directory
attributes.
Args:
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``directory_attributes.py`` example. |
def code_deparse_around_offset(name, offset, co, out=StringIO(),
version=None, is_pypy=None,
debug_opts=DEFAULT_DEBUG_OPTS):
assert iscode(co)
if version is None:
version = sysinfo2float()
if is_pypy is None:
is_pypy = IS_PYPY
deparsed = code_deparse(co, out, version, is_pypy, debug_opts)
if (name, offset) in deparsed.offsets.keys():
return deparsed
valid_offsets = [t for t in deparsed.offsets if isinstance(t[1], int)]
offset_list = sorted([t[1] for t in valid_offsets if t[0] == name])
found_offset = find_gt(offset_list, offset)
deparsed.offsets[name, offset] = deparsed.offsets[name, found_offset]
return deparsed | Like deparse_code(), but given a function/module name and
offset, finds the node closest to offset. If offset is not an instruction boundary,
we raise an IndexError. |
def dpss_windows(N, NW, Kmax, interp_from=None, interp_kind=):
Kmax = int(Kmax)
W = float(NW) / N
nidx = np.arange(N, dtype='d')
if interp_from is not None:
if interp_from > N:
            e_s = 'In dpss_windows, interp_from is: %s ' % interp_from
            e_s += 'and N is: %s. ' % N
            e_s += 'Please enter interp_from smaller than N.'
raise ValueError(e_s)
dpss = []
d, e = dpss_windows(interp_from, NW, Kmax)
for this_d in d:
x = np.arange(this_d.shape[-1])
I = interpolate.interp1d(x, this_d, kind=interp_kind)
d_temp = I(np.linspace(0, this_d.shape[-1] - 1, N, endpoint=False))
d_temp = d_temp / np.sqrt(np.sum(d_temp ** 2))
dpss.append(d_temp)
dpss = np.array(dpss)
else:
diagonal = ((N - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W)
off_diag = np.zeros_like(nidx)
off_diag[:-1] = nidx[1:] * (N - nidx[1:]) / 2.
ab = np.zeros((2, N), 'd')
ab[1] = diagonal
ab[0, 1:] = off_diag[:-1]
w = linalg.eigvals_banded(ab, select='i',
select_range=(N - Kmax, N - 1))
w = w[::-1]
t = np.linspace(0, np.pi, N)
dpss = np.zeros((Kmax, N), 'd')
for k in range(Kmax):
dpss[k] = tridi_inverse_iteration(
diagonal, off_diag, w[k], x0=np.sin((k + 1) * t)
)
fix_symmetric = (dpss[0::2].sum(axis=1) < 0)
for i, f in enumerate(fix_symmetric):
if f:
dpss[2 * i] *= -1
pk = np.argmax(np.abs(dpss[1::2, :N//2]), axis=1)
for i, p in enumerate(pk):
if np.sum(dpss[2 * i + 1, :p]) < 0:
dpss[2 * i + 1] *= -1
dpss_rxx = autocorr(dpss) * N
r = 4 * W * np.sinc(2 * W * nidx)
r[0] = 2 * W
eigvals = np.dot(dpss_rxx, r)
return dpss, eigvals | Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
for a given frequency-spacing multiple NW and sequence length N.
Parameters
----------
N : int
sequence length
NW : float, unitless
standardized half bandwidth corresponding to 2NW = BW/f0 = BW*N*dt
but with dt taken as 1
Kmax : int
number of DPSS windows to return is Kmax (orders 0 through Kmax-1)
interp_from : int (optional)
The dpss can be calculated using interpolation from a set of dpss
with the same NW and Kmax, but shorter N. This is the length of this
shorter set of dpss windows.
interp_kind : str (optional)
This input variable is passed to scipy.interpolate.interp1d and
specifies the kind of interpolation as a string ('linear', 'nearest',
'zero', 'slinear', 'quadratic, 'cubic') or as an integer specifying the
order of the spline interpolator to use.
Returns
-------
v, e : tuple,
v is an array of DPSS windows shaped (Kmax, N)
e are the eigenvalues
Notes
-----
Tridiagonal form of DPSS calculation from:
Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
uncertainty V: The discrete case. Bell System Technical Journal,
Volume 57 (1978), 1371-1430 |
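A short usage sketch (values are illustrative; the first 2*NW - 1 windows are typically well concentrated, i.e. eigenvalues near 1):

v, e = dpss_windows(N=1024, NW=4, Kmax=7)
print(v.shape)  # (7, 1024)
print(e)        # eigenvalues, all close to 1 for Kmax < 2*NW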
def _convertNonNumericData(self, spatialOutput, temporalOutput, output):
encoders = self.encoder.getEncoderList()
types = self.encoder.getDecoderOutputFieldTypes()
for i, (encoder, type) in enumerate(zip(encoders, types)):
spatialData = spatialOutput[i]
temporalData = temporalOutput[i]
            if type != FieldMetaType.integer and type != FieldMetaType.float:
                # Remainder of the body was lost in extraction; a sketch of the
                # documented intent (getScalars usage is an assumption):
                output['spatialTopDownOut'][i] = encoder.getScalars(spatialData)[0]
                output['temporalTopDownOut'][i] = encoder.getScalars(temporalData)[0] | Converts all of the non-numeric fields from spatialOutput and temporalOutput
into their scalar equivalents and records them in the output dictionary.
:param spatialOutput: The results of topDownCompute() for the spatial input.
:param temporalOutput: The results of topDownCompute() for the temporal
input.
:param output: The main dictionary of outputs passed to compute(). It is
expected to have keys 'spatialTopDownOut' and 'temporalTopDownOut' that
are mapped to numpy arrays. |
def getOntology(self, id_):
if id_ not in self._ontologyIdMap:
raise exceptions.OntologyNotFoundException(id_)
return self._ontologyIdMap[id_] | Returns the ontology with the specified ID. |
def parse_subprotocol_item(
header: str, pos: int, header_name: str
) -> Tuple[Subprotocol, int]:
item, pos = parse_token(header, pos, header_name)
return cast(Subprotocol, item), pos | Parse a subprotocol from ``header`` at the given position.
Return the subprotocol value and the new position.
Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs. |
def organization_requests(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/requests
api_path = "/api/v2/organizations/{id}/requests.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/requests#list-requests |
def add_libravatar (generator, metadata):
    # Settings and metadata key names were lost in extraction; the names and
    # URL fragments below are reconstructed assumptions.
    missing = generator.settings.get ('LIBRAVATAR_MISSING')
    size = generator.settings.get ('LIBRAVATAR_SIZE')
    if 'email' not in metadata.keys ():
        try:
            metadata ['email'] = generator.settings.get ('AUTHOR_EMAIL')
        except:
            pass
    if metadata ['email']:
        email = metadata ['email'].lower ().encode ()
        md5 = hashlib.md5 (email).hexdigest ()
        url = 'https://seccdn.libravatar.org/avatar/' + md5
        if missing or size:
            url = url + '?'
            if missing:
                url = url + 'd=' + missing
                if size:
                    url = url + '&'
            if size:
                url = url + 's=' + str (size)
        metadata ['author_libravatar'] = url | Article generator connector for the Libravatar plugin |
def add_material(self, material):
if self.has_material(material):
return
self.materials.append(material) | Add a material to the mesh, IF it's not already present. |
def add_image_history(self, data):
self._ef['0th'][piexif.ImageIFD.ImageHistory] = json.dumps(data) | Add arbitrary string to ImageHistory tag. |
def clazz(clazz, parent_clazz, description, link, params_string, init_super_args=None):
variables_needed = []
variables_optional = []
imports = set()
for param in params_string.split("\n"):
variable = parse_param_types(param)
assert(not any([type_.always_is_value is not None for type_ in variable.types]) or len(variable.types) == 1)
if variable.optional:
variables_optional.append(variable)
else:
variables_needed.append(variable)
imports.update(variable.all_imports)
imports = list(imports)
imports.sort()
if isinstance(parent_clazz, str):
parent_clazz = to_type(parent_clazz, "parent class")
assert isinstance(parent_clazz, Type)
clazz_object = Clazz(imports=imports,
clazz=clazz, parent_clazz=parent_clazz, link=link, description=description,
parameters=variables_needed, keywords=variables_optional
)
return clazz_object | Live template for pycharm:
y = clazz(clazz="$clazz$", parent_clazz="$parent$", description="$description$", link="$lnk$", params_string="$first_param$") |
def transplant_func(func, module):
from nose.tools import make_decorator
def newfunc(*arg, **kw):
return func(*arg, **kw)
newfunc = make_decorator(func)(newfunc)
newfunc.__module__ = module
return newfunc | Make a function imported from module A appear as if it is located
in module B.
>>> from pprint import pprint
>>> pprint.__module__
'pprint'
>>> pp = transplant_func(pprint, __name__)
>>> pp.__module__
'nose.util'
The original function is not modified.
>>> pprint.__module__
'pprint'
Calling the transplanted function calls the original.
>>> pp([1, 2])
[1, 2]
>>> pprint([1,2])
[1, 2] |
def random_outdir():
    if not hasattr(random_outdir, 'outdir'):
        random_outdir.outdir = '/' + ''.join([random.choice(string.ascii_letters) for _ in range(6)])
return random_outdir.outdir | Return the random directory name chosen to use for tool / workflow output |
def from_dict(cls, d):
    # Dict keys below were lost in extraction; reconstructed to mirror the
    # keyword argument names.
    structure = Structure.from_dict(d['structure'])
    voronoi_list2 = from_bson_voronoi_list2(d['voronoi_list2'], structure)
    maximum_distance_factor = d['maximum_distance_factor'] if 'maximum_distance_factor' in d else None
    minimum_angle_factor = d['minimum_angle_factor'] if 'minimum_angle_factor' in d else None
    return cls(structure=structure, voronoi_list2=voronoi_list2,
               normalized_angle_tolerance=d['normalized_angle_tolerance'],
               normalized_distance_tolerance=d['normalized_distance_tolerance'],
               additional_conditions=d['additional_conditions'],
               valences=d['valences'],
maximum_distance_factor=maximum_distance_factor,
minimum_angle_factor=minimum_angle_factor) | Reconstructs the VoronoiContainer object from a dict representation of the VoronoiContainer created using
the as_dict method.
:param d: dict representation of the VoronoiContainer object
:return: VoronoiContainer object |
def storage_update(self, name, config, timeout=10):
result = Result(*self.perform_request(**{
        'method': 'POST',  # request keys reconstructed; literals lost in extraction
        'url': '/storage/{0}.json'.format(name),
        'body': config,
        'params': {
            'request_timeout': timeout
}
}))
return result | Create or update a storage plugin configuration.
:param name: The name of the storage plugin configuration to create or update.
:param config: Overwrites the existing configuration if there is any, and therefore, must include all
required attributes and definitions.
:param timeout: int
:return: pydrill.client.Result |
def parse_http_accept_header(header):
    components = [item.strip() for item in header.split(',')]
    l = []
    for component in components:
        if ';' in component:
            subcomponents = [item.strip() for item in component.split(';')]
            l.append(
                (
                    subcomponents[0],
                    subcomponents[1][2:]  # strip the leading 'q=' from the quality value
                )
            )
        else:
            l.append((component, '1'))
l.sort(
key = lambda i: i[1],
reverse = True
)
content_types = []
for i in l:
content_types.append(i[0])
return content_types | Return a list of content types listed in the HTTP Accept header
ordered by quality.
:param header: A string describing the contents of the HTTP Accept header. |
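A short usage sketch; the default quality of '1' places unqualified types first:

parse_http_accept_header('text/html; q=0.9, application/json')
# -> ['application/json', 'text/html']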
def import_fx_rates(self, rates: List[PriceModel]):
have_new_rates = False
base_currency = self.get_default_currency()
for rate in rates:
assert isinstance(rate, PriceModel)
currency = self.get_by_symbol(rate.symbol)
amount = rate.value
has_rate = currency.prices.filter(Price.date == rate.datetime.date()).first()
if not has_rate:
log(INFO, "Creating entry for %s, %s, %s, %s",
base_currency.mnemonic, currency.mnemonic, rate.datetime.date(), amount)
inverted_rate = 1 / amount
inverted_rate = inverted_rate.quantize(Decimal('0.00000001'))  # precision assumed; literal lost in extraction
price = Price(commodity=currency,
currency=base_currency,
date=rate.datetime.date(),
value=str(inverted_rate))
have_new_rates = True
if have_new_rates:
log(INFO, "Saving new prices...")
self.book.flush()
self.book.save()
else:
log(INFO, "No prices imported.") | Imports the given prices into database. Write operation! |
def get_version(version=None):
if version is None:
version = VERSION
assert len(version) == 5
assert version[3] in ("alpha", "beta", "rc", "final")
parts = 2 if version[2] == 0 else 3
main = ".".join(str(x) for x in version[:parts])
sub = ""
if version[3] != "final":
mapping = {"alpha": "a", "beta": "b", "rc": "c"}
sub = mapping[version[3]] + str(version[4])
return main + sub | Derives a PEP386-compliant version number from VERSION. |
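Usage sketch, following PEP 386-style pre-release suffixes:

get_version((1, 9, 0, 'alpha', 1))  # -> '1.9a1'   (micro == 0, so two parts)
get_version((2, 0, 1, 'final', 0))  # -> '2.0.1'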
def check_bottleneck(text):
err = "mixed_metaphors.misc.bottleneck"
msg = u"Mixed metaphor — bottles with big necks are easy to pass through."
list = [
"biggest bottleneck",
"big bottleneck",
"large bottleneck",
"largest bottleneck",
"world-wide bottleneck",
"huge bottleneck",
"massive bottleneck",
]
return existence_check(text, list, err, msg, max_errors=1) | Avoid mixing metaphors about bottles and their necks.
source: Sir Ernest Gowers
source_url: http://bit.ly/1CQPH61 |
def to_xml(self, name="address"):
for n, v in {"street_address": self.street_address, "city": self.city,
"country": self.country}.items():
if is_empty_or_none(v):
raise ValueError(" attribute cannot be empty or None." % n)
doc = Document()
root = doc.createElement(name)
self._create_text_node(root, "streetAddress", self.street_address, True)
self._create_text_node(root, "city", self.city, True)
self._create_text_node(root, "zipcode", self.zipcode)
self._create_text_node(root, "state", self.state, True)
self._create_text_node(root, "country", self.country)
return root | Returns a DOM Element containing the XML representation of the
address.
@return:Element |
def StartGdb(self):
if self.inferior.is_running:
self.inferior.ShutDownGdb()
            program_arg = 'attach %d ' % self.inferior.pid  # command text assumed; literal lost in extraction
        else:
            program_arg = ''
        os.system('gdb ' + program_arg + ' '.join(self.gdb_args))
        reset_position = raw_input('Reset debugger position? [y]/n ')  # prompt assumed
        if not reset_position or reset_position == 'y' or reset_position == 'yes':
self.position = None | Hands control over to a new gdb process. |
def ArcSin(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Double(context.jvm_view().ArcSinVertex, label, cast_to_double_vertex(input_vertex)) | Takes the inverse sin of a vertex, Arcsin(vertex)
:param input_vertex: the vertex |
def kullback_leibler(p, q) :
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
if p.shape != q.shape :
raise ValueError("p and q must be of the same dimensions")
return np.sum(np.where(p > 0, np.log(p / q) * p, 0)) | Discrete Kullback-Leibler divergence D(P||Q) |
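A quick numeric check: D(P||P) is zero and the divergence is asymmetric.

p = [0.5, 0.5]
q = [0.9, 0.1]
kullback_leibler(p, p)  # -> 0.0
kullback_leibler(p, q)  # -> ~0.511
kullback_leibler(q, p)  # -> ~0.368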
def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None, **args):
    return _delete_resource(name, name_param='CacheSecurityGroupName',
                            desc='cache security group', res_type='cache_security_group',
region=region, key=key, keyid=keyid, profile=profile, **args) | Delete a cache security group.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.delete_cache_security_group myelasticachesg |
def JCXZ(cpu, target):
cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.CX == 0, target.read(), cpu.PC) | Jumps short if CX register is 0.
:param cpu: current CPU.
:param target: destination operand. |
def get_as_dataframe(worksheet,
evaluate_formulas=False,
**options):
all_values = _get_all_values(worksheet, evaluate_formulas)
return TextParser(all_values, **options).read() | Returns the worksheet contents as a DataFrame.
:param worksheet: the worksheet.
:param evaluate_formulas: if True, get the value of a cell after
formula evaluation; otherwise get the formula itself if present.
Defaults to False.
:param \*\*options: all the options for pandas.io.parsers.TextParser,
according to the version of pandas that is installed.
(Note: TextParser supports only the default 'python' parser engine,
not the C engine.)
:returns: pandas.DataFrame |
def ratio(value, decimal_places=0, failure_string='N/A'):
    # Default failure string and the ':1' suffix reconstructed (lost in extraction).
    try:
        f = float(value)
    except ValueError:
        return failure_string
    return _saferound(f, decimal_places) + ':1' | Converts a floating point value to a X:1 ratio.
Number of decimal places is set by the `decimal_places` kwarg. Default is zero. |
def limit_update(db, key, limits):
    desired = [msgpack.dumps(l.dehydrate()) for l in limits]
    desired_set = set(desired)
    # Remainder of the body was lost in extraction; a sketch of the documented
    # WATCH/MULTI update (assumes the redis-py pipeline API):
    with db.pipeline() as pipe:
        while True:
            try:
                pipe.watch(key)
                if set(pipe.lrange(key, 0, -1)) != desired_set:
                    pipe.multi()
                    pipe.delete(key)
                    pipe.rpush(key, *desired)
                    pipe.execute()
                break
            except redis.WatchError:
                continue | Safely updates the list of limits in the database.
:param db: The database handle.
:param key: The key the limits are stored under.
:param limits: A list or sequence of limit objects, each
understanding the dehydrate() method.
The limits list currently in the database will be atomically
changed to match the new list. This is done using the pipeline()
method. |
def get_name(self, name):
if name not in self.get_names():
raise exceptions.NameNotFoundError('name %s not found' % name)
return self.get_names()[name] | Return name `PyName` defined in this scope |
def send(self, stat, value, backend=None):
client = yield from self.client(backend)
if not client:
return False
client.send(stat, value)
client.disconnect() | Send stat to backend. |
def daffpa():
found = ctypes.c_int()
libspice.daffpa_c(ctypes.byref(found))
return bool(found.value) | Find the previous (backward) array in the current DAF.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/daffpa_c.html
:return: True if an array was found.
:rtype: bool |
def _write_hex_long(self, data, pos, value):
self._write_hex_byte(data, pos + 0, (value >> 56) & 0xff)
self._write_hex_byte(data, pos + 2, (value >> 48) & 0xff)
self._write_hex_byte(data, pos + 4, (value >> 40) & 0xff)
self._write_hex_byte(data, pos + 6, (value >> 32) & 0xff)
self._write_hex_byte(data, pos + 8, (value >> 24) & 0xff)
self._write_hex_byte(data, pos + 10, (value >> 16) & 0xff)
self._write_hex_byte(data, pos + 12, (value >> 8) & 0xff)
self._write_hex_byte(data, pos + 14, (value & 0xff)) | Writes an unsigned long value across a byte array.
:param data: the buffer to write the value to
:type data: bytearray
:param pos: the starting position
:type pos: int
:param value: the value to write
:type value: unsigned long |
def recv(self, filename, dest_file, timeout=None):
transport = DataFilesyncTransport(self.stream)
        transport.write_data('RECV', filename, timeout)  # filesync command ids reconstructed
        for data_msg in transport.read_until_done('DATA', timeout):
dest_file.write(data_msg.data) | Retrieve a file from the device into the file-like dest_file. |
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built | Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted. |
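Usage sketch:

validate_age = Range(min=0, max=130, min_message="Age must be at least {min}")
validate_age(25)    # -> 25
validate_age(-1)    # raises Error("Age must be at least 0")
validate_age(True)  # raises Error("Not a number") -- booleans are rejected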
def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):
try:
student_item = StudentItem.objects.get(
student_id=student_id, course_id=course_id, item_id=item_id
)
except StudentItem.DoesNotExist:
return
try:
score = Score.create_reset_score(student_item)
if emit_signal:
score_reset.send(
sender=None,
anonymous_user_id=student_id,
course_id=course_id,
item_id=item_id,
created_at=score.created_at,
)
if clear_state:
for sub in student_item.submission_set.all():
sub.status = Submission.DELETED
sub.save(update_fields=["status"])
cache_key = Submission.get_cache_key(sub.uuid)
cache.delete(cache_key)
except DatabaseError:
msg = (
u"Error occurred while reseting scores for"
u" item {item_id} in course {course_id} for student {student_id}"
).format(item_id=item_id, course_id=course_id, student_id=student_id)
logger.exception(msg)
raise SubmissionInternalError(msg)
else:
msg = u"Score reset for item {item_id} in course {course_id} for student {student_id}".format(
item_id=item_id, course_id=course_id, student_id=student_id
)
logger.info(msg) | Reset scores for a specific student on a specific problem.
Note: this does *not* delete `Score` models from the database,
since these are immutable. It simply creates a new score with
the "reset" flag set to True.
Args:
student_id (unicode): The ID of the student for whom to reset scores.
course_id (unicode): The ID of the course containing the item to reset.
item_id (unicode): The ID of the item for which to reset scores.
clear_state (bool): If True, will appear to delete any submissions associated with the specified StudentItem
Returns:
None
Raises:
SubmissionInternalError: An unexpected error occurred while resetting scores. |
def tasks_missing_predicate(
service_name,
old_task_ids,
task_predicate=None
):
try:
task_ids = get_service_task_ids(service_name, task_predicate)
except DCOSHTTPException:
        print('Failed to get task ids for service {}'.format(service_name))  # messages reconstructed
        task_ids = []
    print('checking tasks for service {}: old={}, current={}'.format(
        service_name, old_task_ids, task_ids))
for id in old_task_ids:
if id not in task_ids:
return True
return False | Returns whether any of old_task_ids are no longer present
:param service_name: the service name
:type service_name: str
:param old_task_ids: list of original task ids as returned by get_service_task_ids
:type old_task_ids: [str]
:param task_predicate: filter to use when searching for tasks
:type task_predicate: func
:return: True if any of old_task_ids are no longer present in the service
:rtype: bool |
def span(self, name='span'):
child_span = Span(name, parent_span=self)
self._child_spans.append(child_span)
return child_span | Create a child span for the current span and append it to the child
spans list.
:type name: str
:param name: (Optional) The name of the child span.
:rtype: :class: `~opencensus.trace.span.Span`
:returns: A child Span to be added to the current span. |
def _get_data(self, read_size):
if NIX:
return super(Mouse, self)._get_data(read_size)
return self._pipe.recv_bytes() | Get data from the character device. |
def SerializeFaultDetail(self, val, info):
self._SerializeDataObject(val, info, '{0}'.format(val._wsdlName), self.defaultNS)  # format string lost in extraction; '{0}' is a placeholder | Serialize an object |
def update(self, repo_dir, **kwargs):
del kwargs
rev = self._args.get("revision")
if rev:
return [{"args": ["git", "checkout", rev], "cwd": repo_dir}] + _ff_command(
rev, repo_dir
)
return None | This function updates an existing checkout of source code. |
def enum_sigma_cubic(cutoff, r_axis):
sigmas = {}
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
odd_r = len(list(filter(lambda x: x % 2 == 1, r_axis)))
if odd_r == 3:
a_max = 4
elif odd_r == 0:
a_max = 1
else:
a_max = 2
n_max = int(np.sqrt(cutoff * a_max / sum(np.array(r_axis) ** 2)))
for n_loop in range(1, n_max + 1):
n = n_loop
m_max = int(np.sqrt(cutoff * a_max - n ** 2 * sum(np.array(r_axis) ** 2)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
if m == 0:
n = 1
else:
n = n_loop
quadruple = [m] + [x * n for x in r_axis]
odd_qua = len(list(filter(lambda x: x % 2 == 1, quadruple)))
if odd_qua == 4:
a = 4
elif odd_qua == 2:
a = 2
else:
a = 1
sigma = int(round((m ** 2 + n ** 2 * sum(np.array(r_axis) ** 2)) / a))
if (sigma <= cutoff) and (sigma > 1):
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) \
/ np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) \
/ np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
return sigmas | Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in cubic system.
The algorithm for this code is from reference, Acta Cryst, A40,108(1984)
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angles of one grain respect to
the other grain.
When generate the microstructures of the grain boundary using these angles,
you need to analyze the symmetry of the structure. Different angles may
result in equivalent microstructures. |
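Usage sketch: enumerate sigma values up to a cutoff of 50 for rotations about the [1, 1, 1] axis.

sigmas = enum_sigma_cubic(cutoff=50, r_axis=[1, 1, 1])
print(sorted(sigmas.keys()))  # possible sigma values
print(sigmas[3])              # rotation angles giving a sigma-3 boundary (60.0 deg)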