| code | docstring |
|---|---|
def time_limited(limit_seconds, iterable):
if limit_seconds < 0:
raise ValueError('limit_seconds must be positive')
start_time = monotonic()
for item in iterable:
if monotonic() - start_time > limit_seconds:
break
yield item | Yield items from *iterable* until *limit_seconds* have passed.
>>> from time import sleep
>>> def generator():
... yield 1
... yield 2
... sleep(0.2)
... yield 3
>>> iterable = generator()
>>> list(time_limited(0.1, iterable))
[1, 2]
Note that the time is checked before each item is yielded, and iteration
stops if the time elapsed is greater than *limit_seconds*. If your time
limit is 1 second, but it takes 2 seconds to generate the first item from
the iterable, the function will run for 2 seconds and not yield anything. |
def copy_channel(self, channel, owner, to_channel):
url = % (self.domain, owner, channel, to_channel)
res = self.session.post(url)
self._check_response(res, [201]) | Tag all files in channel <channel> also as channel <to_channel>
:param channel: channel to copy
:param owner: Perform this operation on all packages of this user
:param to_channel: Destination name (may be a channel that already exists) |
def _diagram_canvas_default(self):
canvas = Canvas()
for tool in self.tools:
canvas.tools.append(tool(canvas))
return canvas | Trait initialiser |
def gen_challenge(self, state):
state.checksig(self.key)
if (state.index >= state.n):
raise HeartbeatError("Out of challenges.")
state.seed = MerkleHelper.get_next_seed(self.key, state.seed)
chal = Challenge(state.seed, state.index)
state.index += 1
state.sign(self.key)
return chal | returns the next challenge and increments the seed and index
in the state.
:param state: the state to use for generating the challenge. will
verify the integrity of the state object before using it to generate
a challenge. it will then modify the state by incrementing the seed
and index and resign the state for passing back to the server for
storage |
def evaluaterforces(Pot,R,z,phi=None,t=0.,v=None):
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
dissipative= _isDissipative(Pot)
if dissipative and v is None:
raise PotentialError("The (list of) Potential instances includes dissipative, but you did not provide the 3D velocity (required for dissipative forces")
if isList:
sum= 0.
for pot in Pot:
if isinstance(pot,DissipativeForce):
sum+= pot.rforce(R,z,phi=phi,t=t,v=v,use_physical=False)
else:
sum+= pot.rforce(R,z,phi=phi,t=t,use_physical=False)
return sum
elif isinstance(Pot,Potential):
return Pot.rforce(R,z,phi=phi,t=t,use_physical=False)
elif isinstance(Pot,DissipativeForce):
return Pot.rforce(R,z,phi=phi,t=t,v=v,use_physical=False)
else:
raise PotentialError("Input to is neither a Potential-instance or a list of such instances") | NAME:
evaluaterforces
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
v - current velocity in cylindrical coordinates (optional, but required when including dissipative forces; can be a Quantity)
OUTPUT:
F_r(R,z,phi,t)
HISTORY:
2016-06-10 - Written - Bovy (UofT) |
async def seen(self, tick, source=None):
    await self.set('.seen', tick)
    if source is not None:
        seen = await self.snap.addNode('meta:seen', (source, self.ndef))
        await seen.set('.seen', tick) | Update the .seen interval and optionally a source specific seen node. |
def isbn(self, fmt: Optional[ISBNFormat] = None,
         locale: str = 'en') -> str:  # default locale assumed; original literal lost
fmt_value = self._validate_enum(item=fmt, enum=ISBNFormat)
mask = ISBN_MASKS[fmt_value].format(
ISBN_GROUPS[locale])
return self.random.custom_code(mask) | Generate ISBN for current locale.
To change ISBN format, pass parameter ``fmt`` with needed value of
the enum object :class:`~mimesis.enums.ISBNFormat`
:param fmt: ISBN format.
:param locale: Locale code.
:return: ISBN.
:raises NonEnumerableError: if fmt is not enum ISBNFormat. |
def create_process(self, service, agent=None, title=None, mode=None, service_version=None, **kwargs):
    # String literals below were lost in extraction; key names and messages are assumed from the parameter names.
    if not agent:
        agent = self.input.get('opereto_agent')
    if not mode:
        mode = self.input.get('opereto_execution_mode') or 'production'
    if not service_version:
        service_version = self.input.get('service_version')
    request_data = {'service': service, 'agent': agent, 'mode': mode, 'service_version': service_version}
    if title:
        request_data['title'] = title
    if self.input.get('pflow_id'):
        request_data['pflow_id'] = self.input.get('pflow_id')
    request_data.update(**kwargs)
    ret_data = self._call_rest_api('post', '/processes', data=request_data, error='Failed to create a new process')
    if not isinstance(ret_data, list):
        raise OperetoClientError(str(ret_data))
    pid = ret_data[0]
    message = 'New process created for service %s [pid = %s]' % (service, pid)
    if agent:
        message += ' [agent = %s]' % agent
    else:
        message += ' [agent = any]'
    self.logger.info(message)
    return str(pid) | create_process(self, service, agent=None, title=None, mode=None, service_version=None, **kwargs)
Registers a new process or processes
:Parameters:
* *service* (`string`) -- Service which process will be started
* *agent* (`string`) -- The service identifier (e.g shell_command)
* *title* (`string`) -- Title for the process
* *mode* (`string`) -- production/development
* *service_version* (`string`) -- Version of the service to execute
:Keywords args:
Json value map containing the process input properties
:return: process id
:Example:
.. code-block:: python
process_properties = {"my_input_param" : "1"}
pid = opereto_client.create_process(service='simple_shell_command', title='Test simple shell command service', agent=opereto_client.input['opereto_agent'], **process_properties) |
def _shutdown(self, manual):
if self._ssl is None:
return
while True:
    result = libssl.SSL_shutdown(self._ssl)
    if result >= 0:
        break
    # Error-handling branch reconstructed; the intermediate lines were lost in extraction.
    error = libssl.SSL_get_error(self._ssl, result)
    if error == LibsslConst.SSL_ERROR_WANT_READ:
        if self._raw_read() != b'':
            continue
        else:
            break
    elif error == LibsslConst.SSL_ERROR_WANT_WRITE:
        self._raw_write()
        continue
    else:
        handle_openssl_error(0, TLSError)
if manual:
self._local_closed = True
libssl.SSL_free(self._ssl)
self._ssl = None
self._rbio = None
self._wbio = None
try:
self._socket.shutdown(socket_.SHUT_RDWR)
except (socket_.error):
pass | Shuts down the TLS session and then shuts down the underlying socket
:param manual:
A boolean if the connection was manually shutdown |
def get_providing_power_source_type(self):
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
return POWER_TYPE_MAP[power_status.ACLineStatus] | Returns GetSystemPowerStatus().ACLineStatus
@raise: WindowsError if any underlying error occurs. |
async def start_all_linking(self, linkcode, group, address=None):
_LOGGING.info()
if address:
linkdevice = self.plm.devices[Address(address).id]
if not linkdevice:
linkdevice = create(self.plm, address, None, None)
_LOGGING.info(,
address)
self.plm.start_all_linking(linkcode, group)
await asyncio.sleep(.5, loop=self.loop)
linkdevice.enter_linking_mode(group=group)
else:
_LOGGING.info(
)
self.plm.start_all_linking(linkcode, group)
await asyncio.sleep(self.wait_time, loop=self.loop)
_LOGGING.info(,
len(self.plm.devices))
await asyncio.sleep(.1, loop=self.loop) | Start the All-Linking process with the IM and device. |
def get_messages(self, domain):
if domain not in self.domains:
    raise ValueError('Unknown domain: {}'.format(domain))  # message text assumed; original literal lost
if domain not in self.messages or 'valid' not in self.messages[domain]:  # 'valid' key assumed from the docstring
    self._process_domain(domain)
return self.messages[domain]['valid'] | Returns all valid messages after operation.
@type domain: str
@rtype: dict |
async def evaluate(self):
@database_sync_to_async
def remove_subscribers():
models.Observer.subscribers.through.objects.filter(
observer_id=self.id
).delete()
@database_sync_to_async
def get_subscriber_sessions():
return list(
models.Observer.subscribers.through.objects.filter(observer_id=self.id)
.distinct()
.values_list(, flat=True)
)
try:
settings = get_queryobserver_settings()
start = time.time()
added, changed, removed = await database_sync_to_async(self._evaluate)()
duration = time.time() - start
if duration > settings[][]:
self._warning("Slow observed viewset", duration=duration)
if duration > settings[][]:
logger.error(
"Removing subscribers to extremely slow observed viewset ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(duration=duration),
)
await remove_subscribers()
if self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
    # Channel message keys below are assumed from the variable names; original literals lost.
    await get_channel_layer().send(
        CHANNEL_MAIN,
        {
            'type': TYPE_POLL,
            'observer': self.id,
            'interval': self._meta.poll_interval,
        },
    )
message = {
    'type': TYPE_ITEM_UPDATE,
    'observer': self.id,
    'primary_key': self._meta.primary_key,
    'added': added,
    'changed': changed,
    'removed': removed,
}
if added or changed or removed:
for session_id in await get_subscriber_sessions():
await get_channel_layer().group_send(
GROUP_SESSIONS.format(session_id=session_id), message
)
except Exception:
logger.exception(
"Error while evaluating observer ({})".format(self._get_logging_id()),
extra=self._get_logging_extra(),
) | Evaluate the query observer.
:param return_emitted: True if the emitted diffs should be returned (testing only) |
def use_plenary_asset_composition_view(self):
self._object_views[] = PLENARY
for session in self._get_provider_sessions():
try:
session.use_plenary_asset_composition_view()
except AttributeError:
pass | Pass through to provider AssetCompositionSession.use_plenary_asset_composition_view |
def check_webhook_secret(app_configs=None, **kwargs):
from . import settings as djstripe_settings
messages = []
secret = djstripe_settings.WEBHOOK_SECRET
if secret and not secret.startswith("whsec_"):
messages.append(
checks.Warning(
"DJSTRIPE_WEBHOOK_SECRET does not look valid",
hint="It should start with whsec_...",
id="djstripe.W003",
)
)
return messages | Check that DJSTRIPE_WEBHOOK_SECRET looks correct |
async def _create_proxy_connection(self, req, *args, **kwargs):
if req.proxy.scheme == 'http':
return await super()._create_proxy_connection(req, *args, **kwargs)
else:
return await self._create_socks_connection(req=req) | args, kwargs can contain different elements (traces, timeout,...)
depending on aiohttp version |
def _invert(h):
"Cheap function to invert a hash."
i = {}
for k,v in h.items():
i[v] = k
return i | Cheap function to invert a hash. |
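A quick usage sketch for `_invert`, assuming the function above is in scope; note that duplicate values collapse, keeping only one of the original keys:

```python
h = {"a": 1, "b": 2, "c": 2}

assert _invert({"a": 1}) == {1: "a"}   # values become keys
assert _invert(h)[1] == "a"
assert _invert(h)[2] in ("b", "c")     # duplicate values keep only one key
```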
def slaves(self):
with self._mutex:
if not self._slaves:
self._slaves = [c for c in self.children if c.is_manager]
return self._slaves | The list of slave managers of this manager, if any.
This information can also be found by listing the children of this node
that are of type @ref Manager. |
def map_gate(gate: Gate, args: Sequence[Qubits]) -> Circuit:
circ = Circuit()
for qubits in args:
circ += gate.relabel(qubits)
return circ | Applies the same gate to all qubits in the argument list.
>>> circ = qf.map_gate(qf.H(), [[0], [1], [2]])
>>> print(circ)
H(0)
H(1)
H(2) |
def gapfill(model, universal=None, lower_bound=0.05,
penalties=None, demand_reactions=True, exchange_reactions=False,
iterations=1):
gapfiller = GapFiller(model, universal=universal,
lower_bound=lower_bound, penalties=penalties,
demand_reactions=demand_reactions,
exchange_reactions=exchange_reactions)
return gapfiller.fill(iterations=iterations) | Perform gapfilling on a model.
See documentation for the class GapFiller.
Parameters
----------
model : cobra.Model
The model to perform gap filling on.
universal : cobra.Model, None
A universal model with reactions that can be used to complete the
model. If left missing, gapfilling only considers demand and
exchange reactions.
lower_bound : float
The minimally accepted flux for the objective in the filled model.
penalties : dict, None
A dictionary with keys being 'universal' (all reactions included in
the universal model), 'exchange' and 'demand' (all additionally
added exchange and demand reactions) for the three reaction types.
Can also have reaction identifiers for reaction specific costs.
Defaults are 1, 100 and 1 respectively.
iterations : int
The number of rounds of gapfilling to perform. For every iteration,
the penalty for every used reaction increases linearly. This way,
the algorithm is encouraged to search for alternative solutions
which may include previously used reactions. I.e., with enough
iterations pathways including 10 steps will eventually be reported
even if the shortest pathway is a single reaction.
exchange_reactions : bool
Consider adding exchange (uptake) reactions for all metabolites
in the model.
demand_reactions : bool
Consider adding demand reactions for all metabolites.
Returns
-------
iterable
list of lists with on set of reactions that completes the model per
requested iteration.
Examples
--------
>>> import cobra.test as ct
>>> from cobra import Model
>>> from cobra.flux_analysis import gapfill
>>> model = ct.create_test_model("salmonella")
>>> universal = Model('universal')
>>> universal.add_reactions(model.reactions.GF6PTA.copy())
>>> model.remove_reactions([model.reactions.GF6PTA])
>>> gapfill(model, universal) |
def link(self, source, target):
return self.operations('link', target.decode(self.encoding),
                       source.decode(self.encoding)) | creates a hard link `target -> source` (e.g. ln source target) |
def run_edisgo_pool(ding0_file_list, run_args_opt,
workers=mp.cpu_count(), worker_lifetime=1):
def collect_pool_results(result):
results.append(result)
results = []
pool = mp.Pool(workers,
maxtasksperchild=worker_lifetime)
for file in ding0_file_list:
edisgo_args = [file] + run_args_opt
pool.apply_async(func=run_edisgo_twice,
args=(edisgo_args,),
callback=collect_pool_results)
pool.close()
pool.join()
all_costs_before_geno_import = [r[0] for r in results]
all_grid_issues_before_geno_import = [r[1] for r in results]
all_costs = [r[2] for r in results]
all_grid_issues = [r[3] for r in results]
return all_costs_before_geno_import, all_grid_issues_before_geno_import, \
all_costs, all_grid_issues | Use python multiprocessing toolbox for parallelization
Several grids are analyzed in parallel.
Parameters
----------
ding0_file_list : list
Ding0 grid data file names
run_args_opt : list
eDisGo options, see :func:`run_edisgo_basic` and
:func:`run_edisgo_twice`
workers: int
Number of parallel process
worker_lifetime : int
Bunch of grids sequentially analyzed by a worker
Returns
-------
all_costs_before_geno_import : list
Grid extension cost before grid connection of new generators
all_grid_issues_before_geno_import : list
Remaining overloading or over-voltage issues in grid
all_costs : list
Grid extension cost due to grid connection of new generators
all_grid_issues : list
Remaining overloading or over-voltage issues in grid |
def postinit(self, expr=None, globals=None, locals=None):
self.expr = expr
self.globals = globals
self.locals = locals | Do some setup after initialisation.
:param expr: The expression to be executed.
:type expr: NodeNG or None
:param globals:The globals dictionary to execute with.
:type globals: NodeNG or None
:param locals: The locals dictionary to execute with.
:type locals: NodeNG or None |
def push_tx(self, crypto, tx_hex):
url = "%s/pushtx" % self.base_url
return self.post_url(url, {: tx_hex}).content | This method is untested. |
def calculate_input(self, buffer):
if TriggerMode.ABBREVIATION in self.modes:
if self._should_trigger_abbreviation(buffer):
if self.immediate:
return len(self._get_trigger_abbreviation(buffer))
else:
return len(self._get_trigger_abbreviation(buffer)) + 1
if TriggerMode.HOTKEY in self.modes:
if buffer == '':
return len(self.modifiers) + 1
return self.parent.calculate_input(buffer) | Calculate how many keystrokes were used in triggering this phrase. |
def quota(ip=None):
url =
data = urlopen(url)
credit = int(data.read().strip())
if data.code == 200:
return credit
else:
return "ERROR: Server responded with code %s" % data.code | Check your quota. |
def get_long_description():
root_path = get_root_path()
readme_path = os.path.join(root_path, "README.md")
try:
import pypandoc
return pypandoc.convert(readme_path, "rst").strip()
except ImportError:
return "Cloudsmith CLI" | Grok the readme, turn it into whine (rst). |
def HsvToRgb(h, s, v):
if s==0: return (v, v, v)
h /= 60.0
h = h % 6.0
i = int(h)
f = h - i
if not(i&1): f = 1-f
m = v * (1.0 - s)
n = v * (1.0 - (s * f))
if i==0: return (v, n, m)
if i==1: return (n, v, m)
if i==2: return (m, v, n)
if i==3: return (m, n, v)
if i==4: return (n, m, v)
return (v, m, n) | Convert the color from HSV coordinates to RGB.
Parameters:
:h:
The Hue component value [0...360]
:s:
The Saturation component value [0...1]
:v:
The Value component [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> Color.HsvToRgb(30.0, 1.0, 1.0)
(1.0, 0.5, 0.0) |
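A few spot checks for `HsvToRgb`, assuming the function above is in scope; hue is given in degrees and saturation/value in [0, 1]:

```python
# Pure hues at full saturation and value.
assert HsvToRgb(0.0, 1.0, 1.0) == (1.0, 0.0, 0.0)    # red
assert HsvToRgb(120.0, 1.0, 1.0) == (0.0, 1.0, 0.0)  # green
assert HsvToRgb(240.0, 1.0, 1.0) == (0.0, 0.0, 1.0)  # blue

# Zero saturation yields a grey determined only by the value component.
assert HsvToRgb(90.0, 0.0, 0.25) == (0.25, 0.25, 0.25)
```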
def _setup_stats(self):
self.stats_dict = {}
redis_conn = redis.Redis(host=self.settings['REDIS_HOST'],  # settings key names assumed; original literals lost
                         port=self.settings['REDIS_PORT'],
                         db=self.settings.get('REDIS_DB'))
try:
redis_conn.info()
self.logger.debug("Connected to Redis in StatsCollector Setup")
self.redis_conn = redis_conn
except ConnectionError:
self.logger.warn("Failed to connect to Redis in StatsCollector"
" Setup, no stats will be collected")
return
if self.settings['STATS_TOTAL']:  # settings key names assumed; original literals lost
    self._setup_stats_total(redis_conn)
if self.settings['STATS_PLUGINS']:
    self._setup_stats_plugins(redis_conn) | Sets up the stats collection |
def check_namespace(namespace_id):
if type(namespace_id) not in [str, unicode]:
return False
if not is_namespace_valid(namespace_id):
return False
return True | Verify that a namespace ID is well-formed
>>> check_namespace(123)
False
>>> check_namespace(None)
False
>>> check_namespace('')
False
>>> check_namespace('abcd')
True
>>> check_namespace('Abcd')
False
>>> check_namespace('a+bcd')
False
>>> check_namespace('.abcd')
False
>>> check_namespace('abcdabcdabcdabcdabcd')
False
>>> check_namespace('abcdabcdabcdabcdabc')
True |
def builds(self):
api_version = self._get_api_version()
if api_version == '2018-02-01-preview':
    from .v2018_02_01_preview.operations import BuildsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | Instance depends on the API version:
* 2018-02-01-preview: :class:`BuildsOperations<azure.mgmt.containerregistry.v2018_02_01_preview.operations.BuildsOperations>` |
def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2):
    if mapping is not None and not hasattr(mapping, 'get') and \
            not callable(mapping):
        raise TypeError("mapping from old to new argument values "
                        "must be dict or callable!")

    def _deprecate_kwarg(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            old_arg_value = kwargs.pop(old_arg_name, None)
            if new_arg_name is None and old_arg_value is not None:
                msg = (
                    "the '{old_name}' keyword is deprecated and will be "
                    "removed in a future version. "
                    "Please take steps to stop the use of '{old_name}'"
                ).format(old_name=old_arg_name)
                warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
                kwargs[old_arg_name] = old_arg_value
                return func(*args, **kwargs)
            if old_arg_value is not None:
                if mapping is not None:
                    if hasattr(mapping, 'get'):
                        new_arg_value = mapping.get(old_arg_value,
                                                    old_arg_value)
                    else:
                        new_arg_value = mapping(old_arg_value)
                    msg = ("the {old_name}={old_val!r} keyword is deprecated, "
                           "use {new_name}={new_val!r} instead"
                           ).format(old_name=old_arg_name,
                                    old_val=old_arg_value,
                                    new_name=new_arg_name,
                                    new_val=new_arg_value)
                else:
                    new_arg_value = old_arg_value
                    msg = ("the '{old_name}' keyword is deprecated, "
                           "use '{new_name}' instead"
                           ).format(old_name=old_arg_name,
                                    new_name=new_arg_name)
                warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
                if kwargs.get(new_arg_name, None) is not None:
                    msg = ("Can only specify '{old_name}' or '{new_name}', "
                           "not both").format(old_name=old_arg_name,
                                              new_name=new_arg_name)
                    raise TypeError(msg)
                else:
                    kwargs[new_arg_name] = new_arg_value
            return func(*args, **kwargs)
        return wrapper
    return _deprecate_kwarg | Decorator to deprecate a keyword argument of a function.
Parameters
----------
old_arg_name : str
Name of argument in function to deprecate
new_arg_name : str or None
Name of preferred argument in function. Use None to raise warning that
``old_arg_name`` keyword is deprecated.
mapping : dict or callable
If mapping is present, use it to translate old arguments to
new arguments. A callable must do its own value checking;
values not found in a dict will be forwarded unchanged.
Examples
--------
The following deprecates 'cols', using 'columns' instead
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
... def f(columns=''):
... print(columns)
...
>>> f(columns='should work ok')
should work ok
>>> f(cols='should raise warning')
FutureWarning: cols is deprecated, use columns instead
warnings.warn(msg, FutureWarning)
should raise warning
>>> f(cols='should error', columns="can\'t pass do both")
TypeError: Can only specify 'cols' or 'columns', not both
>>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
... def f(new=False):
... print('yes!' if new else 'no!')
...
>>> f(old='yes')
FutureWarning: old='yes' is deprecated, use new=True instead
warnings.warn(msg, FutureWarning)
yes!
To raise a warning that a keyword will be removed entirely in the future
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None)
... def f(cols='', another_param=''):
... print(cols)
...
>>> f(cols='should raise warning')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version please takes steps to stop use of 'cols'
should raise warning
>>> f(another_param='should not raise warning')
should not raise warning
>>> f(cols='should raise warning', another_param='')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version please takes steps to stop use of 'cols'
should raise warning |
def set_power_state(self, desired_state):
rn_array = [self.helper.service_profile,
ManagedObject(NamingId.LS_POWER).MakeRn()]
try:
ls_power = ucs_helper.get_managed_object(self.helper.handle,
LsPower.ClassId(),
{LsPower.DN: UcsUtils.MakeDn(rn_array)})
if not ls_power:
raise exception.UcsOperationError("set_power_state",
"Failed to get power MO,"
" configure valid service-profile.")
else:
ls_power_set = self.helper.handle.SetManagedObject(
ls_power,
LsPower.ClassId(),
{LsPower.STATE: desired_state},
dumpXml=YesOrNo.TRUE
)
if ls_power_set:
power = ls_power_set.pop()
return power.getattr(LsPower.STATE)
else:
return states.ERROR
except Exception as ex:
raise exception.UcsOperationError("set_power_state",
"Failed to get power MO,"
"configure valid servie-profile.") | Set power state of this node
:param node: Ironic node one of :class:`ironic.db.models.Node`
:raises: InvalidParameterValue if required ucs parameters are
missing.
:raises: UcsOperationError on an error from UcsHandle Client.
:returns: Power state of the given node |
def logs_update(self):
Gdk.threads_enter()
if not self.debugging:
self.debugging = True
self.debug_btn.set_label()
else:
self.debugging = False
self.debug_btn.set_label()
for record in self.debug_logs[]:
if self.debugging:
if getattr(record, , ) != "cmd_retcode":
self.store.append([format_entry(record, show_level=True, colorize=True)])
else:
if int(record.levelno) > 10:
self.store.append([format_entry(record, colorize=True)])
Gdk.threads_leave() | Function updates logs. |
def check_positive(value, strict=False):
if not strict and value < 0:
raise ValueError("Value must be positive or zero, not %s" % str(value))
if strict and value <= 0:
raise ValueError("Value must be positive, not %s" % str(value)) | Checks if variable is positive
@param value: value to check
@type value: C{integer types}, C{float} or C{Decimal}
@return: None when check successful
@raise ValueError: raised when the check fails |
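A brief usage sketch for `check_positive`, assuming the function above is in scope:

```python
check_positive(3)   # passes silently
check_positive(0)   # zero is allowed in non-strict mode

try:
    check_positive(0, strict=True)  # zero is rejected when strict
except ValueError as exc:
    print(exc)  # Value must be positive, not 0
```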
def dag_state(args):
dag = get_dag(args)
dr = DagRun.find(dag.dag_id, execution_date=args.execution_date)
print(dr[0].state if len(dr) > 0 else None) | Returns the state of a DagRun at the command line.
>>> airflow dag_state tutorial 2015-01-01T00:00:00.000000
running |
def calculate(self, T, P, zs, ws, method):
if method == SIMPLE:
sigmas = [i(T) for i in self.SurfaceTensions]
return mixing_simple(zs, sigmas)
elif method == DIGUILIOTEJA:
return Diguilio_Teja(T=T, xs=zs, sigmas_Tb=self.sigmas_Tb,
Tbs=self.Tbs, Tcs=self.Tcs)
elif method == WINTERFELDSCRIVENDAVIS:
sigmas = [i(T) for i in self.SurfaceTensions]
rhoms = [1./i(T, P) for i in self.VolumeLiquids]
return Winterfeld_Scriven_Davis(zs, sigmas, rhoms)
else:
raise Exception('Method not valid')  # message text assumed | r'''Method to calculate surface tension of a liquid mixture at
temperature `T`, pressure `P`, mole fractions `zs` and weight fractions
`ws` with a given method.
This method has no exception handling; see `mixture_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
P : float
Pressure at which to calculate the property, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Name of the method to use
Returns
-------
sigma : float
Surface tension of the liquid at given conditions, [N/m] |
def _update_dprx(self):
if 'pi' in self.freeparams:  # free-parameter names assumed from the attributes used below; original literals lost
    for r in range(self.nsites):
        self.dprx['pi'][r] = self.prx[r] * (self.ln_pi_codon[r]
                - scipy.dot(self.ln_pi_codon[r], self.prx[r]))
if 'eta' in self.freeparams:
    boolterm = scipy.ndarray(N_CODON, dtype='float')
    with scipy.errstate(divide='raise', under='raise', over='raise',
                        invalid='raise'):
        for i in range(N_NT - 1):
            boolterm.fill(0)
            for j in range(3):
                boolterm += ((i <= CODON_NT_INDEX[j]).astype('float') /
                        (self.eta[i] - (i == CODON_NT_INDEX[j]).astype(
                        'float')))
            for r in range(self.nsites):
                self.dprx['eta'][i][r] = self.prx[r] * (boolterm -
                        scipy.dot(boolterm, self.prx[r]) / self.prx[r].sum()) | Update `dprx`. |
def __select (self, iwtd, owtd, ewtd, timeout=None):
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
return select.select (iwtd, owtd, ewtd, timeout)
except select.error as e:
if e.args[0] == errno.EINTR:
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return ([],[],[])
else:
raise | This is a wrapper around select.select() that ignores signals. If
select.select raises a select.error exception and errno is an EINTR
error then it is ignored. Mainly this is used to ignore sigwinch
(terminal resize). |
def dimension(self, name, copy=True):
try:
return create_dimension(name, self._dims[name]) if copy else self._dims[name]
except KeyError:
raise KeyError("Dimension is not registered "
"on this cube".format(n=name)), None, sys.exc_info()[2] | Returns the requested :class:`~hypercube.dims.Dimension` object
Parameters
----------
name : str
Name of the :class:`~hypercube.dims.Dimension` object
copy : boolean
Returns a copy of the :class:`~hypercube.dims.Dimension` object if True (Default value = True)
Returns
-------
:class:`~hypercube.dims.Dimension`
A :class:`~hypercube.dims.Dimension` object. |
def cause_info(self, mechanism, purview):
return repertoire_distance(
Direction.CAUSE,
self.cause_repertoire(mechanism, purview),
self.unconstrained_cause_repertoire(purview)
) | Return the cause information for a mechanism over a purview. |
def calc_toa_gain_offset(meta):
sat_index = meta[].upper() + "_" + meta[].upper()
acf = np.asarray(meta[])
ebw = np.asarray(meta[])
gain = np.asarray(constants.DG_ABSCAL_GAIN[sat_index])
scale = (acf / ebw) * gain
offset = np.asarray(constants.DG_ABSCAL_OFFSET[sat_index])
e_sun_index = meta[].upper() + "_" + meta[].upper()
e_sun = np.asarray(constants.DG_ESUN[e_sun_index])
sun = ephem.Sun()
img_obs = ephem.Observer()
img_obs.lon = meta[][1]
img_obs.lat = meta[][0]
img_obs.elevation = meta[][2]
img_obs.date = datetime.datetime.fromtimestamp(meta[][] / 1000.0).strftime(
)
sun.compute(img_obs)
d_es = sun.earth_distance
theta_s = 90 - float(meta[])
scale2 = (d_es ** 2 * np.pi) / (e_sun * np.cos(np.deg2rad(theta_s)))
return zip(scale, scale2, offset) | Compute (gain, offset) tuples for each band of the specified image metadata |
def create_system(self, new_machine_id=False):
client_hostname = determine_hostname()
machine_id = generate_machine_id(new_machine_id)
branch_info = self.branch_info
if not branch_info:
return False
remote_branch = branch_info['remote_branch']  # dict keys and URL path assumed from the variable names; original literals lost
remote_leaf = branch_info['remote_leaf']
data = {'machine_id': machine_id,
        'remote_branch': remote_branch,
        'remote_leaf': remote_leaf,
        'hostname': client_hostname}
if self.config.display_name is not None:
    data['display_name'] = self.config.display_name
data = json.dumps(data)
post_system_url = self.api_url + '/v1/systems'
logger.debug("POST System: %s", post_system_url)
logger.debug(data)
net_logger.info("POST %s", post_system_url)
return self.session.post(post_system_url,
                         headers={'Content-Type': 'application/json'},
                         data=data) | Create the machine via the API |
def officers(self, num, **kwargs):
baseuri = self._BASE_URI + "company/{}/officers".format(num)
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | Search for a company's registered officers by company number.
Args:
num (str): Company number to search on.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword. |
def line(x_fn, y_fn, *, options={}, **interact_params):
fig = options.get(, False) or _create_fig(options=options)
[line] = (_create_marks(fig=fig, marks=[bq.Lines], options=options))
_add_marks(fig, [line])
def wrapped(**interact_params):
x_data = util.maybe_call(x_fn, interact_params, prefix=)
line.x = x_data
y_bound = util.maybe_curry(y_fn, x_data)
line.y = util.maybe_call(y_bound, interact_params, prefix=)
controls = widgets.interactive(wrapped, **interact_params)
return widgets.VBox([controls, fig]) | Generates an interactive line chart that allows users to change the
parameters of the inputs x_fn and y_fn.
Args:
x_fn (Array | (*args -> Array str | Array int | Array float)):
If array, uses array values for x-coordinates.
If function, must take parameters to interact with and return an
array of strings or numbers. These will become the x-coordinates
of the line plot.
y_fn (Array | (Array, *args -> Array int | Array float)):
If array, uses array values for y-coordinates.
If function, must take in the output of x_fn as its first parameter
and optionally other parameters to interact with. Must return an
array of numbers. These will become the y-coordinates of the line
plot.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
parameter names, prefix the corresponding kwargs with `x__` and
`y__`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> line([1, 2, 3], [4, 7, 10])
VBox(...)
>>> def x_values(max): return np.arange(0, max)
>>> def y_values(xs, sd):
... return xs + np.random.normal(len(xs), scale=sd)
>>> line(x_values, y_values, max=(10, 50), sd=(1, 10))
VBox(...) |
def serialize(self, serializable: Optional[Union[SerializableType, List[SerializableType]]]) \
-> PrimitiveJsonType:
if serializable is None:
return None
elif isinstance(serializable, List):
return [self.serialize(item) for item in serializable]
else:
serialized = self._create_serialized_container()
for mapping in self._property_mappings:
if mapping.object_property_getter is not None and mapping.serialized_property_setter is not None:
value = mapping.object_property_getter(serializable)
if not (mapping.optional and value is None):
if isinstance(value, type(mapping.collection_factory([]))):
value = list(mapping.collection_iter(value))
encoded_value = self._serialize_property_value(value, mapping.serializer_cls)
mapping.serialized_property_setter(serialized, encoded_value)
return serialized | Serializes the given serializable object or collection of serializable objects.
:param serializable: the object or objects to serialize
:return: a serialization of the given object |
def update(self):
current_time = int(time.time())
last_refresh = 0 if self._last_refresh is None else self._last_refresh
if current_time >= (last_refresh + self._refresh_rate):
self.get_cameras_properties()
self.get_ambient_sensor_data()
self.get_camera_extended_properties()
self._attrs = self._session.refresh_attributes(self.name)
self._attrs = assert_is_dict(self._attrs)
_LOGGER.debug("Called base station update of camera properties: "
"Scan Interval: %s, New Properties: %s",
self._refresh_rate, self.camera_properties) | Update object properties. |
def transformer_base_vq_ada_32ex_packed():
hparams = transformer_base_v2()
expert_utils.update_hparams_for_vq_gating(hparams)
hparams.moe_num_experts = 32
hparams.gating_type = "vq"
hparams.batch_size = 5072
hparams.ffn_layer = "local_moe"
hparams.shared_embedding_and_softmax_weights = False
hparams.learning_rate_warmup_steps = 10000
hparams.learning_rate_decay_steps = 27200
hparams.num_heads = 4
hparams.num_blocks = 1
hparams.moe_k = 1
hparams.num_decoder_layers = 6
hparams.label_smoothing = 0.
hparams.layer_prepostprocess_dropout = 0.1
hparams.layer_postprocess_sequence = "dan"
hparams.layer_preprocess_sequence = "none"
hparams.weight_decay = 1e-06
hparams.attention_dropout = 0.1
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay"
hparams.activation_dtype = "float32"
hparams.learning_rate = 0.1
hparams.learning_rate_constant = 1.0
return hparams | Set of hyperparameters for lm1b packed following tpu params. |
def _append_national_number(self, national_number):
prefix_before_nn_len = len(self._prefix_before_national_number)
if (self._should_add_space_after_national_prefix and prefix_before_nn_len > 0 and
self._prefix_before_national_number[-1] != _SEPARATOR_BEFORE_NATIONAL_NUMBER):
return self._prefix_before_national_number + _SEPARATOR_BEFORE_NATIONAL_NUMBER + national_number
else:
return self._prefix_before_national_number + national_number | Combines the national number with any prefix (IDD/+ and country
code or national prefix) that was collected. A space will be inserted
between them if the current formatting template indicates this to be
suitable. |
def Verify(self, mempool):
if not super(ClaimTransaction, self).Verify(mempool):
return False
# Reject if any other claim transaction is already in the mempool.
otherclaimTxs = [tx for tx in mempool if isinstance(tx, ClaimTransaction) and tx is not self]
if otherclaimTxs:
    return False
return True | Verify the transaction.
Args:
mempool:
Returns:
bool: True if verified. False otherwise. |
def reintegrate(self, fullPointList):
fullPointList = _deletePoints(fullPointList, self.minT, self.maxT)
fullPointList = self.deleteOverlapping(fullPointList)
outputPointList = fullPointList + self.pointList
outputPointList.sort()
return outputPointList | Integrates the pitch values of the accent into a larger pitch contour |
def pitch(times, frequencies, midi=False, unvoiced=False, ax=None, **kwargs):
ax, _ = __get_axes(ax=ax)
times = np.asarray(times)
frequencies, voicings = freq_to_voicing(np.asarray(frequencies,
dtype=np.float))
v_changes = 1 + np.flatnonzero(voicings[1:] != voicings[:-1])
v_changes = np.unique(np.concatenate([[0], v_changes, [len(voicings)]]))
v_slices, u_slices = [], []
for start, end in zip(v_changes, v_changes[1:]):
idx = slice(start, end)
for idx in u_slices:
ax.plot(times[idx], frequencies[idx], **style)
return ax | Visualize pitch contours
Parameters
----------
times : np.ndarray, shape=(n,)
Sample times of frequencies
frequencies : np.ndarray, shape=(n,)
frequencies (in Hz) of the pitch contours.
Voicing is indicated by sign (positive for voiced,
non-positive for non-voiced).
midi : bool
If `True`, plot on a MIDI-numbered vertical axis.
Otherwise, plot on a linear frequency axis.
unvoiced : bool
If `True`, unvoiced pitch contours are plotted and indicated
by transparency.
Otherwise, unvoiced pitch contours are omitted from the display.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the pitch contours.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to `matplotlib.pyplot.plot`.
Returns
-------
ax : matplotlib.pyplot.axes._subplots.AxesSubplot
A handle to the (possibly constructed) plot axes |
def on_doctree_read(app, document):
literal_blocks = uqbar.book.sphinx.collect_literal_blocks(document)
cache_mapping = uqbar.book.sphinx.group_literal_blocks_by_cache_path(literal_blocks)
node_mapping = {}
use_cache = bool(app.config["uqbar_book_use_cache"])
for cache_path, literal_block_groups in cache_mapping.items():
kwargs = dict(
extensions=app.uqbar_book_extensions,
setup_lines=app.config["uqbar_book_console_setup"],
teardown_lines=app.config["uqbar_book_console_teardown"],
use_black=bool(app.config["uqbar_book_use_black"]),
)
for literal_blocks in literal_block_groups:
try:
if use_cache:
local_node_mapping = uqbar.book.sphinx.interpret_code_blocks_with_cache(
literal_blocks, cache_path, app.connection, **kwargs
)
else:
local_node_mapping = uqbar.book.sphinx.interpret_code_blocks(
literal_blocks, **kwargs
)
node_mapping.update(local_node_mapping)
except ConsoleError as exception:
message = exception.args[0].splitlines()[-1]
logger.warning(message, location=exception.args[1])
if app.config["uqbar_book_strict"]:
raise
uqbar.book.sphinx.rebuild_document(document, node_mapping) | Hooks into Sphinx's ``doctree-read`` event. |
def list_storage_accounts_sub(access_token, subscription_id):
endpoint = ''.join([get_rm_endpoint(),
                    '/subscriptions/', subscription_id,
                    '/providers/Microsoft.Storage/storageAccounts',
                    '?api-version=', STORAGE_API])
return do_get(endpoint, access_token) | List the storage accounts in the specified subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body list of storage accounts. |
def PilToRgb(pil):
r = 0xff & pil
g = 0xff & (pil >> 8)
b = 0xff & (pil >> 16)
return tuple((v / 255.0 for v in (r, g, b))) | Convert the color from a PIL-compatible integer to RGB.
Parameters:
pil: a PIL compatible color representation (0xBBGGRR)
Returns:
The color as an (r, g, b) tuple in the range:
r: [0...1]
g: [0...1]
b: [0...1]
>>> '(%g, %g, %g)' % Color.PilToRgb(0x0080ff)
'(1, 0.501961, 0)' |
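Spot checks for `PilToRgb`, assuming the function above is in scope; the PIL integer is laid out as 0xBBGGRR:

```python
assert PilToRgb(0x0000FF) == (1.0, 0.0, 0.0)  # low byte is red
assert PilToRgb(0x00FF00) == (0.0, 1.0, 0.0)  # middle byte is green
assert PilToRgb(0xFF0000) == (0.0, 0.0, 1.0)  # high byte is blue
```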
def build(self, builder):
params = dict(ODMVersion="1.3",
FileType=self.filetype,
CreationDateTime=self.creationdatetime,
Originator=self.originator,
FileOID=self.fileoid,
xmlns="http://www.cdisc.org/ns/odm/v1.3",
)
if self.granularity_type:
    params['Granularity'] = self.granularity_type.value  # attribute names per the CDISC ODM 1.3 schema
if self.source_system:
    params['SourceSystem'] = self.source_system
if self.source_system_version:
    params['SourceSystemVersion'] = self.source_system_version
params['xmlns:mdsol'] = "http://www.mdsol.com/ns/odm/metadata"
if self.description:
    params['Description'] = self.description
builder.start("ODM", params)
if self.study is not None:
self.study.build(builder)
if self.clinical_data:
for clinical_data in self.clinical_data:
clinical_data.build(builder)
if self.admindata is not None:
self.admindata.build(builder)
builder.end("ODM")
return builder.close() | Build XML object, return the root, this is a copy for consistency and testing |
def checkOptions(options, parser):
if options.jobStore == None:
parser.error("Specify --jobStore")
defaultCategories = ["time", "clock", "wait", "memory"]
if options.categories is None:
options.categories = defaultCategories
else:
options.categories = [x.lower() for x in options.categories.split(",")]
for c in options.categories:
if c not in defaultCategories:
parser.error("Unknown category %s. Must be from %s"
% (c, str(defaultCategories)))
extraSort = ["count", "alpha"]
if options.sortCategory is not None:
if (options.sortCategory not in defaultCategories and
options.sortCategory not in extraSort):
parser.error("Unknown --sortCategory %s. Must be from %s"
% (options.sortCategory,
str(defaultCategories + extraSort)))
sortFields = ["min", "med", "ave", "max", "total"]
if options.sortField is not None:
if (options.sortField not in sortFields):
parser.error("Unknown --sortField %s. Must be from %s"
% (options.sortField, str(sortFields))) | Check options, throw parser.error() if something goes wrong |
def run_program(self, command, working_directory=os.getcwd(),
environment=None, cleanup_files=True,
native_spec="-l cputype=intel"):
try:
s = drmaa.Session()
s.initialize()
jt = s.createJobTemplate()
jt.remoteCommand = os.path.dirname(
    os.path.abspath(__file__)) + '/run_command.sh'  # script name assumed; original literal lost
jt.args = [command]
if environment is not None:
    jt.jobEnvironment = environment
jt.workingDirectory = working_directory
jt.nativeSpecification = native_spec
output_filename = os.path.join(working_directory, 'program.out')  # file name assumed
jt.outputPath = ':' + output_filename  # DRMAA output paths take a ':' prefix
jt.joinFiles = True
jobid = s.runJob(jt)
s.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)
with open(output_filename, 'r') as output:
stdout = output.read()
if cleanup_files:
os.remove(output_filename)
finally:
try:
s.control(drmaa.JOB_IDS_SESSION_ALL,
drmaa.JobControlAction.TERMINATE)
s.synchronize([drmaa.JOB_IDS_SESSION_ALL], dispose=True)
s.exit()
except(drmaa.errors.NoActiveSessionException):
pass
return stdout | Run a program through the grid, capturing the standard output. |
def area(self):
if self._dimension != 2:
raise NotImplementedError(
"2D is the only supported dimension",
"Current dimension",
self._dimension,
)
edge1, edge2, edge3 = self._get_edges()
return _surface_helpers.compute_area(
(edge1._nodes, edge2._nodes, edge3._nodes)
) | r"""The area of the current surface.
For surfaces in :math:`\mathbf{R}^2`, this computes the area via
Green's theorem. Using the vector field :math:`\mathbf{F} =
\left[-y, x\right]^T`, since :math:`\partial_x(x) - \partial_y(-y) = 2`
Green's theorem says twice the area is equal to
.. math::
\int_{B\left(\mathcal{U}\right)} 2 \, d\mathbf{x} =
\int_{\partial B\left(\mathcal{U}\right)} -y \, dx + x \, dy.
This relies on the assumption that the current surface is valid, which
implies that the image of the unit triangle under the Bézier
map --- :math:`B\left(\mathcal{U}\right)` --- has the edges of the
surface as its boundary.
Note that for a given edge :math:`C(r)` with control points
:math:`x_j, y_j`, the integral can be simplified:
.. math::
\int_C -y \, dx + x \, dy = \int_0^1 (x y' - y x') \, dr
= \sum_{i < j} (x_i y_j - y_i x_j) \int_0^1 b_{i, d}
b'_{j, d} \, dr
where :math:`b_{i, d}, b_{j, d}` are Bernstein basis polynomials.
Returns:
float: The area of the current surface.
Raises:
NotImplementedError: If the current surface isn't in
:math:`\mathbf{R}^2`. |
def extract_header(msg_or_header):
if not msg_or_header:
return {}
try:
    h = msg_or_header['header']
except KeyError:
    try:
        h = msg_or_header['msg_id']
    except KeyError:
        raise
    else:
        h = msg_or_header
if not isinstance(h, dict):
h = dict(h)
return h | Given a message or header, return the header. |
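A usage sketch for `extract_header`, assuming the function above is in scope and the `'header'` key reconstruction holds:

```python
msg = {"header": {"msg_id": "1", "username": "kernel"}, "content": {}}

assert extract_header(msg) == {"msg_id": "1", "username": "kernel"}  # full message in
assert extract_header(None) == {}                                    # falsy input yields {}
```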
def readinto(self, buf, *, start=0, end=None):
if end is None:
end = len(buf)
for i in range(start, end):
buf[i] = self._readbyte() | Read into ``buf`` from the device. The number of bytes read will be the
length of ``buf``.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buf[start:end]``. This will not cause an allocation like
``buf[start:end]`` will so it saves memory.
:param bytearray buf: buffer to write into
:param int start: Index to start writing at
:param int end: Index to write up to but not include |
def parcor_stable(filt):
try:
return all(abs(k) < 1 for k in parcor(ZFilter(filt.denpoly)))
except ParCorError:
return False | Tests whether the given filter is stable or not by using the partial
correlation coefficients (reflection coefficients) of the given filter.
Parameters
----------
filt :
A LTI filter as a LinearFilter object.
Returns
-------
A boolean that is true only when all correlation coefficients are inside the
unit circle. Critical stability (i.e., when outer coefficient has magnitude
equal to one) is seen as an instability, and returns False.
See Also
--------
parcor :
Partial correlation coefficients generator.
lsf_stable :
Tests filter stability with Line Spectral Frequencies (LSF) values. |
def _get_converter_module(sk_obj):
try:
cv_idx = _converter_lookup[sk_obj.__class__]
except KeyError:
raise ValueError(
    "Transformer '%s' not supported; supported transformers are %s."
    % (repr(sk_obj),
",".join(k.__name__ for k in _converter_module_list)))
return _converter_module_list[cv_idx] | Returns the module holding the conversion functions for a
particular model. |
def set_intersection(self, division, intersection):
IntersectRelationship.objects.filter(
from_division=self, to_division=division
).update(intersection=intersection) | Set intersection percentage of intersecting divisions. |
def import_attr(path):
if isinstance(path, six.binary_type):
path = path.decode("utf-8")
if not isinstance(path, six.text_type):
return path
if u"." not in path:
ValueError("%r should be of the form `module.attr` and we just got `attr`" % path)
module, attr = path.rsplit(u, 1)
try:
return getattr(import_module(module), attr)
except ImportError:
raise ImportError("Module %r not found" % module)
except AttributeError:
raise AttributeError("Module %r has not attribut %r" % (module, attr)) | transform a python dotted path to the attr
:param path: A dotted path to a python object or a python object
:type path: :obj:`unicode` or :obj:`str` or anything
:return: The python object pointed by the dotted path or the python object unchanged |
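A usage sketch for `import_attr`, assuming the function above is in scope (it relies on `six` and `importlib.import_module`):

```python
import os.path

# A dotted path resolves to the attribute it names.
assert import_attr("os.path.join") is os.path.join

# Non-string input is returned unchanged.
assert import_attr(os.path.join) is os.path.join
```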
def mapping_get(index, doc_type, hosts=None, profile=None):
es = _get_instance(hosts, profile)
try:
return es.indices.get_mapping(index=index, doc_type=doc_type)
except elasticsearch.exceptions.NotFoundError:
return None
except elasticsearch.TransportError as e:
raise CommandExecutionError("Cannot retrieve mapping {0}, server returned code {1} with message {2}".format(index, e.status_code, e.error)) | Retrieve mapping definition of index or index/type
index
Index for the mapping
doc_type
Name of the document type
CLI example::
salt myminion elasticsearch.mapping_get testindex user |
def update_exc(exc, msg, before=True, separator="\n"):
emsg = exc.message
if before:
parts = (msg, separator, emsg)
else:
parts = (emsg, separator, msg)
new_msg = "%s%s%s" % parts
new_args = (new_msg, ) + exc.args[1:]
exc.message = new_msg
exc.args = new_args
return exc | Adds additional text to an exception's error message.
The new text will be added before the existing text by default; to append
it after the original text, pass False to the `before` parameter.
By default the old and new text will be separated by a newline. If you wish
to use a different separator, pass that as the `separator` parameter. |
def _with_inline(func, admin_site, metadata_class, inline_class):
def register(model_or_iterable, admin_class=None, **options):
func(model_or_iterable, admin_class, **options)
_monkey_inline(model_or_iterable, admin_site._registry[model_or_iterable], metadata_class, inline_class, admin_site)
return register | Decorator for register function that adds an appropriate inline. |
def expand_alias(self, line):
pre,_,fn,rest = split_user_input(line)
res = pre + self.expand_aliases(fn, rest)
return res | Expand an alias in the command line
Returns the provided command line, possibly with the first word
(command) translated according to alias expansion rules.
[ipython]|16> _ip.expand_aliases("np myfile.txt")
<16> 'q:/opt/np/notepad++.exe myfile.txt' |
def write(self, label, index):
if label in self.cache:
if self.cache[label] != index:
error_message = 'Conflicting index for label {}: got {}, cache: {}'.format(label, index, self.cache)  # message text assumed
raise RuntimeError(error_message)
else:
self.cache[label] = index | Saves a new label, index mapping to the cache.
Raises a RuntimeError on a conflict. |
def send_signal(self, s):
self._get_signal_event(s)
pid = self.get_pid()
if not pid:
raise ValueError('Daemon is not running')  # message text assumed
os.kill(pid, s) | Send a signal to the daemon process.
The signal must have been enabled using the ``signals``
parameter of :py:meth:`Service.__init__`. Otherwise, a
``ValueError`` is raised. |
def catch_osd_errors(conn, logger, args):
logger.info('checking OSD status...')  # log and status-key strings assumed; original literals lost
status = osd_status_check(conn, args.cluster)
osds = int(status.get('num_osds', 0))
up_osds = int(status.get('num_up_osds', 0))
in_osds = int(status.get('num_in_osds', 0))
full = status.get('full', False)
nearfull = status.get('nearfull', False)
if osds > up_osds:
    difference = osds - up_osds
    logger.warning('there %s %d OSD%s down' % (
        ['is', 'are'][difference != 1],
        difference,
        "s"[difference == 1:])
    )
if osds > in_osds:
    difference = osds - in_osds
    logger.warning('there %s %d OSD%s out' % (
        ['is', 'are'][difference != 1],
        difference,
        "s"[difference == 1:])
    )
if full:
    logger.warning('OSDs are full!')
if nearfull:
    logger.warning('OSDs are near full!') | Look for possible issues when checking the status of an OSD and
logger.warning() | Look for possible issues when checking the status of an OSD and
report them back to the user. |
def run(self):
self.fenum.write()
self.fcpp = open(os.path.join(os.path.abspath(self.ctp_dir), ), )
for idx, line in enumerate(self.fcpp):
l = self.process_line(idx, line)
self.f_data_type.write(l)
self.fcpp.close()
self.f_data_type.close()
self.fenum.close()
print() | Main function. |
def show_vmatrix(vm):
unhandled = vm
while(unhandled.__len__()>0):
next_unhandled = []
for i in range(0,unhandled.__len__()):
ele = unhandled[i]
print(ele)
cond = elel.is_leaf(ele)
if(cond):
pass
else:
children = ele[0]
next_unhandled.append(children)
unhandled = next_unhandled | d = {1: {2: {22: 222}}, 3: {'a': 'b'}}
vm = [[[222]], ['b']]
show_vmatrix(vm) |
def restore(self):
try:
for modname, mod in self._saved.items():
if mod is not None:
sys.modules[modname] = mod
else:
try:
del sys.modules[modname]
except KeyError:
pass
finally:
imp.release_lock() | Restores the modules that the saver knows about into
sys.modules. |
def vrel(v1, v2):
v1 = stypes.toDoubleVector(v1)
v2 = stypes.toDoubleVector(v2)
return libspice.vrel_c(v1, v2) | Return the relative difference between two 3-dimensional vectors.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vrel_c.html
:param v1: First vector
:type v1: 3-Element Array of floats
:param v2: Second vector
:type v2: 3-Element Array of floats
:return: the relative difference between v1 and v2.
:rtype: float |
def Normalize(str_):
if isinstance(str_, bytes):
str_ = str_.decode("utf-8")
return SASLPREP.prepare(str_).encode("utf-8") | The Normalize(str) function.
This one also accepts Unicode string input (in the RFC only UTF-8
strings are used). |
def set_errors(self):
if not self.field.errors or self.attrs.get("_no_errors"):
return
self.values["class"].append("error")
for error in self.field.errors:
self.values["errors"] += ERROR_WRAPPER % {"message": error} | Set errors markup. |
def _get_output_columns(nodes, context):
columns = []
for node in nodes:
for sql_output in sql_context_helpers.get_outputs(node, context):
field_name = sql_output.field_name
column = sql_context_helpers.get_column(field_name, node, context)
column = column.label(sql_output.output_name)
columns.append(column)
return columns | Get the output columns for a list of SqlNodes.
Args:
nodes: List[SqlNode], the nodes to get output columns from.
context: CompilationContext, global compilation state and metadata.
Returns:
List[Column], list of SqlAlchemy Columns to output for this query. |
def get_changeset(args):
parser = argparse.ArgumentParser(description=get_changeset.__doc__)
parser.add_argument(
    'infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin,
    help='path to the bundle YAML file')  # argument names and help text assumed; original literals lost
parser.add_argument(
    '--version', action='version', version='%(prog)s {}'.format(version))
options = parser.parse_args(args)
try:
    bundle = yaml.safe_load(options.infile)
except Exception:
    return 'error: the provided bundle is not a valid YAML'  # message assumed
errors = validation.validate(bundle)
if errors:
    return '\n'.join(errors)
# Emit the changes as a JSON list, one change per line.
print('[')
for num, change in enumerate(changeset.parse(bundle)):
    if num:
        print(',')
    print(json.dumps(change))
print(']') | Dump the changeset objects as JSON, reading the provided bundle YAML.
The YAML can be provided either from stdin or by passing a file path as
first argument. |
def on_lxml_loads(self, lxml, config, content, **kwargs):
from ..contrib.xml_parser import XMLParser
return XMLParser.from_xml(
content, encoding=kwargs.pop("encoding", "utf-8")
).to_dict() | The `lxml <https://pypi.org/project/lxml/>`_ loads method.
:param module lxml: The ``lxml`` module
:param class config: The loading config class
:param str content: The content to deserialize
:param str encoding: The encoding to read the given xml document as, defaults to
"utf-8", optional
:returns: The deserialized dictionary
:rtype: dict |
async def send_notification(self, title, message):
# GraphQL mutation text assumed, reconstructed from the response fields read below.
query = gql(
    'mutation{ sendPushNotification(input: {title: "%s", message: "%s"}) '
    '{ successful pushedToNumberOfDevices }}'
    % (title, message)
)
res = await self.execute(query)
if not res:
return False
noti = res.get("sendPushNotification", {})
successful = noti.get("successful", False)
pushed_to_number_of_devices = noti.get("pushedToNumberOfDevices", 0)
_LOGGER.debug(
"send_notification: status %s, send to %s devices",
successful,
pushed_to_number_of_devices,
)
return successful | Send notification. |
def ProcessBlocks(self, block_limit=1000):
self._lock.acquire()
try:
blockcount = 0
while self._current_height <= Blockchain.Default().Height and (block_limit == 0 or blockcount < block_limit):
block = Blockchain.Default().GetBlockByHeight(self._current_height)
if block is not None:
self.ProcessNewBlock(block)
else:
self._current_height += 1
blockcount += 1
self.SaveStoredData("Height", self._current_height)
except Exception as e:
logger.warn("Could not process ::: %s " % e)
finally:
self._lock.release() | Method called on a loop to check the current height of the blockchain. If the height of the blockchain
is more than the current stored height in the wallet, we get the next block in line and
processes it.
In the case that the wallet height is far behind the height of the blockchain, we do this 1000
blocks at a time.
Args:
block_limit (int): the number of blocks to process synchronously. defaults to 1000. set to 0 to block until the wallet is fully rebuilt. |
def GetFormatterObject(cls, data_type):
data_type = data_type.lower()
if data_type not in cls._formatter_objects:
formatter_object = None
if data_type in cls._formatter_classes:
formatter_class = cls._formatter_classes[data_type]
formatter_object = formatter_class()
if not formatter_object:
logger.warning(
    'Using default formatter for data type: {0:s}'.format(data_type))
formatter_object = default.DefaultFormatter()
cls._formatter_objects[data_type] = formatter_object
return cls._formatter_objects[data_type] | Retrieves the formatter object for a specific data type.
Args:
data_type (str): data type.
Returns:
EventFormatter: corresponding formatter or the default formatter if
not available. |
def _get_closest_matches(input_attributes, target_attributes):
closest_matches = {}
for a in input_attributes:
best_dist = float('inf')
best_matches = []
for b in target_attributes:
dist = _euclidean_dist(input_attributes[a], target_attributes[b])
if dist < best_dist:
best_matches = [b]
best_dist = dist
elif dist == best_dist:
best_matches.append(b)
closest_matches[a] = best_matches
return closest_matches | :param input_attributes: First dictionary of objects to attribute tuples.
:param target_attributes: Second dictionary of blocks to attribute tuples.
:returns: A dictionary of objects in the input_attributes to the closest objects in the
target_attributes. |
def _encode(self, data, algorithm, key=None):
if algorithm[] == :
return data + self._hmac_generate(data, algorithm, key)
elif algorithm[] == :
return self._aes_encrypt(data, algorithm, key)
elif algorithm[] == :
return data
elif algorithm[] == :
return json.dumps(data)
elif algorithm[] == :
return data
elif algorithm[] == :
return self._zlib_compress(data, algorithm)
else:
raise Exception( % algorithm[]) | Encode data with specific algorithm |
def _html_to_img_tuples(html:str, format:str='jpg', n_images:int=10) -> list:
    "Parse the google images html to img tuples containing `(fname, url)`"
    # Google Images metadata selectors ('rg_meta', 'ou', 'ity') assumed; original literals lost.
    bs = BeautifulSoup(html, 'html.parser')
    img_tags = bs.find_all('div', {'class': 'rg_meta'})
    metadata_dicts = (json.loads(e.text) for e in img_tags)
    img_tuples = ((_img_fname(d['ou']), d['ou']) for d in metadata_dicts if d['ity'] == format)
    return list(itertools.islice(img_tuples, n_images)) | Parse the google images html to img tuples containing `(fname, url)` |
def set_default (feature, value):
f = __all_features[feature]
bad_attribute = None
if f.free:
bad_attribute = "free"
elif f.optional:
bad_attribute = "optional"
if bad_attribute:
raise InvalidValue ("%s property %s cannot have a default" % (bad_attribute, f.name))
if value not in f.values:
raise InvalidValue ("The specified default value, is invalid.\n" % value + "allowed values are: %s" % f.values)
f.set_default(value) | Sets the default value of the given feature, overriding any previous default.
feature: the name of the feature
value: the default value to assign |
def clear_score_system(self):
if (self.get_score_system_metadata().is_read_only() or
self.get_score_system_metadata().is_required()):
raise errors.NoAccess()
self._my_map['scoreSystemId'] = self._score_system_default  # map key assumed; original literal lost
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* |
async def disconnect(self):
if not self._watch_stopped.is_set():
log.debug('Stopping watcher task')  # log text assumed
self._watch_stopping.set()
await self._watch_stopped.wait()
self._watch_stopping.clear()
if self.is_connected():
log.debug('Closing model connection')  # log text assumed
await self._connector.disconnect()
self._info = None | Shut down the watcher task and close websockets. |
def image(self):
obj = self.content_object
try:
image = obj.get_image()
except AttributeError:
try:
image = obj.content_object.get_image()
except:
image = None
try:
return image.image
except AttributeError:
return image | Attempts to provide a representative image from a content_object based on
the content object's get_image() method.
If there is a another content.object, as in the case of comments and other GFKs,
then it will follow to that content_object and then get the image.
Requires get_image() to be defined on the related model even if it just
returns object.image, to avoid bringing back images you may not want.
Note that this expects the image only. Anything related (caption, etc) should be stripped. |
def set_data(self, data):
self._data = data
keys = list(data.keys())
self.breakpoints = []
for key in keys:
bp_list = data[key]
if bp_list:
for item in data[key]:
self.breakpoints.append((key, item[0], item[1], ""))
self.reset() | Set model data |
def build(self, signing_private_key):
is_oscrypto = isinstance(signing_private_key, asymmetric.PrivateKey)
if not isinstance(signing_private_key, keys.PrivateKeyInfo) and not is_oscrypto:
    raise TypeError(_pretty_message(
        '''
        signing_private_key must be an instance of
        asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey,
        not %s
        ''',
        _type_name(signing_private_key)
    ))
signature_algo = signing_private_key.algorithm
if signature_algo == 'ec':
    signature_algo = 'ecdsa'
signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo)
# Field names below follow the asn1crypto csr/x509 structures.
def _make_extension(name, value):
    return {
        'extn_id': name,
        'critical': self._determine_critical(name),
        'extn_value': value
    }
extensions = []
for name in sorted(self._special_extensions):
    value = getattr(self, '_%s' % name)
    if value is not None:
        extensions.append(_make_extension(name, value))
for name in sorted(self._other_extensions.keys()):
    extensions.append(_make_extension(name, self._other_extensions[name]))
attributes = []
if extensions:
    attributes.append({
        'type': 'extension_request',
        'values': [extensions]
    })
certification_request_info = csr.CertificationRequestInfo({
    'version': 'v1',
    'subject': self._subject,
    'subject_pk_info': self._subject_public_key,
    'attributes': attributes
})
if signing_private_key.algorithm == 'rsa':
    sign_func = asymmetric.rsa_pkcs1v15_sign
elif signing_private_key.algorithm == 'dsa':
    sign_func = asymmetric.dsa_sign
elif signing_private_key.algorithm == 'ec':
    sign_func = asymmetric.ecdsa_sign
if not is_oscrypto:
    signing_private_key = asymmetric.load_private_key(signing_private_key)
signature = sign_func(signing_private_key, certification_request_info.dump(), self._hash_algo)
return csr.CertificationRequest({
    'certification_request_info': certification_request_info,
    'signature_algorithm': {
        'algorithm': signature_algorithm_id,
    },
    'signature': signature
}) | Validates the certificate information, constructs an X.509 certificate
and then signs it
:param signing_private_key:
An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey
object for the private key to sign the request with. This should be
the private key that matches the public key.
:return:
An asn1crypto.csr.CertificationRequest object of the request |
def conditional_write(strm, fmt, value, *args, **kwargs):
if value is not None:
strm.write(fmt.format(value, *args, **kwargs)) | Write to stream using fmt and value if value is not None |
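A usage sketch for `conditional_write`, assuming the function above is in scope:

```python
import io

buf = io.StringIO()
conditional_write(buf, "{0:.2f}\n", 3.14159)  # writes "3.14\n"
conditional_write(buf, "{0}\n", None)         # no-op: value is None
assert buf.getvalue() == "3.14\n"
```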
def update_machine_group(self, project_name, group_detail):
headers = {}
params = {}
resource = "/machinegroups/" + group_detail.group_name
headers['Content-Type'] = 'application/json'
body = six.b(json.dumps(group_detail.to_json()))
headers['x-log-bodyrawsize'] = str(len(body))
(resp, headers) = self._send("PUT", project_name, body, resource, params, headers)
return UpdateMachineGroupResponse(headers, resp) | update machine group in a project
Unsuccessful operation will cause a LogException.
:type project_name: string
:param project_name: the Project name
:type group_detail: MachineGroupDetail
:param group_detail: the machine group detail config
:return: UpdateMachineGroupResponse
:raise: LogException |
def addFileHandler(self, filename='', dr='', lvl=1):
    fname = self.name
    if filename != '':
        fname = filename
    if '.' not in fname:
        fname += '.log'  # extension assumed; original literal lost
    fh = logging.FileHandler(os.path.join(dr, fname))
    fh.setLevel(lvl)
    # Format string assumed; original literal lost.
    frmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    fFrmt = logging.Formatter(frmtString)
    fh.setFormatter(fFrmt)
    self.addHandler(fh) | This function will add a file handler to a log with the provided level.
Args:
lvl (int): The severity level of messages printed to the file with
the file handler, default = 1. |
def _dispatch_commands(self, from_state, to_state, smtp_command):
name_handler_method = 'smtp_%s' % smtp_command.lower().replace(' ', '_')  # method-name prefix assumed
try:
    handler_method = getattr(self, name_handler_method)
except AttributeError:
    self.reply(451, 'Requested action aborted: local error in processing')  # reply text assumed
else:
handler_method() | This method dispatches a SMTP command to the appropriate handler
method. It is called after a new command was received and a valid
transition was found. |
def download_from_plugin(plugin: APlugin):
plugin.log.info()
plugin.update_last_update()
save_state = plugin.load_save_state()
if plugin.last_update <= save_state.last_update:
plugin.log.info()
return
plugin.log.info()
plugin.update_download_links()
down_link_item_dict = plugin.get_updated_data(save_state.link_item_dict)
plugin.log.info( + str(len(plugin.download_data)))
if not down_link_item_dict:
plugin.log.info()
return
plugin.log.info(f"Download new {plugin.unit}s: {len(down_link_item_dict)}")
plugin.download(down_link_item_dict, plugin.download_path, 'Download new ' + plugin.unit + 's', plugin.unit)
succeed_link_item_dict, lost_link_item_dict = plugin.check_download(down_link_item_dict, plugin.download_path)
plugin.log.info(f"Downloaded: {len(succeed_link_item_dict)}/{len(down_link_item_dict)}")
plugin.log.info()
plugin.update_dict(save_state.link_item_dict, succeed_link_item_dict)
plugin.log.info()
plugin.save_save_state(save_state.link_item_dict) | Download routine.
1. get newest update time
2. load savestate
3. compare last update time with savestate time
4. get download links
5. compare with savestate
6. download new/updated data
7. check downloads
8. update savestate
9. write new savestate
:param plugin: plugin
:type plugin: ~unidown.plugin.a_plugin.APlugin |
def getaddrinfo_wrapper(host, port, family=socket.AF_INET, socktype=0, proto=0, flags=0):
return orig_getaddrinfo(host, port, family, socktype, proto, flags) | Patched 'getaddrinfo' with default family IPv4 (enabled by settings IPV4_ONLY=True) |
def summarize(self):
s = str(self.allval())
return self.parse(s[:2]+ .join([]*len(s[2:]))) | Convert all of the values to their max values. This form is used to represent the summary level |