text stringlengths 75 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 0.18 |
|---|---|---|---|
def _make_tarfile(self, output_filename, source_dir):
"""Create .tar.gz file
"""
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir)) | [
"def",
"_make_tarfile",
"(",
"self",
",",
"output_filename",
",",
"source_dir",
")",
":",
"with",
"tarfile",
".",
"open",
"(",
"output_filename",
",",
"\"w:gz\"",
")",
"as",
"tar",
":",
"tar",
".",
"add",
"(",
"source_dir",
",",
"arcname",
"=",
"os",
"."... | 44.2 | 0.008889 |
def get_evernote_client(self, token=None):
"""
get the token from evernote
"""
if token:
return EvernoteClient(token=token, sandbox=self.sandbox)
else:
return EvernoteClient(consumer_key=self.consumer_key, consumer_secret=self.consumer_secret,
sandbox=self.sandbox) | [
"def",
"get_evernote_client",
"(",
"self",
",",
"token",
"=",
"None",
")",
":",
"if",
"token",
":",
"return",
"EvernoteClient",
"(",
"token",
"=",
"token",
",",
"sandbox",
"=",
"self",
".",
"sandbox",
")",
"else",
":",
"return",
"EvernoteClient",
"(",
"c... | 39.888889 | 0.008174 |
def loadMoreItems(self, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
"""
Load more items using the continuation parameters of previously loaded items.
"""
self.lastLoadOk = False
self.lastLoadLength = 0
if not continuation and not self.continuation:
return
self._itemsLoadedDone(self._getContent(excludeRead, continuation or self.continuation, loadLimit, since, until)) | [
"def",
"loadMoreItems",
"(",
"self",
",",
"excludeRead",
"=",
"False",
",",
"continuation",
"=",
"None",
",",
"loadLimit",
"=",
"20",
",",
"since",
"=",
"None",
",",
"until",
"=",
"None",
")",
":",
"self",
".",
"lastLoadOk",
"=",
"False",
"self",
".",
... | 51.666667 | 0.012685 |
def validate(self, url):
''' takes in a Github repository for validation of preview and
runtime (and possibly tests passing?
'''
# Preview must provide the live URL of the repository
if not url.startswith('http') or not 'github' in url:
bot.error('Test of preview must be given a Github repostitory.')
return False
if not self._validate_preview(url):
return False
return True | [
"def",
"validate",
"(",
"self",
",",
"url",
")",
":",
"# Preview must provide the live URL of the repository",
"if",
"not",
"url",
".",
"startswith",
"(",
"'http'",
")",
"or",
"not",
"'github'",
"in",
"url",
":",
"bot",
".",
"error",
"(",
"'Test of preview must ... | 33 | 0.008421 |
def clear_cache(cls):
"""Call this before closing tk root"""
#Prevent tkinter errors on python 2 ??
for key in cls._cached:
cls._cached[key] = None
cls._cached = {} | [
"def",
"clear_cache",
"(",
"cls",
")",
":",
"#Prevent tkinter errors on python 2 ??",
"for",
"key",
"in",
"cls",
".",
"_cached",
":",
"cls",
".",
"_cached",
"[",
"key",
"]",
"=",
"None",
"cls",
".",
"_cached",
"=",
"{",
"}"
] | 33.833333 | 0.014423 |
def _sampleRange(rng, start, end, step, k):
"""
Equivalent to:
random.sample(xrange(start, end, step), k)
except it uses our random number generator.
This wouldn't need to create the arange if it were implemented in C.
"""
array = numpy.empty(k, dtype="uint32")
rng.sample(numpy.arange(start, end, step, dtype="uint32"), array)
return array | [
"def",
"_sampleRange",
"(",
"rng",
",",
"start",
",",
"end",
",",
"step",
",",
"k",
")",
":",
"array",
"=",
"numpy",
".",
"empty",
"(",
"k",
",",
"dtype",
"=",
"\"uint32\"",
")",
"rng",
".",
"sample",
"(",
"numpy",
".",
"arange",
"(",
"start",
",... | 26.846154 | 0.01385 |
def read_voltages(self, voltage_file):
"""import voltages from a volt.dat file
Parameters
----------
voltage_file : string
Path to volt.dat file
"""
measurements_raw = np.loadtxt(
voltage_file,
skiprows=1,
)
measurements = np.atleast_2d(measurements_raw)
# extract measurement configurations
A = (measurements[:, 0] / 1e4).astype(int)
B = (measurements[:, 0] % 1e4).astype(int)
M = (measurements[:, 1] / 1e4).astype(int)
N = (measurements[:, 1] % 1e4).astype(int)
ABMN = np.vstack((A, B, M, N)).T
if self.configs.configs is None:
self.configs.configs = ABMN
else:
# configurations don't match
if not np.all(ABMN == self.configs.configs):
for nr, (old_config, new_config) in enumerate(zip(
self.configs.configs, ABMN)):
if np.all(old_config == new_config):
continue
# check polarity
current_electrodes_are_equal = np.all(
old_config[0:2] == new_config[0:2]
)
voltage_electrodes_are_switched = np.all(
old_config[2:4] == new_config[4:1:-1]
)
if(current_electrodes_are_equal and
voltage_electrodes_are_switched):
if len(self.configs.measurements.keys()) > 0:
raise Exception(
'need to switch electrode polarity, but ' +
'there are already measurements stored for ' +
'the old configuration!')
else:
# switch M/N in configurations
self.configs.configs[nr, :] = new_config
else:
raise Exception(
'There was an error matching configurations of ' +
'voltages with configurations already imported'
)
# add measurements to the config instance
mid_mag = self.configs.add_measurements(
measurements[:, 2]
)
mid_pha = self.configs.add_measurements(
measurements[:, 3]
)
self.assignments['measurements'] = [mid_mag, mid_pha] | [
"def",
"read_voltages",
"(",
"self",
",",
"voltage_file",
")",
":",
"measurements_raw",
"=",
"np",
".",
"loadtxt",
"(",
"voltage_file",
",",
"skiprows",
"=",
"1",
",",
")",
"measurements",
"=",
"np",
".",
"atleast_2d",
"(",
"measurements_raw",
")",
"# extrac... | 37.181818 | 0.000794 |
def compute_K_analytical(dataframe, spacing):
"""Given an electrode spacing, compute geometrical factors using the
equation for the homogeneous half-space (Neumann-equation)
If a dataframe is given, use the column (a, b, m, n). Otherwise, expect an
Nx4 arrray.
Parameters
----------
dataframe : pandas.DataFrame or numpy.ndarray
Configurations, either as DataFrame
spacing : float or numpy.ndarray
distance between electrodes. If array, then these are the x-coordinates
of the electrodes
"""
if isinstance(dataframe, pd.DataFrame):
configs = dataframe[['a', 'b', 'm', 'n']].values
else:
configs = dataframe
r_am = np.abs(configs[:, 0] - configs[:, 2]) * spacing
r_an = np.abs(configs[:, 0] - configs[:, 3]) * spacing
r_bm = np.abs(configs[:, 1] - configs[:, 2]) * spacing
r_bn = np.abs(configs[:, 1] - configs[:, 3]) * spacing
K = 2 * np.pi / (1 / r_am - 1 / r_an - 1 / r_bm + 1 / r_bn)
if isinstance(dataframe, pd.DataFrame):
dataframe['k'] = K
return K | [
"def",
"compute_K_analytical",
"(",
"dataframe",
",",
"spacing",
")",
":",
"if",
"isinstance",
"(",
"dataframe",
",",
"pd",
".",
"DataFrame",
")",
":",
"configs",
"=",
"dataframe",
"[",
"[",
"'a'",
",",
"'b'",
",",
"'m'",
",",
"'n'",
"]",
"]",
".",
"... | 33.870968 | 0.000926 |
def get_info(self):
'Retrieve the data from CrossRef.'
escaped_doi = urllib2.quote(self.doi, '')
html = get_resource("www.crossref.org", '/guestquery?queryType=doi&restype=unixref&doi=%s&doi_search=Search' % escaped_doi)
xml_matches = []
for m in re.finditer('(<doi_records>.*?</doi_records>)', html, re.DOTALL):
xml_matches.append(m.group(0))
if len(xml_matches) == 0:
raise DOIRetrievalException('No matches found for the DOI "%s".' % self.doi)
elif len(xml_matches) == 1:
return xml_matches[0]
else:
raise DOIRetrievalException('Multiple (%d) matches found for the DOI "%s".' % (len(xml_matches), self.doi)) | [
"def",
"get_info",
"(",
"self",
")",
":",
"escaped_doi",
"=",
"urllib2",
".",
"quote",
"(",
"self",
".",
"doi",
",",
"''",
")",
"html",
"=",
"get_resource",
"(",
"\"www.crossref.org\"",
",",
"'/guestquery?queryType=doi&restype=unixref&doi=%s&doi_search=Search'",
"%"... | 47.333333 | 0.008287 |
def frobenius_norm(self):
""" Frobenius norm (||data - USV||) for a data matrix and a low rank
approximation given by SVH using rank k for U and V
Returns:
frobenius norm: F = ||data - USV||
"""
if scipy.sparse.issparse(self.data):
err = self.data - self.U*self.S*self.V
err = err.multiply(err)
err = np.sqrt(err.sum())
else:
err = self.data[:,:] - np.dot(np.dot(self.U, self.S), self.V)
err = np.sqrt(np.sum(err**2))
return err | [
"def",
"frobenius_norm",
"(",
"self",
")",
":",
"if",
"scipy",
".",
"sparse",
".",
"issparse",
"(",
"self",
".",
"data",
")",
":",
"err",
"=",
"self",
".",
"data",
"-",
"self",
".",
"U",
"*",
"self",
".",
"S",
"*",
"self",
".",
"V",
"err",
"=",... | 37.6875 | 0.012945 |
def cron(self, pattern, name, *args, **kwargs):
"""
A function to setup a RQ job as a cronjob::
@rq.job('low', timeout=60)
def add(x, y):
return x + y
add.cron('* * * * *', 'add-some-numbers', 1, 2, timeout=10)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
:param pattern: A Crontab pattern.
:type pattern: str
:param name: The name of the cronjob.
:type name: str
:param queue: Name of the queue to queue in, defaults to
queue of of job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param repeat: The number of times the job needs to be repeatedly
queued via the cronjob. Take care only using this for
cronjob that don't already repeat themselves natively
due to their crontab.
:type repeat: int
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
"""
queue_name = kwargs.pop('queue', self.queue_name)
timeout = kwargs.pop('timeout', self.timeout)
description = kwargs.pop('description', None)
repeat = kwargs.pop('repeat', None)
return self.rq.get_scheduler().cron(
pattern,
self.wrapped,
args=args,
kwargs=kwargs,
repeat=repeat,
queue_name=queue_name,
id='cron-%s' % name,
timeout=timeout,
description=description,
) | [
"def",
"cron",
"(",
"self",
",",
"pattern",
",",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"queue_name",
"=",
"kwargs",
".",
"pop",
"(",
"'queue'",
",",
"self",
".",
"queue_name",
")",
"timeout",
"=",
"kwargs",
".",
"pop",
"(",
... | 33.298246 | 0.001024 |
def groups_for_perm(
cls,
instance,
perm_name,
group_ids=None,
limit_group_permissions=False,
db_session=None,
):
"""
return PermissionTuples for groups that have given
permission for the resource, perm_name is __any_permission__ then
users with any permission will be listed
:param instance:
:param perm_name:
:param group_ids: limits the permissions to specific group ids
:param limit_group_permissions: should be used if we do not want to have
user objects returned for group permissions, this might cause performance
issues for big groups
:param db_session:
:return:
""" # noqa
db_session = get_db_session(db_session, instance)
group_perms = resource_permissions_for_users(
cls.models_proxy,
[perm_name],
[instance.resource_id],
group_ids=group_ids,
limit_group_permissions=limit_group_permissions,
skip_user_perms=True,
db_session=db_session,
)
if instance.owner_group_id:
for user in instance.owner_group.users:
group_perms.append(
PermissionTuple(
user,
ALL_PERMISSIONS,
"group",
instance.owner_group,
instance,
True,
True,
)
)
return group_perms | [
"def",
"groups_for_perm",
"(",
"cls",
",",
"instance",
",",
"perm_name",
",",
"group_ids",
"=",
"None",
",",
"limit_group_permissions",
"=",
"False",
",",
"db_session",
"=",
"None",
",",
")",
":",
"# noqa",
"db_session",
"=",
"get_db_session",
"(",
"db_session... | 32.808511 | 0.001889 |
def fit(self, data, method='kmeans', **kwargs):
"""
fit classifiers from large dataset.
Parameters
----------
data : dict
A dict of data for clustering. Must contain
items with the same name as analytes used for
clustering.
method : str
A string defining the clustering method used. Can be:
* 'kmeans' : K-Means clustering algorithm
* 'meanshift' : Meanshift algorithm
n_clusters : int
*K-Means only*. The numebr of clusters to identify
bandwidth : float
*Meanshift only.*
The bandwidth value used during clustering.
If none, determined automatically. Note:
the data are scaled before clutering, so
this is not in the same units as the data.
bin_seeding : bool
*Meanshift only.*
Whether or not to use 'bin_seeding'. See
documentation for `sklearn.cluster.MeanShift`.
**kwargs :
passed to `sklearn.cluster.MeanShift`.
Returns
-------
list
"""
self.method = method
ds_fit = self.fitting_data(data)
mdict = {'kmeans': self.fit_kmeans,
'meanshift': self.fit_meanshift}
clust = mdict[method]
self.classifier = clust(data=ds_fit, **kwargs)
# sort cluster centers by value of first column, to avoid random variation.
c0 = self.classifier.cluster_centers_.T[self.sort_by]
self.classifier.cluster_centers_ = self.classifier.cluster_centers_[np.argsort(c0)]
# recalculate the labels, so it's consistent with cluster centers
self.classifier.labels_ = self.classifier.predict(ds_fit)
self.classifier.ulabels_ = np.unique(self.classifier.labels_)
return | [
"def",
"fit",
"(",
"self",
",",
"data",
",",
"method",
"=",
"'kmeans'",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"method",
"=",
"method",
"ds_fit",
"=",
"self",
".",
"fitting_data",
"(",
"data",
")",
"mdict",
"=",
"{",
"'kmeans'",
":",
"self... | 34.884615 | 0.002145 |
def _align_intervals(int_hier, lab_hier, t_min=0.0, t_max=None):
'''Align a hierarchical annotation to span a fixed start and end time.
Parameters
----------
int_hier : list of list of intervals
lab_hier : list of list of str
Hierarchical segment annotations, encoded as a
list of list of intervals (int_hier) and list of
list of strings (lab_hier)
t_min : None or number >= 0
The minimum time value for the segmentation
t_max : None or number >= t_min
The maximum time value for the segmentation
Returns
-------
intervals_hier : list of list of intervals
labels_hier : list of list of str
`int_hier` `lab_hier` aligned to span `[t_min, t_max]`.
'''
return [list(_) for _ in zip(*[util.adjust_intervals(np.asarray(ival),
labels=lab,
t_min=t_min,
t_max=t_max)
for ival, lab in zip(int_hier, lab_hier)])] | [
"def",
"_align_intervals",
"(",
"int_hier",
",",
"lab_hier",
",",
"t_min",
"=",
"0.0",
",",
"t_max",
"=",
"None",
")",
":",
"return",
"[",
"list",
"(",
"_",
")",
"for",
"_",
"in",
"zip",
"(",
"*",
"[",
"util",
".",
"adjust_intervals",
"(",
"np",
".... | 38.75 | 0.000899 |
def magphase_function(f='1.0/(1+1j*x)', xmin=-1, xmax=1, steps=200, p='x', g=None, erange=False, **kwargs):
"""
Plots function(s) magnitude and phase over the specified range.
Parameters
----------
f='1.0/(1+1j*x)'
Complex-valued function or list of functions to plot.
These can be string functions or single-argument python functions;
additional globals can be supplied by g.
xmin=-1, xmax=1, steps=200
Range over which to plot and how many points to plot
p='x'
If using strings for functions, p is the independent parameter name.
g=None
Optional dictionary of extra globals. Try g=globals()!
erange=False
Use exponential spacing of the x data?
See spinmob.plot.xy.data() for additional optional keyword arguments.
"""
function(f, xmin, xmax, steps, p, g, erange, plotter=magphase_data, **kwargs) | [
"def",
"magphase_function",
"(",
"f",
"=",
"'1.0/(1+1j*x)'",
",",
"xmin",
"=",
"-",
"1",
",",
"xmax",
"=",
"1",
",",
"steps",
"=",
"200",
",",
"p",
"=",
"'x'",
",",
"g",
"=",
"None",
",",
"erange",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":... | 42.363636 | 0.008395 |
def run_with_retcodes(argv):
"""
Run luigi with command line parsing, but raise ``SystemExit`` with the configured exit code.
Note: Usually you use the luigi binary directly and don't call this function yourself.
:param argv: Should (conceptually) be ``sys.argv[1:]``
"""
logger = logging.getLogger('luigi-interface')
with luigi.cmdline_parser.CmdlineParser.global_instance(argv):
retcodes = retcode()
worker = None
try:
worker = luigi.interface._run(argv).worker
except luigi.interface.PidLockAlreadyTakenExit:
sys.exit(retcodes.already_running)
except Exception:
# Some errors occur before logging is set up, we set it up now
env_params = luigi.interface.core()
InterfaceLogging.setup(env_params)
logger.exception("Uncaught exception in luigi")
sys.exit(retcodes.unhandled_exception)
with luigi.cmdline_parser.CmdlineParser.global_instance(argv):
task_sets = luigi.execution_summary._summary_dict(worker)
root_task = luigi.execution_summary._root_task(worker)
non_empty_categories = {k: v for k, v in task_sets.items() if v}.keys()
def has(status):
assert status in luigi.execution_summary._ORDERED_STATUSES
return status in non_empty_categories
codes_and_conds = (
(retcodes.missing_data, has('still_pending_ext')),
(retcodes.task_failed, has('failed')),
(retcodes.already_running, has('run_by_other_worker')),
(retcodes.scheduling_error, has('scheduling_error')),
(retcodes.not_run, has('not_run')),
)
expected_ret_code = max(code * (1 if cond else 0) for code, cond in codes_and_conds)
if expected_ret_code == 0 and \
root_task not in task_sets["completed"] and \
root_task not in task_sets["already_done"]:
sys.exit(retcodes.not_run)
else:
sys.exit(expected_ret_code) | [
"def",
"run_with_retcodes",
"(",
"argv",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'luigi-interface'",
")",
"with",
"luigi",
".",
"cmdline_parser",
".",
"CmdlineParser",
".",
"global_instance",
"(",
"argv",
")",
":",
"retcodes",
"=",
"retcode... | 39.145833 | 0.002077 |
def add_section(self, section, friendly_name = None):
"""Adds a section and optionally gives it a friendly name.."""
if not isinstance(section, BASESTRING): # Make sure the user isn't expecting to use something stupid as a key.
raise ValueError(section)
# See if we've got this section already:
if section in self.config:
raise DuplicateSectionError(section) # Yep... Kick off.
else:
self.config[section] = OrderedDict() # Nope... Ad it
if friendly_name == None:
friendly_name = section.title()
if '&' not in friendly_name:
friendly_name = '&' + friendly_name
self.section_names[section] = friendly_name | [
"def",
"add_section",
"(",
"self",
",",
"section",
",",
"friendly_name",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"section",
",",
"BASESTRING",
")",
":",
"# Make sure the user isn't expecting to use something stupid as a key.\r",
"raise",
"ValueError",
"... | 45.642857 | 0.029141 |
def _parse_response_for_dict(response):
''' Extracts name-values from response header. Filter out the standard
http headers.'''
if response is None:
return None
http_headers = ['server', 'date', 'location', 'host',
'via', 'proxy-connection', 'connection']
return_dict = _HeaderDict()
if response.headers:
for name, value in response.headers:
if not name.lower() in http_headers:
return_dict[name] = value
return return_dict | [
"def",
"_parse_response_for_dict",
"(",
"response",
")",
":",
"if",
"response",
"is",
"None",
":",
"return",
"None",
"http_headers",
"=",
"[",
"'server'",
",",
"'date'",
",",
"'location'",
",",
"'host'",
",",
"'via'",
",",
"'proxy-connection'",
",",
"'connecti... | 33.533333 | 0.001934 |
def insert_bool(param, command_args):
'''
:param param:
:param command_args:
:return:
'''
index = 0
found = False
for lelem in command_args:
if lelem == '--' and not found:
break
if lelem == param:
found = True
break
index = index + 1
if found:
command_args.insert(index + 1, 'True')
return command_args | [
"def",
"insert_bool",
"(",
"param",
",",
"command_args",
")",
":",
"index",
"=",
"0",
"found",
"=",
"False",
"for",
"lelem",
"in",
"command_args",
":",
"if",
"lelem",
"==",
"'--'",
"and",
"not",
"found",
":",
"break",
"if",
"lelem",
"==",
"param",
":",... | 17.894737 | 0.027933 |
def send_acks(self, message):
"""
send acks to the service
:param message: EventHub_pb2.Message
:return: None
"""
if isinstance(message, EventHub_pb2.Message):
ack = EventHub_pb2.Ack(partition=message.partition, offset=message.offset)
self.grpc_manager.send_message(EventHub_pb2.SubscriptionResponse(ack=ack))
elif isinstance(message, EventHub_pb2.SubscriptionMessage):
acks = []
for m in message.messages:
acks.append(EventHub_pb2.Ack(parition=m.partition, offset=m.offset))
self.grpc_manager.send_message(EventHub_pb2.SubscriptionAcks(ack=acks)) | [
"def",
"send_acks",
"(",
"self",
",",
"message",
")",
":",
"if",
"isinstance",
"(",
"message",
",",
"EventHub_pb2",
".",
"Message",
")",
":",
"ack",
"=",
"EventHub_pb2",
".",
"Ack",
"(",
"partition",
"=",
"message",
".",
"partition",
",",
"offset",
"=",
... | 44.4 | 0.008824 |
def __normalize_list(self, msg):
"""Split message to list by commas and trim whitespace."""
if isinstance(msg, list):
msg = "".join(msg)
return list(map(lambda x: x.strip(), msg.split(","))) | [
"def",
"__normalize_list",
"(",
"self",
",",
"msg",
")",
":",
"if",
"isinstance",
"(",
"msg",
",",
"list",
")",
":",
"msg",
"=",
"\"\"",
".",
"join",
"(",
"msg",
")",
"return",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"strip",
"(",
... | 44.4 | 0.00885 |
def list_joined_topics(self, start=0):
"""
已加入的所有小组的话题列表
:param start: 翻页
:return: 带下一页的列表
"""
xml = self.api.xml(API_GROUP_HOME, params={'start': start})
return build_list_result(self._parse_topic_table(xml, 'title,comment,created,group'), xml) | [
"def",
"list_joined_topics",
"(",
"self",
",",
"start",
"=",
"0",
")",
":",
"xml",
"=",
"self",
".",
"api",
".",
"xml",
"(",
"API_GROUP_HOME",
",",
"params",
"=",
"{",
"'start'",
":",
"start",
"}",
")",
"return",
"build_list_result",
"(",
"self",
".",
... | 33.555556 | 0.012903 |
def path_from_keywords(keywords,into='path'):
'''
turns keyword pairs into path or filename
if `into=='path'`, then keywords are separted by underscores, else keywords are used to create a directory hierarchy
'''
subdirs = []
def prepare_string(s):
s = str(s)
s = re.sub('[][{},*"'+f"'{os.sep}]",'_',s)#replace characters that make bash life difficult by underscore
if into=='file':
s = s.replace('_', ' ')#Remove underscore because they will be used as separator
if ' ' in s:
s = s.title()
s = s.replace(' ','')
return s
if isinstance(keywords,set):
keywords_list = sorted(keywords)
for property in keywords_list:
subdirs.append(prepare_string(property))
else:
keywords_list = sorted(keywords.items())
for property,value in keywords_list: # @reservedassignment
if Bool.valid(value):
subdirs.append(('' if value else ('not_' if into=='path' else 'not'))+prepare_string(property))
#elif String.valid(value):
# subdirs.append(prepare_string(value))
elif (Float|Integer).valid(value):
subdirs.append('{}{}'.format(prepare_string(property),prepare_string(value)))
else:
subdirs.append('{}{}{}'.format(prepare_string(property),'_' if into == 'path' else '',prepare_string(value)))
if into == 'path':
out = os.path.join(*subdirs)
else:
out = '_'.join(subdirs)
return out | [
"def",
"path_from_keywords",
"(",
"keywords",
",",
"into",
"=",
"'path'",
")",
":",
"subdirs",
"=",
"[",
"]",
"def",
"prepare_string",
"(",
"s",
")",
":",
"s",
"=",
"str",
"(",
"s",
")",
"s",
"=",
"re",
".",
"sub",
"(",
"'[][{},*\"'",
"+",
"f\"'{os... | 42.444444 | 0.017914 |
def adjust_whitespace(text):
"""remove the left-whitespace margin of a block of Python code."""
state = [False, False]
(backslashed, triplequoted) = (0, 1)
def in_multi_line(line):
start_state = (state[backslashed] or state[triplequoted])
if re.search(r"\\$", line):
state[backslashed] = True
else:
state[backslashed] = False
def match(reg, t):
m = re.match(reg, t)
if m:
return m, t[len(m.group(0)):]
else:
return None, t
while line:
if state[triplequoted]:
m, line = match(r"%s" % state[triplequoted], line)
if m:
state[triplequoted] = False
else:
m, line = match(r".*?(?=%s|$)" % state[triplequoted], line)
else:
m, line = match(r'#', line)
if m:
return start_state
m, line = match(r"\"\"\"|\'\'\'", line)
if m:
state[triplequoted] = m.group(0)
continue
m, line = match(r".*?(?=\"\"\"|\'\'\'|#|$)", line)
return start_state
def _indent_line(line, stripspace=''):
return re.sub(r"^%s" % stripspace, '', line)
lines = []
stripspace = None
for line in re.split(r'\r?\n', text):
if in_multi_line(line):
lines.append(line)
else:
line = line.expandtabs()
if stripspace is None and re.search(r"^[ \t]*[^# \t]", line):
stripspace = re.match(r"^([ \t]*)", line).group(1)
lines.append(_indent_line(line, stripspace))
return "\n".join(lines) | [
"def",
"adjust_whitespace",
"(",
"text",
")",
":",
"state",
"=",
"[",
"False",
",",
"False",
"]",
"(",
"backslashed",
",",
"triplequoted",
")",
"=",
"(",
"0",
",",
"1",
")",
"def",
"in_multi_line",
"(",
"line",
")",
":",
"start_state",
"=",
"(",
"sta... | 29.894737 | 0.000568 |
def _authenticate_ssh(org):
"""Try authenticating via ssh, if succesful yields a User, otherwise raises Error."""
# Try to get username from git config
username = os.environ.get(f"{org.upper()}_USERNAME")
# Require ssh-agent
child = pexpect.spawn("ssh -T git@github.com", encoding="utf8")
# GitHub prints 'Hi {username}!...' when attempting to get shell access
i = child.expect(["Hi (.+)! You've successfully authenticated",
"Enter passphrase for key",
"Permission denied",
"Are you sure you want to continue connecting"])
child.close()
if i == 0:
if username is None:
username = child.match.groups()[0]
else:
return None
return User(name=username,
repo=f"git@github.com:{org}/{username}") | [
"def",
"_authenticate_ssh",
"(",
"org",
")",
":",
"# Try to get username from git config",
"username",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"f\"{org.upper()}_USERNAME\"",
")",
"# Require ssh-agent",
"child",
"=",
"pexpect",
".",
"spawn",
"(",
"\"ssh -T git@gith... | 39.285714 | 0.002367 |
def print_traceback(self):
"""
Print the traceback of the exception wrapped by the AbbreviatedException.
"""
traceback.print_exception(self.etype, self.value, self.traceback) | [
"def",
"print_traceback",
"(",
"self",
")",
":",
"traceback",
".",
"print_exception",
"(",
"self",
".",
"etype",
",",
"self",
".",
"value",
",",
"self",
".",
"traceback",
")"
] | 40.4 | 0.014563 |
def rebuildGrid( self ):
"""
Rebuilds the ruler data.
"""
vruler = self.verticalRuler()
hruler = self.horizontalRuler()
rect = self._buildData['grid_rect']
# process the vertical ruler
h_lines = []
h_alt = []
h_notches = []
vpstart = vruler.padStart()
vnotches = vruler.notches()
vpend = vruler.padEnd()
vcount = len(vnotches) + vpstart + vpend
deltay = rect.height() / max((vcount - 1), 1)
y = rect.bottom()
alt = False
for i in range(vcount):
h_lines.append(QLineF(rect.left(), y, rect.right(), y))
# store alternate color
if ( alt ):
alt_rect = QRectF(rect.left(), y, rect.width(), deltay)
h_alt.append(alt_rect)
# store notch information
nidx = i - vpstart
if ( 0 <= nidx and nidx < len(vnotches) ):
notch = vnotches[nidx]
notch_rect = QRectF(0, y - 3, rect.left() - 3, deltay)
h_notches.append((notch_rect, notch))
y -= deltay
alt = not alt
self._buildData['grid_h_lines'] = h_lines
self._buildData['grid_h_alt'] = h_alt
self._buildData['grid_h_notches'] = h_notches
# process the horizontal ruler
v_lines = []
v_alt = []
v_notches = []
hpstart = hruler.padStart()
hnotches = hruler.notches()
hpend = hruler.padEnd()
hcount = len(hnotches) + hpstart + hpend
deltax = rect.width() / max((hcount - 1), 1)
x = rect.left()
alt = False
for i in range(hcount):
v_lines.append(QLineF(x, rect.top(), x, rect.bottom()))
# store alternate info
if ( alt ):
alt_rect = QRectF(x - deltax, rect.top(), deltax, rect.height())
v_alt.append(alt_rect)
# store notch information
nidx = i - hpstart
if ( 0 <= nidx and nidx < len(hnotches) ):
notch = hnotches[nidx]
notch_rect = QRectF(x - (deltax / 2.0),
rect.bottom() + 3,
deltax,
13)
v_notches.append((notch_rect, notch))
x += deltax
alt = not alt
self._buildData['grid_v_lines'] = v_lines
self._buildData['grid_v_alt'] = v_alt
self._buildData['grid_v_notches'] = v_notches
# draw the axis lines
axis_lines = []
axis_lines.append(QLineF(rect.left(),
rect.top(),
rect.left(),
rect.bottom()))
axis_lines.append(QLineF(rect.left(),
rect.bottom(),
rect.right(),
rect.bottom()))
self._buildData['axis_lines'] = axis_lines | [
"def",
"rebuildGrid",
"(",
"self",
")",
":",
"vruler",
"=",
"self",
".",
"verticalRuler",
"(",
")",
"hruler",
"=",
"self",
".",
"horizontalRuler",
"(",
")",
"rect",
"=",
"self",
".",
"_buildData",
"[",
"'grid_rect'",
"]",
"# process the vertical ruler\r",
"h... | 35.473684 | 0.019053 |
def plot_gain_offsets(dio_cross,dio_chan_per_coarse=8,feedtype='l',ax1=None,ax2=None,legend=True,**kwargs):
'''
Plots the calculated gain offsets of each coarse channel along with
the time averaged power spectra of the X and Y feeds
'''
#Get ON-OFF ND spectra
Idiff,Qdiff,Udiff,Vdiff,freqs = get_diff(dio_cross,feedtype,**kwargs)
obs = Waterfall(dio_cross,max_load=150)
tsamp = obs.header['tsamp']
data = obs.data
obs = None
I,Q,U,V = get_stokes(data,feedtype)
#Get phase offsets and convert to degrees
coarse_G = gain_offsets(I,Q,U,V,tsamp,dio_chan_per_coarse,feedtype,**kwargs)
coarse_freqs = convert_to_coarse(freqs,dio_chan_per_coarse)
#Get X and Y spectra for the noise diode ON and OFF
#If using circular feeds these correspond to LL and RR
XX_OFF,XX_ON = foldcal(np.expand_dims(data[:,0,:],axis=1),tsamp,**kwargs)
YY_OFF,YY_ON = foldcal(np.expand_dims(data[:,1,:],axis=1),tsamp,**kwargs)
if ax1==None:
plt.subplot(211)
else:
axG = plt.axes(ax1)
plt.setp(axG.get_xticklabels(),visible=False)
plt.plot(coarse_freqs,coarse_G,'ko',markersize=2)
plt.ylabel(r'$\frac{\Delta G}{2}$',rotation=90)
if feedtype=='l':
plt.title('XY Gain Difference')
if feedtype=='c':
plt.title('LR Gain Difference')
plt.grid(True)
if ax2==None:
plt.subplot(212)
else:
axXY = plt.axes(ax2,sharex=axG)
if feedtype=='l':
plt.plot(freqs,XX_OFF,'b-',label='XX')
plt.plot(freqs,YY_OFF,'r-',label='YY')
if feedtype=='c':
plt.plot(freqs,XX_OFF,'b-',label='LL')
plt.plot(freqs,YY_OFF,'r-',label='RR')
plt.xlabel('Frequency (MHz)')
plt.ylabel('Power (Counts)')
if legend==True:
plt.legend() | [
"def",
"plot_gain_offsets",
"(",
"dio_cross",
",",
"dio_chan_per_coarse",
"=",
"8",
",",
"feedtype",
"=",
"'l'",
",",
"ax1",
"=",
"None",
",",
"ax2",
"=",
"None",
",",
"legend",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"#Get ON-OFF ND spectra",
"I... | 35.510204 | 0.040268 |
def save(self, filename=None, deleteid3=False):
"""Save metadata blocks to a file.
If no filename is given, the one most recently loaded is used.
"""
if filename is None:
filename = self.filename
f = open(filename, 'rb+')
try:
# Ensure we've got padding at the end, and only at the end.
# If adding makes it too large, we'll scale it down later.
self.metadata_blocks.append(Padding(b'\x00' * 1020))
MetadataBlock.group_padding(self.metadata_blocks)
header = self.__check_header(f)
# "fLaC" and maybe ID3
available = self.__find_audio_offset(f) - header
data = MetadataBlock.writeblocks(self.metadata_blocks)
# Delete ID3v2
if deleteid3 and header > 4:
available += header - 4
header = 4
if len(data) > available:
# If we have too much data, see if we can reduce padding.
padding = self.metadata_blocks[-1]
newlength = padding.length - (len(data) - available)
if newlength > 0:
padding.length = newlength
data = MetadataBlock.writeblocks(self.metadata_blocks)
assert len(data) == available
elif len(data) < available:
# If we have too little data, increase padding.
self.metadata_blocks[-1].length += (available - len(data))
data = MetadataBlock.writeblocks(self.metadata_blocks)
assert len(data) == available
if len(data) != available:
# We couldn't reduce the padding enough.
diff = (len(data) - available)
insert_bytes(f, diff, header)
f.seek(header - 4)
f.write(b"fLaC" + data)
# Delete ID3v1
if deleteid3:
try:
f.seek(-128, 2)
except IOError:
pass
else:
if f.read(3) == b"TAG":
f.seek(-128, 2)
f.truncate()
finally:
f.close() | [
"def",
"save",
"(",
"self",
",",
"filename",
"=",
"None",
",",
"deleteid3",
"=",
"False",
")",
":",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"self",
".",
"filename",
"f",
"=",
"open",
"(",
"filename",
",",
"'rb+'",
")",
"try",
":",
"# ... | 35.852459 | 0.00089 |
def all(self):
    """SQLA-like ``all`` method: materialise every matching row.

    Applies the accumulated filter commands, then the optional
    ordering, and finally the offset/limit window.

    :return: tuple ``(total_length, items)`` where ``total_length``
        counts the matches before the offset/limit window is applied.
    """
    rows = self.store.get(self.query_class)
    if self._filters_cmd:
        matched = []
        for row in rows:
            keep = True
            for cmd, lhs, rhs in self._filters_cmd:
                if not cmd(row, lhs, rhs):
                    keep = False
                    break
            if keep:
                matched.append(row)
    else:
        matched = rows
    if self._order_by_cmd:
        matched = self._order_by(matched, self._order_by_cmd)
    total_length = len(matched)
    if self._limit != 0:
        matched = matched[self._offset: self._offset + self._limit]
    return total_length, matched
"def",
"all",
"(",
"self",
")",
":",
"items",
"=",
"list",
"(",
")",
"if",
"not",
"self",
".",
"_filters_cmd",
":",
"items",
"=",
"self",
".",
"store",
".",
"get",
"(",
"self",
".",
"query_class",
")",
"else",
":",
"for",
"item",
"in",
"self",
".... | 37.826087 | 0.002242 |
def _preprocess_input(test, ref, mask=None):
    """Normalise the inputs before computing an image metric.

    Parameters
    ----------
    test : np.ndarray
        the tested image
    ref : np.ndarray
        the reference image
    mask : np.ndarray, optional
        the mask for the ROI

    Notes
    -----
    Compute the metric only on magnitude: both images are copied,
    converted to float64 magnitudes and min-max normalised.

    Returns
    -------
    tuple
        ``(test, ref, mask)`` — the normalised test and reference
        images, and either ``None`` or the validated mask array.

    Raises
    ------
    ValueError
        If ``mask`` is neither ``None`` nor an ``np.ndarray``.
    """
    test = np.abs(np.copy(test)).astype('float64')
    ref = np.abs(np.copy(ref)).astype('float64')
    test = min_max_normalize(test)
    ref = min_max_normalize(ref)
    if (not isinstance(mask, np.ndarray)) and (mask is not None):
        raise ValueError("mask should be None, or a np.ndarray,"
                         " got '{0}' instead.".format(mask))
    if mask is None:
        return test, ref, None
    return test, ref, mask
"def",
"_preprocess_input",
"(",
"test",
",",
"ref",
",",
"mask",
"=",
"None",
")",
":",
"test",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"copy",
"(",
"test",
")",
")",
".",
"astype",
"(",
"'float64'",
")",
"ref",
"=",
"np",
".",
"abs",
"(",
"np"... | 22.628571 | 0.001211 |
async def stop(self, force: bool=False) -> None:
    '''Ask this task to stop.

    Clears the running flag so a started task can wind down
    gracefully; when ``force`` is True the underlying asyncio task is
    also cancelled, which covers tasks that have not started yet.'''
    Log.debug('stopping task %s', self.name)
    self.running = False
    if not force:
        return
    self.task.cancel()
"async",
"def",
"stop",
"(",
"self",
",",
"force",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"Log",
".",
"debug",
"(",
"'stopping task %s'",
",",
"self",
".",
"name",
")",
"self",
".",
"running",
"=",
"False",
"if",
"force",
":",
"self",
"... | 31.333333 | 0.013793 |
def run_wait(name, location='\\'):
    r'''
    Run a scheduled task and return when the task finishes

    :param str name: The name of the task to run.
    :param str location: A string value representing the location of the task.
        Default is '\\' which is the root for the task scheduler
        (C:\Windows\System32\tasks).
    :return: True if successful, False if unsuccessful
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' task.list_run_wait <task_name>
    '''
    # NOTE(review): despite the bool :rtype:, the not-found and
    # already-running cases return status *strings* — confirm callers
    # only truth-test the result.
    # Check for existing folder
    if name not in list_tasks(location):
        return '{0} not found in {1}'.format(name, location)
    # connect to the task scheduler
    with salt.utils.winapi.Com():
        task_service = win32com.client.Dispatch("Schedule.Service")
        task_service.Connect()
        # get the folder to delete the folder from
        task_folder = task_service.GetFolder(location)
        task = task_folder.GetTask(name)
        # Is the task already running
        if task.State == TASK_STATE_RUNNING:
            return 'Task already running'
        try:
            task.Run('')
            # Give the task a moment to actually enter the running state
            # before we start polling for completion.
            time.sleep(1)
            running = True
        except pythoncom.com_error:
            return False
        # Busy-poll the scheduler's running-task list until our task no
        # longer appears in it.
        while running:
            running = False
            try:
                running_tasks = task_service.GetRunningTasks(0)
                if running_tasks.Count:
                    for item in running_tasks:
                        if item.Name == name:
                            running = True
            except pythoncom.com_error:
                # Treat scheduler errors during polling as "finished".
                running = False
        return True
"def",
"run_wait",
"(",
"name",
",",
"location",
"=",
"'\\\\'",
")",
":",
"# Check for existing folder",
"if",
"name",
"not",
"in",
"list_tasks",
"(",
"location",
")",
":",
"return",
"'{0} not found in {1}'",
".",
"format",
"(",
"name",
",",
"location",
")",
... | 27.272727 | 0.000644 |
def is_disabled(self, name):
    """Return True when *name* is a known service whose config is
    'disabled'; False for unknown services or an empty service map."""
    services = self.services
    if not services or name not in services:
        return False
    return services[name]['config'] == 'disabled'
"def",
"is_disabled",
"(",
"self",
",",
"name",
")",
":",
"if",
"self",
".",
"services",
"and",
"name",
"in",
"self",
".",
"services",
":",
"return",
"self",
".",
"services",
"[",
"name",
"]",
"[",
"'config'",
"]",
"==",
"'disabled'",
"return",
"False"... | 43.4 | 0.00905 |
def prev_listing(self, limit=None):
    """GET the previous :class:`Listing` this :class:`Listing` points to.

    :param limit: max number of entries to get
    :returns: the previous :class:`Listing`
    :raises NoMoreError: when there is no previous page
    """
    if not self.before:
        raise NoMoreError('no previous items')
    page_size = limit or self._limit
    return self._reddit._limit_get(self._path,
                                   eparams={'before': self.before},
                                   limit=page_size)
"def",
"prev_listing",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"if",
"self",
".",
"before",
":",
"return",
"self",
".",
"_reddit",
".",
"_limit_get",
"(",
"self",
".",
"_path",
",",
"eparams",
"=",
"{",
"'before'",
":",
"self",
".",
"before... | 46.444444 | 0.011737 |
def patch_persistent_volume(self, name, body, **kwargs):  # noqa: E501
    """patch_persistent_volume  # noqa: E501

    partially update the specified PersistentVolume  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_persistent_volume(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the PersistentVolume (required)
    :param UNKNOWN_BASE_TYPE body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1PersistentVolume
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both the sync and async paths simply return whatever the
    # *_with_http_info delegate produces (data or a request thread),
    # so a single call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.patch_persistent_volume_with_http_info(name, body, **kwargs)  # noqa: E501
"def",
"patch_persistent_volume",
"(",
"self",
",",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"se... | 57.666667 | 0.001421 |
def create_track(self, href=None, media_url=None, label=None,
                 audio_channel=None):
    """Add a new track to a bundle.

    Note that the total number of allowable tracks is limited. See the
    API documentation for details.

    'href' the relative href to the tracks list. May not be None.
    'media_url' public URL to media file. May not be None.
    'label' short name for the track. May be None.
    'audio_channel' the channel(s) to use in a stereo file. May be
    None. For details see the API documentation.

    Returns a data structure equivalent to the JSON returned by the
    API.

    If the response status is not 2xx, or if the maximum number of
    tracks is exceeded, throws an APIException. If the JSON to
    python data struct conversion fails, throws an
    APIDataException."""

    # Argument error checking.
    assert href is not None
    assert media_url is not None

    # media_url is mandatory, so the payload is never empty.
    payload = {'media_url': media_url}
    if label is not None:
        payload['label'] = label
    if audio_channel is not None:
        payload['audio_channel'] = audio_channel

    raw_result = self.post(href, payload)

    # Only 200-202 count as success; anything else is an API failure.
    if raw_result.status < 200 or raw_result.status > 202:
        raise APIException(raw_result.status, raw_result.json)

    # Convert the JSON to a python data struct.
    return self._parse_json(raw_result.json)
"def",
"create_track",
"(",
"self",
",",
"href",
"=",
"None",
",",
"media_url",
"=",
"None",
",",
"label",
"=",
"None",
",",
"audio_channel",
"=",
"None",
")",
":",
"# Argument error checking.",
"assert",
"href",
"is",
"not",
"None",
"assert",
"media_url",
... | 34.711111 | 0.001868 |
def _parse(self, str):
    """Clean the text content of an XML element: decode entities,
    strip markup, and collapse runs of whitespace."""
    for cleanup in (replace_entities, strip_tags, collapse_spaces):
        str = cleanup(str)
    return str
"def",
"_parse",
"(",
"self",
",",
"str",
")",
":",
"str",
"=",
"replace_entities",
"(",
"str",
")",
"str",
"=",
"strip_tags",
"(",
"str",
")",
"str",
"=",
"collapse_spaces",
"(",
"str",
")",
"return",
"str"
] | 25.888889 | 0.016598 |
def _get(url, use_kerberos=None, debug=False):
    """Perform a GET query against the CIS and decode the JSON reply.

    :param url: fully-qualified CIS URL to fetch
    :param use_kerberos: forwarded to :func:`ligo.org.request`
    :param debug: forwarded to :func:`ligo.org.request`
    :returns: the decoded JSON payload
    :raises ValueError: when the channel is not found at the given URL
    """
    from ligo.org import request
    # perform query
    try:
        reply = request(url, debug=debug, use_kerberos=use_kerberos)
    except HTTPError:
        raise ValueError("Channel not found at URL %s "
                         "Information System. Please double check the "
                         "name and try again." % url)
    text = reply.decode('utf-8') if isinstance(reply, bytes) else reply
    return json.loads(text)
"def",
"_get",
"(",
"url",
",",
"use_kerberos",
"=",
"None",
",",
"debug",
"=",
"False",
")",
":",
"from",
"ligo",
".",
"org",
"import",
"request",
"# perform query",
"try",
":",
"response",
"=",
"request",
"(",
"url",
",",
"debug",
"=",
"debug",
",",
... | 33.375 | 0.001821 |
def single(window, config):
    """Single theme
    ===== Header =====
    = items =
    ==================

    Build the render strings/callbacks for a single-bordered menu.

    :param window: owner window; provides ``term`` (terminal object)
        and the default ``width``.
    :param config: options dict — ``cordx``, ``color``, ``icon``,
        ``align`` ('left'/'center'/'right'), ``width``, ``header``.
    :return: dict with ``header``/``footer`` strings plus
        ``formater``/``selector`` callables that draw one item line
        between the side borders (selector underlines the text).
    :raises ValueError: for an unknown ``align`` value.
    """
    cordx = round(config.get('cordx', 0))
    color = config.get('color', red)
    icon = config.get('icon', '=')
    align = config.get('align', 'left')
    term = window.term
    width = round(config.get('width', window.width))
    title = config.get('header', ' Menu ')
    # Center the title in a row of icons, then colour only the icon
    # padding on either side of the (uncoloured) title text.
    header = term.center(title, width, icon)
    header = header.split(title)
    header = color(header[0]) + title + color(header[1])
    footer = color(icon * width)
    # Border prefix: draw the left icon, jump to the last column for
    # the right icon, then park the cursor just inside the left border.
    l_eq = term.move_x(cordx) + color(icon)
    l_eq += term.move_x(cordx + width - 1) + \
        color(icon) + term.move_x(cordx + 2)
    if align == 'right':
        for_s = functools.partial(term.rjust, width=width - 4)  # *
    elif align == 'center':
        for_s = functools.partial(
            term.center, width=width - 4)  # -4 width "= text ="
    elif align == 'left':
        for_s = functools.partial(term.ljust, width=width - 4)  # *
    else:
        raise ValueError("Only align center, left, right")
    return {
        'header': header,
        'footer': footer,
        'formater': lambda text, **kwargs: l_eq + for_s(text),
        'selector': lambda text, **kwargs: l_eq + underline_ns(for_s(text)),
    }
"def",
"single",
"(",
"window",
",",
"config",
")",
":",
"cordx",
"=",
"round",
"(",
"config",
".",
"get",
"(",
"'cordx'",
",",
"0",
")",
")",
"color",
"=",
"config",
".",
"get",
"(",
"'color'",
",",
"red",
")",
"icon",
"=",
"config",
".",
"get",... | 30.047619 | 0.000767 |
def get_all_api_keys(self, **kwargs):  # noqa: E501
    """Get all API keys  # noqa: E501

    An endpoint for retrieving API keys in an array, optionally filtered by the owner. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/api-keys -H 'Authorization: Bearer API_KEY'`  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True
    >>> thread = api.get_all_api_keys(asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param int limit: The number of results to return (2-1000), default is 50.
    :param str after: The entity ID to fetch after the given one.
    :param str order: The order of the records based on creation time, ASC or DESC; by default ASC
    :param str include: Comma separated additional data to return. Currently supported: total_count
    :param str key__eq: API key filter.
    :param str owner__eq: Owner name filter.
    :return: ApiKeyInfoRespList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Sync and async invocations both return exactly what the
    # *_with_http_info delegate yields (result data or a request
    # thread), so one call serves both branches.
    kwargs['_return_http_data_only'] = True
    return self.get_all_api_keys_with_http_info(**kwargs)  # noqa: E501
"def",
"get_all_api_keys",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'asynchronous'",
")",
":",
"return",
"self",
".",
"get_all_api_keys_with_h... | 55.576923 | 0.001361 |
def random_draw(self, size=None):
    """Draw random samples of the hyperparameters.

    Parameters
    ----------
    size : None, int or array-like, optional
        The number/shape of samples to draw. If None, only one sample is
        returned. Default is None.

    Returns
    -------
    np.ndarray
        One row per entry in ``self.bounds``; row *i* holds uniform
        draws from ``[bounds[i][0], bounds[i][1])`` of the given size.
    """
    # numpy.asarray replaces scipy.asarray: the scipy alias was just a
    # re-export of the NumPy function and has been removed from the
    # main SciPy namespace (SciPy 1.12). The returned array is
    # identical.
    return numpy.asarray([
        numpy.random.uniform(low=bound[0], high=bound[1], size=size)
        for bound in self.bounds
    ])
"def",
"random_draw",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"return",
"scipy",
".",
"asarray",
"(",
"[",
"numpy",
".",
"random",
".",
"uniform",
"(",
"low",
"=",
"b",
"[",
"0",
"]",
",",
"high",
"=",
"b",
"[",
"1",
"]",
",",
"size",
... | 40.9 | 0.009569 |
def add_single_feature_methods(cls):
    """Custom decorator intended for :class:`~vision.helpers.VisionHelpers`.

    This metaclass adds a `{feature}` method for every feature
    defined on the Feature enum.

    :param cls: the class being decorated; must expose
        ``enums.Feature.Type`` for any methods to be generated.
    :return: the same class, with one detection method attached per
        feature type (skipping ``TYPE_UNSPECIFIED``).
    """
    # Sanity check: This only makes sense if we are building the GAPIC
    # subclass and have enums already attached.
    if not hasattr(cls, "enums"):
        return cls
    # Add each single-feature method to the class.
    for feature in cls.enums.Feature.Type:
        # Sanity check: Do not make a method for the falsy feature.
        if feature.name == "TYPE_UNSPECIFIED":
            continue
        # Assign the appropriate metadata to the function.
        detect = _create_single_feature_method(feature)
        # Assign a qualified name to the function, and perform module
        # replacement on the docstring.
        detect.__qualname__ = "{cls}.{name}".format(
            cls=cls.__name__, name=detect.__name__
        )
        detect.__doc__ = detect.__doc__.format(module=cls.__module__)
        # Place the function on the class being created.
        setattr(cls, detect.__name__, detect)
    # Done; return the class.
    return cls
"def",
"add_single_feature_methods",
"(",
"cls",
")",
":",
"# Sanity check: This only makes sense if we are building the GAPIC",
"# subclass and have enums already attached.",
"if",
"not",
"hasattr",
"(",
"cls",
",",
"\"enums\"",
")",
":",
"return",
"cls",
"# Add each single-fe... | 35.96875 | 0.000846 |
def target(self):
    """
    Returns the label associated with each item in data.

    The label of a file is the name of its immediate parent directory.
    """
    labels = []
    for path in self.files:
        parent = os.path.dirname(path)
        labels.append(os.path.basename(parent))
    return labels
"def",
"target",
"(",
"self",
")",
":",
"return",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"f",
")",
")",
"for",
"f",
"in",
"self",
".",
"files",
"]"
] | 27.428571 | 0.010101 |
def write_configs(self):
    """Generate the configurations needed for pipes.

    Reads application configs from git by default, or from a Runway
    directory when ``self.runway_dir`` is set, then renders the
    variables to ``self.raw_path`` and stores the result on
    ``self.configs``.
    """
    utils.banner("Generating Configs")

    if self.runway_dir:
        app_configs = configs.process_runway_configs(runway_dir=self.runway_dir)
    else:
        app_configs = configs.process_git_configs(git_short=self.git_short)

    self.configs = configs.write_variables(
        app_configs=app_configs, out_file=self.raw_path, git_short=self.git_short)
"def",
"write_configs",
"(",
"self",
")",
":",
"utils",
".",
"banner",
"(",
"\"Generating Configs\"",
")",
"if",
"not",
"self",
".",
"runway_dir",
":",
"app_configs",
"=",
"configs",
".",
"process_git_configs",
"(",
"git_short",
"=",
"self",
".",
"git_short",
... | 46.5 | 0.008439 |
def convert_to_oqhazardlib(
        self, tom, simple_mesh_spacing=1.0,
        complex_mesh_spacing=2.0, area_discretisation=10.0,
        use_defaults=False):
    """
    Converts the source model to an iterator of sources of :class:
    openquake.hazardlib.source.base.BaseSeismicSource

    :param tom: temporal occurrence model passed to each converter.
    :param float simple_mesh_spacing: mesh spacing (km) used for point,
        area and simple-fault sources.
    :param float complex_mesh_spacing: mesh spacing (km) used for
        complex-fault sources.
    :param float area_discretisation: discretisation step (km), area
        sources only.
    :param bool use_defaults: forwarded to every source converter.
    :returns: list of converted oq-hazardlib source objects.
    :raises ValueError: when a source is of an unrecognised type.
    """
    oq_source_model = []
    # Each mtk* source class takes a slightly different argument set,
    # so dispatch explicitly on the concrete type.
    for source in self.sources:
        if isinstance(source, mtkAreaSource):
            oq_source_model.append(source.create_oqhazardlib_source(
                tom,
                simple_mesh_spacing,
                area_discretisation,
                use_defaults))
        elif isinstance(source, mtkPointSource):
            oq_source_model.append(source.create_oqhazardlib_source(
                tom,
                simple_mesh_spacing,
                use_defaults))
        elif isinstance(source, mtkSimpleFaultSource):
            oq_source_model.append(source.create_oqhazardlib_source(
                tom,
                simple_mesh_spacing,
                use_defaults))
        elif isinstance(source, mtkComplexFaultSource):
            oq_source_model.append(source.create_oqhazardlib_source(
                tom,
                complex_mesh_spacing,
                use_defaults))
        else:
            raise ValueError('Source type not recognised!')
    return oq_source_model
"def",
"convert_to_oqhazardlib",
"(",
"self",
",",
"tom",
",",
"simple_mesh_spacing",
"=",
"1.0",
",",
"complex_mesh_spacing",
"=",
"2.0",
",",
"area_discretisation",
"=",
"10.0",
",",
"use_defaults",
"=",
"False",
")",
":",
"oq_source_model",
"=",
"[",
"]",
"... | 42.058824 | 0.001367 |
def set_args(args):
    """Set computed command arguments.

    Parameters
    ----------
    args : `argparse.Namespace`
        Command arguments. ``args.type`` is set to ``JSON`` or
        ``SQLITE`` based on the chosen output file name, and
        ``args.settings`` is replaced by the loaded settings dict.

    Raises
    ------
    ValueError
        If output file is stdout and progress bars are enabled.
    """
    # The AttributeError guard lets this run for sub-commands whose
    # parsers define only a subset of these attributes; the ValueError
    # itself is deliberately NOT caught here and propagates.
    try:
        if args.output is sys.stdout and args.progress:
            raise ValueError('args.output is stdout and args.progress')
    except AttributeError:
        pass
    # Pick the name used to sniff the output format, falling back
    # through: explicit --type, then state path, then output path,
    # and finally a '.json' default.
    try:
        fname = '.' + args.type
    except AttributeError:
        try:
            fname = args.state
        except AttributeError:
            try:
                fname = args.output
            except AttributeError:
                fname = '.json'
    # A missing name or a .json/.json.bz2 suffix selects the JSON
    # backend; anything else selects SQLite.
    if fname is None or fname.endswith('.json') or fname.endswith('.json.bz2'):
        args.type = JSON
    else:
        args.type = SQLITE
    # Load optional settings from the provided file object, closing it
    # afterwards; absent/None settings become an empty dict.
    settings = {}
    try:
        if args.settings is not None:
            settings = json.load(args.settings)
            args.settings.close()
    except AttributeError:
        pass
    args.settings = settings
"def",
"set_args",
"(",
"args",
")",
":",
"try",
":",
"if",
"args",
".",
"output",
"is",
"sys",
".",
"stdout",
"and",
"args",
".",
"progress",
":",
"raise",
"ValueError",
"(",
"'args.output is stdout and args.progress'",
")",
"except",
"AttributeError",
":",
... | 23.844444 | 0.000895 |
def get_host(self, retry_count=3):
    """
    The function for retrieving host information for an IP address.

    Args:
        retry_count (:obj:`int`): The number of times to retry in case
            socket errors, timeouts, connection resets, etc. are
            encountered. Defaults to 3.

    Returns:
        namedtuple:

        :hostname (str): The hostname returned mapped to the given IP
            address.
        :aliaslist (list): Alternate names for the given IP address.
        :ipaddrlist (list): IPv4/v6 addresses mapped to the same hostname.

    Raises:
        HostLookupError: The host lookup failed.
    """
    try:
        default_timeout_set = False
        if not socket.getdefaulttimeout():
            socket.setdefaulttimeout(self.timeout)
            default_timeout_set = True

        log.debug('Host query for {0}'.format(self.address_str))
        ret = socket.gethostbyaddr(self.address_str)

        if default_timeout_set:  # pragma: no cover
            socket.setdefaulttimeout(None)

        results = namedtuple('get_host_results', 'hostname, aliaslist, '
                                                 'ipaddrlist')
        # BUGFIX: gethostbyaddr() returns a 3-tuple, which must be
        # unpacked into the three namedtuple fields. Passing the tuple
        # as a single argument raised TypeError, which the broad except
        # below silently turned into HostLookupError on every
        # *successful* lookup.
        return results(*ret)

    except (socket.timeout, socket.error) as e:

        log.debug('Host query socket error: {0}'.format(e))
        if retry_count > 0:

            log.debug('Host query retrying (count: {0})'.format(
                str(retry_count)))

            return self.get_host(retry_count - 1)

        else:

            raise HostLookupError(
                'Host lookup failed for {0}.'.format(self.address_str)
            )

    except:  # pragma: no cover
        # Broad catch kept for parity with the documented contract: any
        # unexpected failure surfaces as HostLookupError.
        raise HostLookupError(
            'Host lookup failed for {0}.'.format(self.address_str)
        )
"def",
"get_host",
"(",
"self",
",",
"retry_count",
"=",
"3",
")",
":",
"try",
":",
"default_timeout_set",
"=",
"False",
"if",
"not",
"socket",
".",
"getdefaulttimeout",
"(",
")",
":",
"socket",
".",
"setdefaulttimeout",
"(",
"self",
".",
"timeout",
")",
... | 30.672131 | 0.001554 |
async def post(self, path, data={}, send_raw=False, **params):
    '''sends post request

    Parameters
    ----------
    path : str
        same as get_url
    data : dict
        request payload; JSON-encoded unless ``send_raw`` is True.
        (The shared mutable default is safe here because ``data`` is
        never mutated.)
    send_raw : bool
        when True, post ``data`` as-is instead of its JSON encoding.
    query : kargs dict
        additional info to pass to get_url

    See Also
    --------
    get_url :

    Returns
    -------
    requests.models.Response
        the response that was given

    Notes
    -----
    Retries up to ``self.tries`` additional times on connection errors.
    NOTE(review): if every attempt is rejected by ``_process_resp``
    without raising, the loop falls through and the coroutine returns
    ``None`` instead of a response — confirm callers handle that.
    '''
    url = self.get_url(path, **params)
    jstr = json.dumps(data)
    for i in range(self.tries+1):
        try:
            if send_raw:
                resp = await self.session.post(url, data=data, timeout=self.timeout)
            else:
                resp = await self.session.post(url, data=jstr, timeout=self.timeout)
            # _process_resp decides whether the reply is usable; a
            # falsy result triggers another attempt.
            if await self._process_resp(resp):
                return resp
            else:
                continue
        except aiohttp.ClientConnectionError:
            # Only give up after the final retry.
            if i >= self.tries:
                raise aiohttp.ClientConnectionError(
                    'Emby server is probably down'
                )
"async",
"def",
"post",
"(",
"self",
",",
"path",
",",
"data",
"=",
"{",
"}",
",",
"send_raw",
"=",
"False",
",",
"*",
"*",
"params",
")",
":",
"url",
"=",
"self",
".",
"get_url",
"(",
"path",
",",
"*",
"*",
"params",
")",
"jstr",
"=",
"json",
... | 24.702703 | 0.009474 |
def _is_download(ending):
"""Check if file ending is considered as download."""
list = [
'PDF',
'DOC',
'TXT',
'PPT',
'XLSX',
'MP3',
'SVG',
'7Z',
'HTML',
'TEX',
'MPP',
'ODT',
'RAR',
'ZIP',
'TAR',
'EPUB',
]
list_regex = [
'PDF'
]
if ending in list:
return True
for file_type in list_regex:
if re.search(re.escape(file_type), ending):
return True
return False | [
"def",
"_is_download",
"(",
"ending",
")",
":",
"list",
"=",
"[",
"'PDF'",
",",
"'DOC'",
",",
"'TXT'",
",",
"'PPT'",
",",
"'XLSX'",
",",
"'MP3'",
",",
"'SVG'",
",",
"'7Z'",
",",
"'HTML'",
",",
"'TEX'",
",",
"'MPP'",
",",
"'ODT'",
",",
"'RAR'",
",",... | 21.310345 | 0.001548 |
def visit_default(self, node):
    """check the node line number and check it if not yet done

    Tracks which physical lines have been visited, flags multiple
    statements sharing one line, and collects the raw text of the
    lines spanned by the node's block header.
    """
    if not node.is_statement:
        return
    if not node.root().pure_python:
        return  # XXX block visit of child nodes
    prev_sibl = node.previous_sibling()
    if prev_sibl is not None:
        prev_line = prev_sibl.fromlineno
    else:
        # The line on which a finally: occurs in a try/finally
        # is not directly represented in the AST. We infer it
        # by taking the last line of the body and adding 1, which
        # should be the line of finally:
        if (
            isinstance(node.parent, nodes.TryFinally)
            and node in node.parent.finalbody
        ):
            prev_line = node.parent.body[0].tolineno + 1
        else:
            prev_line = node.parent.statement().fromlineno
    line = node.fromlineno
    assert line, node
    # A visited value of 2 means the multi-statement message was
    # already emitted for this line; don't report it again.
    if prev_line == line and self._visited_lines.get(line) != 2:
        self._check_multi_statement_line(node, line)
        return
    if line in self._visited_lines:
        return
    try:
        # blockstart_tolineno ends at the block header (e.g. the ':'),
        # not the whole body; fall back to the full extent.
        tolineno = node.blockstart_tolineno
    except AttributeError:
        tolineno = node.tolineno
    assert tolineno, node
    lines = []
    for line in range(line, tolineno + 1):
        self._visited_lines[line] = 1
        try:
            lines.append(self._lines[line].rstrip())
        except KeyError:
            lines.append("")
    # NOTE(review): `lines` is collected but not consumed within this
    # excerpt — presumably handed to a follow-on check that is not
    # visible here; confirm against the full source.
"def",
"visit_default",
"(",
"self",
",",
"node",
")",
":",
"if",
"not",
"node",
".",
"is_statement",
":",
"return",
"if",
"not",
"node",
".",
"root",
"(",
")",
".",
"pure_python",
":",
"return",
"# XXX block visit of child nodes",
"prev_sibl",
"=",
"node",
... | 38.45 | 0.001268 |
def translate(self, hWndFrom = HWND_DESKTOP, hWndTo = HWND_DESKTOP):
    """
    Translate coordinates from one window to another.

    @see: L{client_to_screen}, L{screen_to_client}

    @type  hWndFrom: int or L{HWND} or L{system.Window}
    @param hWndFrom: Window handle to translate from.
        Use C{HWND_DESKTOP} for screen coordinates.

    @type  hWndTo: int or L{HWND} or L{system.Window}
    @param hWndTo: Window handle to translate to.
        Use C{HWND_DESKTOP} for screen coordinates.

    @rtype:  L{Rect}
    @return: New object containing the translated coordinates.
    """
    # Map the rectangle's two defining corners through the Win32 call.
    top_left = (self.left, self.top)
    bottom_right = (self.right, self.bottom)
    return MapWindowPoints(hWndFrom, hWndTo, [top_left, bottom_right])
"def",
"translate",
"(",
"self",
",",
"hWndFrom",
"=",
"HWND_DESKTOP",
",",
"hWndTo",
"=",
"HWND_DESKTOP",
")",
":",
"points",
"=",
"[",
"(",
"self",
".",
"left",
",",
"self",
".",
"top",
")",
",",
"(",
"self",
".",
"right",
",",
"self",
".",
"bott... | 39.578947 | 0.01039 |
def to_dict_hook(obj):
    """Convert internal objects to a serializable representation.

    Intended as the ``default=`` hook for ``json.dumps``. Any object
    exposing a ``to_dict`` method is serialised by calling it; the
    resulting dict is tagged with decoding metadata: ``_type`` (the
    dotted ``module.ClassName`` path) and ``_version`` (currently 0).
    Because the hook is applied recursively by the JSON encoder, whole
    object trees of arbitrary depth are translated.

    >>> class Node:
    >>>     def to_dict(self):
    >>>         return {'value': 'node'}
    >>> json.dumps(Node(), default=to_dict_hook)
    '{"value": "node", "_type": "...Node", "_version": 0}'
    """
    if not hasattr(obj, 'to_dict'):
        raise TypeError(
            f'Object of type {obj.__class__.__name__} is not JSON serializable',
        )
    result = obj.to_dict()
    assert isinstance(result, dict), 'to_dict must return a dictionary'
    result['_type'] = f'{obj.__module__}.{obj.__class__.__name__}'
    result['_version'] = 0
    return result
"def",
"to_dict_hook",
"(",
"obj",
")",
":",
"if",
"hasattr",
"(",
"obj",
",",
"'to_dict'",
")",
":",
"result",
"=",
"obj",
".",
"to_dict",
"(",
")",
"assert",
"isinstance",
"(",
"result",
",",
"dict",
")",
",",
"'to_dict must return a dictionary'",
"resul... | 33.972973 | 0.000773 |
def _music_lib_search(self, search, start, max_items):
    """Perform a music library search and extract search numbers.

    You can get an overview of all the relevant search prefixes (like
    'A:') and their meaning by browsing ObjectID '0' with
    BrowseDirectChildren on the content directory service.

    Args:
        search (str): The ID to search.
        start (int): The index of the first item to return.
        max_items (int): The maximum number of items to return.

    Returns:
        tuple: (response, metadata) where response is the returned
            metadata and metadata is a dict with the
            'number_returned', 'total_matches' and 'update_id'
            integers
    """
    browse_args = [
        ('ObjectID', search),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', ''),
    ]
    response = self.contentDirectory.Browse(browse_args)

    # Pull the result counters out of the UPnP reply, converting the
    # CamelCase tag names to snake_case keys.
    metadata = {
        camel_to_underscore(tag): int(response[tag])
        for tag in ('NumberReturned', 'TotalMatches', 'UpdateID')
    }
    return response, metadata
"def",
"_music_lib_search",
"(",
"self",
",",
"search",
",",
"start",
",",
"max_items",
")",
":",
"response",
"=",
"self",
".",
"contentDirectory",
".",
"Browse",
"(",
"[",
"(",
"'ObjectID'",
",",
"search",
")",
",",
"(",
"'BrowseFlag'",
",",
"'BrowseDirec... | 35.536585 | 0.001336 |
async def call_command(bot: NoneBot, ctx: Context_T,
                       name: Union[str, CommandName_T], *,
                       current_arg: str = '',
                       args: Optional[CommandArgs_T] = None,
                       check_perm: bool = True,
                       disable_interaction: bool = False) -> bool:
    """
    Call a command internally.

    Typically invoked by other commands, or by "handle_natural_language"
    when it handles an NLPResult object.

    Note: unless disable_interaction is True, calling this overrides any
    previous command session for the context, even when the called
    command needs no further interaction (a.k.a asking the user for
    more info).

    :param bot: NoneBot instance
    :param ctx: message context
    :param name: command name
    :param current_arg: command current argument string
    :param args: command args
    :param check_perm: should check permission before running command
    :param disable_interaction: disable the command's further interaction
    :return: the command is successfully called
    """
    cmd = _find_command(name)
    if not cmd:
        return False

    session = CommandSession(bot, ctx, cmd,
                             current_arg=current_arg, args=args)
    ctx_id = context_id(session.ctx)
    return await _real_run_command(session, ctx_id,
                                   check_perm=check_perm,
                                   disable_interaction=disable_interaction)
"async",
"def",
"call_command",
"(",
"bot",
":",
"NoneBot",
",",
"ctx",
":",
"Context_T",
",",
"name",
":",
"Union",
"[",
"str",
",",
"CommandName_T",
"]",
",",
"*",
",",
"current_arg",
":",
"str",
"=",
"''",
",",
"args",
":",
"Optional",
"[",
"Comma... | 43.848485 | 0.000676 |
def update_now_playing(
    self,
    artist,
    title,
    album=None,
    album_artist=None,
    duration=None,
    track_number=None,
    mbid=None,
    context=None,
):
    """
    Used to notify Last.fm that a user has started listening to a track.

    Parameters:
        artist (Required) : The artist name
        title (Required) : The track title
        album (Optional) : The album name.
        album_artist (Optional) : The album artist - if this differs
            from the track artist.
        duration (Optional) : The length of the track in seconds.
        track_number (Optional) : The track number of the track on the
            album.
        mbid (Optional) : The MusicBrainz Track ID.
        context (Optional) : Sub-client version
            (not public, only enabled for certain API keys)
    """
    params = {"track": title, "artist": artist}

    # Optional API fields, kept in the historical insertion order;
    # falsy values (None, 0, '') are omitted from the request.
    optional_fields = (
        ("album", album),
        ("albumArtist", album_artist),
        ("context", context),
        ("trackNumber", track_number),
        ("mbid", mbid),
        ("duration", duration),
    )
    for field_name, field_value in optional_fields:
        if field_value:
            params[field_name] = field_value

    _Request(self, "track.updateNowPlaying", params).execute()
"def",
"update_now_playing",
"(",
"self",
",",
"artist",
",",
"title",
",",
"album",
"=",
"None",
",",
"album_artist",
"=",
"None",
",",
"duration",
"=",
"None",
",",
"track_number",
"=",
"None",
",",
"mbid",
"=",
"None",
",",
"context",
"=",
"None",
"... | 32.545455 | 0.002034 |
def handle_message(self, response, ignore_subscribe_messages=False):
    """
    Parses a pub/sub message. If the channel or pattern was subscribed to
    with a message handler, the handler is invoked instead of a parsed
    message being returned.

    :param response: raw server reply list; ``response[0]`` is the
        message type, followed by pattern/channel/data fields.
    :param ignore_subscribe_messages: when True, subscribe/unsubscribe
        confirmations return ``None`` instead of a message dict.
    :returns: the parsed message dict, or ``None`` when the message was
        consumed by a handler or ignored.
    """
    message_type = nativestr(response[0])
    if message_type == 'pmessage':
        # Pattern-based delivery carries an extra 'pattern' field.
        message = {
            'type': message_type,
            'pattern': response[1],
            'channel': response[2],
            'data': response[3]
        }
    else:
        message = {
            'type': message_type,
            'pattern': None,
            'channel': response[1],
            'data': response[2]
        }

    # if this is an unsubscribe message, remove it from memory
    if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES:
        subscribed_dict = None
        if message_type == 'punsubscribe':
            subscribed_dict = self.patterns
        else:
            subscribed_dict = self.channels
        try:
            del subscribed_dict[message['channel']]
        except KeyError:
            # Already removed (e.g. duplicate unsubscribe confirmation).
            pass

    if message_type in self.PUBLISH_MESSAGE_TYPES:
        # if there's a message handler, invoke it
        handler = None
        if message_type == 'pmessage':
            handler = self.patterns.get(message['pattern'], None)
        else:
            handler = self.channels.get(message['channel'], None)
        if handler:
            handler(message)
            return None
    else:
        # this is a subscribe/unsubscribe message. ignore if we don't
        # want them
        if ignore_subscribe_messages or self.ignore_subscribe_messages:
            return None

    return message
"def",
"handle_message",
"(",
"self",
",",
"response",
",",
"ignore_subscribe_messages",
"=",
"False",
")",
":",
"message_type",
"=",
"nativestr",
"(",
"response",
"[",
"0",
"]",
")",
"if",
"message_type",
"==",
"'pmessage'",
":",
"message",
"=",
"{",
"'type... | 35.529412 | 0.001074 |
def decode_intervals(self, encoded, duration=None, multi=True, sparse=False,
                         transition=None, p_state=None, p_init=None):
        '''Decode labeled intervals into (start, end, value) triples

        Parameters
        ----------
        encoded : np.ndarray, shape=(n_frames, m)
            Frame-level annotation encodings as produced by
            ``encode_intervals``
        duration : None or float > 0
            The max duration of the annotation (in seconds)
            Must be greater than the length of encoded array.
        multi : bool
            If true, allow multiple labels per input frame.
            If false, take the most likely label per input frame.
        sparse : bool
            If true, values are returned as indices, not one-hot.
            If false, values are returned as one-hot encodings.
            Only applies when `multi=False`.
        transition : None or np.ndarray [shape=(m, m) or (2, 2) or (m, 2, 2)]
            Optional transition matrix for each interval, used for Viterbi
            decoding. If `multi=True`, then transition should be `(2, 2)` or
            `(m, 2, 2)`-shaped. If `multi=False`, then transition should be
            `(m, m)`-shaped.
        p_state : None or np.ndarray [shape=(m,)]
            Optional marginal probability for each label.
        p_init : None or np.ndarray [shape=(m,)]
            Optional marginal probability for each label.
        Returns
        -------
        [(start, end, value)] : iterable of tuples
            where `start` and `end` are the interval boundaries (in seconds)
            and `value` is an np.ndarray, shape=(m,) of the encoded value
            for this interval.
        '''
        # Real-valued input holds probabilities/logits that still need to be
        # turned into hard decisions: thresholding / argmax when no
        # transition matrix is given, Viterbi decoding otherwise.
        if np.isrealobj(encoded):
            if multi:
                if transition is None:
                    encoded = encoded >= 0.5
                else:
                    encoded = viterbi_binary(encoded.T, transition,
                                             p_init=p_init, p_state=p_state).T
            elif sparse and encoded.shape[1] > 1:
                # map to argmax if it's densely encoded (logits)
                if transition is None:
                    encoded = np.argmax(encoded, axis=1)[:, np.newaxis]
                else:
                    encoded = viterbi_discriminative(encoded.T, transition,
                                                     p_init=p_init,
                                                     p_state=p_state)[:, np.newaxis]
            elif not sparse:
                # if dense and multi, map to one-hot encoding
                if transition is None:
                    encoded = (encoded == np.max(encoded, axis=1, keepdims=True))
                else:
                    encoded_ = viterbi_discriminative(encoded.T, transition,
                                                      p_init=p_init,
                                                      p_state=p_state)
                    # Map to one-hot encoding
                    encoded = np.zeros(encoded.shape, dtype=bool)
                    encoded[np.arange(len(encoded_)), encoded_] = True
        if duration is None:
            # 1+ is fair here, because encode_intervals already pads
            duration = 1 + encoded.shape[0]
        else:
            # NOTE(review): presumably time_to_frames converts seconds to a
            # frame count at (self.sr, self.hop_length) — confirm signature.
            duration = 1 + time_to_frames(duration,
                                          sr=self.sr,
                                          hop_length=self.hop_length)
        # [0, duration] inclusive
        times = times_like(duration + 1,
                           sr=self.sr, hop_length=self.hop_length)
        # Find the change-points of the rows
        if sparse:
            idx = np.where(encoded[1:] != encoded[:-1])[0]
        else:
            idx = np.where(np.max(encoded[1:] != encoded[:-1], axis=-1))[0]
        # Always close the final interval at the end of the array.
        idx = np.unique(np.append(idx, encoded.shape[0]))
        delta = np.diff(np.append(-1, idx))
        # Starting positions can be integrated from changes
        position = np.cumsum(np.append(0, delta))
        return [(times[p], times[p + d], encoded[p])
                for (p, d) in zip(position, delta)]
"def",
"decode_intervals",
"(",
"self",
",",
"encoded",
",",
"duration",
"=",
"None",
",",
"multi",
"=",
"True",
",",
"sparse",
"=",
"False",
",",
"transition",
"=",
"None",
",",
"p_state",
"=",
"None",
",",
"p_init",
"=",
"None",
")",
":",
"if",
"np... | 42.416667 | 0.0012 |
def init_package(path=None, name='manage'):
    """Import every submodule, and recursively every subpackage, of the
    "manage" package at ``path``.

    ``path`` may be a single directory path, a list of directory paths,
    or None — in which case the search path of the already-imported
    ``name`` package is used.
    """
    if path is None:
        # Infer the search path from the already-imported package.
        package = sys.modules[name]
        init_package(package.__path__, name)
    elif isinstance(path, str):
        init_package([path], name)
    else:
        for info in pkgutil.walk_packages(path, f'{name}.'):
            if not info.ispkg:
                importlib.import_module(info.name)
"def",
"init_package",
"(",
"path",
"=",
"None",
",",
"name",
"=",
"'manage'",
")",
":",
"if",
"path",
"is",
"None",
":",
"manager",
"=",
"sys",
".",
"modules",
"[",
"name",
"]",
"init_package",
"(",
"manager",
".",
"__path__",
",",
"name",
")",
"ret... | 30 | 0.001404 |
def to_volume(self):
        """Return the decoded data reshaped into a 3D (Z, Y, X) volume."""
        if not hasattr(self.header.definitions, "Lattice"):
            raise ValueError("Unable to determine data size")
        # Lattice stores the extents in (X, Y, Z) order; numpy wants Z-major.
        x_size, y_size, z_size = self.header.definitions.Lattice
        return self.decoded_data.reshape(z_size, y_size, x_size)
"def",
"to_volume",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"header",
".",
"definitions",
",",
"\"Lattice\"",
")",
":",
"X",
",",
"Y",
",",
"Z",
"=",
"self",
".",
"header",
".",
"definitions",
".",
"Lattice",
"else",
":",
"raise",
... | 36.222222 | 0.008982 |
def read_table(data, fields):
    """Read a Blizzard table structure from ``data``.

    Each value is prefixed by two bytes: a (doubled) index and a key
    byte — so far always '09'. The value itself follows as a doubled
    Variable-Length Quantity (uintvar). Fields named 'unknown' are read
    to advance the stream but discarded from the result.

    Keys sometimes jump non-monotonically (e.g. 0A 09 to 04 09); the
    reason is unclear — possibly nested tables. Further research required.
    """
    table = {}

    def read_field(field_name):
        # Skip the two-byte index/key prefix, then read the doubled VLQ.
        data.read(2)
        table[field_name] = vlq2int(data) / 2
        # Discard unknown fields.
        if field_name == 'unknown':
            del table[field_name]

    for field_name in fields:
        read_field(field_name)
    return table
"def",
"read_table",
"(",
"data",
",",
"fields",
")",
":",
"def",
"read_field",
"(",
"field_name",
")",
":",
"data",
".",
"read",
"(",
"2",
")",
"table",
"[",
"field_name",
"]",
"=",
"vlq2int",
"(",
"data",
")",
"/",
"2",
"# Discard unknown fields.",
"... | 38.8 | 0.001006 |
def get_cookies_from_cache(username):
    """
    Return a RequestsCookieJar containing the cached cookies for the
    given user. If the cache file cannot be read, an empty jar is
    returned.
    """
    logging.debug('Trying to get cookies from the cache.')
    cache_path = get_cookies_cache_path(username)
    jar = requests.cookies.RequestsCookieJar()
    try:
        cached = get_cookie_jar(cache_path)
    except IOError:
        logging.debug('Could not load cookies from the cache.')
    else:
        for cookie in cached:
            jar.set_cookie(cookie)
        logging.debug(
            'Loaded cookies from %s', get_cookies_cache_path(username))
    return jar
"def",
"get_cookies_from_cache",
"(",
"username",
")",
":",
"logging",
".",
"debug",
"(",
"'Trying to get cookies from the cache.'",
")",
"path",
"=",
"get_cookies_cache_path",
"(",
"username",
")",
"cj",
"=",
"requests",
".",
"cookies",
".",
"RequestsCookieJar",
"(... | 29.1 | 0.001664 |
def process(self):
        """
        Populate the report structures (suites, classes, cases) from the
        parsed results XML.
        """
        if isinstance(self.tree, ET.Element):
            root = self.tree
        else:
            root = self.tree.getroot()

        # Unwrap an optional <testrun> wrapper, then collect the suites.
        if root.tag == "testrun":
            root = root[0]
        suites = None
        if root.tag == "testsuite":
            suites = [root]
        if root.tag == "testsuites":
            suites = [child for child in root]
        assert suites, "could not find test suites in results xml"

        for suite_el in suites:
            cursuite = Suite()
            self.suites.append(cursuite)
            cursuite.name = suite_el.attrib["name"]
            if "package" in suite_el.attrib:
                cursuite.package = suite_el.attrib["package"]
            # Some locales emit '1,234.5'-style times; strip the commas.
            cursuite.duration = float(suite_el.attrib.get("time", '0').replace(',', ''))

            for node in suite_el:
                if node.tag == "error":
                    # top level error?
                    cursuite.errors.append({
                        "message": node.attrib.get("message", ""),
                        "type": node.attrib.get("type", ""),
                        "text": node.text
                    })
                if node.tag == "system-out":
                    cursuite.stdout = node.text
                if node.tag == "system-err":
                    cursuite.stderr = node.text
                if node.tag == "properties":
                    for prop in node:
                        if prop.tag == "property":
                            cursuite.properties[prop.attrib["name"]] = prop.attrib["value"]
                if node.tag == "testcase":
                    if not node.attrib.get("classname", None):
                        node.attrib["classname"] = NO_CLASSNAME
                    classname = node.attrib["classname"]
                    if classname not in cursuite:
                        newclass = Class()
                        newclass.name = classname
                        cursuite[classname] = newclass
                    testclass = cursuite[classname]

                    newcase = Case()
                    newcase.name = node.attrib["name"]
                    newcase.testclass = testclass
                    newcase.duration = float(node.attrib.get("time", '0').replace(',', ''))
                    testclass.cases.append(newcase)

                    # does this test case have any children?
                    for child in node:
                        if child.tag == "skipped":
                            newcase.skipped = child.text
                            if "message" in child.attrib:
                                newcase.skipped_msg = child.attrib["message"]
                        elif child.tag == "system-out":
                            newcase.stdout = child.text
                        elif child.tag == "system-err":
                            newcase.stderr = child.text
                        elif child.tag in ("failure", "error"):
                            # failures and errors are stored identically
                            newcase.failure = child.text
                            if "message" in child.attrib:
                                newcase.failure_msg = child.attrib["message"]
                        elif child.tag == "properties":
                            for prop in child:
                                newproperty = Property()
                                newproperty.name = prop.attrib["name"]
                                newproperty.value = prop.attrib["value"]
                                newcase.properties.append(newproperty)
"def",
"process",
"(",
"self",
")",
":",
"suites",
"=",
"None",
"if",
"isinstance",
"(",
"self",
".",
"tree",
",",
"ET",
".",
"Element",
")",
":",
"root",
"=",
"self",
".",
"tree",
"else",
":",
"root",
"=",
"self",
".",
"tree",
".",
"getroot",
"(... | 42.659341 | 0.001762 |
def message(msg, *args):
    '''Print a program message to stdout, clearing any progress line first.'''
    clear_progress()
    sys.stdout.write((msg % args) + '\n')
"def",
"message",
"(",
"msg",
",",
"*",
"args",
")",
":",
"clear_progress",
"(",
")",
"text",
"=",
"(",
"msg",
"%",
"args",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"text",
"+",
"'\\n'",
")"
] | 25 | 0.03876 |
def scan_dir_for_template_files(search_dir):
    """
    Return a map of "likely service/template name" to "template file".

    This includes all the template files in fixtures and in services:
    every file found under ``search_dir/cloudformation/<kind>/templates``.
    The key is the file name without its extension; if the same name
    appears under several kinds, the last one scanned wins (unchanged
    behavior).

    Non-directory entries under ``cloudformation`` (e.g. a stray README)
    are now skipped instead of crashing the scan.
    """
    template_files = {}
    cf_dir = os.path.join(search_dir, 'cloudformation')
    # 'kind' (was 'type', shadowing the builtin) is e.g. 'fixtures' or
    # 'services'.
    for kind in os.listdir(cf_dir):
        template_dir = os.path.join(cf_dir, kind, 'templates')
        # Skip stray files and kinds without a templates directory.
        if not os.path.isdir(template_dir):
            continue
        for filename in os.listdir(template_dir):
            name = os.path.splitext(filename)[0]
            template_files[name] = os.path.join(template_dir, filename)
    return template_files
"def",
"scan_dir_for_template_files",
"(",
"search_dir",
")",
":",
"template_files",
"=",
"{",
"}",
"cf_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"search_dir",
",",
"'cloudformation'",
")",
"for",
"type",
"in",
"os",
".",
"listdir",
"(",
"cf_dir",
")... | 41.846154 | 0.001799 |
def update_qsd(url, qsd=None, remove=None):
    """
    Update or remove keys from a query string in a URL

    :param url: URL to update
    :param qsd: dict of keys to update, a None value leaves it unchanged
    :param remove: list of keys to remove, or "*" to remove all
                   note: updated keys are never removed, even if unchanged
    :return: updated URL
    """
    qsd = qsd or {}
    remove = remove or []

    # parse current query string
    parsed = urlparse(url)
    current_qsd = OrderedDict(parse_qsl(parsed.query))

    # * removes all possible keys
    if remove == "*":
        remove = list(current_qsd.keys())

    # remove keys before updating, but leave updated keys untouched;
    # a key absent from the query string is silently ignored
    # (previously `del` raised KeyError here)
    for key in remove:
        if key not in qsd:
            current_qsd.pop(key, None)

    # and update the query string, skipping falsy values
    for key, value in qsd.items():
        if value:
            current_qsd[key] = value

    return parsed._replace(query=urlencode(current_qsd)).geturl()
"def",
"update_qsd",
"(",
"url",
",",
"qsd",
"=",
"None",
",",
"remove",
"=",
"None",
")",
":",
"qsd",
"=",
"qsd",
"or",
"{",
"}",
"remove",
"=",
"remove",
"or",
"[",
"]",
"# parse current query string",
"parsed",
"=",
"urlparse",
"(",
"url",
")",
"c... | 29.875 | 0.001013 |
def reset_option(self, key, subkey):
        """Resets a single option to the default values.

        :param str key: First identifier of the option.
        :param str subkey: Second identifier of the option.

        :raise:
            :NotRegisteredError: If ``key`` or ``subkey`` do not define
                any option.
            :ValueError: If the targeted option is locked.
        """
        if not self.open:
            return

        key, subkey = _lower_keys(key, subkey)
        _entry_must_exist(self.gc, key, subkey)

        selector = (self.gc["k1"] == key) & (self.gc["k2"] == subkey)
        row = self.gc[selector]
        if row["locked"].values[0]:
            raise ValueError("{0}.{1} option is locked".format(key, subkey))

        # Copy the stored default back into the live value column.
        self.gc.loc[selector, "value"] = row["default"].values[0]
"def",
"reset_option",
"(",
"self",
",",
"key",
",",
"subkey",
")",
":",
"if",
"not",
"self",
".",
"open",
":",
"return",
"key",
",",
"subkey",
"=",
"_lower_keys",
"(",
"key",
",",
"subkey",
")",
"_entry_must_exist",
"(",
"self",
".",
"gc",
",",
"key... | 35.5 | 0.002286 |
def get_string_from_data(self, offset, data):
        """Get an ASCII string from within the data.

        Reads characters starting at ``offset`` until a NUL byte or the
        end of ``data`` is reached. Works with both text strings and
        (Python 3) bytes objects — previously bytes input raised
        TypeError because ``bytes[i]`` yields an int. The result is
        always a ``str``.

        Returns an empty string if ``offset`` is out of range.
        """
        # OC Patch
        chars = []
        while offset < len(data):
            unit = data[offset]
            # Indexing bytes on Python 3 yields ints; normalise to a
            # one-character string so both input types behave the same.
            if isinstance(unit, int):
                code = unit
                unit = chr(unit)
            else:
                code = ord(unit)
            if code == 0:
                break
            chars.append(unit)
            offset += 1
        return ''.join(chars)
"def",
"get_string_from_data",
"(",
"self",
",",
"offset",
",",
"data",
")",
":",
"# OC Patch",
"b",
"=",
"None",
"try",
":",
"b",
"=",
"data",
"[",
"offset",
"]",
"except",
"IndexError",
":",
"return",
"''",
"s",
"=",
"''",
"while",
"ord",
"(",
"b",... | 21.142857 | 0.012931 |
def simple_nearest_indices(xs, res):
    '''
    Simple nearest interpolator that interpolates based on
    the minima and maxima of points based on the passed
    resolution in res.

    Parameters:
    -----------

    xs  -- A collection of `ndim` arrays of points.
    res -- List of resolutions.
    '''
    # Build one evenly-spaced axis per dimension spanning its data range,
    # then mesh them into a regular target grid.
    axes = [np.linspace(min(a), max(a), r) for a, r in zip(xs, res)]
    grid = tuple(np.meshgrid(*axes, indexing='ij'))
    if type(xs) != tuple:
        xs = tuple(xs)
    return nearest_indices(xs, grid)
"def",
"simple_nearest_indices",
"(",
"xs",
",",
"res",
")",
":",
"maxs",
"=",
"[",
"max",
"(",
"a",
")",
"for",
"a",
"in",
"xs",
"]",
"mins",
"=",
"[",
"min",
"(",
"a",
")",
"for",
"a",
"in",
"xs",
"]",
"XS",
"=",
"[",
"np",
".",
"linspace",... | 29.210526 | 0.020942 |
def stuff(packet):
    """
    Add byte stuffing to TSIP packet.

    :param packet: TSIP packet with byte stuffing. The packet must already
        have been stripped or `ValueError` will be raised.
    :type packet: Binary string.
    :return: Packet with byte stuffing.
    """
    if is_framed(packet):
        raise ValueError('packet contains leading DLE and trailing DLE/ETX')
    # Escape every DLE byte by doubling it.
    return packet.replace(CHR_DLE, CHR_DLE + CHR_DLE)
"def",
"stuff",
"(",
"packet",
")",
":",
"if",
"is_framed",
"(",
"packet",
")",
":",
"raise",
"ValueError",
"(",
"'packet contains leading DLE and trailing DLE/ETX'",
")",
"else",
":",
"return",
"packet",
".",
"replace",
"(",
"CHR_DLE",
",",
"CHR_DLE",
"+",
"C... | 31.357143 | 0.002212 |
def help(self, event, command_name=None):
        """
        Shows the help message for the bot. Takes an optional command name
        which when given, will show help for that command.
        """
        # Imported locally so this keeps working on interpreters where the
        # long-deprecated inspect.getargspec (removed in Python 3.11) is
        # unavailable; getfullargspec is a drop-in superset here.
        from inspect import getdoc, getfullargspec

        if command_name is None:
            return ("Type !commands for a list of all commands. Type "
                    "!help [command] to see help for a specific command.")
        try:
            command = self.commands_dict()[command_name]
        except KeyError:
            return "%s is not a command" % command_name

        argspec = getfullargspec(command)
        # Drop the implicit (self, event) parameters.
        args = argspec.args[2:]
        defaults = argspec.defaults or []
        # Annotate the trailing parameters with their default values.
        for i in range(-1, -len(defaults) - 1, -1):
            args[i] = "%s [default: %s]" % (args[i], defaults[i])
        args = ", ".join(args)
        doc = getdoc(command).replace("\n", " ")
        return "help for %s: (args: %s) %s" % (command_name, args, doc)
"def",
"help",
"(",
"self",
",",
"event",
",",
"command_name",
"=",
"None",
")",
":",
"if",
"command_name",
"is",
"None",
":",
"return",
"(",
"\"Type !commands for a list of all commands. Type \"",
"\"!help [command] to see help for a specific command.\"",
")",
"try",
"... | 42.571429 | 0.002188 |
def to_text(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """Make sure that a string is a text string.

    Byte strings are decoded using *encoding* and *errors*; text strings
    are returned unchanged. The composed error strategies
    ``surrogate_or_strict``, ``surrogate_or_replace`` and
    ``surrogate_then_replace`` resolve to ``surrogateescape`` when that
    handler is available, otherwise to ``strict`` or ``replace``
    respectively (surrogateescape availability is checked at import time).

    :kwarg nonstring: strategy for non-string input. One of:
        ``simplerepr`` (default) — a traceback-free ``str()``/``repr()``;
        ``empty`` — return an empty text string;
        ``passthru`` — return *obj* unchanged;
        ``strict`` — raise :exc:`TypeError`.
    :returns: a text string, except under ``passthru``; never bytes.
    """
    if isinstance(obj, text_type):
        return obj

    # Resolve the composed error handlers to a real codecs handler.
    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'

    if isinstance(obj, binary_type):
        # No special handling needed for surrogate_then_replace here:
        # all bytes either become surrogates or decode cleanly.
        return obj.decode(encoding, errors)

    # Non-string input; the common string cases were handled above.
    if nonstring == 'passthru':
        return obj
    if nonstring == 'empty':
        return u''
    if nonstring == 'strict':
        raise TypeError('obj must be a string type')
    if nonstring != 'simplerepr':
        raise TypeError('Invalid value %s for to_text\'s nonstring parameter' % nonstring)

    try:
        value = str(obj)
    except UnicodeError:
        try:
            value = repr(obj)
        except UnicodeError:
            # Giving up
            return u''
    return to_text(value, encoding, errors)
"def",
"to_text",
"(",
"obj",
",",
"encoding",
"=",
"'utf-8'",
",",
"errors",
"=",
"None",
",",
"nonstring",
"=",
"'simplerepr'",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"text_type",
")",
":",
"return",
"obj",
"if",
"errors",
"in",
"_COMPOSED_ERRO... | 44.494382 | 0.001235 |
def related_obj_to_dict(obj, **kwargs):
    """ Convert a known related object to a dictionary. """
    # Explicitly discard formatter kwarg, should not be cascaded down.
    kwargs.pop('formatter', None)

    # Behaviour flags (cascaded down through the recursive to_dict calls).
    skip_private = kwargs.get("suppress_private_attr", False)  # drop _private fields
    skip_empty = kwargs.get("suppress_empty_values", False)    # drop None values

    # OrderedDict by default; overridable via dict_factory.
    out = kwargs.get("dict_factory", OrderedDict)()
    for attrib in fields(obj.__class__):
        if skip_private and attrib.name.startswith("_"):
            continue
        metadata = attrib.metadata or {}
        # The related-specific formatter (see fields.DateField) is forwarded
        # only to the conversion of this one value.
        value = to_dict(getattr(obj, attrib.name),
                        formatter=metadata.get('formatter'), **kwargs)
        if skip_empty and value is None:
            continue
        # The serialized key name can be overridden via metadata.
        out[attrib.metadata.get('key') or attrib.name] = value
    return out
"def",
"related_obj_to_dict",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"# Explicitly discard formatter kwarg, should not be cascaded down.",
"kwargs",
".",
"pop",
"(",
"'formatter'",
",",
"None",
")",
"# If True, remove fields that start with an underscore (e.g. _secret)"... | 33.444444 | 0.000646 |
def get_all_users(path_prefix='/', region=None, key=None, keyid=None,
                  profile=None):
    '''
    Get and return all IAM user details, starting at the optional path.

    .. versionadded:: 2016.3.0

    CLI Example:

        salt-call boto_iam.get_all_users
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if not conn:
        return None

    response = conn.get_all_users(path_prefix=path_prefix)
    result = response.list_users_response.list_users_result
    users = result.users
    marker = getattr(result, 'marker', None)
    # Follow pagination markers until the listing is exhausted.
    while marker:
        response = conn.get_all_users(path_prefix=path_prefix, marker=marker)
        result = response.list_users_response.list_users_result
        users = users + result.users
        marker = getattr(result, 'marker', None)
    return users
"def",
"get_all_users",
"(",
"path_prefix",
"=",
"'/'",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
... | 33.846154 | 0.00221 |
def Search(self, key):
        """Search group list by providing partial name, ID, description or other key.

        >>> clc.v2.Datacenter().Groups().Search("Default Group")
        [<clc.APIv2.group.Group object at 0x1065b0f50>, <clc.APIv2.group.Group object at 0x1065b0d10>]
        """
        needle = key.lower()
        matches = []
        for group in self.groups:
            # Case-insensitive substring match against id, name, description.
            haystacks = (group.id, group.name, group.description)
            if any(needle in field.lower() for field in haystacks):
                matches.append(group)
        return matches
"def",
"Search",
"(",
"self",
",",
"key",
")",
":",
"results",
"=",
"[",
"]",
"for",
"group",
"in",
"self",
".",
"groups",
":",
"if",
"group",
".",
"id",
".",
"lower",
"(",
")",
".",
"find",
"(",
"key",
".",
"lower",
"(",
")",
")",
"!=",
"-",... | 36.133333 | 0.032374 |
def values(self, limit=None, page=None):
        """
        Convenience method returning just the values from the query —
        equivalent to ``self.get(...).values()``. To get all values,
        use ``self.all().values()``.
        """
        page_result = self.get(limit=limit, page=page)
        return page_result.values()
"def",
"values",
"(",
"self",
",",
"limit",
"=",
"None",
",",
"page",
"=",
"None",
")",
":",
"return",
"self",
".",
"get",
"(",
"limit",
"=",
"limit",
",",
"page",
"=",
"page",
")",
".",
"values",
"(",
")"
] | 39.714286 | 0.010563 |
def decrypt_verify(encrypted, session_keys=None):
    """Decrypts the given ciphertext string and returns both the
    signatures (if any) and the plaintext.

    :param bytes encrypted: the mail to decrypt
    :param list[str] session_keys: a list OpenPGP session keys
    :returns: the signatures and decrypted plaintext data
    :rtype: tuple[list[gpg.resuit.Signature], str]
    :raises alot.errors.GPGProblem: if the decryption fails
    """
    # Prefer the cheaper session-key path when keys were supplied,
    # falling back to a full context-based decryption on failure.
    if session_keys is not None:
        try:
            return _decrypt_verify_session_keys(encrypted, session_keys)
        except GPGProblem:
            pass
    return _decrypt_verify_with_context(gpg.core.Context(), encrypted)
"def",
"decrypt_verify",
"(",
"encrypted",
",",
"session_keys",
"=",
"None",
")",
":",
"if",
"session_keys",
"is",
"not",
"None",
":",
"try",
":",
"return",
"_decrypt_verify_session_keys",
"(",
"encrypted",
",",
"session_keys",
")",
"except",
"GPGProblem",
":",
... | 37.666667 | 0.001439 |
def _parse(string):
    """
    Parses given XML document content.

    Returns the resulting root XML element node or None if the given XML
    content is empty.

    @param string: XML document content to parse.
    @type string: I{bytes}
    @return: Resulting root XML element node or None.
    @rtype: L{Element}|I{None}
    """
    if not string:
        return None
    return suds.sax.parser.Parser().parse(string=string)
"def",
"_parse",
"(",
"string",
")",
":",
"if",
"string",
":",
"return",
"suds",
".",
"sax",
".",
"parser",
".",
"Parser",
"(",
")",
".",
"parse",
"(",
"string",
"=",
"string",
")"
] | 26.4 | 0.002439 |
def request_openbus(self, service, endpoint, **kwargs):
        """Make a request to the given endpoint of the ``openbus`` server.

        This returns the plain JSON (dict) response which can then be
        parsed using one of the implemented types.

        Args:
            service (str): Service to fetch ('bus' or 'geo').
            endpoint (str): Endpoint to send the request to.
                This string corresponds to the key in the ``ENDPOINTS`` dict.
            **kwargs: Request arguments.

        Returns:
            Obtained response (dict) or None if the endpoint was not found.
        """
        endpoint_maps = {'bus': ENDPOINTS_BUS, 'geo': ENDPOINTS_GEO}
        endpoints = endpoint_maps.get(service)
        if endpoints is None:
            # Unknown service
            return None
        if endpoint not in endpoints:
            # Unknown endpoint
            return None

        url = URL_OPENBUS + endpoints[endpoint]

        # Append credentials to request
        kwargs['idClient'] = self._emt_id
        kwargs['passKey'] = self._emt_pass

        # SSL verification fails...
        # return requests.post(url, data=kwargs, verify=False).json()
        return requests.post(url, data=kwargs, verify=True).json()
"def",
"request_openbus",
"(",
"self",
",",
"service",
",",
"endpoint",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"service",
"==",
"'bus'",
":",
"endpoints",
"=",
"ENDPOINTS_BUS",
"elif",
"service",
"==",
"'geo'",
":",
"endpoints",
"=",
"ENDPOINTS_GEO",
"el... | 32.289474 | 0.001582 |
def mult_inv(a, b):
    """
    Calculate the multiplicative inverse a**-1 % b.

    This function works for n >= 5 where n is prime.
    """
    # Extended Euclidean algorithm; keep the original modulus around so a
    # negative coefficient can be shifted back into range at the end.
    modulus = b
    x, prev_x = 0, 1
    y, prev_y = 1, 0
    while b != 0:
        quotient, remainder = divmod(a, b)
        a, b = b, remainder
        x, prev_x = prev_x - quotient * x, x
        y, prev_y = prev_y - quotient * y, y
    return prev_x + modulus if prev_x < 0 else prev_x
"def",
"mult_inv",
"(",
"a",
",",
"b",
")",
":",
"# in addition to the normal setup, we also remember b",
"last_b",
",",
"x",
",",
"last_x",
",",
"y",
",",
"last_y",
"=",
"b",
",",
"0",
",",
"1",
",",
"1",
",",
"0",
"while",
"b",
"!=",
"0",
":",
"q",... | 27.941176 | 0.002037 |
def qteBindKeyApplet(self, keysequence, macroName: str,
                         appletObj: QtmacsApplet):
        """
        Bind ``macroName`` to all widgets in ``appletObj``.

        This does not affect the key bindings of other applets, or of
        other instances of the same applet.

        The ``keysequence`` can be a string (eg. '<ctrl>+x <ctrl>+f'), a
        list of ``QtCore.Qt`` constant tuples, or a
        ``QtmacsKeysequence`` object.

        |Args|

        * ``keysequence`` (**str**, **list** of **tuples**,
          **QtmacsKeysequence**): key sequence to activate ``macroName``.
        * ``macroName`` (**str**): the macro to associate with
          ``keysequence``.
        * ``appletObj`` (**QtmacsApplet**): only widgets in this applet
          are affected.

        |Returns|

        * **bool**: whether or not the binding succeeded.

        |Raises|

        * **QtmacsArgumentError** if at least one argument has an invalid type.
        * **QtmacsKeysequenceError** if the provided ``keysequence``
          could not be parsed.
        """
        # Normalise the key sequence; raises QtmacsKeysequenceError when
        # the conversion is impossible.
        keysequence = QtmacsKeysequence(keysequence)

        # Refuse to bind keys to macros Qtmacs does not know about.
        if not self.qteIsMacroRegistered(macroName):
            msg = ('Cannot bind key because the macro <b>{}</b> does'
                   'not exist.'.format(macroName))
            self.qteLogger.error(msg, stack_info=True)
            return False

        # The applet itself can receive keyboard events (eg. when it is
        # empty), so bind the key to it as well...
        appletObj._qteAdmin.keyMap.qteInsertKey(keysequence, macroName)
        # ...and update the key map of every widget inside the applet.
        for widget in appletObj._qteAdmin.widgetList:
            self.qteBindKeyWidget(keysequence, macroName, widget)
        return True
"def",
"qteBindKeyApplet",
"(",
"self",
",",
"keysequence",
",",
"macroName",
":",
"str",
",",
"appletObj",
":",
"QtmacsApplet",
")",
":",
"# Convert the key sequence into a QtmacsKeysequence object, or",
"# raise a QtmacsKeysequenceError if the conversion is",
"# impossible.",
... | 38.803571 | 0.001346 |
def write_feed_dangerously(
    feed: Feed, outpath: str, nodes: Optional[Collection[str]] = None
) -> str:
    """Naively write a feed to a zipfile.

    This function provides no sanity checks. Use it at your own risk.

    Each requested node is dumped to CSV in a temporary directory (empty
    tables are skipped), the directory is zipped to ``outpath``, and the
    resulting archive path is returned.
    """
    nodes = DEFAULT_NODES if nodes is None else nodes

    # Create the temp dir *before* entering the try block: previously a
    # failing mkdtemp() left `tmpdir` unbound and the finally clause
    # raised NameError, masking the real error.
    tmpdir = tempfile.mkdtemp()
    try:
        def write_node(node):
            # Only materialise non-empty tables.
            df = feed.get(node)
            if not df.empty:
                df.to_csv(os.path.join(tmpdir, node), index=False)

        pool = ThreadPool(len(nodes))
        try:
            pool.map(write_node, nodes)
        finally:
            pool.terminate()

        if outpath.endswith(".zip"):
            outpath, _ = os.path.splitext(outpath)
        outpath = shutil.make_archive(outpath, "zip", tmpdir)
    finally:
        shutil.rmtree(tmpdir)
    return outpath
"def",
"write_feed_dangerously",
"(",
"feed",
":",
"Feed",
",",
"outpath",
":",
"str",
",",
"nodes",
":",
"Optional",
"[",
"Collection",
"[",
"str",
"]",
"]",
"=",
"None",
")",
"->",
"str",
":",
"nodes",
"=",
"DEFAULT_NODES",
"if",
"nodes",
"is",
"None... | 26.21875 | 0.001149 |
def spf_rec(rdata):
    '''
    Validate and parse DNS record data for SPF record(s)

    :param rdata: DNS record data
    :return: dict w/fields (mechanism name -> {'qualifier': ..., 'value': ...})
    :raises ValueError: if the data is not an SPF record
    :raises KeyError: if a modifier appears more than once
    '''
    spf_fields = rdata.split(' ')
    if not spf_fields.pop(0).startswith('v=spf'):
        raise ValueError('Not an SPF record')
    res = OrderedDict()
    mods = set()
    for mech_spec in spf_fields:
        if mech_spec.startswith(('exp', 'redirect')):
            # It's a modifier
            mod, val = mech_spec.split('=', 1)
            if mod in mods:
                raise KeyError('Modifier {0} can only appear once'.format(mod))
            mods.add(mod)
            continue
            # TODO: Should be in something intelligent like an SPF_get
            # if mod == 'exp':
            #     res[mod] = lookup(val, 'TXT', **qargs)
            #     continue
            # elif mod == 'redirect':
            #     return query(val, 'SPF', **qargs)
        mech = {}
        if mech_spec[0] in ('+', '-', '~', '?'):
            mech['qualifier'] = mech_spec[0]
            mech_spec = mech_spec[1:]
        if ':' in mech_spec:
            mech_spec, val = mech_spec.split(':', 1)
        elif '/' in mech_spec:
            # Capture the CIDR suffix *before* truncating mech_spec;
            # previously `val` was sliced from the already-truncated
            # string, which always yielded '' and silently dropped it.
            idx = mech_spec.find('/')
            val = mech_spec[idx:]
            mech_spec = mech_spec[0:idx]
        else:
            val = None
        res[mech_spec] = mech
        if not val:
            continue
        elif mech_spec in ('ip4', 'ip6'):
            val = ipaddress.ip_interface(val)
            # NOTE(review): validation via assert is stripped under -O;
            # kept for backward compatibility (callers may catch
            # AssertionError).
            assert val.version == int(mech_spec[-1])
        mech['value'] = val
    return res
"def",
"spf_rec",
"(",
"rdata",
")",
":",
"spf_fields",
"=",
"rdata",
".",
"split",
"(",
"' '",
")",
"if",
"not",
"spf_fields",
".",
"pop",
"(",
"0",
")",
".",
"startswith",
"(",
"'v=spf'",
")",
":",
"raise",
"ValueError",
"(",
"'Not an SPF record'",
"... | 29.113208 | 0.000627 |
def autoencoder_discrete_pong():
  """Discrete autoencoder model for compressing pong frames."""
  hparams = autoencoder_ordered_discrete()
  # Override the base ordered-discrete settings for pong frames.
  overrides = {
      "num_hidden_layers": 3,
      "bottleneck_bits": 24,
      "batch_size": 2,
      "gan_loss_factor": 0.01,
      "bottleneck_l2_factor": 0.001,
  }
  for attr, value in overrides.items():
    setattr(hparams, attr, value)
  hparams.add_hparam("video_modality_loss_cutoff", 0.02)
  return hparams
"def",
"autoencoder_discrete_pong",
"(",
")",
":",
"hparams",
"=",
"autoencoder_ordered_discrete",
"(",
")",
"hparams",
".",
"num_hidden_layers",
"=",
"3",
"hparams",
".",
"bottleneck_bits",
"=",
"24",
"hparams",
".",
"batch_size",
"=",
"2",
"hparams",
".",
"gan... | 36.4 | 0.02681 |
def get(self, sid):
    """
    Constructs a ModelBuildContext for a single model build resource.

    :param sid: The unique string that identifies the resource
    :returns: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildContext
    :rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildContext
    """
    assistant_sid = self._solution['assistant_sid']
    return ModelBuildContext(self._version, assistant_sid=assistant_sid, sid=sid, )
"def",
"get",
"(",
"self",
",",
"sid",
")",
":",
"return",
"ModelBuildContext",
"(",
"self",
".",
"_version",
",",
"assistant_sid",
"=",
"self",
".",
"_solution",
"[",
"'assistant_sid'",
"]",
",",
"sid",
"=",
"sid",
",",
")"
] | 41.2 | 0.011876 |
def parse_args():
    """
    Parse commandline arguments.

    Builds the full GNMT training CLI (dataset, results, model, general,
    training, optimizer, scheduler, validation, test, checkpointing,
    benchmark and distributed option groups) and returns the parsed
    namespace.  The scheduler options ``warmup_steps``, ``remain_steps``
    and ``decay_interval`` are accepted as strings and converted to
    Python literals with ``literal_eval`` before being returned.
    """
    # Helper: adds a mutually exclusive --<name>/--no-<name> boolean pair
    # to `group`, defaulting to `default`.
    def exclusive_group(group, name, default, help):
        destname = name.replace('-', '_')
        subgroup = group.add_mutually_exclusive_group(required=False)
        subgroup.add_argument(f'--{name}', dest=f'{destname}',
                              action='store_true',
                              help=f'{help} (use \'--no-{name}\' to disable)')
        subgroup.add_argument(f'--no-{name}', dest=f'{destname}',
                              action='store_false', help=argparse.SUPPRESS)
        subgroup.set_defaults(**{destname: default})
    parser = argparse.ArgumentParser(
        description='GNMT training',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # dataset
    dataset = parser.add_argument_group('dataset setup')
    dataset.add_argument('--dataset-dir', default='data/wmt16_de_en',
                         help='path to the directory with training/test data')
    dataset.add_argument('--max-size', default=None, type=int,
                         help='use at most MAX_SIZE elements from training \
                         dataset (useful for benchmarking), by default \
                         uses entire dataset')
    # results
    results = parser.add_argument_group('results setup')
    results.add_argument('--results-dir', default='results',
                         help='path to directory with results, it will be \
                         automatically created if it does not exist')
    results.add_argument('--save', default='gnmt',
                         help='defines subdirectory within RESULTS_DIR for \
                         results from this training run')
    results.add_argument('--print-freq', default=10, type=int,
                         help='print log every PRINT_FREQ batches')
    # model
    model = parser.add_argument_group('model setup')
    model.add_argument('--hidden-size', default=1024, type=int,
                       help='model hidden size')
    model.add_argument('--num-layers', default=4, type=int,
                       help='number of RNN layers in encoder and in decoder')
    model.add_argument('--dropout', default=0.2, type=float,
                       help='dropout applied to input of RNN cells')
    exclusive_group(group=model, name='share-embedding', default=True,
                    help='use shared embeddings for encoder and decoder')
    model.add_argument('--smoothing', default=0.1, type=float,
                       help='label smoothing, if equal to zero model will use \
                       CrossEntropyLoss, if not zero model will be trained \
                       with label smoothing loss')
    # setup
    general = parser.add_argument_group('general setup')
    general.add_argument('--math', default='fp32', choices=['fp16', 'fp32'],
                         help='arithmetic type')
    general.add_argument('--seed', default=None, type=int,
                         help='master seed for random number generators, if \
                         "seed" is undefined then the master seed will be \
                         sampled from random.SystemRandom()')
    exclusive_group(group=general, name='eval', default=True,
                    help='run validation and test after every epoch')
    exclusive_group(group=general, name='env', default=False,
                    help='print info about execution env')
    exclusive_group(group=general, name='cuda', default=True,
                    help='enables cuda')
    exclusive_group(group=general, name='cudnn', default=True,
                    help='enables cudnn')
    # training
    training = parser.add_argument_group('training setup')
    training.add_argument('--train-batch-size', default=128, type=int,
                          help='training batch size per worker')
    training.add_argument('--train-global-batch-size', default=None, type=int,
                          help='global training batch size, this argument \
                          does not have to be defined, if it is defined it \
                          will be used to automatically \
                          compute train_iter_size \
                          using the equation: train_iter_size = \
                          train_global_batch_size // (train_batch_size * \
                          world_size)')
    training.add_argument('--train-iter-size', metavar='N', default=1,
                          type=int,
                          help='training iter size, training loop will \
                          accumulate gradients over N iterations and execute \
                          optimizer every N steps')
    training.add_argument('--epochs', default=8, type=int,
                          help='max number of training epochs')
    training.add_argument('--grad-clip', default=5.0, type=float,
                          help='enables gradient clipping and sets maximum \
                          norm of gradients')
    training.add_argument('--max-length-train', default=50, type=int,
                          help='maximum sequence length for training \
                          (including special BOS and EOS tokens)')
    training.add_argument('--min-length-train', default=0, type=int,
                          help='minimum sequence length for training \
                          (including special BOS and EOS tokens)')
    training.add_argument('--train-loader-workers', default=2, type=int,
                          help='number of workers for training data loading')
    training.add_argument('--batching', default='bucketing', type=str,
                          choices=['random', 'sharding', 'bucketing'],
                          help='select batching algorithm')
    training.add_argument('--shard-size', default=80, type=int,
                          help='shard size for "sharding" batching algorithm, \
                          in multiples of global batch size')
    training.add_argument('--num-buckets', default=5, type=int,
                          help='number of buckets for "bucketing" batching \
                          algorithm')
    # optimizer
    optimizer = parser.add_argument_group('optimizer setup')
    optimizer.add_argument('--optimizer', type=str, default='Adam',
                           help='training optimizer')
    optimizer.add_argument('--lr', type=float, default=1.00e-3,
                           help='learning rate')
    optimizer.add_argument('--optimizer-extra', type=str,
                           default="{}",
                           help='extra options for the optimizer')
    # scheduler
    scheduler = parser.add_argument_group('learning rate scheduler setup')
    scheduler.add_argument('--warmup-steps', type=str, default='200',
                           help='number of learning rate warmup iterations')
    scheduler.add_argument('--remain-steps', type=str, default='0.666',
                           help='starting iteration for learning rate decay')
    scheduler.add_argument('--decay-interval', type=str, default='None',
                           help='interval between learning rate decay steps')
    scheduler.add_argument('--decay-steps', type=int, default=4,
                           help='max number of learning rate decay steps')
    scheduler.add_argument('--decay-factor', type=float, default=0.5,
                           help='learning rate decay factor')
    # validation
    val = parser.add_argument_group('validation setup')
    val.add_argument('--val-batch-size', default=64, type=int,
                     help='batch size for validation')
    val.add_argument('--max-length-val', default=125, type=int,
                     help='maximum sequence length for validation \
                     (including special BOS and EOS tokens)')
    val.add_argument('--min-length-val', default=0, type=int,
                     help='minimum sequence length for validation \
                     (including special BOS and EOS tokens)')
    val.add_argument('--val-loader-workers', default=0, type=int,
                     help='number of workers for validation data loading')
    # test
    test = parser.add_argument_group('test setup')
    test.add_argument('--test-batch-size', default=128, type=int,
                      help='batch size for test')
    test.add_argument('--max-length-test', default=150, type=int,
                      help='maximum sequence length for test \
                      (including special BOS and EOS tokens)')
    test.add_argument('--min-length-test', default=0, type=int,
                      help='minimum sequence length for test \
                      (including special BOS and EOS tokens)')
    test.add_argument('--beam-size', default=5, type=int,
                      help='beam size')
    test.add_argument('--len-norm-factor', default=0.6, type=float,
                      help='length normalization factor')
    test.add_argument('--cov-penalty-factor', default=0.1, type=float,
                      help='coverage penalty factor')
    test.add_argument('--len-norm-const', default=5.0, type=float,
                      help='length normalization constant')
    test.add_argument('--intra-epoch-eval', metavar='N', default=0, type=int,
                      help='evaluate within training epoch, this option will \
                      enable extra N equally spaced evaluations executed \
                      during each training epoch')
    test.add_argument('--test-loader-workers', default=0, type=int,
                      help='number of workers for test data loading')
    # checkpointing
    chkpt = parser.add_argument_group('checkpointing setup')
    chkpt.add_argument('--start-epoch', default=0, type=int,
                       help='manually set initial epoch counter')
    chkpt.add_argument('--resume', default=None, type=str, metavar='PATH',
                       help='resumes training from checkpoint from PATH')
    chkpt.add_argument('--save-all', action='store_true', default=False,
                       help='saves checkpoint after every epoch')
    chkpt.add_argument('--save-freq', default=5000, type=int,
                       help='save checkpoint every SAVE_FREQ batches')
    chkpt.add_argument('--keep-checkpoints', default=0, type=int,
                       help='keep only last KEEP_CHECKPOINTS checkpoints, \
                       affects only checkpoints controlled by --save-freq \
                       option')
    # benchmarking
    benchmark = parser.add_argument_group('benchmark setup')
    benchmark.add_argument('--target-bleu', default=24.0, type=float,
                           help='target accuracy, training will be stopped \
                           when the target is achieved')
    # distributed
    distributed = parser.add_argument_group('distributed setup')
    distributed.add_argument('--rank', default=0, type=int,
                             help='global rank of the process, do not set!')
    distributed.add_argument('--local_rank', default=0, type=int,
                             help='local rank of the process, do not set!')
    args = parser.parse_args()
    # Scheduler options arrive as strings; convert to Python literals
    # (e.g. '200' -> 200, 'None' -> None) before returning.
    args.warmup_steps = literal_eval(args.warmup_steps)
    args.remain_steps = literal_eval(args.remain_steps)
    args.decay_interval = literal_eval(args.decay_interval)
    return args | [
"def",
"parse_args",
"(",
")",
":",
"def",
"exclusive_group",
"(",
"group",
",",
"name",
",",
"default",
",",
"help",
")",
":",
"destname",
"=",
"name",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"subgroup",
"=",
"group",
".",
"add_mutually_exclusive_g... | 53.114286 | 0.000088 |
def getrange(self):
    """Retrieve the dataset min and max values.

    Args::

      no argument

    Returns::

      (min, max) tuple (attribute 'valid_range')

    Note that those are the values as stored by the 'setrange' method.
    'getrange' does *NOT* compute the min and max from the current
    dataset contents.  An exception is raised if the range is not set.

    The range returned by 'getrange' is part of the so-called "standard"
    SDS attributes.  It corresponds to the following attribute::

      valid_range

    C library equivalent: SDgetrange
    """
    # Obtain the SDS data type so we can allocate buffers of the right kind.
    try:
        sds_name, rank, dim_sizes, data_type, n_attrs = \
            self.info()
    except HDF4Error:
        raise HDF4Error('getrange : invalid SDS identifier')
    n_values = 1
    # Map each supported SDS type to the matching C buffer constructor.
    buffer_factories = {
        SDC.CHAR8: _C.array_byte,
        SDC.UCHAR8: _C.array_byte,
        SDC.UINT8: _C.array_byte,
        SDC.INT8: _C.array_int8,
        SDC.INT16: _C.array_int16,
        SDC.UINT16: _C.array_uint16,
        SDC.INT32: _C.array_int32,
        SDC.UINT32: _C.array_uint32,
        SDC.FLOAT32: _C.array_float32,
        SDC.FLOAT64: _C.array_float64,
    }
    try:
        make_buffer = buffer_factories[data_type]
    except KeyError:
        # BUG FIX: the original referenced an undefined name `data` in this
        # message, raising NameError instead of the intended HDF4Error.
        raise HDF4Error("getrange: SDS has an illegal or "
                        "unsupported type %d" % data_type)
    # CHAR8 values are converted to strings; every other type to scalars.
    convert = _array_to_str if data_type == SDC.CHAR8 else _array_to_ret
    buf1 = make_buffer(n_values)
    buf2 = make_buffer(n_values)
    # Note: The C routine returns the max in buf1 and the min in buf2.
    # We swap the values returned by the Python interface, since it is
    # more natural to return min first, then max.
    status = _C.SDgetrange(self._id, buf1, buf2)
    _checkErr('getrange', status, 'range not set')
    return convert(buf2, n_values), convert(buf1, n_values)
"def",
"getrange",
"(",
"self",
")",
":",
"# Obtain SDS data type.",
"try",
":",
"sds_name",
",",
"rank",
",",
"dim_sizes",
",",
"data_type",
",",
"n_attrs",
"=",
"self",
".",
"info",
"(",
")",
"except",
"HDF4Error",
":",
"raise",
"HDF4Error",
"(",
"'getra... | 31.686747 | 0.001844 |
def proc_val(key, val):
        """
        Static helper method to convert Feff parameters to proper types, e.g.
        integers, floats, lists, etc.

        Args:
            key: Feff parameter key
            val: Actual value (string) of Feff parameter.

        Returns:
            The parsed value: a regex match for 'CIF', a list for list-type
            tags, a float for float-type tags; on any parse failure (or an
            unrecognized key) the capitalized raw string is returned.
        """
        # Every valid Feff tag except ELNES/EXELFS is parsed as a list.
        list_type_keys = list(VALID_FEFF_TAGS)
        del list_type_keys[list_type_keys.index("ELNES")]
        del list_type_keys[list_type_keys.index("EXELFS")]
        # NOTE(review): currently empty, so the boolean branch below is dead.
        boolean_type_keys = ()
        float_type_keys = ("S02", "EXAFS", "RPATH")
        # "1.5" / "1e-3" -> float, anything else -> int.
        def smart_int_or_float(numstr):
            if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
                return float(numstr)
            else:
                return int(numstr)
        try:
            if key.lower() == 'cif':
                # Extract the .cif filename from the value.
                m = re.search(r"\w+.cif", val)
                return m.group(0)
            if key in list_type_keys:
                output = list()
                toks = re.split(r"\s+", val)
                for tok in toks:
                    # "N*x" shorthand expands to N copies of x.
                    m = re.match(r"(\d+)\*([\d\.\-\+]+)", tok)
                    if m:
                        output.extend([smart_int_or_float(m.group(2))] *
                                      int(m.group(1)))
                    else:
                        output.append(smart_int_or_float(tok))
                return output
            if key in boolean_type_keys:
                m = re.search(r"^\W+([TtFf])", val)
                if m:
                    if m.group(1) == "T" or m.group(1) == "t":
                        return True
                    else:
                        return False
                raise ValueError(key + " should be a boolean type!")
            if key in float_type_keys:
                return float(val)
        except ValueError:
            # Any conversion failure falls back to the capitalized raw string.
            return val.capitalize()
        return val.capitalize() | [
"def",
"proc_val",
"(",
"key",
",",
"val",
")",
":",
"list_type_keys",
"=",
"list",
"(",
"VALID_FEFF_TAGS",
")",
"del",
"list_type_keys",
"[",
"list_type_keys",
".",
"index",
"(",
"\"ELNES\"",
")",
"]",
"del",
"list_type_keys",
"[",
"list_type_keys",
".",
"i... | 32.654545 | 0.001081 |
def sort(self, key=None, reverse=False):
        """
        Sort the sites of this structure in place.

        The parameters mirror ``list.sort``.  With ``key=None`` sites are
        compared directly, which orders them by the electronegativity of
        the species.  Unlike ``get_sorted_structure`` (which returns a new
        Structure), this mutates the current Structure.

        Args:
            key: One-argument function used to extract a comparison key
                from each site (e.g. ``key=str.lower``).  ``None`` compares
                the elements directly.
            reverse (bool): If True, sort in descending order.
        """
        sites = self._sites
        sites.sort(key=key, reverse=reverse)
"def",
"sort",
"(",
"self",
",",
"key",
"=",
"None",
",",
"reverse",
"=",
"False",
")",
":",
"self",
".",
"_sites",
".",
"sort",
"(",
"key",
"=",
"key",
",",
"reverse",
"=",
"reverse",
")"
] | 50.117647 | 0.002304 |
def change_ref(self, gm=None, r0=None, lmax=None):
        """
        Return a new SHGravCoeffs class instance with a different reference gm
        or r0.

        Usage
        -----
        clm = x.change_ref([gm, r0, lmax])

        Returns
        -------
        clm : SHGravCoeffs class instance.

        Parameters
        ----------
        gm : float, optional, default = self.gm
            The gravitational constant times the mass that is associated with
            the gravitational potential coefficients.
        r0 : float, optional, default = self.r0
            The reference radius of the spherical harmonic coefficients.
        lmax : int, optional, default = self.lmax
            Maximum spherical harmonic degree to output.

        Description
        -----------
        This method returns a new class instance of the gravitational
        potential, but using a different reference gm or r0. When
        changing the reference radius r0, the spherical harmonic coefficients
        will be upward or downward continued under the assumption that the
        reference radius is exterior to the body.
        """
        if lmax is None:
            lmax = self.lmax
        # Work on a padded/truncated copy so that self is never mutated.
        clm = self.pad(lmax)
        # Rescale coefficients (and errors) for a new GM: V scales as gm.
        if gm is not None and gm != self.gm:
            clm.coeffs *= self.gm / gm
            clm.gm = gm
            if self.errors is not None:
                clm.errors *= self.gm / gm
        # Continue each degree l by (r0_old / r0_new)**l for the new radius.
        if r0 is not None and r0 != self.r0:
            for l in _np.arange(lmax+1):
                clm.coeffs[:, l, :l+1] *= (self.r0 / r0)**l
                if self.errors is not None:
                    clm.errors[:, l, :l+1] *= (self.r0 / r0)**l
            clm.r0 = r0
        return clm | [
"def",
"change_ref",
"(",
"self",
",",
"gm",
"=",
"None",
",",
"r0",
"=",
"None",
",",
"lmax",
"=",
"None",
")",
":",
"if",
"lmax",
"is",
"None",
":",
"lmax",
"=",
"self",
".",
"lmax",
"clm",
"=",
"self",
".",
"pad",
"(",
"lmax",
")",
"if",
"... | 33.38 | 0.001746 |
def retry_until_not_none_or_limit_reached(method, limit, sleep_s=1,
                                          catch_exceptions=()):
  """Repeatedly calls `method` until it returns non-None or the limit is hit.

  Thin wrapper around `retry_until_valid_or_limit_reached` that supplies a
  "result is not None" validator.
  """
  def _is_not_none(result):
    return result is not None
  return retry_until_valid_or_limit_reached(
      method, limit, _is_not_none, sleep_s, catch_exceptions)
"def",
"retry_until_not_none_or_limit_reached",
"(",
"method",
",",
"limit",
",",
"sleep_s",
"=",
"1",
",",
"catch_exceptions",
"=",
"(",
")",
")",
":",
"return",
"retry_until_valid_or_limit_reached",
"(",
"method",
",",
"limit",
",",
"lambda",
"x",
":",
"x",
... | 65 | 0.009119 |
def dist_mlipns(src, tar, threshold=0.25, max_mismatches=2):
    """Return the MLIPNS distance between two strings.

    Convenience wrapper for :py:meth:`MLIPNS.dist`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    threshold : float
        A number [0, 1] indicating the maximum similarity score, below which
        the strings are considered 'similar' (0.25 by default)
    max_mismatches : int
        A number indicating the allowable number of mismatches to remove before
        declaring two strings not similar (2 by default)

    Returns
    -------
    float
        MLIPNS distance

    Examples
    --------
    >>> dist_mlipns('cat', 'hat')
    0.0
    >>> dist_mlipns('Niall', 'Neil')
    1.0
    >>> dist_mlipns('aluminum', 'Catalan')
    1.0
    >>> dist_mlipns('ATCG', 'TAGC')
    1.0
    """
    measure = MLIPNS()
    return measure.dist(src, tar, threshold, max_mismatches)
"def",
"dist_mlipns",
"(",
"src",
",",
"tar",
",",
"threshold",
"=",
"0.25",
",",
"max_mismatches",
"=",
"2",
")",
":",
"return",
"MLIPNS",
"(",
")",
".",
"dist",
"(",
"src",
",",
"tar",
",",
"threshold",
",",
"max_mismatches",
")"
] | 25.75 | 0.00104 |
def paramsarray(self):
    """All free model parameters as a 1-dimensional `numpy.ndarray`.

    You are allowed to update model parameters by direct assignment of
    this property.

    A copy of the cached array is returned so external mutation cannot
    bypass the setter's change detection.
    """
    # Local import: scipy's top-level `ndarray` alias is deprecated and
    # removed in modern SciPy; use NumPy directly instead.
    import numpy
    # Return copy of `_paramsarray` because setter checks if changed
    if self._paramsarray is not None:
        return self._paramsarray.copy()
    nparams = len(self._index_to_param)
    # `empty` is safe here: every slot is filled by the loop below.
    self._paramsarray = numpy.empty(shape=(nparams,), dtype='float')
    for (i, param) in self._index_to_param.items():
        if isinstance(param, str):
            # Scalar model attribute, referenced by name.
            self._paramsarray[i] = getattr(self.model, param)
        elif isinstance(param, tuple):
            # (attribute name, index) pair into a vector attribute.
            self._paramsarray[i] = getattr(self.model, param[0])[param[1]]
        else:
            raise ValueError("Invalid param type")
    return self._paramsarray.copy()
"def",
"paramsarray",
"(",
"self",
")",
":",
"# Return copy of `_paramsarray` because setter checks if changed",
"if",
"self",
".",
"_paramsarray",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_paramsarray",
".",
"copy",
"(",
")",
"nparams",
"=",
"len",
"(",
... | 47.333333 | 0.002301 |
def compute_exported_specifications(svc_ref):
    # type: (pelix.framework.ServiceReference) -> List[str]
    """
    Computes the list of specifications exported by the given service

    :param svc_ref: A ServiceReference
    :return: The list of exported specifications (or an empty list)
    """
    if svc_ref.get_property(pelix.remote.PROP_EXPORT_NONE):
        # The export of this service is explicitly forbidden, stop here
        return []
    # Service specifications
    specs = svc_ref.get_property(pelix.constants.OBJECTCLASS)
    # Exported specifications ("*" or unset means export everything)
    exported_specs = svc_ref.get_property(pelix.remote.PROP_EXPORTED_INTERFACES)
    if exported_specs and exported_specs != "*":
        # A set of specifications is exported, replace "objectClass"
        iterable_exports = pelix.utilities.to_iterable(exported_specs, False)
        all_exported_specs = [
            spec for spec in specs if spec in iterable_exports
        ]
    else:
        # Export everything
        all_exported_specs = pelix.utilities.to_iterable(specs)
    # Authorized and rejected specifications
    # NOTE: the allow-list (EXPORT_ONLY) takes precedence: when present,
    # the reject list (EXPORT_REJECT) is never consulted.
    export_only_specs = pelix.utilities.to_iterable(
        svc_ref.get_property(pelix.remote.PROP_EXPORT_ONLY), False
    )
    if export_only_specs:
        # Filter specifications (keep authorized specifications)
        return [
            spec for spec in all_exported_specs if spec in export_only_specs
        ]
    # Filter specifications (reject)
    rejected_specs = pelix.utilities.to_iterable(
        svc_ref.get_property(pelix.remote.PROP_EXPORT_REJECT), False
    )
    return [spec for spec in all_exported_specs if spec not in rejected_specs] | [
"def",
"compute_exported_specifications",
"(",
"svc_ref",
")",
":",
"# type: (pelix.framework.ServiceReference) -> List[str]",
"if",
"svc_ref",
".",
"get_property",
"(",
"pelix",
".",
"remote",
".",
"PROP_EXPORT_NONE",
")",
":",
"# The export of this service is explicitly forbi... | 36.886364 | 0.0012 |
def get_books_by_comment(self, comment_id):
    """Gets the list of ``Book`` objects mapped to a ``Comment``.

    arg: comment_id (osid.id.Id): ``Id`` of a ``Comment``
    return: (osid.commenting.BookList) - list of books
    raise: NotFound - ``comment_id`` is not found
    raise: NullArgument - ``comment_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_bins_by_resource
    provider_manager = self._get_provider_manager('COMMENTING', local=True)
    book_lookup = provider_manager.get_book_lookup_session(proxy=self._proxy)
    book_ids = self.get_book_ids_by_comment(comment_id)
    return book_lookup.get_books_by_ids(book_ids)
"def",
"get_books_by_comment",
"(",
"self",
",",
"comment_id",
")",
":",
"# Implemented from template for",
"# osid.resource.ResourceBinSession.get_bins_by_resource",
"mgr",
"=",
"self",
".",
"_get_provider_manager",
"(",
"'COMMENTING'",
",",
"local",
"=",
"True",
")",
"l... | 48.777778 | 0.002235 |
def keyEvent(self, key, down=1):
    """Send a key press or release event to the server.

    For most ordinary keys, the "keysym" is the same as the corresponding
    ASCII value.  Other common keys are shown in the KEY_ constants.

    :param key: keysym of the key to send.
    :param down: 1 for press, 0 for release.
    """
    # RFB KeyEvent: message type 4, down flag, 2 padding bytes, 4-byte keysym.
    message = pack("!BBxxI", 4, down, key)
    self.transport.write(message)
"def",
"keyEvent",
"(",
"self",
",",
"key",
",",
"down",
"=",
"1",
")",
":",
"self",
".",
"transport",
".",
"write",
"(",
"pack",
"(",
"\"!BBxxI\"",
",",
"4",
",",
"down",
",",
"key",
")",
")"
] | 61 | 0.012146 |
def memory_usage(method):
  """Decorator that logs memory usage before and after a method call.

  Args:
    method: the callable to wrap.

  Returns:
    A wrapped callable with identical behavior that additionally logs the
    current memory usage around each invocation.
  """
  import functools
  # BUG FIX: preserve the wrapped function's __name__/__doc__ so
  # introspection and logging of decorated methods keep working.
  @functools.wraps(method)
  def wrapper(*args, **kwargs):
    logging.info('Memory before method %s is %s.',
                 method.__name__, runtime.memory_usage().current())
    result = method(*args, **kwargs)
    logging.info('Memory after method %s is %s',
                 method.__name__, runtime.memory_usage().current())
    return result
  return wrapper
"def",
"memory_usage",
"(",
"method",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"logging",
".",
"info",
"(",
"'Memory before method %s is %s.'",
",",
"method",
".",
"__name__",
",",
"runtime",
".",
"memory_usage",
"("... | 40.8 | 0.009592 |
def main():
    """Entry point: configure logging, then create scheduling blocks."""
    # Configure the logger for this component.
    logger = logging.getLogger('SIP.EC.PCI.DB')
    stream_handler = logging.StreamHandler()
    log_format = '%(name)-20s | %(filename)-15s | %(levelname)-5s | %(message)s'
    stream_handler.setFormatter(logging.Formatter(log_format))
    logger.addHandler(stream_handler)
    logger.setLevel(os.getenv('SIP_PCI_LOG_LEVEL', 'INFO'))
    # Number of Scheduling Block Instances to generate (default: 3).
    if len(sys.argv) == 2:
        num_blocks = int(sys.argv[1])
    else:
        num_blocks = 3
    add_scheduling_blocks(num_blocks)
"def",
"main",
"(",
")",
":",
"# Configure the logger",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"'SIP.EC.PCI.DB'",
")",
"handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"handler",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"'... | 38.538462 | 0.001949 |
def tropocollagen(
        cls, aa=28, major_radius=5.0, major_pitch=85.0, auto_build=True):
    """Creates a model of a collagen triple helix.

    Parameters
    ----------
    aa : int, optional
        Number of amino acids per minor helix.
    major_radius : float, optional
        Radius of super helix.
    major_pitch : float, optional
        Pitch of super helix.
    auto_build : bool, optional
        If `True`, the model will be built as part of instantiation.
    """
    model = cls.from_parameters(
        n=3, aa=aa, major_radius=major_radius, major_pitch=major_pitch,
        phi_c_alpha=0.0, minor_helix_type='collagen', auto_build=False)
    model.major_handedness = ['r', 'r', 'r']
    # Stagger the three strands by one rise-per-residue each; default
    # z-shifts are taken from the rise per residue of a collagen helix.
    rise = _helix_parameters['collagen'][1]
    model.z_shifts = [-2 * rise, -rise, 0.0]
    model.minor_repeats = [None, None, None]
    if auto_build:
        model.build()
    return model
"def",
"tropocollagen",
"(",
"cls",
",",
"aa",
"=",
"28",
",",
"major_radius",
"=",
"5.0",
",",
"major_pitch",
"=",
"85.0",
",",
"auto_build",
"=",
"True",
")",
":",
"instance",
"=",
"cls",
".",
"from_parameters",
"(",
"n",
"=",
"3",
",",
"aa",
"=",
... | 40.961538 | 0.001835 |
def SayString(self, text, delay=0):
        """Queue a SayString command that enters the given text.

        :param text: the text you want to enter.
        :param delay: optional delay applied before the command is added.
        """
        self._delay(delay)
        self.add(Command("SayString", 'SayString "%s"' % text))
"def",
"SayString",
"(",
"self",
",",
"text",
",",
"delay",
"=",
"0",
")",
":",
"self",
".",
"_delay",
"(",
"delay",
")",
"cmd",
"=",
"Command",
"(",
"\"SayString\"",
",",
"'SayString \"%s\"'",
"%",
"text",
")",
"self",
".",
"add",
"(",
"cmd",
")"
] | 29.375 | 0.012397 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.