text stringlengths 75 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 0.18 |
|---|---|---|---|
def parse_api_datetime(value):
""" parse a datetime returned from the salesforce API.
in python 3 we should just use a strptime %z, but until then we're just going
to assert that its a fixed offset of +0000 since thats the observed behavior. getting
python 2 to support fixed offset parsing is too complicated for what we need imo."""
dt = datetime.strptime(value[0:DATETIME_LEN], API_DATE_FORMAT)
offset_str = value[DATETIME_LEN:]
assert offset_str in ["+0000", "Z"], "The Salesforce API returned a weird timezone."
return dt | [
"def",
"parse_api_datetime",
"(",
"value",
")",
":",
"dt",
"=",
"datetime",
".",
"strptime",
"(",
"value",
"[",
"0",
":",
"DATETIME_LEN",
"]",
",",
"API_DATE_FORMAT",
")",
"offset_str",
"=",
"value",
"[",
"DATETIME_LEN",
":",
"]",
"assert",
"offset_str",
"... | 55 | 0.008945 |
def get_node_sum(self, age=None):
"""Get sum of all branches in the tree.
Returns:
int: The sum of all nodes grown until the age.
"""
if age is None:
age = self.age
return age if self.comp == 1 else int((pow(self.comp, age+1) - 1) / (self.comp - 1)) | [
"def",
"get_node_sum",
"(",
"self",
",",
"age",
"=",
"None",
")",
":",
"if",
"age",
"is",
"None",
":",
"age",
"=",
"self",
".",
"age",
"return",
"age",
"if",
"self",
".",
"comp",
"==",
"1",
"else",
"int",
"(",
"(",
"pow",
"(",
"self",
".",
"com... | 30.6 | 0.009524 |
def check_bipole(inp, name):
r"""Check di-/bipole parameters.
This check-function is called from one of the modelling routines in
:mod:`model`. Consult these modelling routines for a detailed description
of the input parameters.
Parameters
----------
inp : list of floats or arrays
Coordinates of inp (m):
[dipole-x, dipole-y, dipole-z, azimuth, dip] or.
[bipole-x0, bipole-x1, bipole-y0, bipole-y1, bipole-z0, bipole-z1].
name : str, {'src', 'rec'}
Pole-type.
Returns
-------
inp : list
As input, checked for type and length.
ninp : int
Number of inp.
ninpz : int
Number of inp depths (ninpz is either 1 or ninp).
isdipole : bool
True if inp is a dipole.
"""
def chck_dipole(inp, name):
r"""Check inp for shape and type."""
# Check x
inp[0] = _check_var(inp[0], float, 1, name+'-x')
# Check y and ensure it has same dimension as x
inp[1] = _check_var(inp[1], float, 1, name+'-y', inp[0].shape)
# Check z
inp[2] = _check_var(inp[2], float, 1, name+'-z', (1,), inp[0].shape)
# Check if all depths are the same, if so replace by one value
if np.all(np.isclose(inp[2]-inp[2][0], 0)):
inp[2] = np.array([inp[2][0]])
return inp
# Check length of inp.
narr = len(inp)
if narr not in [5, 6]:
print('* ERROR :: Parameter ' + name + ' has wrong length! : ' +
str(narr) + ' instead of 5 (dipole) or 6 (bipole).')
raise ValueError(name)
# Flag if it is a dipole or not
isdipole = narr == 5
if isdipole: # dipole checks
# Check x, y, and z
inp = chck_dipole(inp, name)
# Check azimuth and dip (must be floats, otherwise use ``bipole``)
inp[3] = _check_var(inp[3], float, 1, 'azimuth', (1,))
inp[4] = _check_var(inp[4], float, 1, 'dip', (1,))
# How many different depths
inpz = inp[2].size
else: # bipole checks
# Check each pole for x, y, and z
inp0 = chck_dipole(inp[::2], name+'-1') # [x0, y0, z0]
inp1 = chck_dipole(inp[1::2], name+'-2') # [x1, y1, z1]
# If one pole has a single depth, but the other has various
# depths, we have to repeat the single depth, as we will have
# to loop over them.
if inp0[2].size != inp1[2].size:
if inp0[2].size == 1:
inp0[2] = np.repeat(inp0[2], inp1[2].size)
else:
inp1[2] = np.repeat(inp1[2], inp0[2].size)
# Check if inp is a dipole instead of a bipole
# (This is a problem, as we would could not define the angles then.)
if not np.all((inp0[0] != inp1[0]) + (inp0[1] != inp1[1]) +
(inp0[2] != inp1[2])):
print("* ERROR :: At least one of <" + name + "> is a point " +
"dipole, use the format [x, y, z, azimuth, dip] instead " +
"of [x0, x1, y0, y1, z0, z1].")
raise ValueError('Bipole: bipole-' + name)
# Collect elements
inp = [inp0[0], inp1[0], inp0[1], inp1[1], inp0[2], inp1[2]]
# How many different depths
inpz = inp[4].size
return inp, inp[0].size, inpz, isdipole | [
"def",
"check_bipole",
"(",
"inp",
",",
"name",
")",
":",
"def",
"chck_dipole",
"(",
"inp",
",",
"name",
")",
":",
"r\"\"\"Check inp for shape and type.\"\"\"",
"# Check x",
"inp",
"[",
"0",
"]",
"=",
"_check_var",
"(",
"inp",
"[",
"0",
"]",
",",
"float",
... | 31.950495 | 0.000301 |
def annotateTree(bT, fn):
"""
annotate a tree in an external array using the given function
"""
l = [None]*bT.traversalID.midEnd
def fn2(bT):
l[bT.traversalID.mid] = fn(bT)
if bT.internal:
fn2(bT.left)
fn2(bT.right)
fn2(bT)
return l | [
"def",
"annotateTree",
"(",
"bT",
",",
"fn",
")",
":",
"l",
"=",
"[",
"None",
"]",
"*",
"bT",
".",
"traversalID",
".",
"midEnd",
"def",
"fn2",
"(",
"bT",
")",
":",
"l",
"[",
"bT",
".",
"traversalID",
".",
"mid",
"]",
"=",
"fn",
"(",
"bT",
")"... | 24.083333 | 0.01 |
def update(self, res, pk, depth=1, since=None):
"""
Try to sync an object to the local database, in case of failure
where a referenced object is not found, attempt to fetch said
object from the REST api
"""
fetch = lambda: self._fetcher.fetch_latest(res, pk, 1, since=since)
self._update(res, fetch, depth) | [
"def",
"update",
"(",
"self",
",",
"res",
",",
"pk",
",",
"depth",
"=",
"1",
",",
"since",
"=",
"None",
")",
":",
"fetch",
"=",
"lambda",
":",
"self",
".",
"_fetcher",
".",
"fetch_latest",
"(",
"res",
",",
"pk",
",",
"1",
",",
"since",
"=",
"si... | 44.375 | 0.008287 |
def Point2HexColor(a, lfrac, tfrac):
"""
Return web-safe hex triplets.
"""
[H,S,V] = [math.floor(360 * a), lfrac, tfrac]
RGB = hsvToRGB(H, S, V)
H = [hex(int(math.floor(255 * x))) for x in RGB]
HEX = [a[a.find('x') + 1:] for a in H]
HEX = ['0' + h if len(h) == 1 else h for h in HEX]
return '#' + ''.join(HEX) | [
"def",
"Point2HexColor",
"(",
"a",
",",
"lfrac",
",",
"tfrac",
")",
":",
"[",
"H",
",",
"S",
",",
"V",
"]",
"=",
"[",
"math",
".",
"floor",
"(",
"360",
"*",
"a",
")",
",",
"lfrac",
",",
"tfrac",
"]",
"RGB",
"=",
"hsvToRGB",
"(",
"H",
",",
"... | 21 | 0.008547 |
def untlpydict2dcformatteddict(untl_dict, **kwargs):
"""Convert a UNTL data dictionary to a formatted DC data dictionary."""
ark = kwargs.get('ark', None)
domain_name = kwargs.get('domain_name', None)
scheme = kwargs.get('scheme', 'http')
resolve_values = kwargs.get('resolve_values', None)
resolve_urls = kwargs.get('resolve_urls', None)
verbose_vocabularies = kwargs.get('verbose_vocabularies', None)
# Get the UNTL object.
untl_py = untldict2py(untl_dict)
# Convert it to a DC object.
dc_py = untlpy2dcpy(
untl_py,
ark=ark,
domain_name=domain_name,
resolve_values=resolve_values,
resolve_urls=resolve_urls,
verbose_vocabularies=verbose_vocabularies,
scheme=scheme
)
# Return a formatted DC dictionary.
return dcpy2formatteddcdict(dc_py) | [
"def",
"untlpydict2dcformatteddict",
"(",
"untl_dict",
",",
"*",
"*",
"kwargs",
")",
":",
"ark",
"=",
"kwargs",
".",
"get",
"(",
"'ark'",
",",
"None",
")",
"domain_name",
"=",
"kwargs",
".",
"get",
"(",
"'domain_name'",
",",
"None",
")",
"scheme",
"=",
... | 37.727273 | 0.001175 |
def discover_files(base_path, sub_path='', ext='', trim_base_path=False):
"""Discovers all files with certain extension in given paths."""
file_list = []
for root, dirs, files in walk(path.join(base_path, sub_path)):
if trim_base_path:
root = path.relpath(root, base_path)
file_list.extend([path.join(root, file_name)
for file_name in files
if file_name.endswith(ext)])
return sorted(file_list) | [
"def",
"discover_files",
"(",
"base_path",
",",
"sub_path",
"=",
"''",
",",
"ext",
"=",
"''",
",",
"trim_base_path",
"=",
"False",
")",
":",
"file_list",
"=",
"[",
"]",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"walk",
"(",
"path",
".",
"join",... | 48.1 | 0.002041 |
def set_item(filename, item):
"""
Save entry to JSON file
"""
with atomic_write(os.fsencode(str(filename))) as temp_file:
with open(os.fsencode(str(filename))) as products_file:
# load the JSON data into memory
products_data = json.load(products_file)
# check if UUID already exists
uuid_list = [i for i in filter(
lambda z: z["uuid"] == str(item["uuid"]), products_data)]
if len(uuid_list) == 0:
# add the new item to the JSON file
products_data.append(item)
# save the new JSON to the temp file
json.dump(products_data, temp_file)
return True
return None | [
"def",
"set_item",
"(",
"filename",
",",
"item",
")",
":",
"with",
"atomic_write",
"(",
"os",
".",
"fsencode",
"(",
"str",
"(",
"filename",
")",
")",
")",
"as",
"temp_file",
":",
"with",
"open",
"(",
"os",
".",
"fsencode",
"(",
"str",
"(",
"filename"... | 38.388889 | 0.001412 |
def parse_doc(obj: dict) -> BioCDocument:
"""Deserialize a dict obj to a BioCDocument object"""
doc = BioCDocument()
doc.id = obj['id']
doc.infons = obj['infons']
for passage in obj['passages']:
doc.add_passage(parse_passage(passage))
for annotation in obj['annotations']:
doc.add_annotation(parse_annotation(annotation))
for relation in obj['relations']:
doc.add_relation(parse_relation(relation))
return doc | [
"def",
"parse_doc",
"(",
"obj",
":",
"dict",
")",
"->",
"BioCDocument",
":",
"doc",
"=",
"BioCDocument",
"(",
")",
"doc",
".",
"id",
"=",
"obj",
"[",
"'id'",
"]",
"doc",
".",
"infons",
"=",
"obj",
"[",
"'infons'",
"]",
"for",
"passage",
"in",
"obj"... | 38.75 | 0.002101 |
def printable_str(text, keep_newlines=False):
'''Escape any control or non-ASCII characters from string.
This function is intended for use with strings from an untrusted
source such as writing to a console or writing to logs. It is
designed to prevent things like ANSI escape sequences from
showing.
Use :func:`repr` or :func:`ascii` instead for things such as
Exception messages.
'''
if isinstance(text, str):
new_text = ascii(text)[1:-1]
else:
new_text = ascii(text)
if keep_newlines:
new_text = new_text.replace('\\r', '\r').replace('\\n', '\n')
return new_text | [
"def",
"printable_str",
"(",
"text",
",",
"keep_newlines",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"str",
")",
":",
"new_text",
"=",
"ascii",
"(",
"text",
")",
"[",
"1",
":",
"-",
"1",
"]",
"else",
":",
"new_text",
"=",
"ascii... | 31.05 | 0.001563 |
def get_type_data(name):
"""Return dictionary representation of type.
Can be used to initialize primordium.type.primitives.Type
"""
name = name.upper()
try:
return {
'authority': 'okapia.net',
'namespace': 'TextFormats',
'identifier': name,
'domain': 'DisplayText Formats',
'display_name': FORMAT_TYPES[name] + ' Format Type',
'display_label': FORMAT_TYPES[name],
'description': ('The display text format type for the ' +
FORMAT_TYPES[name] + ' format.')
}
except KeyError:
raise NotFound('Format Type:' + name) | [
"def",
"get_type_data",
"(",
"name",
")",
":",
"name",
"=",
"name",
".",
"upper",
"(",
")",
"try",
":",
"return",
"{",
"'authority'",
":",
"'okapia.net'",
",",
"'namespace'",
":",
"'TextFormats'",
",",
"'identifier'",
":",
"name",
",",
"'domain'",
":",
"... | 32.7 | 0.001486 |
def name(self):
"""The process name."""
name = self._platform_impl.get_process_name()
if os.name == 'posix':
# On UNIX the name gets truncated to the first 15 characters.
# If it matches the first part of the cmdline we return that
# one instead because it's usually more explicative.
# Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
try:
cmdline = self.cmdline
except AccessDenied:
pass
else:
if cmdline:
extended_name = os.path.basename(cmdline[0])
if extended_name.startswith(name):
name = extended_name
# XXX - perhaps needs refactoring
self._platform_impl._process_name = name
return name | [
"def",
"name",
"(",
"self",
")",
":",
"name",
"=",
"self",
".",
"_platform_impl",
".",
"get_process_name",
"(",
")",
"if",
"os",
".",
"name",
"==",
"'posix'",
":",
"# On UNIX the name gets truncated to the first 15 characters.",
"# If it matches the first part of the cm... | 41.5 | 0.002356 |
def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs):
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
convert : bool, default True
Whether to convert negative indices into positive ones.
For example, ``-1`` would map to the ``len(axis) - 1``.
The conversions are similar to the behavior of indexing a
regular Python list.
.. deprecated:: 0.21.0
In the future, negative indices will always be converted.
is_copy : bool, default True
Whether to return a copy of the original object or not.
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
if convert is not None:
msg = ("The 'convert' parameter is deprecated "
"and will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
nv.validate_take(tuple(), kwargs)
return self._take(indices, axis=axis, is_copy=is_copy) | [
"def",
"take",
"(",
"self",
",",
"indices",
",",
"axis",
"=",
"0",
",",
"convert",
"=",
"None",
",",
"is_copy",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"convert",
"is",
"not",
"None",
":",
"msg",
"=",
"(",
"\"The 'convert' parameter is ... | 38.582418 | 0.000555 |
def to_array(self, itaper, normalization='4pi', csphase=1):
"""
Return the spherical harmonic coefficients of taper i as a numpy
array.
Usage
-----
coeffs = x.to_array(itaper, [normalization, csphase])
Returns
-------
coeffs : ndarray, shape (2, lwin+1, lwin+11)
3-D numpy ndarray of the spherical harmonic coefficients of the
window.
Parameters
----------
itaper : int
Taper number, where itaper=0 is the best concentrated.
normalization : str, optional, default = '4pi'
Normalization of the output coefficients: '4pi', 'ortho' or
'schmidt' for geodesy 4pi normalized, orthonormalized, or Schmidt
semi-normalized coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
"""
if type(normalization) != str:
raise ValueError('normalization must be a string. ' +
'Input type was {:s}'
.format(str(type(normalization))))
if normalization.lower() not in ('4pi', 'ortho', 'schmidt'):
raise ValueError(
"normalization must be '4pi', 'ortho' " +
"or 'schmidt'. Provided value was {:s}"
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be 1 or -1. Input value was {:s}"
.format(repr(csphase))
)
return self._to_array(
itaper, normalization=normalization.lower(), csphase=csphase) | [
"def",
"to_array",
"(",
"self",
",",
"itaper",
",",
"normalization",
"=",
"'4pi'",
",",
"csphase",
"=",
"1",
")",
":",
"if",
"type",
"(",
"normalization",
")",
"!=",
"str",
":",
"raise",
"ValueError",
"(",
"'normalization must be a string. '",
"+",
"'Input t... | 37.804348 | 0.001121 |
def ice_refractive(file):
"""
Interpolator for the refractive indices of ice.
Inputs:
File to read the refractive index lookup table from.
This is supplied as "ice_refr.dat", retrieved from
http://www.atmos.washington.edu/ice_optical_constants/
Returns:
A callable object that takes as parameters the wavelength [mm]
and the snow density [g/cm^3].
"""
D = np.loadtxt(file)
log_wl = np.log10(D[:,0]/1000)
re = D[:,1]
log_im = np.log10(D[:,2])
iobj_re = interpolate.interp1d(log_wl, re)
iobj_log_im = interpolate.interp1d(log_wl, log_im)
def ref(wl, snow_density):
lwl = np.log10(wl)
try:
len(lwl)
except TypeError:
mi_sqr = complex(iobj_re(lwl), 10**iobj_log_im(lwl))**2
else:
mi_sqr = np.array([complex(a,b) for (a,b) in zip(iobj_re(lwl),
10**iobj_log_im(lwl))])**2
c = (mi_sqr-1)/(mi_sqr+2) * snow_density/ice_density
return np.sqrt( (1+2*c) / (1-c) )
return ref | [
"def",
"ice_refractive",
"(",
"file",
")",
":",
"D",
"=",
"np",
".",
"loadtxt",
"(",
"file",
")",
"log_wl",
"=",
"np",
".",
"log10",
"(",
"D",
"[",
":",
",",
"0",
"]",
"/",
"1000",
")",
"re",
"=",
"D",
"[",
":",
",",
"1",
"]",
"log_im",
"="... | 28.416667 | 0.009452 |
def _match_magic(self, full_path):
"""Return the first magic that matches this path or None."""
for magic in self.magics:
if magic.matches(full_path):
return magic | [
"def",
"_match_magic",
"(",
"self",
",",
"full_path",
")",
":",
"for",
"magic",
"in",
"self",
".",
"magics",
":",
"if",
"magic",
".",
"matches",
"(",
"full_path",
")",
":",
"return",
"magic"
] | 40.6 | 0.009662 |
def create(cls, photo, title, description=''):
"""Create a new photoset.
photo - primary photo
"""
if not isinstance(photo, Photo):
raise TypeError, "Photo expected"
method = 'flickr.photosets.create'
data = _dopost(method, auth=True, title=title,\
description=description,\
primary_photo_id=photo.id)
set = Photoset(data.rsp.photoset.id, title, Photo(photo.id),
photos=1, description=description)
return set | [
"def",
"create",
"(",
"cls",
",",
"photo",
",",
"title",
",",
"description",
"=",
"''",
")",
":",
"if",
"not",
"isinstance",
"(",
"photo",
",",
"Photo",
")",
":",
"raise",
"TypeError",
",",
"\"Photo expected\"",
"method",
"=",
"'flickr.photosets.create'",
... | 34.8125 | 0.012238 |
def _any(self, memory, addr, **kwargs):
"""
Gets any solution of an address.
"""
return memory.state.solver.eval(addr, exact=kwargs.pop('exact', self._exact), **kwargs) | [
"def",
"_any",
"(",
"self",
",",
"memory",
",",
"addr",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"memory",
".",
"state",
".",
"solver",
".",
"eval",
"(",
"addr",
",",
"exact",
"=",
"kwargs",
".",
"pop",
"(",
"'exact'",
",",
"self",
".",
"_exa... | 39.2 | 0.015 |
def dim_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0
\frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m
(\omega \tau)^c \frac{\pi}{2} cos(\frac{\pi}{2}}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m
(\omega \tau)^c cos(\frac{c \pi}{2}) \right] \cdot \left[ -2 ln(\omega
\tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c
\frac{\pi}{2} cos(\frac{c \pi}{2}) \right] + \left[2 ln(\omega \tau)
(\omega \tau)^{2 c}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c
\pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\
np.sin(self.ang)
nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang)
term1 = (nom1a + nom1b) / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result | [
"def",
"dim_dc",
"(",
"self",
",",
"pars",
")",
":",
"self",
".",
"_set_parameters",
"(",
"pars",
")",
"# term1",
"nom1a",
"=",
"-",
"self",
".",
"m",
"*",
"np",
".",
"log",
"(",
"self",
".",
"w",
"*",
"self",
".",
"tau",
")",
"*",
"self",
".",... | 45.724138 | 0.001477 |
def load_configuration():
"""Load the configuration"""
(belbio_conf_fp, belbio_secrets_fp) = get_belbio_conf_files()
log.info(f"Using conf: {belbio_conf_fp} and secrets files: {belbio_secrets_fp} ")
config = {}
if belbio_conf_fp:
with open(belbio_conf_fp, "r") as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
config["source_files"] = {}
config["source_files"]["conf"] = belbio_conf_fp
if belbio_secrets_fp:
with open(belbio_secrets_fp, "r") as f:
secrets = yaml.load(f, Loader=yaml.SafeLoader)
config["secrets"] = copy.deepcopy(secrets)
if "source_files" in config:
config["source_files"]["secrets"] = belbio_secrets_fp
get_versions(config)
# TODO - needs to be completed
# add_environment_vars(config)
return config | [
"def",
"load_configuration",
"(",
")",
":",
"(",
"belbio_conf_fp",
",",
"belbio_secrets_fp",
")",
"=",
"get_belbio_conf_files",
"(",
")",
"log",
".",
"info",
"(",
"f\"Using conf: {belbio_conf_fp} and secrets files: {belbio_secrets_fp} \"",
")",
"config",
"=",
"{",
"}",
... | 32.5 | 0.002299 |
def export_pdf(self, filename):
"""
Export the report in PDF format. Specify a path for which
to save the file, including the trailing filename.
:param str filename: path including filename
:return: None
"""
self.make_request(
raw_result=True,
resource='export',
filename=filename,
headers = {'accept': 'application/pdf'}) | [
"def",
"export_pdf",
"(",
"self",
",",
"filename",
")",
":",
"self",
".",
"make_request",
"(",
"raw_result",
"=",
"True",
",",
"resource",
"=",
"'export'",
",",
"filename",
"=",
"filename",
",",
"headers",
"=",
"{",
"'accept'",
":",
"'application/pdf'",
"}... | 32.615385 | 0.013761 |
def apply_grad_processors(opt, gradprocs):
"""
Wrapper around optimizers to apply gradient processors.
Args:
opt (tf.train.Optimizer):
gradprocs (list[GradientProcessor]): gradient processors to add to the
optimizer.
Returns:
a :class:`tf.train.Optimizer` instance which runs the gradient
processors before updating the variables.
"""
assert isinstance(gradprocs, (list, tuple)), gradprocs
for gp in gradprocs:
assert isinstance(gp, GradientProcessor), gp
class _ApplyGradientProcessor(ProxyOptimizer):
def __init__(self, opt, gradprocs):
self._gradprocs = gradprocs[:]
super(_ApplyGradientProcessor, self).__init__(opt)
def apply_gradients(self, grads_and_vars,
global_step=None, name=None):
g = self._apply(grads_and_vars)
return self._opt.apply_gradients(g, global_step, name)
def _apply(self, g):
for proc in self._gradprocs:
g = proc.process(g)
return g
return _ApplyGradientProcessor(opt, gradprocs) | [
"def",
"apply_grad_processors",
"(",
"opt",
",",
"gradprocs",
")",
":",
"assert",
"isinstance",
"(",
"gradprocs",
",",
"(",
"list",
",",
"tuple",
")",
")",
",",
"gradprocs",
"for",
"gp",
"in",
"gradprocs",
":",
"assert",
"isinstance",
"(",
"gp",
",",
"Gr... | 33.545455 | 0.000878 |
def _get_broker_offsets(self, instance, topics):
"""
Fetch highwater offsets for each topic/partition from Kafka cluster.
Do this for all partitions in the cluster because even if it has no
consumers, we may want to measure whether producers are successfully
producing. No need to limit this for performance because fetching broker
offsets from Kafka is a relatively inexpensive operation.
Sends one OffsetRequest per broker to get offsets for all partitions
where that broker is the leader:
https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetAPI(AKAListOffset)
Can we cleanup connections on agent restart?
Brokers before 0.9 - accumulate stale connections on restarts.
In 0.9 Kafka added connections.max.idle.ms
https://issues.apache.org/jira/browse/KAFKA-1282
"""
# Connect to Kafka
highwater_offsets = {}
topic_partitions_without_a_leader = []
topics_to_fetch = defaultdict(set)
cli = self._get_kafka_client(instance)
for topic, partitions in iteritems(topics):
# if no partitions are provided
# we're falling back to all available partitions (?)
if len(partitions) == 0:
partitions = cli.cluster.available_partitions_for_topic(topic)
topics_to_fetch[topic].update(partitions)
leader_tp = defaultdict(lambda: defaultdict(set))
for topic, partitions in iteritems(topics_to_fetch):
for partition in partitions:
partition_leader = cli.cluster.leader_for_partition(TopicPartition(topic, partition))
if partition_leader is not None and partition_leader >= 0:
leader_tp[partition_leader][topic].add(partition)
max_offsets = 1
for node_id, tps in iteritems(leader_tp):
# Construct the OffsetRequest
request = OffsetRequest[0](
replica_id=-1,
topics=[
(topic, [(partition, OffsetResetStrategy.LATEST, max_offsets) for partition in partitions])
for topic, partitions in iteritems(tps)
],
)
response = self._make_blocking_req(cli, request, node_id=node_id)
offsets, unled = self._process_highwater_offsets(response)
highwater_offsets.update(offsets)
topic_partitions_without_a_leader.extend(unled)
return highwater_offsets, list(set(topic_partitions_without_a_leader)) | [
"def",
"_get_broker_offsets",
"(",
"self",
",",
"instance",
",",
"topics",
")",
":",
"# Connect to Kafka",
"highwater_offsets",
"=",
"{",
"}",
"topic_partitions_without_a_leader",
"=",
"[",
"]",
"topics_to_fetch",
"=",
"defaultdict",
"(",
"set",
")",
"cli",
"=",
... | 45.982143 | 0.001901 |
def kong_61_2007():
r"""Kong 61 pt Hankel filter, as published in [Kong07]_.
Taken from file ``FilterModules.f90`` provided with 1DCSEM_.
License: `Apache License, Version 2.0,
<http://www.apache.org/licenses/LICENSE-2.0>`_.
"""
dlf = DigitalFilter('Kong 61', 'kong_61_2007')
dlf.base = np.array([
2.3517745856009100e-02, 2.6649097336355482e-02,
3.0197383422318501e-02, 3.4218118311666032e-02,
3.8774207831722009e-02, 4.3936933623407420e-02,
4.9787068367863938e-02, 5.6416139503777350e-02,
6.3927861206707570e-02, 7.2439757034251456e-02,
8.2084998623898800e-02, 9.3014489210663506e-02,
1.0539922456186430e-01, 1.1943296826671961e-01,
1.3533528323661270e-01, 1.5335496684492850e-01,
1.7377394345044520e-01, 1.9691167520419400e-01,
2.2313016014842979e-01, 2.5283959580474641e-01,
2.8650479686019009e-01, 3.2465246735834979e-01,
3.6787944117144239e-01, 4.1686201967850839e-01,
4.7236655274101469e-01, 5.3526142851899028e-01,
6.0653065971263342e-01, 6.8728927879097224e-01,
7.7880078307140488e-01, 8.8249690258459546e-01,
1.0000000000000000e+00, 1.1331484530668261e+00,
1.2840254166877421e+00, 1.4549914146182010e+00,
1.6487212707001280e+00, 1.8682459574322221e+00,
2.1170000166126748e+00, 2.3988752939670981e+00,
2.7182818284590451e+00, 3.0802168489180310e+00,
3.4903429574618419e+00, 3.9550767229205772e+00,
4.4816890703380636e+00, 5.0784190371800806e+00,
5.7546026760057307e+00, 6.5208191203301116e+00,
7.3890560989306504e+00, 8.3728974881272649e+00,
9.4877358363585262e+00, 1.0751013186076360e+01,
1.2182493960703470e+01, 1.3804574186067100e+01,
1.5642631884188170e+01, 1.7725424121461639e+01,
2.0085536923187671e+01, 2.2759895093526730e+01,
2.5790339917193059e+01, 2.9224283781234941e+01,
3.3115451958692312e+01, 3.7524723159601002e+01,
4.2521082000062783e+01])
dlf.factor = np.array([1.1331484530668261])
dlf.j0 = np.array([
1.4463210615326699e+02, -1.1066222143752420e+03,
3.7030010025325978e+03, -6.8968188464424520e+03,
7.1663544112656937e+03, -2.4507884783377681e+03,
-4.0166567754046082e+03, 6.8623845298546094e+03,
-5.0013321011775661e+03, 2.1291291365196648e+03,
-1.3845222435542289e+03, 2.1661554291595580e+03,
-2.2260393789657141e+03, 8.0317156013986391e+02,
1.0142221718890841e+03, -1.9350455051432630e+03,
1.6601169447226580e+03, -7.5159684285420133e+02,
-9.0315984178183285e+01, 5.0705574889546148e+02,
-5.1207646422722519e+02, 2.9722959494490038e+02,
-5.0248319908072993e+01, -1.2290725861955920e+02,
1.9695244755899429e+02, -1.9175679966946601e+02,
1.4211755630338590e+02, -7.7463216543224149e+01,
1.7638009334931201e+01, 2.8855056499202671e+01,
-5.9225643887809561e+01, 7.5987941373668960e+01,
-8.1687962781233580e+01, 8.0599209238447102e+01,
-7.4895905328771619e+01, 6.7516291538794434e+01,
-5.9325033647358048e+01, 5.1617042242841528e+01,
-4.4664967446820263e+01, 3.8366152052928278e+01,
-3.3308787868993100e+01, 2.8278671651033459e+01,
-2.4505863388620480e+01, 2.0469632532079750e+01,
-1.7074034940700429e+01, 1.4206119215530070e+01,
-1.0904435643084650e+01, 8.7518389425802283e+00,
-6.7721665239085622e+00, 4.5096884588095891e+00,
-3.2704247166629590e+00, 2.6827195063720430e+00,
-1.8406031821386459e+00, 9.1586697140412443e-01,
-3.2436011485890798e-01, 8.0675176189581893e-02,
-1.2881307195759690e-02, 7.0489137468452920e-04,
2.3846917590855061e-04, -6.9102205995825531e-05,
6.7792635718095777e-06])
dlf.j1 = np.array([
4.6440396425864918e+01, -4.5034239857914162e+02,
1.7723440076223640e+03, -3.7559735516994660e+03,
4.4736494009764137e+03, -2.2476603569606068e+03,
-1.5219842155931799e+03, 3.4904608559273802e+03,
-2.4814243247472318e+03, 5.7328164634108396e+02,
5.3132044837659631e-01, 6.8895205008006235e+02,
-1.2012013872160269e+03, 7.9679138423597340e+02,
4.9874460187939818e+01, -5.6367338332457007e+02,
4.7971936503711203e+02, -5.8979702298044558e+01,
-3.1935800954986922e+02, 4.5762551999442371e+02,
-3.7239927283248380e+02, 1.8255852885279569e+02,
-2.3504740340815669e-01, -1.1588151583545380e+02,
1.5740956677133170e+02, -1.4334746114883359e+02,
9.9857411013284818e+01, -4.8246322019171487e+01,
2.0371404343057380e+00, 3.3003938094974323e+01,
-5.5476151884197712e+01, 6.7354852323852583e+01,
-7.0735403363284121e+01, 6.8872932663164747e+01,
-6.3272750944993042e+01, 5.6501568721817442e+01,
-4.8706577819918110e+01, 4.1737211284663481e+01,
-3.4776621242200903e+01, 2.9161717578906430e+01,
-2.3886749056000909e+01, 1.9554007583544220e+01,
-1.5966397353366460e+01, 1.2429310210239199e+01,
-1.0139180791868180e+01, 7.4716493393871861e+00,
-5.5509479014742613e+00, 4.3380799768234208e+00,
-2.5911516181746550e+00, 1.6300524630626780e+00,
-1.4041567266387460e+00, 7.5225141726873213e-01,
4.6808777208492733e-02, -3.6630197849601159e-01,
2.8948389902792782e-01, -1.3705521898064801e-01,
4.6292091649913013e-02, -1.1721281347435180e-02,
2.2002397354029149e-03, -2.8146036357227600e-04,
1.8788896009128770e-05])
return dlf | [
"def",
"kong_61_2007",
"(",
")",
":",
"dlf",
"=",
"DigitalFilter",
"(",
"'Kong 61'",
",",
"'kong_61_2007'",
")",
"dlf",
".",
"base",
"=",
"np",
".",
"array",
"(",
"[",
"2.3517745856009100e-02",
",",
"2.6649097336355482e-02",
",",
"3.0197383422318501e-02",
",",
... | 51.77193 | 0.000166 |
def _generate_tokens(pat: GenericAny, text: str) -> Iterator[Token]:
"""Generate a sequence of tokens from `text` that match `pat`
Parameters
----------
pat : compiled regex
The pattern to use for tokenization
text : str
The text to tokenize
"""
rules = _TYPE_RULES
keys = _TYPE_KEYS
groupindex = pat.groupindex
scanner = pat.scanner(text)
for m in iter(scanner.match, None):
lastgroup = m.lastgroup
func = rules[keys[groupindex[lastgroup] - 1]]
if func is not None:
yield func(m.group(lastgroup)) | [
"def",
"_generate_tokens",
"(",
"pat",
":",
"GenericAny",
",",
"text",
":",
"str",
")",
"->",
"Iterator",
"[",
"Token",
"]",
":",
"rules",
"=",
"_TYPE_RULES",
"keys",
"=",
"_TYPE_KEYS",
"groupindex",
"=",
"pat",
".",
"groupindex",
"scanner",
"=",
"pat",
... | 28.8 | 0.001681 |
def delete_item(self, item):
        '''Remove ``item`` from the db, severing every relation it takes part in.'''
        # Drop outgoing relations (item -> dst).
        for rel, target in self.relations_of(item, True):
            self.delete_relation(item, rel, target)
        # Drop incoming relations (src -> item).
        for source, rel in self.relations_to(item, True):
            self.delete_relation(source, rel, item)
        key = self._item_hash(item)
        if item in self:
            self.nodes[key].clear()
            del self.nodes[key]
"def",
"delete_item",
"(",
"self",
",",
"item",
")",
":",
"for",
"relation",
",",
"dst",
"in",
"self",
".",
"relations_of",
"(",
"item",
",",
"True",
")",
":",
"self",
".",
"delete_relation",
"(",
"item",
",",
"relation",
",",
"dst",
")",
"#print(item,... | 41.076923 | 0.009158 |
def load_images(self, search_file, source_file):
        """Load the template image and the source image to be matched."""
        self.search_file = search_file
        self.source_file = source_file
        self.im_search = imread(search_file)
        self.im_source = imread(source_file)
        # Build the keypoint-matching helper for the freshly loaded pair.
        self.check_macthing_object = CheckKeypointResult(self.im_search, self.im_source)
"def",
"load_images",
"(",
"self",
",",
"search_file",
",",
"source_file",
")",
":",
"self",
".",
"search_file",
",",
"self",
".",
"source_file",
"=",
"search_file",
",",
"source_file",
"self",
".",
"im_search",
",",
"self",
".",
"im_source",
"=",
"imread",
... | 55.5 | 0.011834 |
async def create_virtual_environment(loop=None):
    """
    Create a virtual environment for a trial install of a software package.

    The venv directory contains a "bin" (or "Scripts" on Windows) directory
    with the `python` and `pip` binaries.

    :param loop: optional event loop forwarded to the subprocess call.
    :return: the path to the virtual environment, its python, and its site pkgs
    """
    scratch_dir = tempfile.mkdtemp()
    venv_dir = os.path.join(scratch_dir, VENV_NAME)
    # Shell out to ``virtualenv`` and wait for it to finish.
    creator = await asyncio.create_subprocess_shell(
        'virtualenv {}'.format(venv_dir), loop=loop)
    await creator.communicate()
    if sys.platform == 'win32':
        python = os.path.join(venv_dir, 'Scripts', 'python.exe')
    else:
        python = os.path.join(venv_dir, 'bin', 'python')
    venv_site_pkgs = install_dependencies(python)
    log.info("Created virtual environment at {}".format(venv_dir))
    return venv_dir, python, venv_site_pkgs
"async",
"def",
"create_virtual_environment",
"(",
"loop",
"=",
"None",
")",
":",
"tmp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"venv_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"VENV_NAME",
")",
"proc1",
"=",
"await",
"asynci... | 40.545455 | 0.001095 |
def read(key, root=''):
    '''
    Read from SysFS

    :param key: file or path in SysFS; if key is a list then root will be
        prefixed on each key
    :return: the full (tree of) SysFS attributes under key, or ``False``
        when the key cannot be resolved or read

    CLI example:

    .. code-block:: bash

        salt '*' sysfs.read class/net/em1/statistics
    '''
    # A list of keys: read each one relative to root, dropping failures.
    if not isinstance(key, six.string_types):
        res = {}
        for akey in key:
            ares = read(os.path.join(root, akey))
            if ares is not False:
                res[akey] = ares
        return res
    # Resolve the key to an absolute SysFS path (target() returns False
    # for invalid keys).
    key = target(os.path.join(root, key))
    if key is False:
        return False
    elif os.path.isdir(key):
        # Directory: recurse over its readable ('r' and 'rw') attributes and
        # rebuild nested dicts from any '/'-separated sub-paths.
        keys = interfaces(key)
        result = {}
        for subkey in keys['r'] + keys['rw']:
            subval = read(os.path.join(key, subkey))
            if subval is not False:
                subkeys = subkey.split('/')
                subkey = subkeys.pop()
                subresult = result
                if subkeys:
                    # Walk/create the intermediate dicts for nested attributes.
                    for skey in subkeys:
                        if skey not in subresult:
                            subresult[skey] = {}
                        subresult = subresult[skey]
                subresult[subkey] = subval
        return result
    else:
        try:
            log.trace('Reading %s...', key)
            # Certain things in SysFS are pipes 'n such.
            # This opens it non-blocking, which prevents indefinite blocking
            with os.fdopen(os.open(key, os.O_RDONLY | os.O_NONBLOCK)) as treader:
                # alternative method for the same idea, but only works for completely empty pipes
                # treader = select.select([treader], [], [], 1)[0][0]
                val = treader.read().strip()
                if not val:
                    return False
                # Coerce numeric strings: try int first, then float,
                # otherwise keep the raw string.
                try:
                    val = int(val)
                except Exception:
                    try:
                        val = float(val)
                    except Exception:
                        pass
                return val
        except Exception:
            # Unreadable attribute (permissions, write-only, transient
            # device state): report False rather than raising.
            return False
"def",
"read",
"(",
"key",
",",
"root",
"=",
"''",
")",
":",
"if",
"not",
"isinstance",
"(",
"key",
",",
"six",
".",
"string_types",
")",
":",
"res",
"=",
"{",
"}",
"for",
"akey",
"in",
"key",
":",
"ares",
"=",
"read",
"(",
"os",
".",
"path",
... | 32.349206 | 0.001905 |
def mainClassDoc():
    """Function decorator that splices the ``MEoS`` base-class documentation
    into the decorated subclass ``__doc__``."""
    def decorator(f):
        # __doc__ is only writable in python3.
        # The doc build must be done with python3 so this snippet does the work
        if platform.python_version()[0] == "3":
            lines = f.__doc__.split(os.linesep)
            # Split the docstring at its first blank line (fall back to
            # just after the summary line when there is none).
            try:
                cut = lines.index("")
            except ValueError:
                cut = 1
            head = os.linesep.join(lines[:cut])
            tail = os.linesep.join(lines[cut:])
            base = os.linesep.join(MEoS.__doc__.split(os.linesep)[3:])
            f.__doc__ = os.linesep.join([head, "", base, "", tail])
        return f
    return decorator
"def",
"mainClassDoc",
"(",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"# __doc__ is only writable in python3.",
"# The doc build must be done with python3 so this snnippet do the work",
"py_version",
"=",
"platform",
".",
"python_version",
"(",
")",
"if",
"py_versio... | 34.565217 | 0.001224 |
def collapsed_spectrum(fitsfile, ns1, ns2,
                       method='mean', nwin_background=0,
                       reverse=False, out_sp=None, debugplot=0):
    """Collapse scans [ns1, ns2] of a 2D image into a single spectrum.

    Parameters
    ----------
    fitsfile : file object
        File name of FITS file containing the spectra to be calibrated.
    ns1 : int
        First scan (from 1 to NAXIS2).
    ns2 : int
        Last scan (from 1 to NAXIS2).
    method : string
        Indicates collapsing method. Possible values are "mean" or
        "median".
    nwin_background : int
        Window size for the computation of background using a median
        filtering with that window width. This background is computed
        and subtracted only if this parameter is > 0.
    reverse : bool
        If True, reverse wavelength direction prior to wavelength
        calibration.
    out_sp : string or None
        File name to save the selected spectrum in FITS format before
        performing the wavelength calibration.
    debugplot : int
        Determines whether intermediate computations and/or plots
        are displayed. The valid codes are defined in
        numina.array.display.pause_debugplot.

    Returns
    -------
    sp : 1d numpy array
        Collapsed spectrum.
    """
    # Load the 2D image from the primary HDU.
    with fits.open(fitsfile) as hdulist:
        image2d = hdulist[0].data
    naxis2, naxis1 = image2d.shape
    if abs(debugplot) >= 10:
        print('>>> Reading file:', fitsfile.name)
        print('>>> NAXIS1:', naxis1)
        print('>>> NAXIS2:', naxis2)
    # Validate the (1-based, inclusive) scan range.
    if not (1 <= ns1 <= ns2 <= naxis2):
        raise ValueError("Invalid ns1=" + str(ns1) + ", ns2=" + str(ns2) +
                         " values")
    collapse = {"mean": np.mean, "median": np.median}.get(method)
    if collapse is None:
        raise ValueError("Invalid method '" + str(method) + "'")
    sp = collapse(image2d[(ns1 - 1):ns2], axis=0)
    # Reverse the wavelength direction when requested.
    if reverse:
        sp = sp[::-1]
    # Fit and subtract a median-filtered background.
    if nwin_background > 0:
        sp -= ndimage.filters.median_filter(sp, size=nwin_background)
    # Optionally dump the uncalibrated spectrum to an external FITS file.
    if out_sp is not None:
        fits.PrimaryHDU(sp).writeto(out_sp, overwrite=True)
    return sp
"def",
"collapsed_spectrum",
"(",
"fitsfile",
",",
"ns1",
",",
"ns2",
",",
"method",
"=",
"'mean'",
",",
"nwin_background",
"=",
"0",
",",
"reverse",
"=",
"False",
",",
"out_sp",
"=",
"None",
",",
"debugplot",
"=",
"0",
")",
":",
"# read FITS file",
"wit... | 32.38961 | 0.000389 |
def print_computation_log(self, aggregate = False):
        """
        Print the computation log of a simulation, one line per entry.

        With ``aggregate = False`` (default) every computed vector's value is
        printed; with ``aggregate = True`` only the minimum, maximum and
        average of each vector are shown, which suits large populations.
        """
        log_lines = self.computation_log(aggregate)
        for log_line in log_lines:
            print(log_line)
"def",
"print_computation_log",
"(",
"self",
",",
"aggregate",
"=",
"False",
")",
":",
"for",
"line",
"in",
"self",
".",
"computation_log",
"(",
"aggregate",
")",
":",
"print",
"(",
"line",
")"
] | 44.272727 | 0.012072 |
def norm2(self):
        """Squared Euclidean norm: the dot product of the vector with itself."""
        return sum(component * component
                   for component in (self.x, self.y, self.z))
"def",
"norm2",
"(",
"self",
")",
":",
"return",
"self",
".",
"x",
"*",
"self",
".",
"x",
"+",
"self",
".",
"y",
"*",
"self",
".",
"y",
"+",
"self",
".",
"z",
"*",
"self",
".",
"z"
] | 40.666667 | 0.016129 |
def define_residues_for_plotting_topology(self,cutoff):
        """
        This function defines the residues for plotting in case only a topology file has been submitted.
        In this case the residence time analysis is not necessary and it is enough just to find all
        residues within a cutoff distance.
        Takes:
            * cutoff * - cutoff distance in angstroms that defines native contacts
        Output:
            * fills ``self.dict_of_plotted_res`` with (resname, resid, segid) keys
        """
        #self.protein_selection = self.universe.select_atoms('all and around '+str(cutoff)+' (segid '+str(self.universe.ligand.segids[0])+' and resid '+str(self.universe.ligand.resids[0])+')')
        #The previous line was not working on some examples for some reason - switch to more efficient Neighbour Search
        n = AtomNeighborSearch(self.universe.select_atoms('protein and not name H* or (segid '+str(self.universe.ligand.segids[0])+' and resid '+str(self.universe.ligand.resids[0])+')'), bucket_size=10)
        self.protein_selection = n.search(self.universe.ligand,cutoff,level="A")
        for atom in self.protein_selection.atoms:
            #for non-analysis plots
            residue = (atom.resname, str(atom.resid), atom.segid)
            if residue not in self.dict_of_plotted_res and atom not in self.universe.ligand.atoms:
                self.dict_of_plotted_res[residue]=[1]
        # str() around resids[0]/segids[0] below: resids holds integers, so the
        # old bare "+" concatenation raised TypeError instead of this message.
        assert len(self.dict_of_plotted_res)!=0, \
            "Nothing to draw for this ligand (residue number: " + \
            str(self.universe.ligand.resids[0]) + " on the chain " + \
            str(self.universe.ligand.segids[0]) + \
            ") - check the position of your ligand within the topology file."
"def",
"define_residues_for_plotting_topology",
"(",
"self",
",",
"cutoff",
")",
":",
"#self.protein_selection = self.universe.select_atoms('all and around '+str(cutoff)+' (segid '+str(self.universe.ligand.segids[0])+' and resid '+str(self.universe.ligand.resids[0])+')')",
"#The previous line was ... | 77.47619 | 0.015179 |
def _save_private_file(filename, json_contents):
"""Saves a file with read-write permissions on for the owner.
Args:
filename: String. Absolute path to file.
json_contents: JSON serializable object to be saved.
"""
temp_filename = tempfile.mktemp()
file_desc = os.open(temp_filename, os.O_WRONLY | os.O_CREAT, 0o600)
with os.fdopen(file_desc, 'w') as file_handle:
json.dump(json_contents, file_handle, sort_keys=True,
indent=2, separators=(',', ': '))
shutil.move(temp_filename, filename) | [
"def",
"_save_private_file",
"(",
"filename",
",",
"json_contents",
")",
":",
"temp_filename",
"=",
"tempfile",
".",
"mktemp",
"(",
")",
"file_desc",
"=",
"os",
".",
"open",
"(",
"temp_filename",
",",
"os",
".",
"O_WRONLY",
"|",
"os",
".",
"O_CREAT",
",",
... | 42.076923 | 0.001789 |
def set_read_only(self, value):
        """
        Sets whether model could be modified or not
        """
        if self.__read_only__ == value:
            # No change: skip the (potentially expensive) update hook.
            return
        self.__read_only__ = value
        self._update_read_only()
"def",
"set_read_only",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"__read_only__",
"!=",
"value",
":",
"self",
".",
"__read_only__",
"=",
"value",
"self",
".",
"_update_read_only",
"(",
")"
] | 31 | 0.008969 |
def pretty_format(obj, indent=None):
        """
        Pretty formats the given object as a string which is returned.
        If indent is None, a single line will be returned.
        """
        if indent is None:
            # Single-line mode: TLObjects are rendered via their dict form.
            if isinstance(obj, TLObject):
                obj = obj.to_dict()
            if isinstance(obj, dict):
                # The '_' key holds the type name; the rest become kwargs.
                return '{}({})'.format(obj.get('_', 'dict'), ', '.join(
                    '{}={}'.format(k, TLObject.pretty_format(v))
                    for k, v in obj.items() if k != '_'
                ))
            elif isinstance(obj, str) or isinstance(obj, bytes):
                return repr(obj)
            elif hasattr(obj, '__iter__'):
                return '[{}]'.format(
                    ', '.join(TLObject.pretty_format(x) for x in obj)
                )
            else:
                return repr(obj)
        else:
            # Multi-line mode: accumulate pieces in a list and join once,
            # indenting with one tab per nesting level.
            result = []
            if isinstance(obj, TLObject):
                obj = obj.to_dict()
            if isinstance(obj, dict):
                result.append(obj.get('_', 'dict'))
                result.append('(')
                if obj:
                    result.append('\n')
                    indent += 1
                    for k, v in obj.items():
                        if k == '_':
                            continue
                        result.append('\t' * indent)
                        result.append(k)
                        result.append('=')
                        result.append(TLObject.pretty_format(v, indent))
                        result.append(',\n')
                    # NOTE(review): drops the last ',\n'; if the dict only
                    # contained '_' this would pop the '(' instead -
                    # presumably real TLObject dicts always have more keys.
                    result.pop()
                    indent -= 1
                    result.append('\n')
                    result.append('\t' * indent)
                result.append(')')
            elif isinstance(obj, str) or isinstance(obj, bytes):
                result.append(repr(obj))
            elif hasattr(obj, '__iter__'):
                result.append('[\n')
                indent += 1
                for x in obj:
                    result.append('\t' * indent)
                    result.append(TLObject.pretty_format(x, indent))
                    result.append(',\n')
                indent -= 1
                result.append('\t' * indent)
                result.append(']')
            else:
                result.append(repr(obj))
            return ''.join(result)
"def",
"pretty_format",
"(",
"obj",
",",
"indent",
"=",
"None",
")",
":",
"if",
"indent",
"is",
"None",
":",
"if",
"isinstance",
"(",
"obj",
",",
"TLObject",
")",
":",
"obj",
"=",
"obj",
".",
"to_dict",
"(",
")",
"if",
"isinstance",
"(",
"obj",
","... | 35.753846 | 0.000838 |
def run(self, progress=True, verbose=False):
        """Compute all steps of the simulation.

        Be careful: if tmax is not set, this function will iterate forever
        (there is no iteration bound to stop on).

        Parameters
        ----------
        progress : bool
            display a tqdm progress bar while iterating.
        verbose : bool
            log each step at INFO level instead of DEBUG.

        Returns
        -------
        (t, fields):
            last time and result fields, or None (with a warning) when the
            simulation had already ended.
        """
        # Only compute an iteration bound when tmax is set: the previous
        # int(... if self.tmax else None) raised TypeError on int(None).
        total_iter = int(self.tmax // self.user_dt) if self.tmax else None
        log = logging.info if verbose else logging.debug
        if progress:
            # Clamp the starting position to the bound when one exists.
            initial = (self.i if total_iter is None or self.i < total_iter
                       else total_iter)
            with tqdm(initial=initial, total=total_iter) as pbar:
                for t, fields in self:
                    pbar.update(1)
                    log("%s running: t: %g" % (self.id, t))
        else:
            for t, fields in self:
                log("%s running: t: %g" % (self.id, t))
        # t/fields are unbound when the iterator yielded nothing; previously
        # the progress branch then fell through and warned a second time.
        try:
            return t, fields
        except UnboundLocalError:
            warnings.warn("Simulation already ended")
"def",
"run",
"(",
"self",
",",
"progress",
"=",
"True",
",",
"verbose",
"=",
"False",
")",
":",
"total_iter",
"=",
"int",
"(",
"(",
"self",
".",
"tmax",
"//",
"self",
".",
"user_dt",
")",
"if",
"self",
".",
"tmax",
"else",
"None",
")",
"log",
"=... | 37.535714 | 0.001855 |
def create_installer(self, rpm_py_version, **kwargs):
        """Build and return the Debian-specific installer object."""
        installer = DebianInstaller(rpm_py_version, self.python, self.rpm,
                                    **kwargs)
        return installer
"def",
"create_installer",
"(",
"self",
",",
"rpm_py_version",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"DebianInstaller",
"(",
"rpm_py_version",
",",
"self",
".",
"python",
",",
"self",
".",
"rpm",
",",
"*",
"*",
"kwargs",
")"
] | 56.666667 | 0.011628 |
def basic_stats(self):
        """Build a markdown table summarising submission and comment activity."""
        com_score = sum(item.score for item in self.comments)
        if self.comments:
            com_span = (self.comments[-1].created_utc -
                        self.comments[0].created_utc)
            com_rate = self._rate(len(self.comments), com_span)
        else:
            com_rate = 0
        sub_span = self.max_date - self.min_date
        sub_rate = self._rate(len(self.submissions), sub_span)
        sub_score = sum(item.score for item in self.submissions.values())
        rows = [('Total', len(self.submissions), len(self.comments)),
                ('Rate (per day)', '{:.2f}'.format(sub_rate),
                 '{:.2f}'.format(com_rate)),
                ('Unique Redditors', len(self.submitters),
                 len(self.commenters)),
                ('Combined Score', sub_score, com_score)]
        # Assemble: period header, table header, one row per statistic.
        pieces = ['Period: {:.2f} days\n\n'.format(sub_span / 86400.),
                  '||Submissions|Comments|\n:-:|--:|--:\n']
        pieces.extend('__{}__|{}|{}\n'.format(*row) for row in rows)
        pieces.append('\n')
        return ''.join(pieces)
"def",
"basic_stats",
"(",
"self",
")",
":",
"comment_score",
"=",
"sum",
"(",
"comment",
".",
"score",
"for",
"comment",
"in",
"self",
".",
"comments",
")",
"if",
"self",
".",
"comments",
":",
"comment_duration",
"=",
"(",
"self",
".",
"comments",
"[",
... | 47.592593 | 0.001526 |
def _create_kube_dns_instance(self, instance):
"""
Set up kube_dns instance so it can be used in OpenMetricsBaseCheck
"""
kube_dns_instance = deepcopy(instance)
# kube_dns uses 'prometheus_endpoint' and not 'prometheus_url', so we have to rename the key
kube_dns_instance['prometheus_url'] = instance.get('prometheus_endpoint', None)
kube_dns_instance.update(
{
'namespace': 'kubedns',
# Note: the count metrics were moved to specific functions list below to be submitted
# as both gauges and monotonic_counts
'metrics': [
{
# metrics have been renamed to kubedns in kubernetes 1.6.0
'kubedns_kubedns_dns_response_size_bytes': 'response_size.bytes',
'kubedns_kubedns_dns_request_duration_seconds': 'request_duration.seconds',
# metrics names for kubernetes < 1.6.0
'skydns_skydns_dns_response_size_bytes': 'response_size.bytes',
'skydns_skydns_dns_request_duration_seconds': 'request_duration.seconds',
}
],
# Defaults that were set when kube_dns was based on PrometheusCheck
'send_monotonic_counter': instance.get('send_monotonic_counter', False),
'health_service_check': instance.get('health_service_check', False),
}
)
return kube_dns_instance | [
"def",
"_create_kube_dns_instance",
"(",
"self",
",",
"instance",
")",
":",
"kube_dns_instance",
"=",
"deepcopy",
"(",
"instance",
")",
"# kube_dns uses 'prometheus_endpoint' and not 'prometheus_url', so we have to rename the key",
"kube_dns_instance",
"[",
"'prometheus_url'",
"]... | 49.258065 | 0.008349 |
def select(self, crit):
        """
        Select subset of values that match a given index criterion.

        Parameters
        ----------
        crit : function, list, str, int
            Criterion function to map to indices, specific index value,
            or list of indices.

        Returns
        -------
        Same type as ``self``: either ``self`` unchanged when everything
        matches, or a new object holding only the matching positions.
        """
        import types
        # handle lists, strings, and ints
        if not isinstance(crit, types.FunctionType):
            # set("foo") -> {"f", "o"}; wrap in list to prevent:
            if isinstance(crit, string_types):
                critlist = set([crit])
            else:
                try:
                    critlist = set(crit)
                except TypeError:
                    # typically means crit is not an iterable type; for instance, crit is an int
                    critlist = set([crit])
            # Normalise everything to a membership-test predicate.
            crit = lambda x: x in critlist
        # if only one index, return it directly or throw an error
        index = self.index
        if size(index) == 1:
            if crit(index[0]):
                return self
            else:
                raise Exception('No indices found matching criterion')
        # determine new index and check the result
        newindex = [i for i in index if crit(i)]
        if len(newindex) == 0:
            raise Exception('No indices found matching criterion')
        # NOTE(review): this relies on list/array equality semantics — with a
        # numpy index of a different length the comparison collapses to a
        # scalar False; confirm what type ``self.index`` actually holds.
        if array(newindex == index).all():
            return self
        # use fast logical indexing to get the new values
        subinds = where([crit(i) for i in index])
        new = self.map(lambda x: x[subinds], index=newindex)
        # if singleton, need to check whether it's an array or a scalar/int
        # if array, recompute a new set of indices
        if len(newindex) == 1:
            new = new.map(lambda x: x[0], index=newindex)
            val = new.first()
            if size(val) == 1:
                newindex = [newindex[0]]
            else:
                newindex = arange(0, size(val))
            new._index = newindex
        return new
"def",
"select",
"(",
"self",
",",
"crit",
")",
":",
"import",
"types",
"# handle lists, strings, and ints",
"if",
"not",
"isinstance",
"(",
"crit",
",",
"types",
".",
"FunctionType",
")",
":",
"# set(\"foo\") -> {\"f\", \"o\"}; wrap in list to prevent:",
"if",
"isins... | 34.210526 | 0.001994 |
def decode_raw_stream(self, text, decode_raw, known_encoding, filename):
        """given string/unicode or bytes/string, determine encoding
           from magic encoding comment, return body as unicode
           or raw if decode_raw=False
        """
        # Already-decoded text: just report the declared or known encoding.
        if isinstance(text, compat.text_type):
            m = self._coding_re.match(text)
            encoding = m and m.group(1) or known_encoding or 'ascii'
            return encoding, text
        if text.startswith(codecs.BOM_UTF8):
            # A UTF-8 BOM forces utf-8; strip it, then reject any magic
            # comment that declares a conflicting encoding.
            text = text[len(codecs.BOM_UTF8):]
            parsed_encoding = 'utf-8'
            m = self._coding_re.match(text.decode('utf-8', 'ignore'))
            if m is not None and m.group(1) != 'utf-8':
                raise exceptions.CompileException(
                    "Found utf-8 BOM in file, with conflicting "
                    "magic encoding comment of '%s'" % m.group(1),
                    text.decode('utf-8', 'ignore'),
                    0, 0, filename)
        else:
            # No BOM: trust the magic comment, else the caller's hint,
            # else fall back to ascii.
            m = self._coding_re.match(text.decode('utf-8', 'ignore'))
            if m:
                parsed_encoding = m.group(1)
            else:
                parsed_encoding = known_encoding or 'ascii'
        # Decode the raw bytes only when the caller asked for text.
        if decode_raw:
            try:
                text = text.decode(parsed_encoding)
            except UnicodeDecodeError:
                raise exceptions.CompileException(
                    "Unicode decode operation of encoding '%s' failed" %
                    parsed_encoding,
                    text.decode('utf-8', 'ignore'),
                    0, 0, filename)
        return parsed_encoding, text
"def",
"decode_raw_stream",
"(",
"self",
",",
"text",
",",
"decode_raw",
",",
"known_encoding",
",",
"filename",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"compat",
".",
"text_type",
")",
":",
"m",
"=",
"self",
".",
"_coding_re",
".",
"match",
"(",... | 42.205128 | 0.001188 |
def forward_list(self):
        '''adb forward --list

        Returns:
            list of [serial, local, remote] triples, one per active forward.

        Raises:
            EnvironmentError: when the adb binary is older than 1.0.31,
                which introduced ``forward --list``.
        '''
        version = self.version()
        # Compare the (major, minor, patch) triple as a tuple: the old
        # per-field "<=" chain wrongly accepted versions such as 0.5.x.
        if tuple(int(c) for c in version[1:4]) < (1, 0, 31):
            raise EnvironmentError("Low adb version.")
        lines = self.raw_cmd("forward", "--list").communicate()[0].decode("utf-8").strip().splitlines()
        return [line.strip().split() for line in lines]
"def",
"forward_list",
"(",
"self",
")",
":",
"version",
"=",
"self",
".",
"version",
"(",
")",
"if",
"int",
"(",
"version",
"[",
"1",
"]",
")",
"<=",
"1",
"and",
"int",
"(",
"version",
"[",
"2",
"]",
")",
"<=",
"0",
"and",
"int",
"(",
"version... | 54.428571 | 0.010336 |
def release_branches(self):
        """A dictionary that maps branch names to :class:`Release` objects."""
        self.ensure_release_scheme('branches')
        mapping = {}
        for release in self.releases.values():
            mapping[release.revision.branch] = release
        return mapping
"def",
"release_branches",
"(",
"self",
")",
":",
"self",
".",
"ensure_release_scheme",
"(",
"'branches'",
")",
"return",
"dict",
"(",
"(",
"r",
".",
"revision",
".",
"branch",
",",
"r",
")",
"for",
"r",
"in",
"self",
".",
"releases",
".",
"values",
"(... | 56.5 | 0.008734 |
def full_name(self):
        """Return the member name, qualified by its prefix when one is set."""
        if self.prefix is None:
            return self.member
        return '{}.{}'.format(self.prefix, self.member)
"def",
"full_name",
"(",
"self",
")",
":",
"if",
"self",
".",
"prefix",
"is",
"not",
"None",
":",
"return",
"'.'",
".",
"join",
"(",
"[",
"self",
".",
"prefix",
",",
"self",
".",
"member",
"]",
")",
"return",
"self",
".",
"member"
] | 35.2 | 0.011111 |
def _get_type_description(annotation):
    '''
    Given an annotation, return the ``(type, description)`` pair for the
    parameter.  If you provide an annotation that is somehow both a string
    and a callable, the behavior is undefined.
    '''
    if annotation is _empty:
        return None, None
    if callable(annotation):
        return annotation, None
    if isinstance(annotation, str):
        return None, annotation
    if isinstance(annotation, tuple):
        # A 2-tuple may combine a type and a description in either order.
        try:
            first, second = annotation
        except ValueError as e:
            raise AnnotationError(annotation) from e
        if callable(first) and isinstance(second, str):
            return first, second
        if isinstance(first, str) and callable(second):
            return second, first
    raise AnnotationError(annotation)
"def",
"_get_type_description",
"(",
"annotation",
")",
":",
"if",
"annotation",
"is",
"_empty",
":",
"return",
"None",
",",
"None",
"elif",
"callable",
"(",
"annotation",
")",
":",
"return",
"annotation",
",",
"None",
"elif",
"isinstance",
"(",
"annotation",
... | 33.958333 | 0.001193 |
def parse_args(args):
    """
    Parse command line parameters

    :param args: command line parameters as list of strings
    :return: command line parameters as :obj:`argparse.Namespace`
    """
    arg_parser = argparse.ArgumentParser(
        description="Just a Hello World demonstration")
    version_string = 'twip {ver}'.format(ver=__version__)
    arg_parser.add_argument('-v', '--version', action='version',
                            version=version_string)
    return arg_parser.parse_args(args)
"def",
"parse_args",
"(",
"args",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Just a Hello World demonstration\"",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--version'",
",",
"action",
"=",
"'version'",
"... | 30.2 | 0.002141 |
def getAllEncodings(self):
        """Return the encodings of every record, one entry per record."""
        total = self.fields[0].numEncodings
        # Sanity check: every field must expose the same record count.
        assert all(f.numEncodings == total for f in self.fields)
        return [self.getEncoding(i) for i in range(total)]
"def",
"getAllEncodings",
"(",
"self",
")",
":",
"numEncodings",
"=",
"self",
".",
"fields",
"[",
"0",
"]",
".",
"numEncodings",
"assert",
"(",
"all",
"(",
"field",
".",
"numEncodings",
"==",
"numEncodings",
"for",
"field",
"in",
"self",
".",
"fields",
"... | 35.75 | 0.010239 |
def calc_2dsplinecoeffs_c(array2d):
    """
    NAME:
       calc_2dsplinecoeffs_c
    PURPOSE:
       Use C to calculate spline coefficients for a 2D array
    INPUT:
       array2d
    OUTPUT:
       new array with spline coeffs
    HISTORY:
       2013-01-24 - Written - Bovy (IAS)
    """
    # Work on a C-contiguous, writable float64 copy so the C routine can
    # overwrite it in place without touching the caller's array.
    coeffs = numpy.require(copy.copy(array2d), dtype=numpy.float64,
                           requirements=['C', 'W'])
    # Bind and type the C entry point.
    ndarrayFlags = ('C_CONTIGUOUS', 'WRITEABLE')
    samples_to_coefficients = _lib.samples_to_coefficients
    samples_to_coefficients.argtypes = [
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ctypes.c_int,
        ctypes.c_int]
    # The C code converts samples to spline coefficients in place;
    # arguments are (array, ncols, nrows).
    samples_to_coefficients(coeffs, coeffs.shape[1], coeffs.shape[0])
    return coeffs
"def",
"calc_2dsplinecoeffs_c",
"(",
"array2d",
")",
":",
"#Set up result arrays",
"out",
"=",
"copy",
".",
"copy",
"(",
"array2d",
")",
"out",
"=",
"numpy",
".",
"require",
"(",
"out",
",",
"dtype",
"=",
"numpy",
".",
"float64",
",",
"requirements",
"=",
... | 31.107143 | 0.018931 |
def setData(self, index, value, role=QtCore.Qt.EditRole):
        """Reimplemented from QtCore.QAbstractItemModel

        Only the value column (column 1) is editable.

        :param index: the index to edit, column should be 1.
        :type index: :class:`PySide.QtCore.QModelIndex`
        :param value: the new value for the configobj
        :type value: object
        :param role: Optional - the ItemDataRole. Default is QtCore.Qt.EditRole
        :type role: QtCore.Qt.ItemDataRole
        :returns: True if index was edited, False if index could not be edited.
        :rtype: bool
        :raises: None
        """
        if not index.isValid():
            return False
        if role != QtCore.Qt.EditRole or index.column() != 1:
            return False
        parent = index.internalPointer()
        key = self.get_key(parent, index.row())
        # We could just assign the raw value, BUT for list values etc. that
        # does not work, so round-trip through the string form:
        # _handle_value parses it correctly (any comment is lost).
        strval = self._val_to_str(value)
        (parsedval, comment) = self._conf._handle_value(strval)
        parent[key] = parsedval
        self.dataChanged.emit(index, index)
        return True
"def",
"setData",
"(",
"self",
",",
"index",
",",
"value",
",",
"role",
"=",
"QtCore",
".",
"Qt",
".",
"EditRole",
")",
":",
"if",
"index",
".",
"isValid",
"(",
")",
":",
"if",
"role",
"==",
"QtCore",
".",
"Qt",
".",
"EditRole",
":",
"if",
"index... | 41.258065 | 0.001528 |
def fix_sec_nseg(secs, dL):
    """ Set nseg of sections based on dL param: section.nseg = 1 + 2 * int(section.L / (2*dL))

    :param secs: netpyne dictionary with all sections
    :param dL: dL from config file
    """
    for name in secs:
        geom = secs[name]['geom']
        # Keep nseg odd so every section has a well-defined centre segment.
        geom['nseg'] = 1 + 2 * int(geom['L'] / (2 * dL))
"def",
"fix_sec_nseg",
"(",
"secs",
",",
"dL",
")",
":",
"for",
"secName",
"in",
"secs",
":",
"secs",
"[",
"secName",
"]",
"[",
"'geom'",
"]",
"[",
"'nseg'",
"]",
"=",
"1",
"+",
"2",
"*",
"int",
"(",
"secs",
"[",
"secName",
"]",
"[",
"'geom'",
... | 41 | 0.01194 |
def generate_file_rst(fname, target_dir, src_dir, gallery_conf):
    """ Generate the rst file for a given example.

    Returns the amount of code (in characters) of the corresponding
    files.
    """
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)
    image_dir = os.path.join(target_dir, 'images')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    base_image_name = os.path.splitext(fname)[0]
    # Figure filename template: one numbered png per generated figure.
    image_fname = 'sphx_glr_' + base_image_name + '_{0:03}.png'
    image_path = os.path.join(image_dir, image_fname)
    script_blocks = split_code_and_text_blocks(example_file)
    amount_of_code = sum([len(bcontent)
                          for blabel, bcontent in script_blocks
                          if blabel == 'code'])
    # Skip re-generation entirely when the images are still up to date.
    if _plots_are_current(example_file, image_path):
        return amount_of_code
    time_elapsed = 0
    ref_fname = example_file.replace(os.path.sep, '_')
    example_rst = """\n\n.. _sphx_glr_{0}:\n\n""".format(ref_fname)
    example_nb = Notebook(fname, target_dir)
    filename_pattern = gallery_conf.get('filename_pattern')
    # Execute the example only when its name matches the configured pattern
    # and plot generation is enabled; otherwise just render its source.
    if re.search(filename_pattern, src_file) and gallery_conf['plot_gallery']:
        # A lot of examples contains 'print(__doc__)' for example in
        # scikit-learn so that running the example prints some useful
        # information. Because the docstring has been separated from
        # the code blocks in sphinx-gallery, __doc__ is actually
        # __builtin__.__doc__ in the execution context and we do not
        # want to print it
        example_globals = {'__doc__': ''}
        fig_count = 0
        # A simple example has two blocks: one for the
        # example introduction/explanation and one for the code
        is_example_notebook_like = len(script_blocks) > 2
        for blabel, bcontent in script_blocks:
            if blabel == 'code':
                code_output, rtime, fig_count = execute_script(bcontent,
                                                               example_globals,
                                                               image_path,
                                                               fig_count,
                                                               src_file,
                                                               gallery_conf)
                time_elapsed += rtime
                example_nb.add_code_cell(bcontent)
                # Notebook-like examples show the code before its output;
                # simple examples show the output first.
                if is_example_notebook_like:
                    example_rst += codestr2rst(bcontent) + '\n'
                    example_rst += code_output
                else:
                    example_rst += code_output
                    example_rst += codestr2rst(bcontent) + '\n'
            else:
                example_rst += text2string(bcontent) + '\n'
                example_nb.add_markdown_cell(text2string(bcontent))
    else:
        for blabel, bcontent in script_blocks:
            if blabel == 'code':
                example_rst += codestr2rst(bcontent) + '\n'
                example_nb.add_code_cell(bcontent)
            else:
                example_rst += bcontent + '\n'
                example_nb.add_markdown_cell(text2string(bcontent))
    save_thumbnail(image_path, base_image_name, gallery_conf)
    time_m, time_s = divmod(time_elapsed, 60)
    example_nb.save_file()
    # Write the final rst, appending the download/timing footer.
    with open(os.path.join(target_dir, base_image_name + '.rst'), 'w') as f:
        example_rst += CODE_DOWNLOAD.format(time_m, time_s, fname,
                                            example_nb.file_name)
        f.write(example_rst)
    return amount_of_code
"def",
"generate_file_rst",
"(",
"fname",
",",
"target_dir",
",",
"src_dir",
",",
"gallery_conf",
")",
":",
"src_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"src_dir",
",",
"fname",
")",
"example_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"t... | 40.772727 | 0.000272 |
def plot(self, series, series_diff=None, label='', color=None, style=None):
        '''
        Plot a series on the main axis, optionally with its diff below.

        :param pandas.Series series:
            The series to be plotted, all values must be positive if stacked
            is True.
        :param pandas.Series series_diff:
            The series representing the diff that will be plotted in the
            bottom part.
        :param string label:
            The label for the series.
        :param integer/string color:
            Color for the plot. Can be an index for the color from COLORS
            or a key(string) from CNAMES.
        :param string style:
            Style forwarded to the plt.plot.
        '''
        color = self.get_color(color)
        if series_diff is None and self.autodiffs:
            series_diff = series.diff()
        if self.stacked:
            # NOTE(review): ``+=`` mutates the caller's series in place for
            # pandas/numpy-backed data — confirm callers do not reuse it.
            series += self.running_sum
            self.ax1.fill_between(series.index, self.running_sum, series,
                                  facecolor=ALPHAS[color])
            # The new cumulative top becomes the baseline for the next series.
            self.running_sum = series
            self.ax1.set_ylim(bottom=0, top=int(series.max() * 1.05))
        series.plot(label=label, c=COLORS[color], linewidth=2, style=style,
                    ax=self.ax1)
        if series_diff is not None:
            series_diff.plot(label=label, c=COLORS[color], linewidth=2,
                             style=style, ax=self.ax2)
"def",
"plot",
"(",
"self",
",",
"series",
",",
"series_diff",
"=",
"None",
",",
"label",
"=",
"''",
",",
"color",
"=",
"None",
",",
"style",
"=",
"None",
")",
":",
"color",
"=",
"self",
".",
"get_color",
"(",
"color",
")",
"if",
"series_diff",
"is... | 44.833333 | 0.001456 |
def json_conversion(obj: Any) -> JSON:
    """Encode objects the stdlib ``json`` module cannot handle natively."""
    try:
        # numpy isn't an explicit dependency of bowtie
        # so we can't assume it's available
        import numpy as np
    except ImportError:
        pass
    else:
        if isinstance(obj, (np.ndarray, np.generic)):
            return obj.tolist()
    try:
        # pandas isn't an explicit dependency of bowtie
        # so we can't assume it's available
        import pandas as pd
    except ImportError:
        pass
    else:
        # Order matters: DatetimeIndex is itself an Index subclass.
        if isinstance(obj, pd.DatetimeIndex):
            return [x.isoformat() for x in obj.to_pydatetime()]
        if isinstance(obj, pd.Index):
            return obj.tolist()
        if isinstance(obj, pd.Series):
            try:
                return [x.isoformat() for x in obj.dt.to_pydatetime()]
            except AttributeError:
                # Not datetime-like: fall back to the plain values.
                return obj.tolist()
    if isinstance(obj, (datetime, time, date)):
        return obj.isoformat()
    raise TypeError('Not sure how to serialize {} of type {}'.format(obj, type(obj)))
"def",
"json_conversion",
"(",
"obj",
":",
"Any",
")",
"->",
"JSON",
":",
"try",
":",
"# numpy isn't an explicit dependency of bowtie",
"# so we can't assume it's available",
"import",
"numpy",
"as",
"np",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"np",
".",
"ndar... | 34.366667 | 0.001887 |
def get_id(self, item_type, item=None, with_id=False, hostid=None, **args):
        """Return id or ids of zabbix objects.
        :type item_type: str
        :param item_type: Type of zabbix object. (eg host, item etc.)
        :type item: str
        :param item: Name of zabbix object. If it is `None`, return list of
            all objects in the scope.
        :type with_id: bool
        :param with_id: Returned values will be in zabbix json `id` format.
            Examlpe: `{'itemid: 128}`
        :type name: bool
        :param name: Return name instead of id.
        :type hostid: int
        :param hostid: Filter objects by specific hostid.
        :type templateids: int
        :param tempateids: Filter objects which only belong to specific
            templates by template id.
        :type app_name: str
        :param app_name: Filter object which only belong to specific
            application.
        :rtype: int or list
        :return: Return single `id`, `name` or list of values.
        """
        result = None
        name = args.get('name', False)
        # API method to call, e.g. "host.get" for item_type "host".
        type_ = '{item_type}.get'.format(item_type=item_type)
        # Some object types are filtered by a field other than "name".
        item_filter_name = {
            'mediatype': 'description',
            'trigger': 'description',
            'triggerprototype': 'description',
            'user': 'alias',
            'usermacro': 'macro',
        }
        # Some object types use a different prefix in their "<prefix>id" key.
        item_id_name = {
            'discoveryrule': 'item',
            'graphprototype': 'graph',
            'hostgroup': 'group',
            'itemprototype': 'item',
            'map': 'selement',
            'triggerprototype': 'trigger',
            'usergroup': 'usrgrp',
            'usermacro': 'hostmacro',
        }
        filter_ = {
            'filter': {
                item_filter_name.get(item_type, 'name'): item,
            },
            'output': 'extend'}
        if hostid:
            filter_['filter'].update({'hostid': hostid})
        if args.get('templateids'):
            # usermacro objects are looked up via "hostids" even when
            # filtering by templates.
            if item_type == 'usermacro':
                filter_['hostids'] = args['templateids']
            else:
                filter_['templateids'] = args['templateids']
        if args.get('app_name'):
            filter_['application'] = args['app_name']
        logger.debug(
            'do_request( "{type}", {filter} )'.format(
                type=type_,
                filter=filter_))
        response = self.do_request(type_, filter_)['result']
        if response:
            item_id_str = item_id_name.get(item_type, item_type)
            item_id = '{item}id'.format(item=item_id_str)
            result = []
            for obj in response:
                # Check if object not belong current template
                # NOTE(review): `not obj.get('templateid') in ("0", None)`
                # parses as `not (value in ("0", None))` due to operator
                # precedence -- presumably intended; confirm.
                if args.get('templateids'):
                    if (not obj.get('templateid') in ("0", None) or
                            not len(obj.get('templateids', [])) == 0):
                        continue
                if name:
                    o = obj.get(item_filter_name.get(item_type, 'name'))
                    result.append(o)
                elif with_id:
                    result.append({item_id: int(obj.get(item_id))})
                else:
                    result.append(int(obj.get(item_id)))
        list_types = (list, type(None))
        if not isinstance(item, list_types):
            # A single (non-list) lookup returns a scalar, not a list.
            result = result[0]
        return result
"def",
"get_id",
"(",
"self",
",",
"item_type",
",",
"item",
"=",
"None",
",",
"with_id",
"=",
"False",
",",
"hostid",
"=",
"None",
",",
"*",
"*",
"args",
")",
":",
"result",
"=",
"None",
"name",
"=",
"args",
".",
"get",
"(",
"'name'",
",",
"Fals... | 31.644231 | 0.000589 |
def redo(self):
        """Refresh the plot when a new image arrives or the data in the
        current image changes.
        """
        self.clear()
        image = self.channel.get_current_image()
        # Only tabular data can be plotted; ignore any other image type.
        if not isinstance(image, AstroTable):
            return
        # Cache the table and a synthetic row-index column.
        self.tab = image.get_data()
        self._idx = np.arange(len(self.tab))
        # Offer the index pseudo-column plus every real column name.
        self.cols = [self._idxname] + self.tab.colnames
        x_default = self.settings.get('x_index', 1)
        y_default = self.settings.get('y_index', 2)
        self.x_col = self._set_combobox('xcombo', self.cols, default=x_default)
        self.y_col = self._set_combobox('ycombo', self.cols, default=y_default)
        # Plot the first two columns right away.
        self.plot_two_columns(reset_xlimits=True, reset_ylimits=True)
"def",
"redo",
"(",
"self",
")",
":",
"self",
".",
"clear",
"(",
")",
"tab",
"=",
"self",
".",
"channel",
".",
"get_current_image",
"(",
")",
"if",
"not",
"isinstance",
"(",
"tab",
",",
"AstroTable",
")",
":",
"return",
"# Generate column indices",
"self... | 35.217391 | 0.002404 |
def to_dict(self, lev=0):
        """
        Return a dictionary representation of the class
        :return: A dict
        """
        _spec = self.c_param
        _res = {}
        lev += 1
        for key, val in self._dict.items():
            # Resolve the per-parameter serializer (_ser) for this key.
            # Fallback cascade: exact key -> key without its "#lang"
            # suffix -> wildcard '*' spec -> no serializer at all.
            try:
                (_, req, _ser, _, null_allowed) = _spec[str(key)]
            except KeyError:
                try:
                    _key, lang = key.split("#")
                    (_, req, _ser, _, null_allowed) = _spec[_key]
                except (ValueError, KeyError):
                    try:
                        (_, req, _ser, _, null_allowed) = _spec['*']
                    except KeyError:
                        _ser = None
            if _ser:
                val = _ser(val, "dict", lev)
            # Recurse into nested Message values, both single instances
            # and (non-empty) lists whose first element is a Message.
            if isinstance(val, Message):
                _res[key] = val.to_dict(lev + 1)
            elif isinstance(val, list) and isinstance(
                    next(iter(val or []), None), Message):
                _res[key] = [v.to_dict(lev) for v in val]
            else:
                _res[key] = val
        return _res
"def",
"to_dict",
"(",
"self",
",",
"lev",
"=",
"0",
")",
":",
"_spec",
"=",
"self",
".",
"c_param",
"_res",
"=",
"{",
"}",
"lev",
"+=",
"1",
"for",
"key",
",",
"val",
"in",
"self",
".",
"_dict",
".",
"items",
"(",
")",
":",
"try",
":",
"(",
... | 29.638889 | 0.001815 |
def translate_to_dbus_type(typeof, value):
    """
    Helper function to map values from their native Python types
    to Dbus types.
    :param type typeof: Target for type conversion e.g., 'dbus.Dictionary'
    :param value: Value to assign using type 'typeof'
    :return: 'value' converted to type 'typeof'
    :rtype: typeof
    """
    # NOTE(review): types.UnicodeType exists only on Python 2, so this
    # module is presumably Python 2 only -- confirm before porting.
    if ((isinstance(value, types.UnicodeType) or
            isinstance(value, str)) and typeof is not dbus.String):
        # FIXME: This is potentially dangerous since it evaluates
        # a string in-situ
        # SECURITY NOTE(review): eval() executes arbitrary code; this is
        # only safe if 'value' never comes from untrusted input -- verify.
        return typeof(eval(value))
    else:
        return typeof(value)
"def",
"translate_to_dbus_type",
"(",
"typeof",
",",
"value",
")",
":",
"if",
"(",
"(",
"isinstance",
"(",
"value",
",",
"types",
".",
"UnicodeType",
")",
"or",
"isinstance",
"(",
"value",
",",
"str",
")",
")",
"and",
"typeof",
"is",
"not",
"dbus",
"."... | 35.529412 | 0.001613 |
def _layout(self, node):
        """Style one node before rendering; ETE calls this per node.
        ETE terminology: a NodeStyle controls how the node itself is
        drawn, while Face objects render extra content next to the node.
        """
        def _pad(face):
            """Add horizontal breathing room around a Face."""
            face.margin_left = 5
            face.margin_right = 5
            # face.margin_top = 5
            # face.margin_bottom = 5
        def _hide_node_show_edges():
            """Draw only the connecting edges; the node itself is invisible."""
            style = ete3.NodeStyle()
            style["vt_line_color"] = EDGE_COLOR
            style["hz_line_color"] = EDGE_COLOR
            style["vt_line_width"] = EDGE_WIDTH
            style["hz_line_width"] = EDGE_WIDTH
            style["size"] = 0
            node.set_style(style)
        def _render_subject_node(color="Black"):
            """Render a Subject node as a plain text label."""
            face = ete3.TextFace(node.name, fsize=SUBJECT_NODE_FONT_SIZE,
                                 fgcolor=color)
            _pad(face)
            node.add_face(face, column=0, position="branch-right")
        def _render_type_node(color="Black"):
            """Render a Type node as a labelled, colored circle."""
            fontsize = (
                TYPE_NODE_FONT_SIZE_FILE
                if self._render_type == "file"
                else TYPE_NODE_FONT_SIZE_BROWSE
            )
            face = ete3.CircleFace(
                radius=TYPE_NODE_RADIUS,
                color=TYPE_NODE_COLOR_DICT.get(node.name, "White"),
                style="circle",
                label={
                    "text": node.name,
                    "color": color,
                    "fontsize": fontsize,
                },
            )
            _pad(face)
            node.add_face(face, column=0, position="branch-right")
        _hide_node_show_edges()
        if hasattr(node, SUBJECT_NODE_TAG):
            _render_subject_node()
        elif hasattr(node, TYPE_NODE_TAG):
            _render_type_node()
        else:
            raise AssertionError("Unknown node type")
"def",
"_layout",
"(",
"self",
",",
"node",
")",
":",
"def",
"set_edge_style",
"(",
")",
":",
"\"\"\"Set the style for edges and make the node invisible.\"\"\"",
"node_style",
"=",
"ete3",
".",
"NodeStyle",
"(",
")",
"node_style",
"[",
"\"vt_line_color\"",
"]",
"=",... | 36.461538 | 0.001643 |
def forward(self, seconds, vx=5):
        """Move continuously forward in the simulator.
        :param seconds: how long to keep moving, in seconds.
        :param vx: forward velocity while moving (default 5).
        """
        # Engage, hold for the requested duration, then stop.
        self.vx = vx
        self.sleep(seconds)
        self.vx = 0
"def",
"forward",
"(",
"self",
",",
"seconds",
",",
"vx",
"=",
"5",
")",
":",
"self",
".",
"vx",
"=",
"vx",
"self",
".",
"sleep",
"(",
"seconds",
")",
"self",
".",
"vx",
"=",
"0"
] | 26.857143 | 0.010309 |
def is_fifo(name):
    '''
    Check if a file exists and is a FIFO.
    CLI Example:
    .. code-block:: bash
        salt '*' file.is_fifo /dev/fifo
    '''
    name = os.path.expanduser(name)
    try:
        mode = os.stat(name).st_mode
    except OSError as exc:
        # A missing path is simply "not a FIFO"; any other stat failure
        # (e.g. permission errors) is propagated to the caller.
        if exc.errno != errno.ENOENT:
            raise
        return False
    return stat.S_ISFIFO(mode)
"def",
"is_fifo",
"(",
"name",
")",
":",
"name",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"name",
")",
"stat_structure",
"=",
"None",
"try",
":",
"stat_structure",
"=",
"os",
".",
"stat",
"(",
"name",
")",
"except",
"OSError",
"as",
"exc",
":"... | 21.863636 | 0.001992 |
def parse_body(self, text):
        """Parse the function body text.
        Populates self.raise_list, self.has_yield and
        self.return_value_in_body from the source code in ``text``.
        """
        # Collect the names of all raised exceptions (token after "raise").
        re_raise = re.findall(r'[ \t]raise ([a-zA-Z0-9_]*)', text)
        if len(re_raise) > 0:
            self.raise_list = [x.strip() for x in re_raise]
            # remove duplicates from list while keeping it in the order
            # in python 2.7
            # stackoverflow.com/questions/7961363/removing-duplicates-in-lists
            self.raise_list = list(OrderedDict.fromkeys(self.raise_list))
        re_yield = re.search(r'[ \t]yield ', text)
        if re_yield:
            self.has_yield = True
        # get return value
        pattern_return = r'return |yield '
        line_list = text.split('\n')
        is_found_return = False
        line_return_tmp = ''
        for line in line_list:
            line = line.strip()
            if is_found_return is False:
                if re.match(pattern_return, line):
                    is_found_return = True
            if is_found_return:
                # Accumulate a possibly multi-line return/yield expression.
                line_return_tmp += line
                # check the integrity of line
                try:
                    pos_quote = self._find_quote_position(line_return_tmp)
                    if line_return_tmp[-1] == '\\':
                        # Explicit line continuation: keep accumulating.
                        line_return_tmp = line_return_tmp[:-1]
                        continue
                    # NOTE(review): the helpers presumably raise IndexError
                    # while brackets are still unbalanced -- confirm; that
                    # triggers the `continue` below to read the next line.
                    self._find_bracket_position(line_return_tmp, '(', ')',
                                                pos_quote)
                    self._find_bracket_position(line_return_tmp, '{', '}',
                                                pos_quote)
                    self._find_bracket_position(line_return_tmp, '[', ']',
                                                pos_quote)
                except IndexError:
                    continue
                # Expression complete: strip the keyword and record it.
                return_value = re.sub(pattern_return, '', line_return_tmp)
                self.return_value_in_body.append(return_value)
                is_found_return = False
                line_return_tmp = ''
"def",
"parse_body",
"(",
"self",
",",
"text",
")",
":",
"re_raise",
"=",
"re",
".",
"findall",
"(",
"r'[ \\t]raise ([a-zA-Z0-9_]*)'",
",",
"text",
")",
"if",
"len",
"(",
"re_raise",
")",
">",
"0",
":",
"self",
".",
"raise_list",
"=",
"[",
"x",
".",
... | 39.313725 | 0.000973 |
def runGenomeSGE(bfile, freqFile, nbJob, outPrefix, options):
    """Runs the genome command from plink, on SGE.
    :param bfile: the prefix of the input file.
    :param freqFile: the name of the frequency file (from Plink).
    :param nbJob: the number of jobs to launch.
    :param outPrefix: the prefix of all the output files.
    :param options: the options.
    :type bfile: str
    :type freqFile: str
    :type nbJob: int
    :type outPrefix: str
    :type options: argparse.Namespace
    Runs Plink with the ``genome`` options on the cluster (using SGE).
    """
    # Add the environment variable for DRMAA package
    if "DRMAA_LIBRARY_PATH" not in os.environ:
        msg = "could not load drmaa: set DRMAA_LIBRARY_PATH"
        raise ProgramError(msg)
    # Import the python drmaa library
    try:
        import drmaa
    except ImportError:
        raise ProgramError("drmaa is not install, install drmaa")
    # Initializing a session
    s = drmaa.Session()
    s.initialize()
    # Run for each sub task...
    # One plink job is submitted per (i, j) pair of sample-list chunks,
    # covering the upper triangle (j >= i) so each pair is compared once.
    jobIDs = []
    jobTemplates = []
    for i in xrange(1, nbJob + 1):
        for j in xrange(i, nbJob + 1):
            # The command to run
            plinkCommand = ["plink", "--noweb", "--bfile", bfile,
                            "--read-freq", freqFile, "--genome",
                            "--genome-full", "--genome-lists",
                            "{}_tmp.list{}".format(outPrefix, i),
                            "{}_tmp.list{}".format(outPrefix, j), "--out",
                            "{}_output.sub.{}.{}".format(outPrefix, i, j)]
            # Creating the job template
            jt = s.createJobTemplate()
            jt.remoteCommand = plinkCommand[0]
            jt.workingDirectory = os.getcwd()
            jt.jobEnvironment = os.environ
            jt.args = plinkCommand[1:]
            jt.jobName = "_plink_genome_{}_{}".format(i, j)
            # Cluster specifics
            if options.sge_walltime is not None:
                jt.hardWallclockTimeLimit = options.sge_walltime
            if options.sge_nodes is not None:
                native_spec = "-l nodes={}:ppn={}".format(options.sge_nodes[0],
                                                          options.sge_nodes[1])
                jt.nativeSpecification = native_spec
            jobIDs.append(s.runJob(jt))
            jobTemplates.append(jt)
    # Waiting for the jobs to finish
    hadProblems = []
    for jobID in jobIDs:
        retVal = s.wait(jobID, drmaa.Session.TIMEOUT_WAIT_FOREVER)
        hadProblems.append(retVal.exitStatus == 0)
    # Deleting the jobs
    for jt in jobTemplates:
        s.deleteJobTemplate(jt)
    # Closing the session
    s.exit()
    # Checking for problems
    # NOTE(review): hadProblems stores True on exit status 0 (success), so
    # `if not hadProblem` raises when a job FAILED -- the list name reads
    # inverted; confirm intent before renaming.
    for hadProblem in hadProblems:
        if not hadProblem:
            msg = "Some SGE jobs had errors..."
            raise ProgramError(msg)
"def",
"runGenomeSGE",
"(",
"bfile",
",",
"freqFile",
",",
"nbJob",
",",
"outPrefix",
",",
"options",
")",
":",
"# Add the environment variable for DRMAA package",
"if",
"\"DRMAA_LIBRARY_PATH\"",
"not",
"in",
"os",
".",
"environ",
":",
"msg",
"=",
"\"could not load ... | 33.819277 | 0.000346 |
def get(self, key, default=None):
        """Return the value stored at ``key``, or ``default`` (``None``
        by default) when the key is missing.
        Bytes values coming back from Redis are decoded to ``str`` for
        Python 3 compatibility.  Note that falsy stored values (e.g. an
        empty string) also fall back to ``default``.
        :param key: key of the value to return
        :type key: str
        :param default: value to return when the key does not exist
        :type default: Any
        :return: value of the given key
        :rtype: Any
        """
        try:
            stored = self.__getitem__(key)
        except KeyError:
            stored = None
        # Py3 Redis compatibility: normalize bytes to str.
        if isinstance(stored, bytes):
            stored = stored.decode()
        return stored or default
"def",
"get",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"try",
":",
"value",
"=",
"self",
".",
"__getitem__",
"(",
"key",
")",
"except",
"KeyError",
":",
"value",
"=",
"None",
"# Py3 Redis compatibiility",
"if",
"isinstance",
"(",
... | 28.071429 | 0.00246 |
def extra_args_parser(parser=None, skip_args=None, **kwargs):
    """Create a parser to parse sampler-specific arguments for loading
    samples.
    Parameters
    ----------
    parser : argparse.ArgumentParser, optional
        Instead of creating a parser, add arguments to the given one. If
        none provided, will create one.
    skip_args : list, optional
        Don't parse the given options. Options should be given as the
        option string, minus the '--'. For example,
        ``skip_args=['iteration']`` would cause the ``--iteration``
        argument not to be included.
    \**kwargs :
        All other keyword arguments are passed to the parser that is
        created.
    Returns
    -------
    parser : argparse.ArgumentParser
        An argument parser with the extra arguments added.
    actions : list of argparse.Action
        A list of the actions that were added.
    """
    if parser is None:
        parser = argparse.ArgumentParser(**kwargs)
    elif kwargs:
        raise ValueError("No other keyword arguments should be provded if "
                         "a parser is provided.")
    if skip_args is None:
        skip_args = []
    # All supported options in one table; factoring the previously
    # repeated add_argument/skip blocks into data keeps the skip logic
    # in a single place.
    arg_specs = [
        ('thin-start', dict(
            type=int, default=None,
            help="Sample number to start collecting samples to plot. If "
                 "none provided, will use the input file's `thin_start` "
                 "attribute.")),
        ('thin-interval', dict(
            type=int, default=None,
            help="Interval to use for thinning samples. If none provided, "
                 "will use the input file's `thin_interval` attribute.")),
        ('thin-end', dict(
            type=int, default=None,
            help="Sample number to stop collecting samples to plot. If "
                 "none provided, will use the input file's `thin_end` "
                 "attribute.")),
        ('iteration', dict(
            type=int, default=None,
            help="Only retrieve the given iteration. To load "
                 "the last n-th sampe use -n, e.g., -1 will "
                 "load the last iteration. This overrides "
                 "the thin-start/interval/end options.")),
        ('walkers', dict(
            type=int, nargs="+", default=None,
            help="Only retrieve samples from the listed "
                 "walkers. Default is to retrieve from all "
                 "walkers.")),
    ]
    actions = []
    for opt, opt_kwargs in arg_specs:
        if opt in skip_args:
            continue
        actions.append(parser.add_argument('--' + opt, **opt_kwargs))
    return parser, actions
"def",
"extra_args_parser",
"(",
"parser",
"=",
"None",
",",
"skip_args",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"parser",
"is",
"None",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"*",
"*",
"kwargs",
")",
"elif",
"kwargs... | 44.623188 | 0.000953 |
def tophat(args):
    """
    %prog tophat folder reference
    Run tophat on a folder of reads.
    """
    from jcvi.apps.bowtie import check_index
    from jcvi.formats.fastq import guessoffset
    p = OptionParser(tophat.__doc__)
    p.add_option("--gtf", help="Reference annotation [default: %default]")
    p.add_option("--single", default=False, action="store_true",
                 help="Single end mapping")
    p.add_option("--intron", default=15000, type="int",
                 help="Max intron size [default: %default]")
    p.add_option("--dist", default=-50, type="int",
                 help="Mate inner distance [default: %default]")
    p.add_option("--stdev", default=50, type="int",
                 help="Mate standard deviation [default: %default]")
    p.set_phred()
    p.set_cpus()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    # Paired-end samples come as pairs of FASTQ files; single-end as ones.
    num = 1 if opts.single else 2
    folder, reference = args
    reference = check_index(reference)
    # NOTE(review): the loop variable `p` below shadows the OptionParser
    # above; the parser is no longer needed at this point, but confirm.
    for p, prefix in iter_project(folder, n=num):
        outdir = "{0}_tophat".format(prefix)
        outfile = op.join(outdir, "accepted_hits.bam")
        if op.exists(outfile):
            # Output already present: assume this sample is done.
            logging.debug("File `{0}` found. Skipping.".format(outfile))
            continue
        # Assemble the tophat command line; option order is preserved.
        cmd = "tophat -p {0}".format(opts.cpus)
        if opts.gtf:
            cmd += " -G {0}".format(opts.gtf)
        cmd += " -o {0}".format(outdir)
        if num == 1:  # Single-end
            a, = p
        else:  # Paired-end
            a, b = p
            cmd += " --max-intron-length {0}".format(opts.intron)
            cmd += " --mate-inner-dist {0}".format(opts.dist)
            cmd += " --mate-std-dev {0}".format(opts.stdev)
        # Detect the FASTQ quality offset unless given explicitly.
        phred = opts.phred or str(guessoffset([a]))
        if phred == "64":
            cmd += " --phred64-quals"
        cmd += " {0} {1}".format(reference, " ".join(p))
        sh(cmd)
"def",
"tophat",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"apps",
".",
"bowtie",
"import",
"check_index",
"from",
"jcvi",
".",
"formats",
".",
"fastq",
"import",
"guessoffset",
"p",
"=",
"OptionParser",
"(",
"tophat",
".",
"__doc__",
")",
"p",
".",
... | 33.981818 | 0.00052 |
def main(league, time, standings, team, live, use12hour, players,
         output_format, output_file, upcoming, lookup, listcodes, apikey):
    """
    A CLI for live and past football scores from various football leagues.
    League codes:
    \b
    - WC: World Cup
    - EC: European Championship
    - CL: Champions League
    - PL: English Premier League
    - ELC: English Championship
    - FL1: French Ligue 1
    - BL: German Bundesliga
    - SA: Serie A
    - DED: Eredivisie
    - PPL: Primeira Liga
    - PD: Primera Division
    - BSA: Brazil Serie A
    """
    headers = {'X-Auth-Token': apikey}
    try:
        # stdout output and file output are mutually exclusive modes.
        if output_format == 'stdout' and output_file:
            raise IncorrectParametersException('Printing output to stdout and '
                                               'saving to a file are mutually exclusive')
        writer = get_writer(output_format, output_file)
        rh = RequestHandler(headers, LEAGUE_IDS, TEAM_NAMES, writer)
        # Dispatch on mutually exclusive flags; the first matching mode
        # runs and the function returns immediately.
        if listcodes:
            list_team_codes()
            return
        if live:
            rh.get_live_scores(use12hour)
            return
        if standings:
            if not league:
                raise IncorrectParametersException('Please specify a league. '
                                                   'Example --standings --league=PL')
            if league == 'CL':
                raise IncorrectParametersException('Standings for CL - '
                                                   'Champions League not supported')
            rh.get_standings(league)
            return
        if team:
            if lookup:
                map_team_id(team)
                return
            if players:
                rh.get_team_players(team)
                return
            else:
                rh.get_team_scores(team, time, upcoming, use12hour)
                return
        # Default mode: show league scores.
        rh.get_league_scores(league, time, upcoming, use12hour)
    except IncorrectParametersException as e:
        # Parameter problems are reported to the user, not raised.
        click.secho(str(e), fg="red", bold=True)
"def",
"main",
"(",
"league",
",",
"time",
",",
"standings",
",",
"team",
",",
"live",
",",
"use12hour",
",",
"players",
",",
"output_format",
",",
"output_file",
",",
"upcoming",
",",
"lookup",
",",
"listcodes",
",",
"apikey",
")",
":",
"headers",
"=",
... | 31.903226 | 0.001962 |
def rgstr_stamps_root(rgstr_stamps):
    """
    Register stamps with the root timer (see subdivision()).
    Args:
        rgstr_stamps (list, tuple): Collection of identifiers, passed through
            set(), then each is passed through str().
    Returns:
        list: Implemented registered stamp collection.
    """
    cleaned = sanitize_rgstr_stamps(rgstr_stamps)
    # Attach the sanitized collection to the root timer's bookkeeping.
    f.root.rgstr_stamps = cleaned
    return cleaned
"def",
"rgstr_stamps_root",
"(",
"rgstr_stamps",
")",
":",
"rgstr_stamps",
"=",
"sanitize_rgstr_stamps",
"(",
"rgstr_stamps",
")",
"f",
".",
"root",
".",
"rgstr_stamps",
"=",
"rgstr_stamps",
"return",
"rgstr_stamps"
] | 30.714286 | 0.002257 |
def get_hdrgo2usrgos(self, hdrgos):
        """Return the subset of self.hdrgo2usrgos restricted to ``hdrgos``.
        Entries in ``hdrgos`` that are not actual header GOs (per
        ``get_hdrgos()``) are silently ignored.
        """
        present = self.get_hdrgos() & set(hdrgos)
        return {hdrgo: self.hdrgo2usrgos.get(hdrgo) for hdrgo in present}
"def",
"get_hdrgo2usrgos",
"(",
"self",
",",
"hdrgos",
")",
":",
"get_usrgos",
"=",
"self",
".",
"hdrgo2usrgos",
".",
"get",
"hdrgos_actual",
"=",
"self",
".",
"get_hdrgos",
"(",
")",
".",
"intersection",
"(",
"hdrgos",
")",
"return",
"{",
"h",
":",
"get... | 48 | 0.012295 |
def dumps(obj, *args, **kwargs):
    """Serialize a object to string
    Basic Usage:
    >>> import simplekit.objson
    >>> obj = {'name':'wendy'}
    >>> print simplekit.objson.dumps(obj)
    :param obj: a object which need to dump
    :param args: Optional arguments that :func:`json.dumps` takes.
    :param kwargs: Keys arguments that :py:func:`json.dumps` takes.
    :return: string
    """
    # Use object2dict as the fallback serializer, but let an explicitly
    # passed ``default`` win.  (Previously a caller-supplied ``default``
    # was silently overwritten, contradicting the docstring's promise
    # that kwargs are passed through to json.dumps.)
    kwargs.setdefault('default', object2dict)
    return json.dumps(obj, *args, **kwargs)
"def",
"dumps",
"(",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'default'",
"]",
"=",
"object2dict",
"return",
"json",
".",
"dumps",
"(",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 25.833333 | 0.002075 |
def _bind_posix_socket(socket_name=None):
    """
    Find a socket to listen on and return it.
    Returns (socket_name, sock_obj)
    """
    assert socket_name is None or isinstance(socket_name, six.text_type)
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    if socket_name:
        # Caller chose the path: bind it directly.
        sock.bind(socket_name)
        return socket_name, sock
    # No name given: probe numbered candidate paths until one binds.
    attempt = 0
    while True:
        candidate = '%s/pymux.sock.%s.%i' % (
            tempfile.gettempdir(), getpass.getuser(), attempt)
        try:
            sock.bind(candidate)
        except (OSError, socket.error):
            attempt += 1
            # Give up (re-raising the bind error) after 100 failures.
            if attempt == 100:
                logger.warning('100 times failed to listen on posix socket. '
                               'Please clean up old sockets.')
                raise
        else:
            return candidate, sock
"def",
"_bind_posix_socket",
"(",
"socket_name",
"=",
"None",
")",
":",
"assert",
"socket_name",
"is",
"None",
"or",
"isinstance",
"(",
"socket_name",
",",
"six",
".",
"text_type",
")",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_UNIX",
",",... | 31.241379 | 0.002141 |
def digest(instr, checksum='md5'):
    '''
    Return a checksum digest for a string
    instr
        A string
    checksum : ``md5``
        The hashing algorithm to use to generate checksums. Valid options: md5,
        sha256, sha512.
    CLI Example:
    .. code-block:: bash
        salt '*' hashutil.digest 'get salted'
    '''
    # Dispatch table from algorithm name to the hashutil digest function.
    dispatch = {
        'md5': __salt__['hashutil.md5_digest'],
        'sha256': __salt__['hashutil.sha256_digest'],
        'sha512': __salt__['hashutil.sha512_digest'],
    }
    if checksum not in dispatch:
        raise salt.exceptions.CommandExecutionError(
            "Hash func '{0}' is not supported.".format(checksum))
    return dispatch[checksum](instr)
"def",
"digest",
"(",
"instr",
",",
"checksum",
"=",
"'md5'",
")",
":",
"hashing_funcs",
"=",
"{",
"'md5'",
":",
"__salt__",
"[",
"'hashutil.md5_digest'",
"]",
",",
"'sha256'",
":",
"__salt__",
"[",
"'hashutil.sha256_digest'",
"]",
",",
"'sha512'",
":",
"__s... | 25.642857 | 0.001342 |
def __send_buffer(self):
        """
        Write the full contents of self.__out_buffer to the serial device.
        :return: Number of bytes written
        :raises IOError: on a short write (device accepted fewer bytes
            than the buffer holds)
        """
        payload = self.__out_buffer.raw
        bytes_written = self.serial.write(payload)
        if self.DEBUG_MODE:
            print("Wrote: '{}'".format(binascii.hexlify(payload)))
        expected = len(self.__out_buffer)
        if bytes_written != expected:
            raise IOError("{} bytes written for output buffer of size {}".format(bytes_written,
                                                                                 expected))
        return bytes_written
"def",
"__send_buffer",
"(",
"self",
")",
":",
"bytes_written",
"=",
"self",
".",
"serial",
".",
"write",
"(",
"self",
".",
"__out_buffer",
".",
"raw",
")",
"if",
"self",
".",
"DEBUG_MODE",
":",
"print",
"(",
"\"Wrote: '{}'\"",
".",
"format",
"(",
"binas... | 50 | 0.008183 |
def send_file(self, file):
        """
        Send a file to the client; convenience method to avoid duplicated
        code.
        :param file: path of the file whose bytes are written to the
            response.
        """
        if self.logger:
            self.logger.debug("[ioc.extra.tornado.RouterHandler] send file %s" % file)
        self.send_file_header(file)
        # Use a context manager so the file handle is closed even if
        # self.write() raises (the original leaked the handle then).
        with open(file, 'rb') as fp:
            self.write(fp.read())
"def",
"send_file",
"(",
"self",
",",
"file",
")",
":",
"if",
"self",
".",
"logger",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"[ioc.extra.tornado.RouterHandler] send file %s\"",
"%",
"file",
")",
"self",
".",
"send_file_header",
"(",
"file",
")",
"fp... | 27.153846 | 0.010959 |
def coupl_model5(self):
        """Toggle switch.
        Builds the coupling matrix as -0.2 * Adj, then flips the sign of
        four specific entries.
        """
        self.Coupl = -0.2 * self.Adj
        # Flip the sign of these four entries.
        for i, j in ((2, 0), (3, 0), (4, 1), (5, 1)):
            self.Coupl[i, j] *= -1
"def",
"coupl_model5",
"(",
"self",
")",
":",
"self",
".",
"Coupl",
"=",
"-",
"0.2",
"*",
"self",
".",
"Adj",
"self",
".",
"Coupl",
"[",
"2",
",",
"0",
"]",
"*=",
"-",
"1",
"self",
".",
"Coupl",
"[",
"3",
",",
"0",
"]",
"*=",
"-",
"1",
"sel... | 26.25 | 0.02765 |
def nn_filter(S, rec=None, aggregate=None, axis=-1, **kwargs):
    '''Filtering by nearest-neighbors.
    Each data point (e.g., a spectrogram column) is replaced by
    aggregating its nearest neighbors in feature space, which can
    de-noise a spectrogram or feature matrix.  With a weighted
    recurrence matrix and ``aggregate=np.average`` this recovers
    non-local means [Buades, Coll & Morel, CVPR 2005]; with
    ``aggregate=np.median`` it performs sparse de-noising as in
    REPET-SIM [Rafii & Pardo, ISMIR 2012].
    Parameters
    ----------
    S : np.ndarray
        The input data (spectrogram) to filter
    rec : scipy.sparse.spmatrix or np.ndarray, optional
        Pre-computed nearest-neighbor matrix, as provided by
        `librosa.segment.recurrence_matrix`
    aggregate : function
        Aggregation function (default: `np.mean`).  If
        ``aggregate=np.average``, a weighted average is computed from
        the per-row weights in ``rec``; all other aggregation functions
        treat neighbors equally.
    axis : int
        The axis along which to filter (by default, columns)
    kwargs
        Additional keyword arguments provided to
        `librosa.segment.recurrence_matrix` if ``rec`` is not provided
    Returns
    -------
    S_filtered : np.ndarray
        The filtered data
    Raises
    ------
    ParameterError
        if ``rec`` is provided and its shape is incompatible with ``S``
    See also
    --------
    decompose
    hpss
    librosa.segment.recurrence_matrix
    Notes
    -----
    This function caches at level 30.
    '''
    if aggregate is None:
        aggregate = np.mean
    if rec is None:
        # No recurrence matrix supplied: build a sparse one from S,
        # forwarding any extra keyword arguments.
        rec = segment.recurrence_matrix(S, axis=axis,
                                        **dict(kwargs, sparse=True))
    elif not scipy.sparse.issparse(rec):
        # Normalize a dense user-supplied matrix to sparse CSR.
        rec = scipy.sparse.csr_matrix(rec)
    n = rec.shape[0]
    if n != S.shape[axis] or n != rec.shape[1]:
        raise ParameterError('Invalid self-similarity matrix shape '
                             'rec.shape={} for S.shape={}'.format(rec.shape,
                                                                  S.shape))
    # Move the target axis to the front for the helper, then restore it.
    filtered = __nn_filter_helper(rec.data, rec.indices, rec.indptr,
                                  S.swapaxes(0, axis), aggregate)
    return filtered.swapaxes(0, axis)
"def",
"nn_filter",
"(",
"S",
",",
"rec",
"=",
"None",
",",
"aggregate",
"=",
"None",
",",
"axis",
"=",
"-",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"aggregate",
"is",
"None",
":",
"aggregate",
"=",
"np",
".",
"mean",
"if",
"rec",
"is",
"... | 34.185185 | 0.000632 |
def _get_profile(self, user_account):
        """Fetch the user's profile, creating a default one when none
        can be loaded."""
        user_profile = None
        try:
            # TODO: Load active profile, not just any
            user_profile = objectmodels['profile'].find_one(
                {'owner': str(user_account.uuid)})
            self.log("Profile: ", user_profile,
                     user_account.uuid, lvl=debug)
        except Exception as exc:
            self.log("No profile due to error: ", exc, type(exc),
                     lvl=error)
            user_profile = None
        if not user_profile:
            # Lazily create and persist a minimal default profile.
            default = {
                'uuid': std_uuid(),
                'owner': user_account.uuid,
                'userdata': {
                    'notes': 'Default profile of ' + user_account.name
                }
            }
            user_profile = objectmodels['profile'](default)
            user_profile.save()
        return user_profile
"def",
"_get_profile",
"(",
"self",
",",
"user_account",
")",
":",
"try",
":",
"# TODO: Load active profile, not just any",
"user_profile",
"=",
"objectmodels",
"[",
"'profile'",
"]",
".",
"find_one",
"(",
"{",
"'owner'",
":",
"str",
"(",
"user_account",
".",
"u... | 33.730769 | 0.002217 |
def drawcircle(self, x, y, r=10, colour=None, label=None):
        """
        Draws a circle centered on (x, y) with radius r. All these are in
        the coordinates of your initial image! You give these x and y in
        the usual ds9 pixels, (0,0) is bottom left. I will convert this
        into the right PIL coordinates.
        :param x: center x, in image (ds9) pixel coordinates
        :param y: center y, in image (ds9) pixel coordinates
        :param r: circle radius, in image pixels
        :param colour: outline colour; falls back to the instance default
        :param label: optional text drawn centered below the circle
        """
        self.checkforpilimage()
        colour = self.defaultcolour(colour)
        self.changecolourmode(colour)
        self.makedraw()
        (pilx, pily) = self.pilcoords((x, y))
        pilr = self.pilscale(r)
        # PIL's ellipse takes the circle's bounding box.
        self.draw.ellipse([(pilx - pilr + 1, pily - pilr + 1),
                           (pilx + pilr + 1, pily + pilr + 1)],
                          outline=colour)
        # PEP 8: identity check instead of `label != None`.
        if label is not None:
            # The we write it :
            self.loadlabelfont()
            textwidth = self.draw.textsize(label, font=self.labelfont)[0]
            # Center the label horizontally just below the circle.
            self.draw.text((pilx - float(textwidth) / 2.0 + 2, pily + pilr + 4),
                           label, fill=colour, font=self.labelfont)
"def",
"drawcircle",
"(",
"self",
",",
"x",
",",
"y",
",",
"r",
"=",
"10",
",",
"colour",
"=",
"None",
",",
"label",
"=",
"None",
")",
":",
"self",
".",
"checkforpilimage",
"(",
")",
"colour",
"=",
"self",
".",
"defaultcolour",
"(",
"colour",
")",
... | 43.727273 | 0.025432 |
def counter(self, key, **dims):
        """Return a counter (with dimensions) for the regex-normalized key."""
        normalized = self._get_key(key)
        return super(RegexRegistry, self).counter(normalized, **dims)
"def",
"counter",
"(",
"self",
",",
"key",
",",
"*",
"*",
"dims",
")",
":",
"return",
"super",
"(",
"RegexRegistry",
",",
"self",
")",
".",
"counter",
"(",
"self",
".",
"_get_key",
"(",
"key",
")",
",",
"*",
"*",
"dims",
")"
] | 55.333333 | 0.011905 |
def _execute_helper(self):
    """
    The actual scheduler loop. The main steps in the loop are:
    #. Harvest DAG parsing results through DagFileProcessorAgent
    #. Find and queue executable tasks
    #. Change task instance state in DB
    #. Queue tasks in executor
    #. Heartbeat executor
    #. Execute queued tasks in executor asynchronously
    #. Sync on the states of running tasks
    Following is a graphic representation of these steps.
    .. image:: ../docs/img/scheduler_loop.jpg
    :rtype: None
    """
    self.executor.start()
    self.log.info("Resetting orphaned tasks for active dag runs")
    self.reset_state_for_orphaned_tasks()
    # Start after resetting orphaned tasks to avoid stressing out DB.
    self.processor_agent.start()
    execute_start_time = timezone.utcnow()
    # Last time that self.heartbeat() was called.
    last_self_heartbeat_time = timezone.utcnow()
    # For the execute duration, parse and schedule DAGs
    while True:
        self.log.debug("Starting Loop...")
        loop_start_time = time.time()
        if self.using_sqlite:
            self.processor_agent.heartbeat()
            # For the sqlite case w/ 1 thread, wait until the processor
            # is finished to avoid concurrent access to the DB.
            self.log.debug(
                "Waiting for processors to finish since we're using sqlite")
            self.processor_agent.wait_until_finished()
        # Stage 1: harvest parsing results produced by the processor agent.
        self.log.debug("Harvesting DAG parsing results")
        simple_dags = self.processor_agent.harvest_simple_dags()
        self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags)))
        # Send tasks for execution if available
        simple_dag_bag = SimpleDagBag(simple_dags)
        if len(simple_dags) > 0:
            try:
                simple_dag_bag = SimpleDagBag(simple_dags)
                # Handle cases where a DAG run state is set (perhaps manually) to
                # a non-running state. Handle task instances that belong to
                # DAG runs in those states
                # If a task instance is up for retry but the corresponding DAG run
                # isn't running, mark the task instance as FAILED so we don't try
                # to re-run it.
                self._change_state_for_tis_without_dagrun(simple_dag_bag,
                                                          [State.UP_FOR_RETRY],
                                                          State.FAILED)
                # If a task instance is scheduled or queued or up for reschedule,
                # but the corresponding DAG run isn't running, set the state to
                # NONE so we don't try to re-run it.
                self._change_state_for_tis_without_dagrun(simple_dag_bag,
                                                          [State.QUEUED,
                                                           State.SCHEDULED,
                                                           State.UP_FOR_RESCHEDULE],
                                                          State.NONE)
                # Stage 2: queue runnable task instances onto the executor.
                self._execute_task_instances(simple_dag_bag,
                                             (State.SCHEDULED,))
            except Exception as e:
                # Keep the scheduler alive on queuing failures; log and retry
                # on the next loop iteration.
                self.log.error("Error queuing tasks")
                self.log.exception(e)
                continue
        # Call heartbeats
        self.log.debug("Heartbeating the executor")
        self.executor.heartbeat()
        self._change_state_for_tasks_failed_to_execute()
        # Process events from the executor
        self._process_executor_events(simple_dag_bag)
        # Heartbeat the scheduler periodically
        time_since_last_heartbeat = (timezone.utcnow() -
                                     last_self_heartbeat_time).total_seconds()
        if time_since_last_heartbeat > self.heartrate:
            self.log.debug("Heartbeating the scheduler")
            self.heartbeat()
            last_self_heartbeat_time = timezone.utcnow()
        is_unit_test = conf.getboolean('core', 'unit_test_mode')
        loop_end_time = time.time()
        loop_duration = loop_end_time - loop_start_time
        self.log.debug(
            "Ran scheduling loop in %.2f seconds",
            loop_duration)
        if not is_unit_test:
            self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval)
            time.sleep(self._processor_poll_interval)
        # Exit early for a test mode, run one additional scheduler loop
        # to reduce the possibility that parsed DAG was put into the queue
        # by the DAG manager but not yet received by DAG agent.
        if self.processor_agent.done:
            self._last_loop = True
        if self._last_loop:
            self.log.info("Exiting scheduler loop as all files"
                          " have been processed {} times".format(self.num_runs))
            break
        # Throttle the loop to at most once per second outside of unit tests.
        if loop_duration < 1 and not is_unit_test:
            sleep_length = 1 - loop_duration
            self.log.debug(
                "Sleeping for {0:.2f} seconds to prevent excessive logging"
                .format(sleep_length))
            sleep(sleep_length)
    # Stop any processors
    self.processor_agent.terminate()
    # Verify that all files were processed, and if so, deactivate DAGs that
    # haven't been touched by the scheduler as they likely have been
    # deleted.
    if self.processor_agent.all_files_processed:
        self.log.info(
            "Deactivating DAGs that haven't been touched since %s",
            execute_start_time.isoformat()
        )
        models.DAG.deactivate_stale_dags(execute_start_time)
    self.executor.end()
    settings.Session.remove()
"def",
"_execute_helper",
"(",
"self",
")",
":",
"self",
".",
"executor",
".",
"start",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Resetting orphaned tasks for active dag runs\"",
")",
"self",
".",
"reset_state_for_orphaned_tasks",
"(",
")",
"# Start after... | 43.007092 | 0.002095 |
def get_word_level_vocab(self):
    """Build and return the word-level vocabulary for the wrapped dataset.

    Returns
    -------
    Vocab
        Word level vocabulary
    """
    def simple_tokenize(source_str, token_delim=' ', seq_delim='\n'):
        # Split on either the token or the sequence delimiter and drop
        # any empty strings produced by adjacent delimiters.
        pieces = re.split(token_delim + '|' + seq_delim, source_str)
        return [piece for piece in pieces if piece]
    return VocabProvider._create_squad_vocab(simple_tokenize, self._dataset)
"def",
"get_word_level_vocab",
"(",
"self",
")",
":",
"def",
"simple_tokenize",
"(",
"source_str",
",",
"token_delim",
"=",
"' '",
",",
"seq_delim",
"=",
"'\\n'",
")",
":",
"return",
"list",
"(",
"filter",
"(",
"None",
",",
"re",
".",
"split",
"(",
"toke... | 30.923077 | 0.009662 |
def reset(self, label=None):
    """Discard all measurements so the timer object can be reused.

    Args:
        label (str, optional): replacement label; a falsy value keeps
            the current label unchanged.

    Returns:
        the instance itself, to allow call chaining, e.g.
        ``ti.reset(label='10!').call(math.factorial, 10)``.
    """
    # Wipe every recorded sample and the statistics derived from them.
    self.times = []
    self.n_loops = None
    self.total_time = None
    # Only overwrite the label when a truthy one was supplied.
    if label:
        self.label = label
    return self
"def",
"reset",
"(",
"self",
",",
"label",
"=",
"None",
")",
":",
"if",
"label",
":",
"self",
".",
"label",
"=",
"label",
"self",
".",
"times",
"=",
"[",
"]",
"self",
".",
"n_loops",
"=",
"None",
"self",
".",
"total_time",
"=",
"None",
"return",
... | 33.708333 | 0.002404 |
async def status(dev: Device):
    """Display status information."""
    power_state = await dev.get_power()
    click.echo(click.style("%s" % power_state, bold=power_state))
    volume_info = await dev.get_volume_information()
    click.echo(volume_info.pop())
    play_info = await dev.get_play_info()
    if play_info.is_idle:
        click.echo("Not playing any media")
    else:
        click.echo("Playing %s" % play_info)
    # Report only the currently active output(s).
    for output in await dev.get_inputs():
        if output.active:
            click.echo("Active output: %s" % output)
    click.echo("System information: %s" % (await dev.get_system_info()))
"async",
"def",
"status",
"(",
"dev",
":",
"Device",
")",
":",
"power",
"=",
"await",
"dev",
".",
"get_power",
"(",
")",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"\"%s\"",
"%",
"power",
",",
"bold",
"=",
"power",
")",
")",
"vol",
"=... | 28.714286 | 0.001605 |
def aggregationDivide(dividend, divisor):
    """
    Return the result from dividing two dicts that represent date and time.
    Both dividend and divisor are dicts that contain one or more of the following
    keys: 'years', 'months', 'weeks', 'days', 'hours', 'minutes', 'seconds',
    'milliseconds', 'microseconds'.
    For example:
    ::
        aggregationDivide({'hours': 4}, {'minutes': 15}) == 16
    :param dividend: (dict) The numerator, as a dict representing a date and time
    :param divisor: (dict) the denominator, as a dict representing a date and time
    :returns: (float) number of times divisor goes into dividend
    :raises RuntimeError: when one operand is month-based and the other is
        second-based; the two units are not inter-convertible.
    """
    # Convert each operand into a normalized {'months': ..., 'seconds': ...} form
    dividendMonthSec = aggregationToMonthsSeconds(dividend)
    divisorMonthSec = aggregationToMonthsSeconds(divisor)
    # It is a usage error to mix both months and seconds in the same operation
    if (dividendMonthSec['months'] != 0 and divisorMonthSec['seconds'] != 0) \
            or (dividendMonthSec['seconds'] != 0 and divisorMonthSec['months'] != 0):
        raise RuntimeError("Aggregation dicts with months/years can only be "
                           "inter-operated with other aggregation dicts that contain "
                           "months/years")
    if dividendMonthSec['months'] > 0:
        # Bug fix: divide by the *converted* months total. The original used the
        # raw divisor['months'] entry, which raised KeyError when the divisor was
        # expressed only in 'years' and ignored 'years' when both keys were given.
        return float(dividendMonthSec['months']) / divisorMonthSec['months']
    else:
        return float(dividendMonthSec['seconds']) / divisorMonthSec['seconds']
"def",
"aggregationDivide",
"(",
"dividend",
",",
"divisor",
")",
":",
"# Convert each into microseconds",
"dividendMonthSec",
"=",
"aggregationToMonthsSeconds",
"(",
"dividend",
")",
"divisorMonthSec",
"=",
"aggregationToMonthsSeconds",
"(",
"divisor",
")",
"# It is a usag... | 35.702703 | 0.010317 |
def _distance_sqr_stats_naive_generic(x, y, matrix_centered, product,
                                      exponent=1):
    """Compute generic squared stats."""
    centered_x = matrix_centered(x, exponent=exponent)
    centered_y = matrix_centered(y, exponent=exponent)
    covariance_xy_sqr = product(centered_x, centered_y)
    variance_x_sqr = product(centered_x, centered_x)
    variance_y_sqr = product(centered_y, centered_y)
    denominator = _sqrt(np.absolute(variance_x_sqr * variance_y_sqr))
    # Comparisons using a tolerance can change results if the
    # covariance has a similar order of magnitude, so only an exact
    # zero denominator is special-cased.
    if denominator == 0.0:
        correlation_xy_sqr = 0.0
    else:
        correlation_xy_sqr = covariance_xy_sqr / denominator
    return Stats(covariance_xy=covariance_xy_sqr,
                 correlation_xy=correlation_xy_sqr,
                 variance_x=variance_x_sqr,
                 variance_y=variance_y_sqr)
"def",
"_distance_sqr_stats_naive_generic",
"(",
"x",
",",
"y",
",",
"matrix_centered",
",",
"product",
",",
"exponent",
"=",
"1",
")",
":",
"a",
"=",
"matrix_centered",
"(",
"x",
",",
"exponent",
"=",
"exponent",
")",
"b",
"=",
"matrix_centered",
"(",
"y"... | 36.791667 | 0.001104 |
def get(session, api_key, **kwargs):
    """
    Performs a request to the API.

    session - the ``requests`` module or a session created from it
    api_key - the API access key string
    rate - tariff/plan, may be ``informers`` or ``forecast``
    lat, lon - latitude and longitude
    ```
    import yandex_weather_api
    import requests as req
    yandex_weather_api.get(req, "REPLACE_ME_WITH_A_KEY", lat=55.10, lon=60.10)
    ```
    """
    # validate_args builds the request URL/params from the key and kwargs.
    args, kwargs = validate_args(api_key, **kwargs)
    resp = session.get(*args, **kwargs)
    # Parse the JSON payload into a validated WeatherAnswer structure.
    return WeatherAnswer.validate(resp.json())
"def",
"get",
"(",
"session",
",",
"api_key",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
",",
"kwargs",
"=",
"validate_args",
"(",
"api_key",
",",
"*",
"*",
"kwargs",
")",
"resp",
"=",
"session",
".",
"get",
"(",
"*",
"args",
",",
"*",
"*",
"kwar... | 27.947368 | 0.001821 |
def handleThumbDblClick(self, item):
    """
    Respond to a double-click on a thumbnail item by re-emitting the
    event with the item's record.

    :param item | <QListWidgetItem>
    """
    if isinstance(item, RecordListWidgetItem):
        self.emitRecordDoubleClicked(item.record())
"def",
"handleThumbDblClick",
"(",
"self",
",",
"item",
")",
":",
"if",
"(",
"isinstance",
"(",
"item",
",",
"RecordListWidgetItem",
")",
")",
":",
"self",
".",
"emitRecordDoubleClicked",
"(",
"item",
".",
"record",
"(",
")",
")"
] | 35.875 | 0.02381 |
def parsed_stream(self, content: str, name: str=None):
    """Push a new Stream onto the parser's stream stack.

    Until the 'popStream' function is called, every subsequently called
    parsing function operates on this freshly pushed stream.
    """
    new_stream = Stream(content, name)
    self._streams.append(new_stream)
"def",
"parsed_stream",
"(",
"self",
",",
"content",
":",
"str",
",",
"name",
":",
"str",
"=",
"None",
")",
":",
"self",
".",
"_streams",
".",
"append",
"(",
"Stream",
"(",
"content",
",",
"name",
")",
")"
] | 46.166667 | 0.014184 |
def _remove_legacy_bootstrap():
    """Delete bootstrap projects left at the old path; they'd be really stale by now."""
    legacy_dir = os.path.join(os.environ['HOME'], '.config',
                              'classpath_project_ensime')
    # Best-effort removal: ignore_errors keeps this from raising mid-delete.
    if os.path.isdir(legacy_dir):
        shutil.rmtree(legacy_dir, ignore_errors=True)
"def",
"_remove_legacy_bootstrap",
"(",
")",
":",
"home",
"=",
"os",
".",
"environ",
"[",
"'HOME'",
"]",
"old_base_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"home",
",",
"'.config'",
",",
"'classpath_project_ensime'",
")",
"if",
"os",
".",
"path",
... | 54.5 | 0.012048 |
async def getUpdates(self,
                     offset=None,
                     limit=None,
                     timeout=None,
                     allowed_updates=None):
    """ See: https://core.telegram.org/bots/api#getupdates """
    # NOTE: _strip(locals()) harvests this function's parameters by name, so
    # the parameter names must match the Telegram Bot API field names exactly;
    # renaming any of them would change the request payload.
    p = _strip(locals())
    return await self._api_request('getUpdates', _rectify(p))
"async",
"def",
"getUpdates",
"(",
"self",
",",
"offset",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"allowed_updates",
"=",
"None",
")",
":",
"p",
"=",
"_strip",
"(",
"locals",
"(",
")",
")",
"return",
"await",
"self",... | 42.875 | 0.017143 |
def OpenAndRead(relative_path='debugger-blacklist.yaml'):
    """Locate the yaml configuration file, then parse it.

    Args:
        relative_path: Optional relative path override.
    Returns:
        A Config object if the open and read were successful, None if the file
        does not exist (which is not considered an error).
    Raises:
        Error (some subclass): As thrown by the called Read() function.
    """
    # Resolved relative to the main script's directory — the same lookup
    # convention established by source-context.json.
    config_path = os.path.join(sys.path[0], relative_path)
    try:
        with open(config_path, 'r') as config_file:
            return Read(config_file)
    except IOError:
        # A missing configuration file is a normal condition, not an error.
        return None
"def",
"OpenAndRead",
"(",
"relative_path",
"=",
"'debugger-blacklist.yaml'",
")",
":",
"# Note: This logic follows the convention established by source-context.json",
"try",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"sys",
".",
"path",
"[",
"0",
... | 30 | 0.009693 |
def set_code_exprs(self, codes):
    """Convenience: replaces every code expression in a single call."""
    # Reset to a clean slate, then append each expression in order.
    self.code_objs = {}
    self._codes = []
    for expression in codes:
        self.append_code_expr(expression)
"def",
"set_code_exprs",
"(",
"self",
",",
"codes",
")",
":",
"self",
".",
"code_objs",
"=",
"dict",
"(",
")",
"self",
".",
"_codes",
"=",
"[",
"]",
"for",
"code",
"in",
"codes",
":",
"self",
".",
"append_code_expr",
"(",
"code",
")"
] | 36.166667 | 0.009009 |
def rotation_matrix_from_point(point, ret_inv=False):
    """Compute the rotation matrix that takes [0,0,1] to `point`.

    The matrix first rotates in the polar direction, then rotates about
    the y-axis to match the azimuthal angle in the x-z-plane. This is
    required for the correct 3D orientation of the backpropagated
    projections.

    Parameters
    ----------
    points: list-like, length 3
        The coordinates of the point in 3D.
    ret_inv: bool
        Also return the inverse of the rotation matrix. The inverse
        is required for :func:`scipy.ndimage.interpolation.affine_transform`
        which maps the output coordinates to the input coordinates.

    Returns
    -------
    Rmat [, Rmat_inv]: 3x3 ndarrays
        The rotation matrix that rotates [0,0,1] to `point` and
        optionally its inverse.
    """
    x, y, z = point
    # Azimuthal angle in the x-z-plane.
    azimuth = np.arctan2(x, z)
    # Angle in the polar direction (negative).
    polar = -np.arctan2(y, np.sqrt(x ** 2 + z ** 2))
    cos_t, sin_t = np.cos(polar), np.sin(polar)
    cos_p, sin_p = np.cos(azimuth), np.sin(azimuth)
    # Rotation in the polar direction.
    rot_polar = np.array([[1, 0, 0],
                          [0, cos_t, -sin_t],
                          [0, sin_t, cos_t]])
    # Rotation in the x-z-plane.
    rot_azimuth = np.array([[cos_p, 0, -sin_p],
                            [0, 1, 0],
                            [sin_p, 0, cos_p]])
    rotation = np.dot(rot_azimuth, rot_polar)
    if not ret_inv:
        return rotation
    # The inverse is the product of the transposes in reverse order.
    inverse = np.dot(rot_polar.T, rot_azimuth.T)
    return rotation, inverse
"def",
"rotation_matrix_from_point",
"(",
"point",
",",
"ret_inv",
"=",
"False",
")",
":",
"x",
",",
"y",
",",
"z",
"=",
"point",
"# azimuthal angle",
"phi",
"=",
"np",
".",
"arctan2",
"(",
"x",
",",
"z",
")",
"# angle in polar direction (negative)",
"theta"... | 28.54717 | 0.000639 |
def new_tbl(cls, rows, cols, width, height, tableStyleId=None):
    """Return a new ``<p:tbl>`` element tree."""
    # Working hypothesis is this GUID is the default table style.
    if tableStyleId is None:
        tableStyleId = '{5C22544A-7EE6-4342-B048-85BDC9FD1C3A}'
    tbl = parse_xml(cls._tbl_tmpl() % (tableStyleId))
    # Base cell sizes from integer division; the last row/column absorbs
    # any remainder so the totals match width and height exactly.
    base_col_width = width // cols
    base_row_height = height // rows
    for col_idx in range(cols):
        if col_idx == cols - 1:
            col_width = width - (cols - 1) * base_col_width
        else:
            col_width = base_col_width
        tbl.tblGrid.add_gridCol(width=col_width)
    for row_idx in range(rows):
        if row_idx == rows - 1:
            row_height = height - (rows - 1) * base_row_height
        else:
            row_height = base_row_height
        tr = tbl.add_tr(height=row_height)
        for _ in range(cols):
            tr.add_tc()
    return tbl
"def",
"new_tbl",
"(",
"cls",
",",
"rows",
",",
"cols",
",",
"width",
",",
"height",
",",
"tableStyleId",
"=",
"None",
")",
":",
"# working hypothesis is this is the default table style GUID",
"if",
"tableStyleId",
"is",
"None",
":",
"tableStyleId",
"=",
"'{5C2254... | 35.857143 | 0.00194 |
def feed_amount(self, amount):
    """Set the printer's form feed amount.

    Args:
        amount: the desired form feed setting. Options are '1/8', '1/6',
            'x/180', and 'x/60', with x being your own desired amount.
            X must be a minimum of 24 for 'x/180' and 8 for 'x/60'.
    Returns:
        None
    Raises:
        None
    """
    n = None
    if amount == '1/8':
        code = '0'
    elif amount == '1/6':
        code = '2'
    elif re.search('/180', amount):
        # Extract the numerator for the n/180-inch setting.
        n = re.search(r"(\d+)/180", amount).group(1)
        code = '3'
    elif re.search('/60', amount):
        # Extract the numerator for the n/60-inch setting.
        n = re.search(r"(\d+)/60", amount).group(1)
        code = 'A'
    else:
        # Unrecognized input is forwarded unchanged, as before.
        code = amount
    self.send(chr(27) + code + (n or ''))
"def",
"feed_amount",
"(",
"self",
",",
"amount",
")",
":",
"n",
"=",
"None",
"if",
"amount",
"==",
"'1/8'",
":",
"amount",
"=",
"'0'",
"elif",
"amount",
"==",
"'1/6'",
":",
"amount",
"=",
"'2'",
"elif",
"re",
".",
"search",
"(",
"'/180'",
",",
"am... | 32.035714 | 0.008658 |
def sls_id(id_, mods, test=None, queue=False, **kwargs):
    '''
    Call a single ID from the named module(s) and handle all requisites
    The state ID comes *before* the module ID(s) on the command line.
    id
        ID to call
    mods
        Comma-delimited list of modules to search for given id and its requisites
    .. versionadded:: 2014.7.0
    saltenv : base
        Specify a salt fileserver environment to be used when applying states
    pillarenv
        Specify a Pillar environment to be used when applying states. This
        can also be set in the minion config file using the
        :conf_minion:`pillarenv` option. When neither the
        :conf_minion:`pillarenv` minion config option nor this CLI argument is
        used, all Pillar environments will be merged together.
    pillar
        Custom Pillar values, passed as a dictionary of key-value pairs
    .. code-block:: bash
        salt '*' state.sls_id my_state my_module pillar='{"foo": "bar"}'
    .. note::
        Values passed this way will override existing Pillar values set via
        ``pillar_roots`` or an external Pillar source. Pillar values that
        are not included in the kwarg will not be overwritten.
        .. versionadded:: 2018.3.0
    CLI Example:
    .. code-block:: bash
        salt '*' state.sls_id my_state my_module
        salt '*' state.sls_id my_state my_module,a_common_module
    '''
    conflict = _check_queue(queue, kwargs)
    if conflict is not None:
        return conflict
    orig_test = __opts__.get('test', None)
    opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
    opts['test'] = _get_test_value(test, **kwargs)
    # Since this is running a specific ID within a specific SLS file, fall back
    # to the 'base' saltenv if none is configured and none was passed.
    if opts['saltenv'] is None:
        opts['saltenv'] = 'base'
    pillar_override = kwargs.get('pillar')
    pillar_enc = kwargs.get('pillar_enc')
    if pillar_enc is None \
            and pillar_override is not None \
            and not isinstance(pillar_override, dict):
        raise SaltInvocationError(
            'Pillar data must be formatted as a dictionary, unless pillar_enc '
            'is specified.'
        )
    try:
        st_ = salt.state.HighState(opts,
                                   pillar_override,
                                   pillar_enc=pillar_enc,
                                   proxy=__proxy__,
                                   initial_pillar=_get_initial_pillar(opts))
    except NameError:
        # __proxy__ is only injected on proxy minions; fall back without it.
        st_ = salt.state.HighState(opts,
                                   pillar_override,
                                   pillar_enc=pillar_enc,
                                   initial_pillar=_get_initial_pillar(opts))
    errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
    if errors:
        __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
        return ['Pillar failed to render with the following messages:'] + errors
    split_mods = salt.utils.args.split_input(mods)
    st_.push_active()
    try:
        high_, errors = st_.render_highstate({opts['saltenv']: split_mods})
    finally:
        st_.pop_active()
    errors += st_.state.verify_high(high_)
    # Apply requisites to high data
    high_, req_in_errors = st_.state.requisite_in(high_)
    if req_in_errors:
        # This if statement should not be necessary if there were no errors,
        # but it is required to get the unit tests to pass.
        errors.extend(req_in_errors)
    if errors:
        __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
        return errors
    chunks = st_.state.compile_high_data(high_)
    ret = {}
    for chunk in chunks:
        if chunk.get('__id__', '') == id_:
            ret.update(st_.state.call_chunk(chunk, {}, chunks))
    # Bug fix: the original called ``_set_retcode(ret, highstate=highstate)``,
    # but no name ``highstate`` exists in this scope, so every successful run
    # raised NameError here. Pass the compiled high data instead, matching the
    # upstream Salt fix.
    _set_retcode(ret, highstate=high_)
    # Work around Windows multiprocessing bug, set __opts__['test'] back to
    # value from before this function was run.
    __opts__['test'] = orig_test
    if not ret:
        raise SaltInvocationError(
            'No matches for ID \'{0}\' found in SLS \'{1}\' within saltenv '
            '\'{2}\''.format(id_, mods, opts['saltenv'])
        )
    return ret
"def",
"sls_id",
"(",
"id_",
",",
"mods",
",",
"test",
"=",
"None",
",",
"queue",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"conflict",
"=",
"_check_queue",
"(",
"queue",
",",
"kwargs",
")",
"if",
"conflict",
"is",
"not",
"None",
":",
"retur... | 35.863248 | 0.000928 |
def calc_b_value(magnitudes, completeness, max_mag=None, plotvar=True):
    """
    Calculate the b-value for a range of completeness magnitudes.
    Calculates a power-law fit to given magnitudes for each completeness
    magnitude. Plots the b-values and residuals for the fitted catalogue
    against the completeness values. Computes fits using numpy.polyfit,
    which uses a least-squares technique.
    :type magnitudes: list
    :param magnitudes: Magnitudes to compute the b-value for.
    :type completeness: list
    :param completeness: list of completeness values to compute b-values for.
    :type max_mag: float
    :param max_mag: Maximum magnitude to attempt to fit in magnitudes.
    :type plotvar: bool
    :param plotvar: Turn plotting on or off.
    :rtype: list
    :return: List of tuples of (completeness, b-value, residual,\
        number of magnitudes used)
    .. rubric:: Example
    >>> from obspy.clients.fdsn import Client
    >>> from obspy import UTCDateTime
    >>> from eqcorrscan.utils.mag_calc import calc_b_value
    >>> client = Client('IRIS')
    >>> t1 = UTCDateTime('2012-03-26T00:00:00')
    >>> t2 = t1 + (3 * 86400)
    >>> catalog = client.get_events(starttime=t1, endtime=t2, minmagnitude=3)
    >>> magnitudes = [event.magnitudes[0].mag for event in catalog]
    >>> b_values = calc_b_value(magnitudes, completeness=np.arange(3, 7, 0.2),
    ...                         plotvar=False)
    >>> round(b_values[4][1])
    1.0
    >>> # We can set a maximum magnitude:
    >>> b_values = calc_b_value(magnitudes, completeness=np.arange(3, 7, 0.2),
    ...                         plotvar=False, max_mag=5)
    >>> round(b_values[4][1])
    1.0
    """
    b_values = []
    # Calculate the cdf for all magnitudes: cumulative counts of events at or
    # above each magnitude, walking magnitudes from largest to smallest.
    counts = Counter(magnitudes)
    cdf = np.zeros(len(counts))
    mag_steps = np.zeros(len(counts))
    for i, magnitude in enumerate(sorted(counts.keys(), reverse=True)):
        mag_steps[i] = magnitude
        if i > 0:
            cdf[i] = cdf[i - 1] + counts[magnitude]
        else:
            cdf[i] = counts[magnitude]
    if not max_mag:
        max_mag = max(magnitudes)
    for m_c in completeness:
        if m_c >= max_mag or m_c >= max(magnitudes):
            warnings.warn('Not computing completeness at %s, above max_mag' %
                          str(m_c))
            break
        complete_mags = []
        complete_freq = []
        for i, mag in enumerate(mag_steps):
            # NOTE(review): this chained comparison means
            # (mag >= m_c) and (m_c <= max_mag); it does NOT cap mag at
            # max_mag, so magnitudes above max_mag are still included.
            # Confirm whether `m_c <= mag <= max_mag` was intended.
            if mag >= m_c <= max_mag:
                complete_mags.append(mag)
                complete_freq.append(np.log10(cdf[i]))
        if len(complete_mags) < 4:
            warnings.warn('Not computing completeness above ' + str(m_c) +
                          ', fewer than 4 events')
            break
        # Linear least-squares fit of log10(cumulative frequency) vs magnitude;
        # the (absolute) slope is the b-value.
        fit = np.polyfit(complete_mags, complete_freq, 1, full=True)
        # Calculate the residuals according to the Wiemer & Wys 2000 definition
        predicted_freqs = [fit[0][1] - abs(fit[0][0] * M)
                           for M in complete_mags]
        r = 100 - ((np.sum([abs(complete_freq[i] - predicted_freqs[i])
                            for i in range(len(complete_freq))]) * 100) /
                   np.sum(complete_freq))
        b_values.append((m_c, abs(fit[0][0]), r, str(len(complete_mags))))
    if plotvar:
        fig, ax1 = plt.subplots()
        b_vals = ax1.scatter(list(zip(*b_values))[0], list(zip(*b_values))[1],
                             c='k')
        resid = ax1.scatter(list(zip(*b_values))[0],
                            [100 - b for b in list(zip(*b_values))[2]], c='r')
        ax1.set_ylabel('b-value and residual')
        plt.xlabel('Completeness magnitude')
        ax2 = ax1.twinx()
        ax2.set_ylabel('Number of events used in fit')
        n_ev = ax2.scatter(list(zip(*b_values))[0], list(zip(*b_values))[3],
                           c='g')
        fig.legend((b_vals, resid, n_ev),
                   ('b-values', 'residuals', 'number of events'),
                   'lower right')
        ax1.set_title('Possible completeness values')
        plt.show()
    return b_values
"def",
"calc_b_value",
"(",
"magnitudes",
",",
"completeness",
",",
"max_mag",
"=",
"None",
",",
"plotvar",
"=",
"True",
")",
":",
"b_values",
"=",
"[",
"]",
"# Calculate the cdf for all magnitudes",
"counts",
"=",
"Counter",
"(",
"magnitudes",
")",
"cdf",
"="... | 41.381443 | 0.000243 |
def need_permissions(object_getter, action, hidden=True):
    """Build a decorator that enforces a permission for buckets or aborts.

    :param object_getter: The function used to retrieve the object and pass it
        to the permission factory.
    :param action: The action needed; may be a callable evaluated with the
        wrapped function's arguments, or a plain value.
    :param hidden: Determine which kind of error to return. (Default: ``True``)
    """
    def _decorator(f):
        @wraps(f)
        def _wrapper(*args, **kwargs):
            target = object_getter(*args, **kwargs)
            needed_action = action(*args, **kwargs) if callable(action) else action
            check_permission(
                current_permission_factory(target, needed_action),
                hidden=hidden,
            )
            return f(*args, **kwargs)
        return _wrapper
    return _decorator
"def",
"need_permissions",
"(",
"object_getter",
",",
"action",
",",
"hidden",
"=",
"True",
")",
":",
"def",
"decorator_builder",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"decorate",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",... | 37.421053 | 0.001372 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.