Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
300 | def marker(self, marker_name=None, label=None,
color=None, retina=False):
if marker_name is None:
raise ValidationError(
"marker_name is a required argument"
)
marker_name = self._validate_marker_name(marker_name)
retina = self._validate_retina(retina)
path_values = dict(
marker_name=marker_name
)
path_part = "/marker/{marker_name}"
if label is not None:
label = self._validate_label(label)
path_values["label"] = label
path_part += "-{label}"
if color is not None:
color = self._validate_color(color)
path_values["color"] = color
path_part += "+{color}"
uri = URITemplate(self.base_uri + path_part).expand(**path_values)
path_part = "{}.png".format(retina)
uri += path_part
response = self.session.get(uri)
self.handle_http_error(response)
return response | Returns a single marker image without any
background map.
Parameters
----------
marker_name : str
The marker's shape and size.
label : str, optional
The marker's alphanumeric label.
Options are a through z, 0 through 99, or the
name of a valid Maki icon.
color : str, optional
The marker's color.
Options are three- or six-digit hexadecimal
color codes.
retina : bool, optional
The marker's scale, where True indicates Retina scale
(double scale) and False indicates regular scale.
The default value is False.
Returns
-------
requests.Response
The response object with the specified marker. |
301 | def logical_xor(self, other):
return self.operation(other, lambda x, y: int(bool(x) ^ bool(y))) | logical_xor(t) = self(t) ^ other(t). |
302 | def process_raw_file(self, raw_file_name, field_names):
dist_vals = []
group_dat = []
events = []
with open(raw_file_name) as csvfile:
reader = csv.DictReader(csvfile, fieldnames = field_names)
for num_lines, row in enumerate(reader):
for col_num, fld in enumerate(field_names):
try:
if self.maps[col_num].val == :
group_dat.append(str(row[fld]))
elif self.maps[col_num].val == :
events.append(str(row[fld]))
except Exception as ex:
print(, str(ex))
dist_vals = sorted(list(set(group_dat)))
return num_lines, dist_vals, group_dat, sorted(list(set(events))) | Takes the filename to be read and uses the maps set up
on class instantiation to process the file.
This is a top-level function and uses self.maps, which
should be the column descriptions (in order). |
303 | def gmres_mgs(A, b, x0=None, tol=1e-5, restrt=None, maxiter=None, xtype=None,
M=None, callback=None, residuals=None, reorth=False):
A, M, x, b, postprocess = make_system(A, M, x0, b)
dimen = A.shape[0]
import warnings
warnings.filterwarnings(, module=)
if not hasattr(A, 'dtype'):
Atype = upcast(x.dtype, b.dtype)
else:
Atype = A.dtype
if not hasattr(M, 'dtype'):
Mtype = upcast(x.dtype, b.dtype)
else:
Mtype = M.dtype
xtype = upcast(Atype, x.dtype, b.dtype, Mtype)
if restrt is not None:
restrt = int(restrt)
if maxiter is not None:
maxiter = int(maxiter)
[lartg] = get_lapack_funcs(['lartg'], [x])
if np.iscomplexobj(np.zeros((1,), dtype=xtype)):
[axpy, dotu, dotc, scal] =\
get_blas_funcs(['axpy', 'dotu', 'dotc', 'scal'], [x])
else:
[axpy, dotu, dotc, scal] =\
get_blas_funcs(['axpy', 'dotu', 'dotc', 'scal'], [x])
def norm(z):
return np.sqrt(np.real(dotc(z, z)))
if residuals == []:
keep_r = True
else:
keep_r = False
if restrt:
if maxiter:
max_outer = maxiter
else:
max_outer = 1
if restrt > dimen:
warn()
restrt = dimen
max_inner = restrt
else:
max_outer = 1
if maxiter > dimen:
warn()
maxiter = dimen
elif maxiter is None:
maxiter = min(dimen, 40)
max_inner = maxiter
if dimen == 1:
entry = np.ravel(A*np.array([1.0], dtype=xtype))
return (postprocess(b/entry), 0)
r = b - np.ravel(A*x)
r = np.ravel(M*r)
normr = norm(r)
if keep_r:
residuals.append(normr)
normb = norm(b)
if normb == 0.0:
normb = 1.0
if normr < tol*normb:
return (postprocess(x), 0)
if normr != 0.0:
tol = tol*normr
niter = 0
for outer in range(max_outer):
if inner < max_inner-1:
normr = np.abs(g[inner+1])
if normr < tol:
break
if callback is not None:
callback(x)
if keep_r:
residuals.append(normr)
y = sp.linalg.solve(H[0:inner+1, 0:inner+1].T, g[0:inner+1])
update = np.ravel(np.mat(V[:inner+1, :]).T*y.reshape(-1, 1))
x = x + update
r = b - np.ravel(A*x)
r = np.ravel(M*r)
normr = norm(r)
if callback is not None:
callback(x)
if keep_r:
residuals.append(normr)
indices = (x != 0)
if indices.any():
change = np.max(np.abs(update[indices] / x[indices]))
if change < 1e-12:
return (postprocess(x), -1)
if normr < tol:
return (postprocess(x), 0)
return (postprocess(x), niter) | Generalized Minimum Residual Method (GMRES) based on MGS.
GMRES iteratively refines the initial solution guess to the system
Ax = b
Modified Gram-Schmidt version
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the norm
of the initial preconditioned residual
restrt : None, int
- if int, restrt is max number of inner iterations
and maxiter is the max number of outer iterations
- if None, do not restart GMRES, and max number of inner iterations
is maxiter
maxiter : None, int
- if restrt is None, maxiter is the max number of inner iterations
and GMRES does not restart
- if restrt is int, maxiter is the max number of outer iterations,
and restrt is the max number of inner iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the preconditioned residual norm history,
including the initial residual.
reorth : boolean
If True, then a check is made whether to re-orthogonalize the Krylov
space each GMRES iteration
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of gmres
== =============================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead. This value
is precisely the order of the Krylov space.
<0 numerical breakdown, or illegal input
== =============================================
Notes
-----
- The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
- For robustness, modified Gram-Schmidt is used to orthogonalize the
Krylov space. Givens rotations are used to provide the residual norm
at each iteration.
Examples
--------
>>> from pyamg.krylov import gmres
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x, flag) = gmres(A, b, maxiter=2, tol=1e-8, orthog='mgs')
>>> print(norm(b - A*x))
6.5428213057
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 151-172, pp. 272-275, 2003
http://www-users.cs.umn.edu/~saad/books.html
.. [2] C. T. Kelley, http://www4.ncsu.edu/~ctk/matlab_roots.html |
304 | def logout(config):
state = read(config.configfile)
if state.get("BUGZILLA"):
remove(config.configfile, "BUGZILLA")
success_out("Forgotten")
else:
error_out("No stored Bugzilla credentials") | Remove and forget your Bugzilla credentials |
305 | def flatten(l, types=(list, )):
if not isinstance(l, types):
return l
return list(flattened_iterator(l, types)) | Given a list/tuple that potentially contains nested lists/tuples of arbitrary nesting,
flatten into a single dimension. In other words, turn [(5, 6, [8, 3]), 2, [2, 1, (3, 4)]]
into [5, 6, 8, 3, 2, 2, 1, 3, 4]
This is safe to call on something not a list/tuple - the original input is returned unchanged |
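
The row above depends on a `flattened_iterator` helper that is not included in this dump; the following is a minimal, hypothetical sketch of such a generator (name and behaviour assumed from the docstring), shown only so the docstring's example can be run end to end.

```python
# Hypothetical helper assumed by the flatten() row above (not the library's actual code).
def flattened_iterator(l, types=(list, tuple)):
    # Recursively yield leaf elements of arbitrarily nested lists/tuples.
    for item in l:
        if isinstance(item, types):
            for sub in flattened_iterator(item, types):
                yield sub
        else:
            yield item

print(list(flattened_iterator([(5, 6, [8, 3]), 2, [2, 1, (3, 4)]])))
# -> [5, 6, 8, 3, 2, 2, 1, 3, 4]
```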
306 | def Percentile(pmf, percentage):
p = percentage / 100.0
total = 0
for val, prob in pmf.Items():
total += prob
if total >= p:
return val | Computes a percentile of a given Pmf.
percentage: float 0-100 |
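
A small worked example of the cumulative-probability scan above; `TinyPmf` is an invented stand-in that only provides the `Items()` method the function relies on, and the function body is repeated from the row so the snippet runs on its own.

```python
class TinyPmf:
    """Invented stand-in with the Items() interface used by Percentile()."""
    def __init__(self, d):
        self.d = d
    def Items(self):
        return sorted(self.d.items())

def Percentile(pmf, percentage):
    # Walk the distribution in order and stop once the cumulative mass reaches p.
    p = percentage / 100.0
    total = 0
    for val, prob in pmf.Items():
        total += prob
        if total >= p:
            return val

pmf = TinyPmf({1: 0.2, 2: 0.3, 3: 0.5})
print(Percentile(pmf, 50))   # 2  (0.2 + 0.3 >= 0.5)
print(Percentile(pmf, 90))   # 3
```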
307 | def detect_version(env, cc):
cc = env.subst(cc)
if not cc:
return None
version = None
pipe = SCons.Action._subproc(env, SCons.Util.CLVar(cc) + [],
stdin = ,
stderr = ,
stdout = subprocess.PIPE)
while SCons.Util.to_str(pipe.stdout.readline()):
pass
ret = pipe.wait()
if ret != 0:
return None
return version | Return the version of the GNU compiler, or None if it is not a GNU compiler. |
308 | def calculate_lvgd_voltage_current_stats(nw):
nw.control_circuit_breakers(mode=)
nodes_idx = 0
nodes_dict = {}
branches_idx = 0
branches_dict = {}
for mv_district in nw.mv_grid_districts():
for LA in mv_district.lv_load_areas():
if not LA.is_aggregated:
for lv_district in LA.lv_grid_districts():
crit_nodes = get_critical_voltage_at_nodes(lv_district.lv_grid)
for node in crit_nodes:
nodes_idx += 1
nodes_dict[nodes_idx] = {
: mv_district.mv_grid.id_db,
: lv_district.lv_grid.id_db,
: LA.id_db,
: node[].__repr__(),
: node[][0],
: node[][1],
: ,
: ,
: lv_district.lv_grid.v_level,
}
critical_branches, critical_stations = get_critical_line_loading(lv_district.lv_grid)
for branch in critical_branches:
branches_idx += 1
branches_dict[branches_idx] = {
: mv_district.mv_grid.id_db,
: lv_district.lv_grid.id_db,
: LA.id_db,
: branch[].__repr__(),
: branch[][0],
: branch[][1],
}
for node in critical_stations:
nodes_idx += 1
nodes_dict[nodes_idx] = {
: mv_district.mv_grid.id_db,
: lv_district.lv_grid.id_db,
: LA.id_db,
: node[].__repr__(),
: node[][0],
: node[][1],
: ,
: ,
}
nodes_df = pd.DataFrame.from_dict(nodes_dict, orient=)
branches_df = pd.DataFrame.from_dict(branches_dict, orient=)
if not nodes_df.empty:
nodes_df = nodes_df.set_index()
nodes_df = nodes_df.fillna(0)
nodes_df = nodes_df[sorted(nodes_df.columns.tolist())]
nodes_df.sort_index(inplace=True)
if not branches_df.empty:
branches_df = branches_df.set_index()
branches_df = branches_df.fillna(0)
branches_df = branches_df[sorted(branches_df.columns.tolist())]
branches_df.sort_index(inplace=True)
return nodes_df, branches_df | LV Voltage and Current Statistics for an arbitrary network
Note
----
Aggregated Load Areas are excluded.
Parameters
----------
nw: :any:`list` of NetworkDing0
The MV grid(s) to be studied
Returns
-------
pandas.DataFrame
nodes_df : Dataframe containing voltage (respectively current) statistics
for every critical node (respectively every critical station) in every LV grid
in nw.
pandas.DataFrame
lines_df : Dataframe containing current statistics for every critical
line, in every LV grid in nw. |
309 | def jco_from_pestpp_runstorage(rnj_filename,pst_filename):
header_dtype = np.dtype([("n_runs",np.int64),("run_size",np.int64),("p_name_size",np.int64),
("o_name_size",np.int64)])
pst = pyemu.Pst(pst_filename)
par = pst.parameter_data
log_pars = set(par.loc[par.partrans=="log","parnme"].values)
with open(rnj_filename, 'rb') as f:
header = np.fromfile(f,dtype=header_dtype,count=1)
try:
base_par,base_obs = read_pestpp_runstorage(rnj_filename,irun=0)
except:
raise Exception("couldn't read the base run (irun=0) during jco filling...")
parnme = par_diff[par_diff.parval1 != 0].index[0]
parval = par_diff.parval1.loc[parnme]
jco_col = obs_diff / parval
print("processing par {0}: {1}...".format(irun, parnme))
print("%nzsens: {0}%...".format((jco_col[abs(jco_col.obsval)>1e-8].shape[0] / jco_col.shape[0])*100.))
jco_cols[parnme] = jco_col.obsval
jco_cols = pd.DataFrame.from_records(data=jco_cols, index=list(obs_diff.index.values))
jco_cols = pyemu.Jco.from_dataframe(jco_cols)
return jco_cols | read pars and obs from a pest++ serialized run storage file (e.g., .rnj) and return
pyemu.Jco. This can then be passed to Jco.to_binary or Jco.to_coo, etc., to write jco file
in a subsequent step to avoid memory resource issues associated with very large problems.
Parameters
----------
rnj_filename : str
the name of the run storage file
pst_filename : str
the name of the pst file
Returns
-------
jco_cols : pyemu.Jco
TODO
----
Check rnj file contains transformed par vals (i.e., in model input space)
Currently only returns pyemu.Jco; doesn't write jco file due to memory
issues associated with very large problems
Compare rnj and jco from Freyberg problem in autotests |
310 | def find_file_regex(root_dir,re_expression,return_abs_path = True,search_sub_directories = True):
compiled = re.compile(re_expression)
result = []
for dirpath, dirnames, files in os.walk(root_dir) :
for file in files :
if compiled.match(file):
result.append(os.path.join(dirpath,file) if return_abs_path else file )
if not search_sub_directories :
break
return result | Finds all the files within the specified root directory whose names match the regex expression.
Args :
root_dir : The root directory.
re_expression : The regex expression.
return_abs_path : If set to true, returns the absolute path of the files, else returns the name of the files.
search_sub_directories : If set to true, searches subdirectories recursively. |
311 | def regression():
Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run()
storybook = _storybook({}).only_uninherited()
Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run()
storybook.with_params(**{"python version": "3.5.0"}).ordered_by_name().play()
lint() | Run regression testing - lint and then run all tests. |
312 | def check_dataset(dataset):
if isinstance(dataset, numpy.ndarray) and not len(dataset.shape) == 4:
check_dataset_shape(dataset)
check_dataset_range(dataset)
else:
for i, d in enumerate(dataset):
if not isinstance(d, numpy.ndarray):
raise ValueError(
)
try:
check_dataset_shape(d)
check_dataset_range(d)
except ValueError as err:
raise ValueError(
.format(err, i)
) | Confirm shape (3 colors x rows x cols) and values [0 to 255] are OK. |
313 | def get(self, sid):
return FaxMediaContext(self._version, fax_sid=self._solution['fax_sid'], sid=sid, ) | Constructs a FaxMediaContext
:param sid: The unique string that identifies the resource to fetch
:returns: twilio.rest.fax.v1.fax.fax_media.FaxMediaContext
:rtype: twilio.rest.fax.v1.fax.fax_media.FaxMediaContext |
314 | def create_bv_bitmap(dot_product_vector: str, dot_product_bias: str) -> Dict[str, str]:
n_bits = len(dot_product_vector)
bit_map = {}
for bit_val in range(2 ** n_bits):
bit_map[np.binary_repr(bit_val, width=n_bits)] = str(
(int(utils.bitwise_dot_product(np.binary_repr(bit_val, width=n_bits),
dot_product_vector))
+ int(dot_product_bias, 2)) % 2
)
return bit_map | This function creates a map from bitstring to function value for a boolean formula :math:`f`
with a dot product vector :math:`a` and a dot product bias :math:`b`
.. math::
f:\\{0,1\\}^n\\rightarrow \\{0,1\\}
\\mathbf{x}\\rightarrow \\mathbf{a}\\cdot\\mathbf{x}+b\\pmod{2}
(\\mathbf{a}\\in\\{0,1\\}^n, b\\in\\{0,1\\})
:param dot_product_vector: a string of 0's and 1's that represents the dot-product
partner in :math:`f`
:param dot_product_bias: 0 or 1 as a string representing the bias term in :math:`f`
:return: A dictionary containing all possible bitstring of length equal to :math:`a` and the
function value :math:`f` |
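
A self-contained sketch of the same truth-table construction. The dot product is written out directly instead of calling `utils.bitwise_dot_product` (not shown in this dump), and the vector/bias values are invented for illustration.

```python
import numpy as np

def bv_bitmap(a: str, b: str) -> dict:
    # Tabulate f(x) = a . x + b (mod 2) over all n-bit inputs.
    n = len(a)
    bitmap = {}
    for value in range(2 ** n):
        x = np.binary_repr(value, width=n)
        dot = sum(int(ai) * int(xi) for ai, xi in zip(a, x)) % 2
        bitmap[x] = str((dot + int(b, 2)) % 2)
    return bitmap

print(bv_bitmap("101", "0"))
# {'000': '0', '001': '1', '010': '0', '011': '1',
#  '100': '1', '101': '0', '110': '1', '111': '0'}
```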
315 | def _node_add_with_peer_list(self, child_self, child_other):
parent_self = child_self.getparent()
s_node = self.device.get_schema_node(child_self)
if child_other.get(operation_tag) != 'delete' and \
child_other.get(operation_tag) != 'remove' and \
s_node.get('ordered-by') == 'user' and \
child_other.get(insert_tag) is not None:
if child_other.get(insert_tag) == 'first':
scope = parent_self.getchildren()
siblings = self._get_sequence(scope, child_other.tag,
parent_self)
if siblings[0] != child_self:
siblings[0].addprevious(child_self)
elif child_other.get(insert_tag) == 'last':
scope = parent_self.getchildren()
siblings = self._get_sequence(scope, child_other.tag,
parent_self)
if siblings[-1] != child_self:
siblings[-1].addnext(child_self)
elif child_other.get(insert_tag) == 'before':
if child_other.get(key_tag) is None:
_inserterror(, self.device.get_xpath(child_other),
)
sibling = parent_self.find(child_other.tag +
child_other.get(key_tag),
namespaces=child_other.nsmap)
if sibling is None:
path = self.device.get_xpath(child_other)
key = child_other.get(key_tag)
_inserterror(, path, , key)
if sibling != child_self:
sibling.addprevious(child_self)
elif child_other.get(insert_tag) == 'after':
if child_other.get(key_tag) is None:
_inserterror(, self.device.get_xpath(child_other),
)
sibling = parent_self.find(child_other.tag +
child_other.get(key_tag),
namespaces=child_other.nsmap)
if sibling is None:
path = self.device.get_xpath(child_other)
key = child_other.get(key_tag)
_inserterror(, path, , key)
if sibling != child_self:
sibling.addnext(child_self)
if child_other.get(operation_tag) is None or \
child_other.get(operation_tag) == 'merge':
self.node_add(child_self, child_other)
elif child_other.get(operation_tag) == 'replace':
e = deepcopy(child_other)
parent_self.replace(child_self, self._del_attrib(e))
elif child_other.get(operation_tag) == 'create':
raise ConfigDeltaError( \
\
.format(self.device.get_xpath(child_other)))
elif child_other.get(operation_tag) == 'delete' or \
child_other.get(operation_tag) == 'remove':
parent_self.remove(child_self)
else:
raise ConfigDeltaError("unknown operation: node {} contains " \
"operation " \
.format(self.device.get_xpath(child_other),
child_other.get(operation_tag))) | _node_add_with_peer_list
Low-level api: Apply delta child_other to child_self when child_self is
the peer of child_other. Element child_self and child_other are list
nodes. Element child_self will be modified during the process. RFC6020
section 7.8.6 is a reference of this method.
Parameters
----------
child_self : `Element`
A child of a config node in a config tree.
child_other : `Element`
A child of a config node in another config tree. child_self is
the peer of child_other.
Returns
-------
None
There is no return of this method. |
316 | def evaluate_scpd_xml(url):
try:
res = requests.get(url, timeout=2)
except requests.exceptions.RequestException as err:
_LOGGER.error(
"When trying to request %s the following error occurred: %s",
url, err)
raise ConnectionError
if res.status_code == 200:
try:
root = ET.fromstring(res.text)
_LOGGER.debug("Device %s has manufacturer %s", url,
root.find(SCPD_DEVICE).find(SCPD_MANUFACTURER).text)
if (root.find(SCPD_DEVICE).find(
SCPD_MANUFACTURER).text in SUPPORTED_MANUFACTURERS and
root.find(SCPD_DEVICE).find(
SCPD_DEVICETYPE).text == DEVICETYPE_DENON):
device = {}
device["host"] = urlparse(
root.find(SCPD_DEVICE).find(
SCPD_PRESENTATIONURL).text).hostname
device["presentationURL"] = (
root.find(SCPD_DEVICE).find(SCPD_PRESENTATIONURL).text)
device["modelName"] = (
root.find(SCPD_DEVICE).find(SCPD_MODELNAME).text)
device["friendlyName"] = (
root.find(SCPD_DEVICE).find(SCPD_FRIENDLYNAME).text)
return device
else:
return False
except (AttributeError, ValueError, ET.ParseError) as err:
_LOGGER.error(
"Error occurred during evaluation of SCPD XML: %s", err)
return False
else:
_LOGGER.error("Host returned HTTP status %s when connecting to %s",
res.status_code, url)
raise ConnectionError | Get and evaluate SCPD XML to identified URLs.
Returns dictionary with keys "host", "modelName", "friendlyName" and
"presentationURL" if a Denon AVR device was found and "False" if not. |
317 | def delete_rich_menu(self, rich_menu_id, timeout=None):
self._delete(
.format(rich_menu_id=rich_menu_id),
timeout=timeout
) | Call delete rich menu API.
https://developers.line.me/en/docs/messaging-api/reference/#delete-rich-menu
:param str rich_menu_id: ID of an uploaded rich menu
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is self.http_client.timeout
:type timeout: float | tuple(float, float) |
318 | def deactivate(self):
LOGGER.debug("> Deactivating '{0}' Component.".format(self.__class__.__name__))
self.__engine = None
self.__settings = None
self.__settings_section = None
self.__script_editor = None
self.activated = False
return True | Deactivates the Component.
:return: Method success.
:rtype: bool |
319 | def adduser(username, uid=None, system=False, no_login=True, no_password=False, group=False, gecos=None, **kwargs):
return _format_cmd('adduser', username, __system=bool(system), __uid=uid, __group=bool(group), __gid=uid,
no_login=(no_login, _NO_CREATE_HOME, _NO_LOGIN),
__disabled_password=no_login or bool(no_password),
__gecos=gecos, **kwargs) | Formats an ``adduser`` command.
:param username: User name.
:type username: unicode | str
:param uid: Optional user id to use.
:type uid: long | int
:param system: Create a system user account.
:type system: bool
:param no_login: Disable the login for this user. Not compatible with CentOS. Implies setting '--no-create-home',
and ``no_password``.
:type no_login: bool
:param no_password: Disable the password for this user. Not compatible with CentOS.
:type no_password: bool
:param group: Create a group along with the user. Not compatible with CentOS.
:type group: bool
:param gecos: Set GECOS information in order to suppress an interactive prompt. On CentOS, use ``__comment``
instead.
:type gecos: unicode | str
:param kwargs: Additional keyword arguments which are converted to the command line.
:return: A formatted ``adduser`` command with arguments.
:rtype: unicode | str |
320 | def transaction(self):
self._depth -= 1
raise
except:
self._depth -= 1
if self._depth == 0:
self.mdr.rollback()
raise
if self._depth == 0:
self.mdr.commit() | Sets up a context where all the statements within it are run within
a single database transaction. For internal use only. |
321 | def slides(self):
sldIdLst = self._element.get_or_add_sldIdLst()
self.part.rename_slide_parts([sldId.rId for sldId in sldIdLst])
return Slides(sldIdLst, self) | |Slides| object containing the slides in this presentation. |
322 | def job_conf(self, job_id):
path = .format(jobid=job_id)
return self.request(path) | A job configuration resource contains information about the job
configuration for this job.
:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` |
323 | def get_agg_data(cls, obj, category=None):
paths = []
if isinstance(obj, Graph):
obj = obj.edgepaths
kdims = list(obj.kdims)
vdims = list(obj.vdims)
dims = obj.dimensions()[:2]
if isinstance(obj, Path):
glyph =
for p in obj.split(datatype=):
paths.append(p)
elif isinstance(obj, CompositeOverlay):
element = None
for key, el in obj.data.items():
x, y, element, glyph = cls.get_agg_data(el)
dims = (x, y)
df = PandasInterface.as_dframe(element)
if isinstance(obj, NdOverlay):
df = df.assign(**dict(zip(obj.dimensions(, True), key)))
paths.append(df)
if element is None:
dims = None
else:
kdims += element.kdims
vdims = element.vdims
elif isinstance(obj, Element):
glyph = if isinstance(obj, Curve) else
paths.append(PandasInterface.as_dframe(obj))
if dims is None or len(dims) != 2:
return None, None, None, None
else:
x, y = dims
if len(paths) > 1:
if glyph == :
path = paths[0][:1]
if isinstance(path, dd.DataFrame):
path = path.compute()
empty = path.copy()
empty.iloc[0, :] = (np.NaN,) * empty.shape[1]
paths = [elem for p in paths for elem in (p, empty)][:-1]
if all(isinstance(path, dd.DataFrame) for path in paths):
df = dd.concat(paths)
else:
paths = [p.compute() if isinstance(p, dd.DataFrame) else p for p in paths]
df = pd.concat(paths)
else:
df = paths[0] if paths else pd.DataFrame([], columns=[x.name, y.name])
if category and df[category].dtype.name != 'category':
df[category] = df[category].astype('category')
is_dask = isinstance(df, dd.DataFrame)
if any((not is_dask and len(df[d.name]) and isinstance(df[d.name].values[0], cftime_types)) or
df[d.name].dtype.kind == 'M' for d in (x, y)):
df = df.copy()
for d in (x, y):
vals = df[d.name]
if not is_dask and len(vals) and isinstance(vals.values[0], cftime_types):
vals = cftime_to_timestamp(vals, )
elif df[d.name].dtype.kind == 'M':
vals = vals.astype()
else:
continue
df[d.name] = vals.astype()
return x, y, Dataset(df, kdims=kdims, vdims=vdims), glyph | Reduces any Overlay or NdOverlay of Elements into a single
xarray Dataset that can be aggregated. |
324 | def list_spiders(self, project):
url = self._build_url(constants.LIST_SPIDERS_ENDPOINT)
params = {'project': project}
json = self.client.get(url, params=params, timeout=self.timeout)
return json['spiders'] | Lists all known spiders for a specific project. First class, maps
to Scrapyd's list spiders endpoint. |
325 | def retry(func, exception_type, quit_event):
while True:
if quit_event.is_set():
raise StopIteration
try:
return func()
except exception_type:
pass | Run the function, retrying when the specified exception_type occurs.
Poll quit_event on each iteration, to be responsive to an external
exit request. |
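
A hedged usage sketch of the retry loop above: `quit_event` is polled between attempts so another thread can abort the loop. The `flaky()` function is invented for illustration, and the `retry` body is repeated from the row so the snippet is runnable.

```python
import random
import threading

def retry(func, exception_type, quit_event):
    # Same pattern as the row above: retry on exception_type, stop on quit_event.
    while True:
        if quit_event.is_set():
            raise StopIteration
        try:
            return func()
        except exception_type:
            pass

def flaky():
    # Invented example: fails roughly two out of three calls.
    if random.random() < 0.66:
        raise ConnectionError("transient failure")
    return "ok"

quit_event = threading.Event()
print(retry(flaky, ConnectionError, quit_event))   # retries until it returns "ok"
```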
326 | def eigh(a, eigvec=True, rcond=None):
a = numpy.asarray(a)
if a.dtype != object:
val, vec = numpy.linalg.eigh(a)
return (val, vec) if eigvec else val
amean = gvar.mean(a)
if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
raise ValueError( + str(a.shape))
if rcond is None:
rcond = numpy.finfo(float).eps * max(a.shape)
da = a - amean
val0, vec0 = numpy.linalg.eigh(amean)
val = val0 + [
vec0[:, i].conjugate().dot(da.dot(vec0[:, i])) for i in range(vec0.shape[1])
]
if eigvec == True:
if vec0.dtype == complex:
raise ValueError()
vec = numpy.array(vec0, dtype=object)
for i in range(len(val)):
for j in range(len(val)):
dval = val0[i] - val0[j]
if abs(dval) < rcond * abs(val0[j] + val0[i]) or dval == 0.0:
continue
vec[:, i] += vec0[:, j] * (
vec0[:, j].dot(da.dot(vec0[:, i])) / dval
)
return val, vec
else:
return val | Eigenvalues and eigenvectors of symmetric matrix ``a``.
Args:
a: Two-dimensional, square Hermitian matrix/array of numbers
and/or :class:`gvar.GVar`\s. Array elements must be
real-valued if `gvar.GVar`\s are involved (i.e., symmetric
matrix).
eigvec (bool): If ``True`` (default), method returns a tuple
of arrays ``(val, vec)`` where ``val[i]`` are the
eigenvalues of ``a`` (in ascending order), and ``vec[:, i]``
are the corresponding eigenvectors of ``a``. Only ``val`` is
returned if ``eigvec=False``.
rcond (float): Eigenvalues whose difference is smaller than
``rcond`` times their sum are assumed to be degenerate
(and ignored) when computing variances for the eigvectors.
Default (``rcond=None``) is ``max(M,N)`` times machine precision.
Returns:
Tuple ``(val,vec)`` of eigenvalues and eigenvectors of
matrix ``a`` if parameter ``eigvec==True`` (default).
The eigenvalues ``val[i]`` are in ascending order and
``vec[:, i]`` are the corresponding eigenvectors. Only
the eigenvalues ``val`` are returned if ``eigvec=False``.
Raises:
ValueError: If matrix is not square and two-dimensional. |
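
The error propagation above is first-order perturbation theory: the shift of eigenvalue `i` under a small symmetric perturbation `da` is approximately `vec0[:, i].T @ da @ vec0[:, i]`. A quick NumPy-only check of that identity (no `gvar` involved; the matrices are random examples):

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(4, 4))
A = (A + A.T) / 2                      # symmetric base matrix
dA = rng.normal(size=(4, 4)) * 1e-6
dA = (dA + dA.T) / 2                   # small symmetric perturbation

val0, vec0 = np.linalg.eigh(A)
val_exact = np.linalg.eigh(A + dA)[0]
val_first_order = val0 + np.array(
    [vec0[:, i] @ dA @ vec0[:, i] for i in range(4)]
)
print(np.max(np.abs(val_exact - val_first_order)))   # tiny, of order |dA|**2
```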
327 | def store_records_for_package(self, entry_point, records):
pkg_module_records = self._dist_to_package_module_map(entry_point)
pkg_module_records.extend(records) | Store the records in a way that permit lookup by package |
328 | def get_params(self, deep=False):
attrs = self.__dict__
for attr in self._include:
attrs[attr] = getattr(self, attr)
if deep is True:
return attrs
return dict([(k,v) for k,v in list(attrs.items()) \
if (k[0] != ) \
and (k[-1] != ) \
and (k not in self._exclude)]) | returns a dict of all of the object's user-facing parameters
Parameters
----------
deep : boolean, default: False
when True, also gets non-user-facing parameters
Returns
-------
dict |
329 | def fetch_existing_token_of_user(self, client_id, grant_type, user_id):
token_data = self.fetchone(self.fetch_existing_token_of_user_query,
client_id, grant_type, user_id)
if token_data is None:
raise AccessTokenNotFound
scopes = self._fetch_scopes(access_token_id=token_data[0])
data = self._fetch_data(access_token_id=token_data[0])
return self._row_to_token(data=data, scopes=scopes, row=token_data) | Retrieve an access token issued to a client and user for a specific
grant.
:param client_id: The identifier of a client as a `str`.
:param grant_type: The type of grant.
:param user_id: The identifier of the user the access token has been
issued to.
:return: An instance of :class:`oauth2.datatype.AccessToken`.
:raises: :class:`oauth2.error.AccessTokenNotFound` if not access token
could be retrieved. |
330 | def parse_uniprot_txt_file(infile):
uniprot_metadata_dict = {}
metadata = old_parse_uniprot_txt_file(infile)
metadata_keys = list(metadata.keys())
if metadata_keys:
metadata_key = metadata_keys[0]
else:
return uniprot_metadata_dict
uniprot_metadata_dict[] = len(str(metadata[metadata_key][]))
uniprot_metadata_dict[] = metadata[metadata_key][]
uniprot_metadata_dict[] = metadata[metadata_key][]
uniprot_metadata_dict[] = metadata[metadata_key][]
if in metadata[metadata_key]:
uniprot_metadata_dict[] = metadata[metadata_key][]
if in metadata[metadata_key]:
uniprot_metadata_dict[] = metadata[metadata_key][]
if in metadata[metadata_key]:
uniprot_metadata_dict[] = metadata[metadata_key][]
if in metadata[metadata_key]:
uniprot_metadata_dict[] = metadata[metadata_key][]
if in metadata[metadata_key]:
uniprot_metadata_dict[] = metadata[metadata_key][]
if in metadata[metadata_key]:
uniprot_metadata_dict[] = metadata[metadata_key][]
if in metadata[metadata_key]:
uniprot_metadata_dict[] = list(set(metadata[metadata_key][]))
return uniprot_metadata_dict | Parse a raw UniProt metadata file and return a dictionary.
Args:
infile: Path to metadata file
Returns:
dict: Metadata dictionary |
331 | def splitEkmDate(dateint):
date_str = str(dateint)
dt = namedtuple('EkmDate', ['yy', 'mm', 'dd', 'weekday', 'hh', 'minutes', 'ss'])
if len(date_str) != 14:
dt.yy = dt.mm = dt.dd = dt.weekday = dt.hh = dt.minutes = dt.ss = 0
return dt
dt.yy = int(date_str[0:2])
dt.mm = int(date_str[2:4])
dt.dd = int(date_str[4:6])
dt.weekday = int(date_str[6:8])
dt.hh = int(date_str[8:10])
dt.minutes = int(date_str[10:12])
dt.ss = int(date_str[12:14])
return dt | Break out a date from Omnimeter read.
Note a corrupt date will raise an exception when you
convert it to int to hand to this method.
Args:
dateint (int): Omnimeter datetime as int.
Returns:
tuple: Named tuple which breaks out as follows:
========== =====================
yy Last 2 digits of year
mm Month 1-12
dd Day 1-31
weekday Zero based weekday
hh Hour 0-23
minutes Minutes 0-59
ss Seconds 0-59
========== ===================== |
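
The fixed 14-digit layout in the table above can be verified with plain slicing; the meter datetime value below is invented for illustration and the named tuple is a local stand-in, not the library's type.

```python
from collections import namedtuple

# Invented Omnimeter-style value: yy=19, mm=07, dd=04, weekday=04, hh=13, minutes=45, ss=09
date_str = str(19070404134509)

EkmDate = namedtuple("EkmDate", "yy mm dd weekday hh minutes ss")
fields = [int(date_str[i:i + 2]) for i in range(0, 14, 2)]   # seven 2-digit fields
print(EkmDate(*fields))
# EkmDate(yy=19, mm=7, dd=4, weekday=4, hh=13, minutes=45, ss=9)
```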
332 | def rsa_base64_decrypt(self, cipher, b64=True):
with open(self.key_file) as fp:
key_ = RSA.importKey(fp.read())
_cip = PKCS1_v1_5.new(key_)
cipher = base64.b64decode(cipher) if b64 else cipher
plain = _cip.decrypt(cipher, Random.new().read(15 + SHA.digest_size))
return helper.to_str(plain) | Base64-decode the ciphertext first, then RSA-decrypt the data. |
333 | def clearAdvancedActions( self ):
self._advancedMap.clear()
margins = list(self.getContentsMargins())
margins[2] = 0
self.setContentsMargins(*margins) | Clears out the advanced action map. |
334 | def select_if(df, fun):
def _filter_f(col):
try:
return fun(df[col])
except:
return False
cols = list(filter(_filter_f, df.columns))
return df[cols] | Selects columns where fun(ction) is true
Args:
fun: a function that will be applied to columns |
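
A usage sketch with pandas; the function body is repeated from the row so the snippet runs on its own, and the predicate used here (`pd.api.types.is_numeric_dtype`) simply keeps numeric columns.

```python
import pandas as pd

def select_if(df, fun):
    # Keep only the columns for which fun(df[col]) is truthy.
    def _filter_f(col):
        try:
            return fun(df[col])
        except Exception:
            return False
    return df[list(filter(_filter_f, df.columns))]

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"], "c": [0.5, 1.5]})
numeric = select_if(df, pd.api.types.is_numeric_dtype)
print(list(numeric.columns))   # ['a', 'c']
```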
335 | def forecast(stl, fc_func, steps=10, seasonal=False, **fc_func_kwargs):
forecast_array = np.array([])
trend_array = stl.trend
for step in range(steps):
pred = fc_func(np.append(trend_array, forecast_array), **fc_func_kwargs)
forecast_array = np.append(forecast_array, pred)
col_name = fc_func.__name__
observed_timedelta = stl.observed.index[-1] - stl.observed.index[-2]
forecast_idx_start = stl.observed.index[-1] + observed_timedelta
forecast_idx = pd.date_range(start=forecast_idx_start,
periods=steps,
freq=pd.tseries.frequencies.to_offset(observed_timedelta))
if seasonal:
seasonal_ix = 0
max_correlation = -np.inf
detrended_array = np.asanyarray(stl.observed - stl.trend).squeeze()
for i, x in enumerate(stl.period_averages):
if i == 0:
forecast_frame = pd.DataFrame(data=forecast_array, index=forecast_idx)
forecast_frame.columns = [col_name]
return forecast_frame | Forecast the given decomposition ``stl`` forward by ``steps`` steps using the forecasting
function ``fc_func``, optionally including the calculated seasonality.
This is an additive model, Y[t] = T[t] + S[t] + e[t]
Args:
stl (a modified statsmodels.tsa.seasonal.DecomposeResult): STL decomposition of observed time
series created using the ``stldecompose.decompose()`` method.
fc_func (function): Function which takes an array of observations and returns a single
valued forecast for the next point.
steps (int, optional): Number of forward steps to include in the forecast
seasonal (bool, optional): Include seasonal component in forecast
fc_func_kwargs: keyword arguments
All remaining arguments are passed to the forecasting function ``fc_func``
Returns:
forecast_frame (pd.Dataframe): A ``pandas.Dataframe`` containing forecast values and a
DatetimeIndex matching the observed index. |
336 | def all(self):
url = "{url_base}/resource/{pid}/files/".format(url_base=self.hs.url_base,
pid=self.pid)
r = self.hs._request('GET', url)
return r | :return:
array of file objects (200 status code) |
337 | def make_jira_blueprint(
base_url,
consumer_key=None,
rsa_key=None,
redirect_url=None,
redirect_to=None,
login_url=None,
authorized_url=None,
session_class=None,
storage=None,
):
if rsa_key and os.path.isfile(rsa_key):
with open(rsa_key) as f:
rsa_key = f.read()
base_url = URLObject(base_url)
jira_bp = OAuth1ConsumerBlueprint(
"jira",
__name__,
client_key=consumer_key,
rsa_key=rsa_key,
signature_method=SIGNATURE_RSA,
base_url=base_url,
request_token_url=base_url.relative("plugins/servlet/oauth/request-token"),
access_token_url=base_url.relative("plugins/servlet/oauth/access-token"),
authorization_url=base_url.relative("plugins/servlet/oauth/authorize"),
redirect_url=redirect_url,
redirect_to=redirect_to,
login_url=login_url,
authorized_url=authorized_url,
session_class=session_class or JsonOAuth1Session,
storage=storage,
)
jira_bp.from_config["client_key"] = "JIRA_OAUTH_CONSUMER_KEY"
jira_bp.from_config["rsa_key"] = "JIRA_OAUTH_RSA_KEY"
@jira_bp.before_app_request
def set_applocal_session():
ctx = stack.top
ctx.jira_oauth = jira_bp.session
return jira_bp | Make a blueprint for authenticating with JIRA using OAuth 1. This requires
a consumer key and RSA key for the JIRA application link. You should either
pass them to this constructor, or make sure that your Flask application
config defines them, using the variables :envvar:`JIRA_OAUTH_CONSUMER_KEY`
and :envvar:`JIRA_OAUTH_RSA_KEY`.
Args:
base_url (str): The base URL of your JIRA installation. For example,
for Atlassian's hosted Cloud JIRA, the base_url would be
``https://jira.atlassian.com``
consumer_key (str): The consumer key for your Application Link on JIRA
rsa_key (str or path): The RSA private key for your Application Link
on JIRA. This can be the contents of the key as a string, or a path
to the key file on disk.
redirect_url (str): the URL to redirect to after the authentication
dance is complete
redirect_to (str): if ``redirect_url`` is not defined, the name of the
view to redirect to after the authentication dance is complete.
The actual URL will be determined by :func:`flask.url_for`
login_url (str, optional): the URL path for the ``login`` view.
Defaults to ``/jira``
authorized_url (str, optional): the URL path for the ``authorized`` view.
Defaults to ``/jira/authorized``.
session_class (class, optional): The class to use for creating a
Requests session. Defaults to
:class:`~flask_dance.contrib.jira.JsonOAuth1Session`.
storage: A token storage class, or an instance of a token storage
class, to use for this blueprint. Defaults to
:class:`~flask_dance.consumer.storage.session.SessionStorage`.
:rtype: :class:`~flask_dance.consumer.OAuth1ConsumerBlueprint`
:returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app. |
338 | def deletePartials(self):
if self.dryrun:
self._client.listPartials()
else:
self._client.deletePartials() | Delete any old partial uploads/downloads in path. |
339 | def items(iterable):
if hasattr(iterable, 'iteritems'):
return (p for p in iterable.iteritems())
elif hasattr(iterable, 'items'):
return (p for p in iterable.items())
else:
return (p for p in enumerate(iterable)) | Iterates over the items of a sequence. If the sequence supports the
dictionary protocol (iteritems/items) then we use that. Otherwise
we use the enumerate built-in function. |
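
A short demonstration of the dispatch described above (the `iteritems` branch only fires for Python 2 style mappings); the function body is repeated from the row so the snippet runs on its own.

```python
def items(iterable):
    if hasattr(iterable, "iteritems"):      # Python 2 style mappings
        return (p for p in iterable.iteritems())
    elif hasattr(iterable, "items"):        # dict-like objects
        return (p for p in iterable.items())
    else:                                   # plain sequences -> (index, value)
        return (p for p in enumerate(iterable))

print(list(items({"a": 1})))       # [('a', 1)]
print(list(items(["x", "y"])))     # [(0, 'x'), (1, 'y')]
```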
340 | def get_part_name(self, undefined=""):
return _undefined_pattern(
"".join(self.get_subfields("245", "n")),
lambda x: x.strip() == "",
undefined
) | Args:
undefined (optional): Argument, which will be returned if the
`part_name` record is not found.
Returns:
str: Name of the part of the series, or `undefined` if `part_name`
is not found. |
341 | def register_phonon_task(self, *args, **kwargs):
kwargs["task_class"] = PhononTask
return self.register_task(*args, **kwargs) | Register a phonon task. |
342 | def get_default_saver(max_to_keep: int=3) -> tf.train.Saver:
return tf.train.Saver(max_to_keep=max_to_keep) | Creates Tensorflow Saver object with 3 recent checkpoints to keep.
:param max_to_keep: Maximum number of recent checkpoints to keep, defaults to 3 |
343 | def generate_stimfunction(onsets,
event_durations,
total_time,
weights=[1],
timing_file=None,
temporal_resolution=100.0,
):
if timing_file is not None:
with open(timing_file) as f:
text = f.readlines()
onsets = list()
event_durations = list()
weights = list()
for line in text:
onset, duration, weight = line.strip().split()
upsampled_onset = float(onset) * temporal_resolution
if np.allclose(upsampled_onset, np.round(upsampled_onset)) == 0:
warning = ('Onset ' + str(onset) + ' is not a multiple of the temporal '
'resolution and will be floored to the nearest sample when the '
'stimulus function is generated.')
logger.warning(warning)
onsets.append(float(onset))
event_durations.append(float(duration))
weights.append(float(weight))
if len(event_durations) == 1:
event_durations = event_durations * len(onsets)
if len(weights) == 1:
weights = weights * len(onsets)
if np.max(onsets) > total_time:
raise ValueError()
stimfunction = np.zeros((int(round(total_time * temporal_resolution)), 1))
for onset_counter in list(range(len(onsets))):
onset_idx = int(np.floor(onsets[onset_counter] * temporal_resolution))
offset_idx = int(np.floor((onsets[onset_counter] + event_durations[
onset_counter]) * temporal_resolution))
stimfunction[onset_idx:offset_idx, 0] = [weights[onset_counter]]
return stimfunction | Return the function for the timecourse events
When do stimuli onset, how long for and to what extent should you
resolve the fMRI time course. There are two ways to create this, either
by supplying onset, duration and weight information or by supplying a
timing file (in the three column format used by FSL).
Parameters
----------
onsets : list, int
What are the timestamps (in s) for when an event you want to
generate onsets?
event_durations : list, int
What are the durations (in s) of the events you want to
generate? If there is only one value then this will be assigned
to all onsets
total_time : int
How long (in s) is the experiment in total.
weights : list, float
What is the weight for each event (how high is the box car)? If
there is only one value then this will be assigned to all onsets
timing_file : string
The filename (with path) to a three column timing file (FSL) to
make the events. Still requires total_time to work
temporal_resolution : float
How many elements per second are you modeling for the
timecourse. This is useful when you want to model the HRF at an
arbitrarily high resolution (and then downsample to your TR later).
Returns
----------
stim_function : 1 by timepoint array, float
The time course of stimulus evoked activation. This has a temporal
resolution of temporal resolution / 1.0 elements per second |
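
A minimal sketch of the boxcar construction the docstring describes, with invented onsets, durations and weights and without the timing-file branch; it reproduces only the core indexing logic of the row above.

```python
import numpy as np

def boxcar(onsets, durations, weights, total_time, temporal_resolution=100.0):
    # One column time course; each event sets a boxcar of height `weight`.
    stim = np.zeros((int(round(total_time * temporal_resolution)), 1))
    for onset, dur, w in zip(onsets, durations, weights):
        i0 = int(np.floor(onset * temporal_resolution))
        i1 = int(np.floor((onset + dur) * temporal_resolution))
        stim[i0:i1, 0] = w
    return stim

stim = boxcar(onsets=[1.0, 4.0], durations=[2.0, 1.0], weights=[1.0, 0.5],
              total_time=8.0, temporal_resolution=10.0)
print(stim.ravel())
# 10 zeros, then 20 ones, then 10 zeros, then 10 values of 0.5, then 30 zeros
```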
344 | def to_text(self, tree, force_root=False):
self.extract_tag_metadata(tree)
text = []
attributes = []
comments = []
blocks = []
if not (self.ignores.match(tree) if self.ignores else None):
capture = self.captures.match(tree) if self.captures is not None else None
if capture:
for attr in self.attributes:
value = tree.attrs.get(attr, ).strip()
if value:
sel = self.construct_selector(tree, attr=attr)
attributes.append((value, sel))
for child in tree.children:
string = str(child).strip()
is_comment = isinstance(child, bs4.Comment)
if isinstance(child, bs4.element.Tag):
t, b, a, c = self.to_text(child)
text.extend(t)
attributes.extend(a)
comments.extend(c)
blocks.extend(b)
elif not isinstance(child, NON_CONTENT) and (not is_comment or self.comments):
string = str(child).strip()
if string:
if is_comment:
sel = self.construct_selector(tree) +
comments.append((string, sel))
elif capture:
text.append(string)
text.append()
elif self.comments:
for child in tree.descendants:
if isinstance(child, bs4.Comment):
string = str(child).strip()
if string:
sel = self.construct_selector(tree) +
comments.append((string, sel))
text = self.store_blocks(tree, blocks, text, force_root)
if tree.parent is None or force_root:
return blocks, attributes, comments
else:
return text, blocks, attributes, comments | Extract text from tags.
Skip any selectors specified and include attributes if specified.
Ignored tags will not have their attributes scanned either. |
345 | def find_analyses(ar_or_sample):
bc = api.get_tool(CATALOG_ANALYSIS_REQUEST_LISTING)
ar = bc(portal_type=, id=ar_or_sample)
if len(ar) == 0:
ar = bc(portal_type=, getClientSampleID=ar_or_sample)
if len(ar) == 1:
obj = ar[0].getObject()
analyses = obj.getAnalyses(full_objects=True)
return analyses
return [] | This function is used to find keywords that are not on the analysis
but keywords that are on the interim fields.
This function and the is_keyword function should probably be in
resultsimport.py or somewhere central where they can be used by other
instrument interfaces. |
346 | def match_similar(base, items):
finds = list(find_similar(base, items))
if finds:
return max(finds, key=base.similarity)
return None | Get the most similar matching item from a list of items.
@param base: base item to locate best match
@param items: list of items for comparison
@return: most similar matching item or None |
347 | def dcounts(self):
print("WARNING: Distinct value count for all tables can take a long time...", file=sys.stderr)
sys.stderr.flush()
data = []
for t in self.tables():
for c in t.columns():
data.append([t.name(), c.name(), c.dcount(), t.size(), c.dcount() / float(t.size())])
df = pd.DataFrame(data, columns=["table", "column", "distinct", "size", "fraction"])
return df | :return: a data frame with names and distinct counts and fractions for all columns in the database |
348 | def select_unrectified_slitlet(image2d, islitlet, csu_bar_slit_center,
params, parmodel, maskonly):
if image2d.shape != (EMIR_NAXIS2, EMIR_NAXIS1):
raise ValueError("NAXIS1, NAXIS2 unexpected for EMIR detector")
image2d_output = np.zeros_like(image2d)
list_expected_frontiers = expected_distorted_frontiers(
islitlet, csu_bar_slit_center,
params, parmodel, numpts=101, deg=5, debugplot=0
)
pol_lower_expected = list_expected_frontiers[0].poly_funct
pol_upper_expected = list_expected_frontiers[1].poly_funct
for j in range(EMIR_NAXIS1):
xchannel = j + 1
y0_lower = pol_lower_expected(xchannel)
y0_upper = pol_upper_expected(xchannel)
n1, n2 = nscan_minmax_frontiers(y0_frontier_lower=y0_lower,
y0_frontier_upper=y0_upper,
resize=True)
if maskonly:
image2d_output[(n1 - 1):n2, j] = np.repeat(
[1.0], (n2 - n1 + 1)
)
else:
image2d_output[(n1 - 1):n2, j] = image2d[(n1 - 1):n2, j]
return image2d_output | Returns image with the indicated slitlet (zero anywhere else).
Parameters
----------
image2d : numpy array
Initial image from which the slitlet data will be extracted.
islitlet : int
Slitlet number.
csu_bar_slit_center : float
CSU bar slit center.
params : :class:`~lmfit.parameter.Parameters`
Parameters to be employed in the prediction of the distorted
boundaries.
parmodel : str
Model to be assumed. Allowed values are 'longslit' and
'multislit'.
maskonly : bool
If True, returns simply a mask (1 in the slitlet region and
zero anywhere else).
Returns
-------
image2d_output : numpy array
2D image with the pixel information corresponding to the
selected slitlet and zero everywhere else. |
349 | def CreateJarBuilder(env):
try:
java_jar = env[][]
except KeyError:
fs = SCons.Node.FS.get_default_fs()
jar_com = SCons.Action.Action(, )
java_jar = SCons.Builder.Builder(action = jar_com,
suffix = ,
src_suffix = ,
src_builder = ,
source_factory = fs.Entry)
env[][] = java_jar
return java_jar | The Jar builder expects a list of class files
which it can package into a jar file.
The jar tool provides an interface for passing other types
of java files such as .java, directories or swig interfaces
and will build them to class files which it can then package
into the jar. |
350 | def _init_data_map(self):
if self._data_map is not None:
return
if self._xml_tree is None:
agis_root = ARCGIS_ROOTS[0]
else:
agis_root = get_element_name(self._xml_tree)
if agis_root not in ARCGIS_ROOTS:
raise InvalidContent(, root=agis_root)
agis_data_map = {: agis_root}
agis_data_map.update(_agis_tag_formats)
agis_data_structures = {}
ad_format = agis_data_map[ATTRIBUTES]
agis_data_structures[ATTRIBUTES] = format_xpaths(
_agis_definitions[ATTRIBUTES],
label=ad_format.format(ad_path=),
aliases=ad_format.format(ad_path=),
definition=ad_format.format(ad_path=),
definition_src=ad_format.format(ad_path=)
)
bb_format = agis_data_map[BOUNDING_BOX]
agis_data_structures[BOUNDING_BOX] = format_xpaths(
_agis_definitions[BOUNDING_BOX],
east=bb_format.format(bbox_path=),
south=bb_format.format(bbox_path=),
west=bb_format.format(bbox_path=),
north=bb_format.format(bbox_path=)
)
ct_format = agis_data_map[CONTACTS]
agis_data_structures[CONTACTS] = format_xpaths(
_agis_definitions[CONTACTS],
name=ct_format.format(ct_path=),
organization=ct_format.format(ct_path=),
position=ct_format.format(ct_path=),
email=ct_format.format(ct_path=)
)
dt_format = agis_data_map[DATES]
agis_data_structures[DATES] = {
DATE_TYPE_MULTIPLE: dt_format.format(type_path=),
+ DATE_TYPE_MULTIPLE: dt_format.format(type_path=),
DATE_TYPE_RANGE_BEGIN: dt_format.format(type_path=),
+ DATE_TYPE_RANGE_BEGIN: dt_format.format(type_path=),
DATE_TYPE_RANGE_END: dt_format.format(type_path=),
+ DATE_TYPE_RANGE_END: dt_format.format(type_path=),
DATE_TYPE_SINGLE: dt_format.format(type_path=),
+ DATE_TYPE_SINGLE: dt_format.format(type_path=)
}
agis_data_structures[DATES][DATE_TYPE_RANGE] = [
agis_data_structures[DATES][DATE_TYPE_RANGE_BEGIN],
agis_data_structures[DATES][DATE_TYPE_RANGE_END]
]
agis_data_structures[DATES][ + DATE_TYPE_RANGE] = [
agis_data_structures[DATES][ + DATE_TYPE_RANGE_BEGIN],
agis_data_structures[DATES][ + DATE_TYPE_RANGE_END]
]
df_format = agis_data_map[DIGITAL_FORMS]
agis_data_structures[DIGITAL_FORMS] = format_xpaths(
_agis_definitions[DIGITAL_FORMS],
name=df_format.format(df_path=),
content=df_format.format(df_path=),
decompression=df_format.format(df_path=),
version=df_format.format(df_path=),
specification=df_format.format(df_path=),
access_desc=agis_data_map[],
access_instrs=agis_data_map[],
network_resource=agis_data_map[]
)
lw_format = agis_data_map[LARGER_WORKS]
agis_data_structures[LARGER_WORKS] = format_xpaths(
_agis_definitions[LARGER_WORKS],
title=lw_format.format(lw_path=),
edition=lw_format.format(lw_path=),
origin=lw_format.format(lw_path=),
online_linkage=lw_format.format(lw_path=),
other_citation=lw_format.format(lw_path=),
date=lw_format.format(lw_path=),
place=lw_format.format(lw_path=),
info=lw_format.format(lw_path=)
)
ps_format = agis_data_map[PROCESS_STEPS]
agis_data_structures[PROCESS_STEPS] = format_xpaths(
_agis_definitions[PROCESS_STEPS],
description=ps_format.format(ps_path=),
date=ps_format.format(ps_path=),
sources=ps_format.format(ps_path=)
)
ri_format = agis_data_map[RASTER_INFO]
agis_data_structures[RASTER_INFO] = format_xpaths(
_agis_definitions[RASTER_DIMS],
type=ri_format.format(ri_path=),
size=ri_format.format(ri_path=),
value=ri_format.format(ri_path=),
units=ri_format.format(ri_path=)
)
for prop, xpath in iteritems(dict(agis_data_map)):
if prop in (ATTRIBUTES, CONTACTS, PROCESS_STEPS):
agis_data_map[prop] = ParserProperty(self._parse_complex_list, self._update_complex_list)
elif prop in (BOUNDING_BOX, LARGER_WORKS):
agis_data_map[prop] = ParserProperty(self._parse_complex, self._update_complex)
elif prop in (, ):
agis_data_map[prop] = ParserProperty(self._parse_report_item, self._update_report_item)
elif prop == DATES:
agis_data_map[prop] = ParserProperty(self._parse_dates, self._update_dates)
elif prop == DIGITAL_FORMS:
agis_data_map[prop] = ParserProperty(self._parse_digital_forms, self._update_digital_forms)
elif prop == RASTER_INFO:
agis_data_map[prop] = ParserProperty(self._parse_raster_info, self._update_raster_info)
else:
agis_data_map[prop] = xpath
self._data_map = agis_data_map
self._data_structures = agis_data_structures | OVERRIDDEN: Initialize required FGDC data map with XPATHS and specialized functions |
351 | def printBoundingBox(self):
print ("Bounding Latitude: ")
print (self.startlatitude)
print (self.endlatitude)
print ("Bounding Longitude: ")
print (self.startlongitude)
print (self.endlongitude) | Print the bounding box that this DEM covers |
352 | def Send(self, url, opname, pyobj, nsdict={}, soapaction=None, chain=None,
**kw):
url = url or self.url
cookies = None
if chain is not None:
cookies = chain.flow.cookies
d = {}
d.update(self.nsdict)
d.update(nsdict)
if soapaction is not None:
self.addHTTPHeader('SOAPAction', soapaction)
chain = self.factory.newInstance()
soapdata = chain.processRequest(pyobj, nsdict=nsdict,
soapaction=soapaction, **kw)
if self.trace:
print >>self.trace, "_" * 33, time.ctime(time.time()), "REQUEST:"
print >>self.trace, soapdata
f = getPage(str(url), contextFactory=self.contextFactory,
postdata=soapdata, agent=self.agent,
method='POST', headers=self.getHTTPHeaders(),
cookies=cookies)
if isinstance(f, Failure):
return f
chain.flow = f
self.chain = chain
return chain | Returns a ProcessingChain which needs to be passed to Receive if
Send is being called consecutively. |
353 | def __delete_action(self, revision):
delete_response = yield self.collection.delete(revision.get("master_id"))
if delete_response.get("n") == 0:
raise DocumentRevisionDeleteFailed() | Handle a delete action to a particular master id via the revision.
:param dict revision:
:return: |
354 | def columnSchema(self):
if self._columnSchema is None:
ctx = SparkContext._active_spark_context
jschema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.columnSchema()
self._columnSchema = _parse_datatype_json_string(jschema.json())
return self._columnSchema | Returns the schema for the image column.
:return: a :class:`StructType` for image column,
``struct<origin:string, height:int, width:int, nChannels:int, mode:int, data:binary>``.
.. versionadded:: 2.4.0 |
355 | def update(self, unique_name=values.unset, default_ttl=values.unset,
callback_url=values.unset, geo_match_level=values.unset,
number_selection_behavior=values.unset,
intercept_callback_url=values.unset,
out_of_session_callback_url=values.unset,
chat_instance_sid=values.unset):
return self._proxy.update(
unique_name=unique_name,
default_ttl=default_ttl,
callback_url=callback_url,
geo_match_level=geo_match_level,
number_selection_behavior=number_selection_behavior,
intercept_callback_url=intercept_callback_url,
out_of_session_callback_url=out_of_session_callback_url,
chat_instance_sid=chat_instance_sid,
) | Update the ServiceInstance
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:param unicode default_ttl: Default TTL for a Session, in seconds
:param unicode callback_url: The URL we should call when the interaction status changes
:param ServiceInstance.GeoMatchLevel geo_match_level: Where a proxy number must be located relative to the participant identifier
:param ServiceInstance.NumberSelectionBehavior number_selection_behavior: The preference for Proxy Number selection for the Service instance
:param unicode intercept_callback_url: The URL we call on each interaction
:param unicode out_of_session_callback_url: The URL we call when an inbound call or SMS action occurs on a closed or non-existent Session
:param unicode chat_instance_sid: The SID of the Chat Service Instance
:returns: Updated ServiceInstance
:rtype: twilio.rest.proxy.v1.service.ServiceInstance |
356 | def get_lazystring_encoder(app):
from speaklater import _LazyString
class JSONEncoder(app.json_encoder):
def default(self, o):
if isinstance(o, _LazyString):
return text_type(o)
return super(JSONEncoder, self).default(o)
return JSONEncoder | Return a JSONEncoder for handling lazy strings from Babel.
Installed on Flask application by default by :class:`InvenioI18N`. |
357 | def clear(self, exclude=None):
if exclude is None:
self.cache = {}
else:
self.cache = {k: v for k, v in self.cache.items()
if k in exclude} | Remove all elements in the cache. |
358 | def on_message(self, message):
msg = tornado.escape.json_decode(message)
if msg[] == :
if self.application.verbose:
print(msg[])
self.config = list(yaml.load_all(msg[]))
if len(self.config) > 1:
error =
if self.application.verbose:
logger.error(error)
self.write_message({: ,
: error})
return
self.config = self.config[0]
self.send_log( + self.simulation_name,
.format(name=self.config[]))
if in self.config:
self.write_message({: ,
: self.config[]})
self.name = self.config[]
self.run_simulation()
settings = []
for key in self.config[]:
if type(self.config[][key]) == float or type(self.config[][key]) == int:
if self.config[][key] <= 1:
setting_type =
else:
setting_type =
elif type(self.config[][key]) == bool:
setting_type =
else:
setting_type =
settings.append({
: key,
: setting_type,
: self.config[][key]
})
self.write_message({: ,
: settings})
elif msg[] == :
if self.application.verbose:
logger.info(.format(msg[]))
self.send_log( + __name__, .format(msg[]))
self.write_message({: ,
: self.get_trial(int(msg[]))})
elif msg[] == :
if self.application.verbose:
logger.info(.format(name=self.config[]))
self.send_log( + self.simulation_name, .format(name=self.config[]))
self.config[] = msg[]
self.run_simulation()
elif msg[] == :
G = self.trials[ int(msg[]) ].history_to_graph()
for node in G.nodes():
if in G.node[node]:
G.node[node][] = {"position": {"x": G.node[node][][0], "y": G.node[node][][1], "z": 0.0}}
del (G.node[node][])
writer = nx.readwrite.gexf.GEXFWriter(version=)
writer.add_graph(G)
self.write_message({: ,
: self.config[] + + str(msg[]),
: tostring(writer.xml).decode(writer.encoding) })
elif msg[] == :
G = self.trials[ int(msg[]) ].history_to_graph()
for node in G.nodes():
if in G.node[node]:
G.node[node][] = {"position": {"x": G.node[node][][0], "y": G.node[node][][1], "z": 0.0}}
del (G.node[node][])
self.write_message({: ,
: self.config[] + + str(msg[]),
: nx.node_link_data(G) })
else:
if self.application.verbose:
logger.info() | Receiving a message from the websocket, parse, and act accordingly. |
359 | def cleanup_dead_jobs():
from .models import WooeyJob
inspect = celery_app.control.inspect()
active_tasks = {task['id'] for worker, tasks in six.iteritems(inspect.active()) for task in tasks}
active_jobs = WooeyJob.objects.filter(status=WooeyJob.RUNNING)
to_disable = set()
for job in active_jobs:
if job.celery_id not in active_tasks:
to_disable.add(job.pk)
WooeyJob.objects.filter(pk__in=to_disable).update(status=WooeyJob.FAILED) | This cleans up jobs that have been marked as running, but are not queued in Celery. It is meant
to clean up jobs that have been lost due to a server crash or some other reason that leaves a job
in limbo. |
360 | def partial_fit(self, X, y=None, classes=None, **fit_params):
if not self.initialized_:
self.initialize()
self.notify(, X=X, y=y)
try:
self.fit_loop(X, y, **fit_params)
except KeyboardInterrupt:
pass
self.notify(, X=X, y=y)
return self | Fit the module.
If the module is initialized, it is not re-initialized, which
means that this method should be used if you want to continue
training a model (warm start).
Parameters
----------
X : input data, compatible with skorch.dataset.Dataset
By default, you should be able to pass:
* numpy arrays
* torch tensors
* pandas DataFrame or Series
* scipy sparse CSR matrices
* a dictionary of the former three
* a list/tuple of the former three
* a Dataset
If this doesn't work with your data, you have to pass a
``Dataset`` that can deal with the data.
y : target data, compatible with skorch.dataset.Dataset
The same data types as for ``X`` are supported. If your X is
a Dataset that contains the target, ``y`` may be set to
None.
classes : array, shape (n_classes,)
Solely for sklearn compatibility, currently unused.
**fit_params : dict
Additional parameters passed to the ``forward`` method of
the module and to the ``self.train_split`` call. |
361 | def compute_number_edges(function):
n = 0
for node in function.nodes:
n += len(node.sons)
return n | Compute the number of edges of the CFG
Args:
function (core.declarations.function.Function)
Returns:
int |
362 | def reversed(self):
return Arc(self.end, self.radius, self.rotation, self.large_arc,
not self.sweep, self.start) | returns a copy of the Arc object with its orientation reversed. |
363 | def resolve_identifier(self, name, expected_type=None):
name = str(name)
if name in self._known_identifiers:
obj = self._known_identifiers[name]
if expected_type is not None and not isinstance(obj, expected_type):
raise UnresolvedIdentifierError(u"Identifier resolved to an object of an unexpected type", name=name, expected_type=expected_type.__name__, resolved_type=obj.__class__.__name__)
return obj
if self.parent is not None:
try:
return self.parent.resolve_identifier(name, expected_type)
except UnresolvedIdentifierError:
pass
raise UnresolvedIdentifierError(u"Could not resolve identifier", name=name, scope=self.name) | Resolve an identifier to an object.
There is a single namespace for identifiers so the user also should
pass an expected type that will be checked against what the identifier
actually resolves to so that there are no surprises.
Args:
name (str): The name that we want to resolve
expected_type (type): The type of object that we expect to receive.
This is an optional parameter. If None is passed, no type checking
is performed.
Returns:
object: The resolved object |
364 | def log_event(self, text, timestamp=None):
try:
text = text.encode("mbcs")
except LookupError:
text = text.encode("ascii")
comment = b"Added by python-can"
marker = b"python-can"
data = GLOBAL_MARKER_STRUCT.pack(
0, 0xFFFFFF, 0xFF3300, 0, len(text), len(marker), len(comment))
self._add_object(GLOBAL_MARKER, data + text + marker + comment, timestamp) | Add an arbitrary message to the log file as a global marker.
:param str text:
The group name of the marker.
:param float timestamp:
Absolute timestamp in Unix timestamp format. If not given, the
marker will be placed along the last message. |
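A hedged usage sketch for the global marker described above, assuming python-can's BLF writer exposes log_event as shown; the file name is illustrative:
import time
import can

with can.BLFWriter("capture.blf") as writer:
    writer.log_event("Test sequence started")                  # placed along the last logged message
    writer.log_event("Fault injected", timestamp=time.time())  # explicit absolute Unix timestamp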
365 | def dict_dot(d, k, val=None, default=None):
if val is None and k == :
return d
def set_default(dict_or_model, key, default_value):
if isinstance(dict_or_model, models.Model):
if not hasattr(dict_or_model, key):
setattr(dict_or_model, key, default_value)
return getattr(dict_or_model, key)
else:
return dict_or_model.setdefault(key, default_value)
def get_item(dict_or_model, key):
if isinstance(dict_or_model, models.Model):
return getattr(dict_or_model, key)
else:
return dict_or_model[key]
def set_item(dict_or_model, key, value):
if isinstance(dict_or_model, models.Model):
setattr(dict_or_model, key, value)
else:
dict_or_model[key] = value
if val is None and callable(default):
return functools.reduce(lambda a, b: set_default(a, b, default()), k.split(), d)
elif val is None:
return functools.reduce(get_item, k.split(), d)
else:
try:
k, k_last = k.rsplit(, 1)
set_item(dict_dot(d, k, default=dict), k_last, val)
except ValueError:
set_item(d, k, val)
return val | Get or set value using a dot-notation key in a multilevel dict. |
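A small illustration of the plain-dict path (the Django model branch is not exercised here):
d = {}
dict_dot(d, "a.b.c", 42)                    # set: intermediate dicts are created -> {'a': {'b': {'c': 42}}}
value = dict_dot(d, "a.b.c")                # get: returns 42
branch = dict_dot(d, "a.b", default=dict)   # get with a default factory: returns {'c': 42}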
366 | def q12d_local(vertices, lame, mu):
M = lame + 2*mu
R_11 = np.matrix([[2, -2, -1, 1],
[-2, 2, 1, -1],
[-1, 1, 2, -2],
[1, -1, -2, 2]]) / 6.0
R_12 = np.matrix([[1, 1, -1, -1],
[-1, -1, 1, 1],
[-1, -1, 1, 1],
[1, 1, -1, -1]]) / 4.0
R_22 = np.matrix([[2, 1, -1, -2],
[1, 2, -2, -1],
[-1, -2, 2, 1],
[-2, -1, 1, 2]]) / 6.0
F = inv(np.vstack((vertices[1] - vertices[0], vertices[3] - vertices[0])))
K = np.zeros((8, 8))
E = F.T * np.matrix([[M, 0], [0, mu]]) * F
K[0::2, 0::2] = E[0, 0] * R_11 + E[0, 1] * R_12 +\
E[1, 0] * R_12.T + E[1, 1] * R_22
E = F.T * np.matrix([[mu, 0], [0, M]]) * F
K[1::2, 1::2] = E[0, 0] * R_11 + E[0, 1] * R_12 +\
E[1, 0] * R_12.T + E[1, 1] * R_22
E = F.T * np.matrix([[0, mu], [lame, 0]]) * F
K[1::2, 0::2] = E[0, 0] * R_11 + E[0, 1] * R_12 +\
E[1, 0] * R_12.T + E[1, 1] * R_22
K[0::2, 1::2] = K[1::2, 0::2].T
K /= det(F)
return K | Local stiffness matrix for two dimensional elasticity on a square element.
Parameters
----------
lame : Float
Lame's first parameter
mu : Float
shear modulus
See Also
--------
linear_elasticity
Notes
-----
Vertices should be listed in counter-clockwise order::
[3]----[2]
| |
| |
[0]----[1]
Degrees of freedom are enumerated as follows::
[x=6,y=7]----[x=4,y=5]
| |
| |
[x=0,y=1]----[x=2,y=3] |
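A hedged usage sketch for a unit square element; the material parameters are illustrative:
import numpy as np

vertices = np.array([[0.0, 0.0],   # node 0
                     [1.0, 0.0],   # node 1
                     [1.0, 1.0],   # node 2
                     [0.0, 1.0]])  # node 3 (counter-clockwise)
K = q12d_local(vertices, lame=1.0, mu=1.0)   # 8x8 local stiffness, two DOFs per node
print(K.shape)                               # (8, 8)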
367 | def write_calculations_to_csv(funcs, states, columns, path, headers, out_name,
metaids=[], extension=".xls"):
if not isinstance(funcs, list):
funcs = [funcs] * len(headers)
if not isinstance(states, list):
states = [states] * len(headers)
if not isinstance(columns, list):
columns = [columns] * len(headers)
data_agg = []
for i in range(len(headers)):
ids, data = read_state_with_metafile(funcs[i], states[i], columns[i],
path, metaids, extension)
data_agg = np.append(data_agg, [data])
output = pd.DataFrame(data=np.vstack((ids, data_agg)).T,
columns=["ID"]+headers)
output.to_csv(out_name, sep=)
return output | Writes each output of the given functions on the given states and data
columns to a new column in the specified output file.
Note: Column 0 is time. The first data column is column 1.
:param funcs: A function or list of functions which will be applied in order to the data. If only one function is given it is applied to all the states/columns
:type funcs: function or function list
:param states: The state ID numbers for which data should be extracted. List should be in order of calculation or if only one state is given then it will be used for all the calculations
:type states: string or string list
:param columns: The index of a column, the header of a column, a list of indexes, OR a list of headers of the column(s) that you want to apply calculations to
:type columns: int, string, int list, or string list
:param path: Path to your ProCoDA metafile (must be tab-delimited)
:type path: string
:param headers: List of the desired header for each calculation, in order
:type headers: string list
:param out_name: Desired name for the output file. Can include a relative path
:type out_name: string
:param metaids: A list of the experiment IDs you'd like to analyze from the metafile
:type metaids: string list, optional
:param extension: The file extension of the tab delimited file. Defaults to ".xls" if no argument is passed in
:type extension: string, optional
:requires: funcs, states, columns, and headers are all of the same length if they are lists. Some being lists and some single values are okay.
:return: out_name.csv (CSV file) - A CSV file in which each column is a new calculation and each row is a new experiment on which the calculations were performed
:return: output (Pandas.DataFrame)- Pandas DataFrame holding the same data that was written to the output file |
368 | def diagonal_line(xi=None, yi=None, *, ax=None, c=None, ls=None, lw=None, zorder=3):
if ax is None:
ax = plt.gca()
if xi is None:
xi = ax.get_xlim()
if yi is None:
yi = ax.get_ylim()
if c is None:
c = matplotlib.rcParams["grid.color"]
if ls is None:
ls = matplotlib.rcParams["grid.linestyle"]
if lw is None:
lw = matplotlib.rcParams["grid.linewidth"]
diag_min = max(min(xi), min(yi))
diag_max = min(max(xi), max(yi))
line = ax.plot([diag_min, diag_max], [diag_min, diag_max], c=c, ls=ls, lw=lw, zorder=zorder)
return line | Plot a diagonal line.
Parameters
----------
xi : 1D array-like (optional)
The x axis points. If None, taken from axis limits. Default is None.
yi : 1D array-like
The y axis points. If None, taken from axis limits. Default is None.
ax : axis (optional)
Axis to plot on. If none is supplied, the current axis is used.
c : string (optional)
Line color. Default derives from rcParams grid color.
ls : string (optional)
Line style. Default derives from rcParams linestyle.
lw : float (optional)
Line width. Default derives from rcParams linewidth.
zorder : number (optional)
Matplotlib zorder. Default is 3.
Returns
-------
matplotlib.lines.Line2D object
The plotted line. |
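A short usage sketch: a parity plot with the diagonal as a reference line (data are synthetic):
import numpy as np
import matplotlib.pyplot as plt

observed = np.random.rand(50)
predicted = observed + 0.05 * np.random.randn(50)

fig, ax = plt.subplots()
ax.scatter(observed, predicted)
diagonal_line(ax=ax)    # spans the overlap of the current x and y limits
plt.show()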
369 | def notify(
self,
force_notify=None,
use_email=None,
use_sms=None,
email_body_template=None,
**kwargs,
):
email_sent = None
sms_sent = None
use_email = use_email or getattr(settings, "EMAIL_ENABLED", False)
use_sms = use_sms or getattr(settings, "TWILIO_ENABLED", False)
if force_notify or self._notify_on_condition(**kwargs):
if use_email:
email_body_template = (
email_body_template or self.email_body_template
) + self.email_footer_template
email_sent = self.send_email(
email_body_template=email_body_template, **kwargs
)
if use_sms:
sms_sent = self.send_sms(**kwargs)
self.post_notification_actions(
email_sent=email_sent, sms_sent=sms_sent, **kwargs
)
return True if email_sent or sms_sent else False | Notify / send an email and/or SMS.
Main entry point.
This notification class (me) knows from whom and to whom the
notifications will be sent.
See signals. kwargs are:
* history_instance
* instance
* user |
370 | def trace():
def fget(self):
return self._options.get(, None)
def fset(self, value):
self._options[] = value
return locals() | Enables and disables request tracing. |
371 | def parse_acl(acl_string):
if not acl_string:
return [ALLOW_ALL]
aces_list = acl_string.replace(, ).split()
aces_list = [ace.strip().split(, 2) for ace in aces_list if ace]
aces_list = [(a, b, c.split()) for a, b, c in aces_list]
result_acl = []
for action_str, princ_str, perms in aces_list:
action_str = action_str.strip().lower()
action = actions.get(action_str)
if action is None:
raise ValueError(
.format(
action_str, list(actions.keys())))
princ_str = princ_str.strip().lower()
if princ_str in special_principals:
principal = special_principals[princ_str]
elif is_callable_tag(princ_str):
principal = resolve_to_callable(princ_str)
else:
principal = princ_str
permissions = parse_permissions(perms)
result_acl.append((action, principal, permissions))
return result_acl | Parse raw string :acl_string: of RAML-defined ACLs.
If :acl_string: is blank or None, all permissions are given.
Values of ACL action and principal are parsed using `actions` and
`special_principals` maps and are looked up after `strip()` and
`lower()`.
ACEs in :acl_string: may be separated by newlines or semicolons.
Action, principal and permission lists must be separated by spaces.
Permissions must be comma-separated.
E.g. 'allow everyone view,create,update' and 'deny authenticated delete'
:param acl_string: Raw RAML string containing defined ACEs. |
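A usage sketch built from the example strings in the docstring; it relies on the module-level `actions` and `special_principals` maps, which are not shown here:
acl = parse_acl("allow everyone view,create,update; deny authenticated delete")
for action, principal, permissions in acl:
    print(action, principal, permissions)

parse_acl("")    # blank or None input -> [ALLOW_ALL], i.e. all permissions granted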
372 | def orcid_uri_to_orcid(value):
"Strip the uri schema from the start of ORCID URL strings"
if value is None:
return value
replace_values = [, ]
for replace_value in replace_values:
value = value.replace(replace_value, )
return value | Strip the uri schema from the start of ORCID URL strings |
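The prefixes being stripped are elided above; a standalone sketch under the assumption that they are the http/https ORCID URI schemes:
def orcid_uri_to_orcid_sketch(value):
    # assumed prefixes; the original list is elided in the code above
    if value is None:
        return value
    for prefix in ("http://orcid.org/", "https://orcid.org/"):
        value = value.replace(prefix, "")
    return value

orcid_uri_to_orcid_sketch("https://orcid.org/0000-0002-1825-0097")   # -> '0000-0002-1825-0097'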
373 | def __iter_read_spectrum_meta(self):
mz_group = int_group = None
slist = None
elem_iterator = self.iterparse(self.filename, events=("start", "end"))
if sys.version_info > (3,):
_, self.root = next(elem_iterator)
else:
_, self.root = elem_iterator.next()
for event, elem in elem_iterator:
if elem.tag == self.sl + "spectrumList" and event == "start":
slist = elem
elif elem.tag == self.sl + "spectrum" and event == "end":
self.__process_spectrum(elem)
slist.remove(elem)
elif elem.tag == self.sl + "referenceableParamGroup" and event == "end":
for param in elem:
if param.attrib["name"] == "m/z array":
self.mzGroupId = elem.attrib[]
mz_group = elem
elif param.attrib["name"] == "intensity array":
self.intGroupId = elem.attrib[]
int_group = elem
self.__assign_precision(int_group, mz_group)
self.__fix_offsets() | This method should only be called by __init__. Reads the data formats, coordinates and offsets from
the .imzML file and initializes the respective attributes. While traversing the XML tree, the per-spectrum
metadata is pruned, i.e. the <spectrumList> element(s) are left behind empty.
Supported accession values for the number formats: "MS:1000521", "MS:1000523", "IMS:1000141" or
"IMS:1000142". The string values are "32-bit float", "64-bit float", "32-bit integer", "64-bit integer". |
374 | def delete_eventtype(self, test_type_str=None):
if test_type_str:
answer = test_type_str, True
else:
answer = QInputDialog.getText(self, , )
if answer[1]:
self.annot.remove_event_type(answer[0])
self.display_eventtype()
self.update_annotations() | Action: create dialog to delete event type. |
375 | def introspect_access_token(self, access_token_value):
if access_token_value not in self.access_tokens:
raise InvalidAccessToken(.format(access_token_value))
authz_info = self.access_tokens[access_token_value]
introspection = {: authz_info[] >= int(time.time())}
introspection_params = {k: v for k, v in authz_info.items() if k in TokenIntrospectionResponse.c_param}
introspection.update(introspection_params)
return introspection | Returns authorization data associated with the access token.
See <a href="https://tools.ietf.org/html/rfc7662">"Token Introspection", Section 2.2</a>. |
376 | def _reload(self):
ConfigModel = apps.get_model()
cache = {}
data = dict(
ConfigModel.objects
.all()
.values_list(, ))
for form_class in self._registry:
empty_form = form_class()
cache.update({
name: field.initial
for name, field in empty_form.fields.items()})
form = form_class(data={
name: _deserialize(data[name], field)
for name, field in empty_form.fields.items()
if name in data and not isinstance(field, forms.FileField)})
form.is_valid()
cache.update({
name: _unlazify(value)
for name, value in form.cleaned_data.items()
if name in data})
self._cache = cache | Gets every registered form's field value.\
If a field name is found in the db, it will load it from there.\
Otherwise, the initial value from the field form is used |
377 | def open(self):
try:
self.project.open_main(self.filename)
except UnicodeDecodeError:
with open(self.filename, ) as openfile:
encoding = get_encoding(openfile.read())
try:
self.project.open_main(self.filename, encoding)
except UnicodeDecodeError:
LOGGER.error(" encountered a fatal encoding error",
self.filename)
sys.exit(1)
except:
open_error(self.filename)
except:
open_error(self.filename) | Open the subtitle file into an Aeidon project. |
378 | def from_gpx(gpx_segment):
points = []
for point in gpx_segment.points:
points.append(Point.from_gpx(point))
return Segment(points) | Creates a segment from a GPX format.
No preprocessing is done.
Arguments:
gpx_segment (:obj:`gpxpy.GPXTrackSegment`)
Return:
:obj:`Segment` |
379 | def get_options(self):
args = self.parse_options(self.args)
if args:
self.directory = args[0]
if self.develop:
self.skiptag = True
if not self.develop:
self.develop = self.defaults.develop
if not self.develop:
self.infoflags = self.setuptools.infoflags
if not self.formats:
self.formats = self.defaults.formats
for format in self.formats:
if format == :
self.distributions.append((, []))
elif format == :
self.distributions.append((, []))
elif format == :
self.distributions.append((, []))
elif format == :
self.distributions.append((, []))
if not self.distributions:
self.distributions.append((, []))
if self.list:
self.list_locations()
if not self.locations:
self.locations.extend(self.locations.get_default_location())
if not (self.skipregister and self.skipupload):
if not (self.get_skipregister() and self.get_skipupload()):
self.locations.check_empty_locations()
self.locations.check_valid_locations()
if len(args) > 1:
if self.urlparser.is_url(self.directory):
self.branch = args[1]
elif self.urlparser.is_ssh_url(self.directory):
self.branch = args[1]
else:
err_exit( % USAGE)
if len(args) > 2:
err_exit( % USAGE) | Process the command line. |
380 | def _get_mean(self, vs30, mag, rrup, imt, scale_fac):
C_HR, C_BC, C_SR, SC = self._extract_coeffs(imt)
rrup = self._clip_distances(rrup)
f0 = self._compute_f0_factor(rrup)
f1 = self._compute_f1_factor(rrup)
f2 = self._compute_f2_factor(rrup)
pga_bc = self._get_pga_bc(
f0, f1, f2, SC, mag, rrup, vs30, scale_fac
)
mean = np.zeros_like(vs30)
self._compute_mean(C_HR, f0, f1, f2, SC, mag, rrup,
vs30 >= 2000.0, mean, scale_fac)
self._compute_mean(C_BC, f0, f1, f2, SC, mag, rrup,
vs30 < 2000.0, mean, scale_fac)
self._compute_soil_amplification(C_SR, vs30, pga_bc, mean)
if imt == PGV():
mean = np.log(10 ** mean)
else:
mean = np.log((10 ** mean) * 1e-2 / g)
return mean | Compute and return mean |
381 | def plot_forest(
data,
kind="forestplot",
model_names=None,
var_names=None,
combined=False,
credible_interval=0.94,
rope=None,
quartiles=True,
ess=False,
r_hat=False,
colors="cycle",
textsize=None,
linewidth=None,
markersize=None,
ridgeplot_alpha=None,
ridgeplot_overlap=2,
figsize=None,
):
if not isinstance(data, (list, tuple)):
data = [data]
datasets = [convert_to_dataset(datum) for datum in reversed(data)]
var_names = _var_names(var_names, datasets)
ncols, width_ratios = 1, [3]
if ess:
ncols += 1
width_ratios.append(1)
if r_hat:
ncols += 1
width_ratios.append(1)
plot_handler = PlotHandler(
datasets, var_names=var_names, model_names=model_names, combined=combined, colors=colors
)
if figsize is None:
figsize = (min(12, sum(width_ratios) * 2), plot_handler.fig_height())
(figsize, _, titlesize, xt_labelsize, auto_linewidth, auto_markersize) = _scale_fig_size(
figsize, textsize, 1.1, 1
)
if linewidth is None:
linewidth = auto_linewidth
if markersize is None:
markersize = auto_markersize
fig, axes = plt.subplots(
nrows=1,
ncols=ncols,
figsize=figsize,
gridspec_kw={"width_ratios": width_ratios},
sharey=True,
constrained_layout=True,
)
axes = np.atleast_1d(axes)
if kind == "forestplot":
plot_handler.forestplot(
credible_interval,
quartiles,
xt_labelsize,
titlesize,
linewidth,
markersize,
axes[0],
rope,
)
elif kind == "ridgeplot":
plot_handler.ridgeplot(ridgeplot_overlap, linewidth, ridgeplot_alpha, axes[0])
else:
raise TypeError(
"Argument must be one of or "
" (you provided {})".format(kind)
)
idx = 1
if ess:
plot_handler.plot_neff(axes[idx], xt_labelsize, titlesize, markersize)
idx += 1
if r_hat:
plot_handler.plot_rhat(axes[idx], xt_labelsize, titlesize, markersize)
idx += 1
for ax in axes:
ax.grid(False)
for ticks in ax.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in ax.spines.items():
if loc in ["left", "right"]:
spine.set_visible(False)
if len(plot_handler.data) > 1:
plot_handler.make_bands(ax)
labels, ticks = plot_handler.labels_and_ticks()
axes[0].set_yticks(ticks)
axes[0].set_yticklabels(labels)
all_plotters = list(plot_handler.plotters.values())
y_max = plot_handler.y_max() - all_plotters[-1].group_offset
if kind == "ridgeplot":
y_max += ridgeplot_overlap
axes[0].set_ylim(-all_plotters[0].group_offset, y_max)
return fig, axes | Forest plot to compare credible intervals from a number of distributions.
Generates a forest plot of 100*(credible_interval)% credible intervals from
a trace or list of traces.
Parameters
----------
data : obj or list[obj]
Any object that can be converted to an az.InferenceData object
Refer to documentation of az.convert_to_dataset for details
kind : str
Choose kind of plot for main axis. Supports "forestplot" or "ridgeplot"
model_names : list[str], optional
List with names for the models in the list of data. Useful when
plotting more that one dataset
var_names: list[str], optional
List of variables to plot (defaults to None, which results in all
variables plotted)
combined : bool
Flag for combining multiple chains into a single chain. If False (default),
chains will be plotted separately.
credible_interval : float, optional
Credible interval to plot. Defaults to 0.94.
rope: tuple or dictionary of tuples
Lower and upper values of the Region Of Practical Equivalence. If a list with one
interval only is provided, the ROPE will be displayed across the y-axis. If more than one
interval is provided the length of the list should match the number of variables.
quartiles : bool, optional
Flag for plotting the interquartile range, in addition to the credible_interval intervals.
Defaults to True
r_hat : bool, optional
Flag for plotting Split R-hat statistics. Requires 2 or more chains. Defaults to False
ess : bool, optional
Flag for plotting the effective sample size. Requires 2 or more chains. Defaults to False
colors : list or string, optional
list with valid matplotlib colors, one color per model. Alternatively a string can be passed.
If the string is `cycle`, it will automatically choose a color per model from
matplotlib's cycle. If a single color is passed, e.g. 'k', 'C2', 'red', this color will be used
for all models. Defaults to 'cycle'.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
linewidth : int
Line width throughout. If None it will be autoscaled based on figsize.
markersize : int
Markersize throughout. If None it will be autoscaled based on figsize.
ridgeplot_alpha : float
Transparency for ridgeplot fill. If 0, border is colored by model, otherwise
a black outline is used.
ridgeplot_overlap : float
Overlap height for ridgeplots.
figsize : tuple
Figure size. If None it will be defined automatically.
Returns
-------
gridspec : matplotlib GridSpec
Examples
--------
Forestplot
.. plot::
:context: close-figs
>>> import arviz as az
>>> non_centered_data = az.load_arviz_data('non_centered_eight')
>>> fig, axes = az.plot_forest(non_centered_data,
>>> kind='forestplot',
>>> var_names=['theta'],
>>> combined=True,
>>> ridgeplot_overlap=3,
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
Ridgeplot
.. plot::
:context: close-figs
>>> fig, axes = az.plot_forest(non_centered_data,
>>> kind='ridgeplot',
>>> var_names=['theta'],
>>> combined=True,
>>> ridgeplot_overlap=3,
>>> colors='white',
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model') |
382 | def _timeout_thread(self, remain):
time.sleep(remain)
if not self._ended:
self._ended = True
self._release_all() | Timeout before releasing everything, if nothing was returned |
383 | def raise_if(self, exception, message, *args, **kwargs):
if issubclass(exception, self.minimum_defect):
raise exception(*args, **kwargs)
warn(message, SyntaxWarning, *args, **kwargs) | If the given exception has a lower priority than the minimum defect, a subclass of
this class only warns the user; otherwise, the exception is raised normally. |
384 | def k8s_ports_to_metadata_ports(k8s_ports):
ports = []
for k8s_port in k8s_ports:
if k8s_port.protocol is not None:
ports.append("%s/%s" % (k8s_port.port, k8s_port.protocol.lower()))
else:
ports.append(str(k8s_port.port))
return ports | :param k8s_ports: list of V1ServicePort
:return: list of str, list of exposed ports, example:
- ['1234/tcp', '8080/udp'] |
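A hedged usage sketch with the official kubernetes Python client; port values are illustrative:
from kubernetes.client import V1ServicePort

k8s_ports = [
    V1ServicePort(port=1234, protocol="TCP"),
    V1ServicePort(port=8080, protocol="UDP"),
    V1ServicePort(port=9000),                  # protocol left unset
]
print(k8s_ports_to_metadata_ports(k8s_ports))  # ['1234/tcp', '8080/udp', '9000']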
385 | def set_widgets(self):
self.parent.step_fc_agglayer_from_canvas.\
list_compatible_canvas_layers()
lst_wdg = self.parent.step_fc_agglayer_from_canvas.lstCanvasAggLayers
if lst_wdg.count():
self.rbAggLayerFromCanvas.setText(tr(
) % self.parent.keyword_creation_wizard_name)
self.rbAggLayerFromCanvas.setEnabled(True)
self.rbAggLayerFromCanvas.click()
else:
self.rbAggLayerFromCanvas.setText(tr(
))
self.rbAggLayerFromCanvas.setEnabled(False)
self.rbAggLayerFromBrowser.click()
self.lblIconIFCWAggregationOrigin.setPixmap(QPixmap(None)) | Set widgets on the Aggregation Layer Origin Type tab. |
386 | def summary(self):
if self.hasSummary:
if self.numClasses <= 2:
return BinaryLogisticRegressionTrainingSummary(super(LogisticRegressionModel,
self).summary)
else:
return LogisticRegressionTrainingSummary(super(LogisticRegressionModel,
self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__) | Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`. |
387 | def set_creator(self, value: Union[Literal, Identifier, str], lang: str= None):
self.metadata.add(key=DC.creator, value=value, lang=lang) | Set the DC Creator literal value
:param value: Value of the creator node
:param lang: Language in which the value is |
388 | def get_clan(self, tag: crtag, timeout: int=None):
url = self.api.CLAN + + tag
return self._get_model(url, FullClan, timeout=timeout) | Get information about a clan
Parameters
----------
tag: str
A valid clan tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout |
389 | def CSWAP(control, target_1, target_2):
qubits = [unpack_qubit(q) for q in (control, target_1, target_2)]
return Gate(name="CSWAP", params=[], qubits=qubits) | Produces a controlled-SWAP gate. This gate conditionally swaps the state of two qubits::
CSWAP = [[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1]]
:param control: The control qubit.
:param target_1: The first target qubit.
:param target_2: The second target qubit. The two target states are swapped if the control is
in the ``|1>`` state. |
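A hedged usage sketch with pyQuil, using the constructor defined above; qubit indices are illustrative:
from pyquil import Program
from pyquil.gates import X

p = Program()
p += X(0)              # control -> |1>
p += X(1)              # target_1 -> |1>, target_2 stays |0>
p += CSWAP(0, 1, 2)    # control is |1>, so the two target states are swapped
print(p)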
390 | def whitelisted(argument=None):
def is_whitelisted(remote_ip, whitelist):
user_ip = ipaddr.IPv4Address(remote_ip)
if any([user_ip in ipaddr.IPv4Network(entry) for entry in whitelist]):
return True
return False
if type(argument) is types.FunctionType:
def wrapper(self, *args, **kwargs):
if not in self.application.settings:
raise ValueError()
if is_whitelisted(self.request.remote_ip,
self.application.settings[]):
return argument(self, *args, **kwargs)
raise web.HTTPError(403)
return wrapper
else:
if isinstance(argument, str):
argument = [argument]
def argument_wrapper(method):
def validate(self, *args, **kwargs):
if is_whitelisted(self.request.remote_ip, argument):
return method(self, *args, **kwargs)
raise web.HTTPError(403)
return validate
return argument_wrapper | Decorates a method requiring that the requesting IP address is
whitelisted. Requires a whitelist value as a list in the
Application.settings dictionary. IP addresses can be an individual IP
address or a subnet.
Examples:
['10.0.0.0/8','192.168.1.0/24', '1.2.3.4/32']
:param list argument: List of whitelisted ip addresses or blocks
:raises: web.HTTPError
:raises: ValueError
:rtype: any |
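A hedged usage sketch with Tornado handlers showing both decorator forms; the settings key consumed by the bare form is stripped in the code above (presumably 'whitelist'), and all addresses are illustrative:
from tornado import web

class SettingsWhitelistedHandler(web.RequestHandler):

    @whitelisted                                      # bare form: whitelist read from Application.settings
    def get(self):
        self.write("ok")

class ExplicitWhitelistHandler(web.RequestHandler):

    @whitelisted(["10.0.0.0/8", "192.168.1.0/24"])    # explicit form: whitelist passed to the decorator
    def get(self):
        self.write("ok")

app = web.Application(
    [(r"/settings", SettingsWhitelistedHandler), (r"/explicit", ExplicitWhitelistHandler)],
    whitelist=["127.0.0.1/32"],                       # consumed by the bare form (key name assumed)
)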
391 | def insertDataset(self, businput):
if not ("primary_ds_name" in businput and "dataset" in businput
and "dataset_access_type" in businput and "processed_ds_name" in businput ):
dbsExceptionHandler(, "business/DBSDataset/insertDataset must have dataset,\
dataset_access_type, primary_ds_name, processed_ds_name as input")
if "data_tier_name" not in businput:
dbsExceptionHandler(, "insertDataset must have data_tier_name as input.")
conn = self.dbi.connection()
tran = conn.begin()
try:
dsdaoinput = {}
dsdaoinput["primary_ds_name"] = businput["primary_ds_name"]
dsdaoinput["data_tier_name"] = businput["data_tier_name"].upper()
dsdaoinput["dataset_access_type"] = businput["dataset_access_type"].upper()
if "acquisition_era_name" in businput and "processing_version" in businput:
erals=businput["processed_ds_name"].rsplit()
if erals[0]==businput["acquisition_era_name"] and erals[len(erals)-1]=="%s%s"%("v", businput["processing_version"]):
dsdaoinput["processed_ds_name"] = businput["processed_ds_name"]
else:
dbsExceptionHandler(, "insertDataset:\
processed_ds_name=acquisition_era_name[-filter_name][-processing_str]-vprocessing_version must be satisified.")
else:
dbsExceptionHandler("dbsException-missing-data", "insertDataset: Required acquisition_era_name or processing_version is not found in the input")
if "physics_group_name" in businput:
dsdaoinput["physics_group_id"] = self.phygrpid.execute(conn, businput["physics_group_name"])
if dsdaoinput["physics_group_id"] == -1:
dbsExceptionHandler("dbsException-missing-data", "insertDataset. physics_group_name not found in DB")
else:
dsdaoinput["physics_group_id"] = None
dsdaoinput["dataset_id"] = self.sm.increment(conn, "SEQ_DS")
dsdaoinput.update({
"dataset" : "/%s/%s/%s" %
(businput["primary_ds_name"],
businput["processed_ds_name"],
businput["data_tier_name"].upper()),
"prep_id" : businput.get("prep_id", None),
"xtcrosssection" : businput.get("xtcrosssection", None),
"creation_date" : businput.get("creation_date", dbsUtils().getTime() ),
"create_by" : businput.get("create_by", dbsUtils().getCreateBy()) ,
"last_modification_date" : businput.get("last_modification_date", dbsUtils().getTime()),
"last_modified_by" : dbsUtils().getModifiedBy()
})
if "processing_version" in businput and businput["processing_version"] != 0:
dsdaoinput["processing_era_id"] = self.proceraid.execute(conn, businput["processing_version"])
if dsdaoinput["processing_era_id"] == -1 :
dbsExceptionHandler("dbsException-missing-data", "DBSDataset/insertDataset: processing_version not found in DB")
else:
dbsExceptionHandler("dbsException-invalid-input", "DBSDataset/insertDataset: processing_version is required")
if "acquisition_era_name" in businput:
dsdaoinput["acquisition_era_id"] = self.acqeraid.execute(conn, businput["acquisition_era_name"])
if dsdaoinput["acquisition_era_id"] == -1:
dbsExceptionHandler("dbsException-missing-data", "DBSDataset/insertDataset: acquisition_era_name not found in DB")
else:
dbsExceptionHandler("dbsException-invalid-input", "DBSDataset/insertDataset: acquisition_era_name is required")
try:
self.datasetin.execute(conn, dsdaoinput, tran)
except SQLAlchemyIntegrityError as ex:
if (str(ex).lower().find("unique constraint") != -1 or
str(ex).lower().find("duplicate") != -1):
self.logger.warning(
"Unique constraint violation being ignored...")
self.logger.warning("%s" % ex)
ds = "/%s/%s/%s" % (businput["primary_ds_name"], businput["processed_ds_name"], businput["data_tier_name"].upper())
dsdaoinput["dataset_id"] = self.datasetid.execute(conn, ds )
if dsdaoinput["dataset_id"] == -1 :
dbsExceptionHandler("dbsException-missing-data", "DBSDataset/insertDataset. Strange error, the dataset %s does not exist ?"
% ds )
if (str(ex).find("ORA-01400") ) != -1 :
dbsExceptionHandler("dbsException-missing-data", "insertDataset must have: dataset,\
primary_ds_name, processed_ds_name, data_tier_name ")
except Exception as e:
raise
if "output_configs" in businput:
for anOutConfig in businput["output_configs"]:
dsoutconfdaoin = {}
dsoutconfdaoin["dataset_id"] = dsdaoinput["dataset_id"]
dsoutconfdaoin["output_mod_config_id"] = self.outconfigid.execute(conn, anOutConfig["app_name"],
anOutConfig["release_version"],
anOutConfig["pset_hash"],
anOutConfig["output_module_label"],
anOutConfig["global_tag"])
if dsoutconfdaoin["output_mod_config_id"] == -1 :
dbsExceptionHandler("dbsException-missing-data", "DBSDataset/insertDataset: Output config (%s, %s, %s, %s, %s) not found"
% (anOutConfig["app_name"],
anOutConfig["release_version"],
anOutConfig["pset_hash"],
anOutConfig["output_module_label"],
anOutConfig["global_tag"]))
try:
self.datasetoutmodconfigin.execute(conn, dsoutconfdaoin, tran)
except Exception as ex:
if str(ex).lower().find("unique constraint") != -1 or str(ex).lower().find("duplicate") != -1:
pass
else:
raise
tran.commit()
tran = None
except Exception:
if tran:
tran.rollback()
tran = None
raise
finally:
if tran:
tran.rollback()
if conn:
conn.close() | input dictionary must have the following keys:
dataset, primary_ds_name(name), processed_ds(name), data_tier(name),
acquisition_era(name), processing_version
It may have following keys:
physics_group(name), xtcrosssection, creation_date, create_by,
last_modification_date, last_modified_by |
392 | def _get_model(vehicle):
model = vehicle[]
model = model.replace(vehicle[], )
model = model.replace(vehicle[], )
return model.strip().split()[0] | Clean the model field. Best guess. |
393 | def mkdir(self, path, parents=True, mode=0o755, raise_if_exists=False):
if not parents or raise_if_exists:
warnings.warn()
permission = int(oct(mode)[2:])
self.client.makedirs(path, permission=permission) | Has no return value (just like WebHDFS) |
394 | def cur_time(typ=, tz=DEFAULT_TZ, trading=True, cal=):
dt = pd.Timestamp(, tz=tz)
if typ == :
if trading: return trade_day(dt=dt, cal=cal).strftime()
else: return dt.strftime()
if typ == : return dt.strftime()
if typ == : return dt.strftime()
if typ == : return dt
return trade_day(dt).date() if trading else dt.date() | Current time
Args:
typ: one of ['date', 'time', 'time_path', 'raw', '']
tz: timezone
trading: check if current date is trading day
cal: trading calendar
Returns:
relevant current time or date
Examples:
>>> cur_dt = pd.Timestamp('now')
>>> cur_time(typ='date', trading=False) == cur_dt.strftime('%Y-%m-%d')
True
>>> cur_time(typ='time', trading=False) == cur_dt.strftime('%Y-%m-%d %H:%M:%S')
True
>>> cur_time(typ='time_path', trading=False) == cur_dt.strftime('%Y-%m-%d/%H-%M-%S')
True
>>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp)
True
>>> isinstance(cur_time(typ='raw', trading=True), pd.Timestamp)
True
>>> cur_time(typ='', trading=False) == cur_dt.date()
True |
395 | def _worker_queue_scheduled_tasks(self):
queues = set(self._filter_queues(self.connection.smembers(
self._key(SCHEDULED))))
now = time.time()
for queue in queues:
self._did_work = True | Helper method that takes due tasks from the SCHEDULED queue and puts
them in the QUEUED queue for execution. This should be called
periodically. |
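The loop body above appears truncated; a generic, hedged sketch of the move-due-tasks pattern with redis-py sorted sets (key names and payload handling are illustrative, not the library's actual schema):
import time
import redis

r = redis.Redis()

def move_due_tasks(queue):
    scheduled_key = "t:scheduled:%s" % queue    # sorted set scored by due time (illustrative)
    queued_key = "t:queued:%s" % queue          # list of ready task ids (illustrative)
    now = time.time()
    due = r.zrangebyscore(scheduled_key, "-inf", now)
    for task_id in due:
        # remove-then-push; a real implementation would do this atomically (e.g. via a Lua script)
        if r.zrem(scheduled_key, task_id):
            r.rpush(queued_key, task_id)
    return len(due)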
396 | def insertTopLevelItem( self, index, item ):
self.treeWidget().insertTopLevelItem(index, item)
if self.updatesEnabled():
try:
item.sync(recursive = True)
except AttributeError:
pass | Inserts the given item at the given index in the tree.
:param index | <int>
item | <XGanttWidgetItem> |
397 | def make_symbols(symbols, *args):
if (hasattr(symbols, ) and not any(symbols)) \
or (isinstance(symbols, (list, tuple, Mapping)) and not symbols):
return []
if isinstance(symbols, basestring):
return [s.upper().strip() for s in (symbols.split() + list(str(a) for a in args))]
else:
ans = []
for sym in (list(symbols) + list(args)):
tmp = make_symbols(sym)
ans = ans + tmp
return list(set(ans)) | Return a list of uppercase strings like "GOOG", "$SPX", "XOM"...
Arguments:
symbols (str or list of str): list of market ticker symbols to normalize
If `symbols` is a str a get_symbols_from_list() call is used to retrieve the list of symbols
Returns:
list of str: list of canonical ticker symbol strings (typically after .upper().strip())
See Also:
pug.dj.db.normalize_names
Examples:
>>> make_symbols("Goog")
['GOOG']
>>> make_symbols(" $SPX ", " aaPL ")
['$SPX', 'AAPL']
>>> make_symbols(["$SPX", ["GOOG", "AAPL"]])
['GOOG', 'AAPL', '$SPX']
>>> make_symbols(" $Spy, Goog, aAPL ")
['$SPY', 'GOOG', 'AAPL'] |
398 | def win_menu_select_item(title, *items, **kwargs):
text = kwargs.get("text", "")
if not (0 < len(items) < 8):
raise ValueError("accepted none item or number of items exceed eight")
f_items = [LPCWSTR(item) for item in items]
for i in xrange(8 - len(f_items)):
f_items.append(LPCWSTR(""))
ret = AUTO_IT.AU3_WinMenuSelectItem(LPCWSTR(title), LPCWSTR(text),
*f_items)
return ret | Usage:
win_menu_select_item("[CLASS:Notepad]", "", u"文件(&F)", u"退出(&X)")
:param title:
:param text:
:param items:
:return: |
399 | def handle_no_start_state(self):
start_state = self.get_start_state(set_final_outcome=True)
while not start_state:
execution_signal = state_machine_execution_engine.handle_execution_mode(self)
if execution_signal is StateMachineExecutionStatus.STOPPED:
return None
self._transitions_cv.acquire()
self._transitions_cv.wait(3.0)
self._transitions_cv.release()
start_state = self.get_start_state(set_final_outcome=True)
return start_state | Handles the situation when no start state exists during execution.
The method waits until a transition is created. It then checks again for an existing start state and waits
again if none exists yet. It returns None if the state machine was stopped. |